content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
import tensorflow as tf from onnx_tf.handlers.backend_handler import BackendHandler from onnx_tf.handlers.handler import onnx_op from onnx_tf.handlers.handler import tf_func
[ 11748, 11192, 273, 11125, 355, 48700, 198, 198, 6738, 319, 77, 87, 62, 27110, 13, 4993, 8116, 13, 1891, 437, 62, 30281, 1330, 5157, 437, 25060, 198, 6738, 319, 77, 87, 62, 27110, 13, 4993, 8116, 13, 30281, 1330, 319, 77, 87, 62, 4...
2.885246
61
from http import HTTPStatus import time import logging import pytest import grequests from flask import url_for from eth_utils import ( to_checksum_address, to_canonical_address, is_checksum_address, ) from raiden_contracts.constants import ( CONTRACT_HUMAN_STANDARD_TOKEN, MAX_TOKENS_DEPLOY, TEST_SETTLE_TIMEOUT_MIN, TEST_SETTLE_TIMEOUT_MAX, ) from raiden.api.v1.encoding import ( AddressField, HexAddressConverter, ) from raiden.transfer.state import ( CHANNEL_STATE_OPENED, CHANNEL_STATE_CLOSED, ) from raiden.tests.utils import assert_dicts_are_equal from raiden.tests.utils.client import burn_all_eth from raiden.tests.utils.smartcontracts import deploy_contract_web3 # pylint: disable=too-many-locals,unused-argument,too-many-lines def test_url_with_invalid_address(rest_api_port_number, api_backend): """ Addresses require the leading 0x in the urls. """ url_without_prefix = ( 'http://localhost:{port}/api/1/' 'channels/ea674fdde714fd979de3edf0f56aa9716b898ec8' ).format(port=rest_api_port_number) request = grequests.patch( url_without_prefix, json=dict(state='CHANNEL_STATE_SETTLED'), ) response = request.send().response assert_response_with_code(response, HTTPStatus.NOT_FOUND) def test_api_close_insufficient_eth( api_backend, token_addresses, reveal_timeout, ): # let's create a new channel partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9' token_address = token_addresses[0] settle_timeout = 1650 channel_data_obj = { 'partner_address': partner_address, 'token_address': to_checksum_address(token_address), 'settle_timeout': settle_timeout, } request = grequests.put( api_url_for( api_backend, 'channelsresource', ), json=channel_data_obj, ) response = request.send().response balance = 0 assert_proper_response(response, status_code=HTTPStatus.CREATED) response = response.json() expected_response = channel_data_obj expected_response['balance'] = balance expected_response['state'] = CHANNEL_STATE_OPENED expected_response['reveal_timeout'] = reveal_timeout 
expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE assert_dicts_are_equal(response, expected_response) # let's burn all eth and try to close the channel api_server, _ = api_backend burn_all_eth(api_server.rest_api.raiden_api.raiden) request = grequests.patch( api_url_for( api_backend, 'channelsresourcebytokenandpartneraddress', token_address=token_address, partner_address=partner_address, ), json={'state': CHANNEL_STATE_CLOSED}, ) response = request.send().response assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED) response = response.json() assert 'Insufficient ETH' in response['errors'] #demo #demo def test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses,cross_id): _, app1 = raiden_network api_server, _ = api_backend cross_id = cross_id request = grequests.get( api_url_for( api_backend, 'getcrosstransactionbyid', cross_id = cross_id, ) ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) assert response.json() != [] def test_api_crosstransation_hash(api_backend, raiden_network, token_addresses,hash_r): _, app1 = raiden_network api_server, _ = api_backend hash_r = str(hash_r) request = grequests.get( api_url_for( api_backend, 'recivehashresource', hash_r = hash_r, ) ) response = request.send().response assert_proper_response(response, HTTPStatus.OK) assert response.json() == 'hash_r is ok'
[ 6738, 2638, 1330, 14626, 19580, 198, 198, 11748, 640, 198, 11748, 18931, 198, 11748, 12972, 9288, 198, 11748, 308, 8897, 3558, 198, 6738, 42903, 1330, 19016, 62, 1640, 198, 6738, 4555, 62, 26791, 1330, 357, 198, 220, 220, 220, 284, 62, ...
2.386324
1,711
import numpy as np import pandas as pd import random import time import sys if sys.version_info.major == 2: import Tkinter as tk else: import tkinter as tk # if __name__ == '__main__': # server_attribute = pd.DataFrame(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, # 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, # 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, # 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, # 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, # 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, # 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]). # reshape(8, 24), # columns=np.arange(24)) # env = Cluster(state_init(), server_attribute) # Qss = env.QSs # print(Qss) # for i in range(len(Qss)): # q = i # for j in range(len(server_attribute)): # index_server = j # print(env.cost_init) # print("The reward of initial state is:") # print(env.reward(env.cost_all(env.cost_init), env.state_init)) # print(env.state_init) # actions=list(range(env.n_actions)) # print(actions) # env.after(100, update) # env.mainloop()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 4738, 198, 11748, 640, 198, 11748, 25064, 198, 361, 25064, 13, 9641, 62, 10951, 13, 22478, 6624, 362, 25, 198, 220, 220, 220, 1330, 309, 74, 3849, 355, ...
1.570671
1,132
# """ # Runs libEnsemble on the 6-hump camel problem. Documented here: # https://www.sfu.ca/~ssurjano/camel6.html # # Execute via the following command: # mpiexec -np 4 python3 test_6-hump_camel_elapsed_time_abort.py # The number of concurrent evaluations of the objective function will be 4-1=3. # """ from __future__ import division from __future__ import absolute_import from __future__ import print_function from mpi4py import MPI # for libE communicator import sys, os # for adding to path import numpy as np # Import libEnsemble main from libensemble.libE import libE # Import sim_func from libensemble.sim_funcs.six_hump_camel import six_hump_camel # Import gen_func from libensemble.gen_funcs.uniform_sampling import uniform_random_sample script_name = os.path.splitext(os.path.basename(__file__))[0] #State the objective function, its arguments, output, and necessary parameters (and their sizes) sim_specs = {'sim_f': six_hump_camel, # This is the function whose output is being minimized 'in': ['x'], # These keys will be given to the above function 'out': [('f',float), # This is the output from the function being minimized ], 'pause_time': 2, # 'save_every_k': 10 } # State the generating function, its arguments, output, and necessary parameters. 
gen_specs = {'gen_f': uniform_random_sample, 'in': ['sim_id'], 'out': [('x',float,2), ], 'lb': np.array([-3,-2]), 'ub': np.array([ 3, 2]), 'gen_batch_size': 5, 'num_active_gens': 1, 'batch_mode': False, # 'save_every_k': 10 } # Tell libEnsemble when to stop exit_criteria = {'elapsed_wallclock_time': 1} np.random.seed(1) persis_info = {} for i in range(MPI.COMM_WORLD.Get_size()): persis_info[i] = {'rand_stream': np.random.RandomState(i)} # Perform the run H, persis_info, flag = libE(sim_specs, gen_specs, exit_criteria, persis_info) if MPI.COMM_WORLD.Get_rank() == 0: eprint(flag) eprint(H) assert flag == 2 short_name = script_name.split("test_", 1).pop() filename = short_name + '_results_History_length=' + str(len(H)) + '_evals=' + str(sum(H['returned'])) + '_ranks=' + str(MPI.COMM_WORLD.Get_size()) print("\n\n\nRun completed.\nSaving results to file: " + filename) # if flag == 2: # print("\n\n\nKilling COMM_WORLD") # MPI.COMM_WORLD.Abort()
[ 2, 37227, 198, 2, 44743, 9195, 4834, 15140, 319, 262, 718, 12, 71, 931, 41021, 1917, 13, 16854, 276, 994, 25, 198, 2, 220, 220, 220, 3740, 1378, 2503, 13, 82, 20942, 13, 6888, 14, 93, 824, 333, 73, 5733, 14, 66, 17983, 21, 13, ...
2.33519
1,077
import os import argparse from . import common argparser = argparse.ArgumentParser(add_help=False) graph_group = argparser.add_argument_group('graphtool arguments') graph_group.add_argument('--graph-jar', metavar='<graphtool-jar>', action='store',default=None, dest='graph_jar', help='Path to prog2dfg.jar or apilearner.jar')
[ 11748, 28686, 198, 11748, 1822, 29572, 198, 6738, 764, 1330, 2219, 198, 198, 853, 48610, 796, 1822, 29572, 13, 28100, 1713, 46677, 7, 2860, 62, 16794, 28, 25101, 8, 198, 34960, 62, 8094, 796, 1822, 48610, 13, 2860, 62, 49140, 62, 8094...
2.423077
156
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html from boss_grabbing.sqlite import Sqlite
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 2896, 500, 534, 2378, 31108, 994, 198, 2, 198, 2, 2094, 470, 6044, 284, 751, 534, 11523, 284, 262, 7283, 3620, 62, 47, 4061, 3698, 1268, 1546, 4634, 198, 2, ...
2.701149
87
from django.urls import path from .views import * from django_filters.views import FilterView app_name = 'jobs' urlpatterns = [ path('', FilterView.as_view(filterset_class=JobFilter, template_name='jobs/job_list.html'), name='index'), path('companies/', CompanyListView.as_view(), name='companies'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 6738, 764, 33571, 1330, 1635, 198, 6738, 42625, 14208, 62, 10379, 1010, 13, 33571, 1330, 25853, 7680, 198, 198, 1324, 62, 3672, 796, 705, 43863, 6, 198, 198, 6371, 33279, 82, 796, 685...
2.47482
139
from __future__ import print_function from __future__ import absolute_import from __future__ import division from compas.geometry import transform_points_numpy __all__ = [ 'mesh_transform_numpy', 'mesh_transformed_numpy', ] def mesh_transform_numpy(mesh, transformation): """Transform a mesh. Parameters ---------- mesh : compas.datastructures.Mesh The mesh. transformation : compas.geometry.Transformation The transformation. Notes ----- The mesh is modified in-place. Examples -------- >>> mesh = Mesh.from_obj(compas.get('cube.obj')) >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4) >>> tmesh = mesh.copy() >>> mesh_transform(tmesh, T) """ vertices = list(mesh.vertices()) xyz = [mesh.vertex_coordinates(vertex) for vertex in vertices] xyz[:] = transform_points_numpy(xyz, transformation) for index, vertex in enumerate(vertices): mesh.vertex_attributes(vertex, 'xyz', xyz[index]) def mesh_transformed_numpy(mesh, transformation): """Transform a copy of ``mesh``. Parameters ---------- mesh : compas.datastructures.Mesh The mesh. transformation : compas.geometry.Transformation The transformation. Returns ------- Mesh A transformed independent copy of ``mesh``. Notes ----- The original mesh is not modified. Instead a transformed independent copy is returned. Examples -------- >>> mesh = Mesh.from_obj(compas.get('cube.obj')) >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4) >>> tmesh = mesh_transformed(mesh, T) """ mesh_copy = mesh.copy() mesh_transform_numpy(mesh_copy, transformation) return mesh_copy # ============================================================================== # Main # ============================================================================== if __name__ == "__main__": import doctest doctest.testmod(globs=globals())
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 198, 6738, 552, 292, 13, 469, 15748, 1330, 6121, 62, 13033, 62, 77, 32152, 628, 198, 83...
2.827004
711
from cereal import car from common.realtime import DT_CTRL from common.numpy_fast import interp from common.realtime import sec_since_boot from selfdrive.config import Conversions as CV from selfdrive.car import apply_std_steer_torque_limits from selfdrive.car.gm import gmcan from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams from opendbc.can.packer import CANPacker VisualAlert = car.CarControl.HUDControl.VisualAlert
[ 6738, 33158, 1330, 1097, 198, 6738, 2219, 13, 5305, 2435, 1330, 24311, 62, 4177, 7836, 198, 6738, 2219, 13, 77, 32152, 62, 7217, 1330, 987, 79, 198, 6738, 2219, 13, 5305, 2435, 1330, 792, 62, 20777, 62, 18769, 198, 6738, 2116, 19472, ...
3.390977
133
#********************************************************************** # Copyright 2020 Advanced Micro Devices, Inc # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #******************************************************************** import traceback import bpy bl_info = { "name": "Radeon ProRender", "author": "AMD", "version": (3, 1, 0), "blender": (2, 80, 0), "location": "Info header, render engine menu", "description": "Radeon ProRender rendering plugin for Blender 2.8x", "warning": "", "tracker_url": "", "wiki_url": "", "category": "Render" } version_build = "" from .utils import logging, version_updater from .utils import install_libs from .engine.engine import Engine from . 
import ( nodes, properties, ui, operators, material_library, ) from .engine.render_engine import RenderEngine from .engine.render_engine_2 import RenderEngine2 from .engine.preview_engine import PreviewEngine from .engine.viewport_engine import ViewportEngine from .engine.viewport_engine_2 import ViewportEngine2 from .engine.animation_engine import AnimationEngine, AnimationEngine2 from .engine.render_engine_hybrid import RenderEngine as RenderEngineHybrid from .engine.viewport_engine_hybrid import ViewportEngine as ViewportEngineHybrid from .engine.animation_engine_hybrid import AnimationEngine as AnimationEngineHybrid log = logging.Log(tag='init') log("Loading RPR addon {}".format(bl_info['version'])) render_engine_cls = { 'FULL': RenderEngine, 'HIGH': RenderEngineHybrid, 'MEDIUM': RenderEngineHybrid, 'LOW': RenderEngineHybrid, 'FULL2': RenderEngine2, } animation_engine_cls = { 'FULL': AnimationEngine, 'HIGH': AnimationEngineHybrid, 'MEDIUM': AnimationEngineHybrid, 'LOW': AnimationEngineHybrid, 'FULL2': AnimationEngine2, } viewport_engine_cls = { 'FULL': ViewportEngine, 'HIGH': ViewportEngineHybrid, 'MEDIUM': ViewportEngineHybrid, 'LOW': ViewportEngineHybrid, 'FULL2': ViewportEngine2, } def register(): """ Register all addon classes in Blender """ log("register") install_libs.ensure_boto3() bpy.utils.register_class(RPREngine) material_library.register() properties.register() operators.register() nodes.register() ui.register() bpy.app.handlers.save_pre.append(on_save_pre) bpy.app.handlers.load_pre.append(on_load_pre) bpy.app.handlers.version_update.append(on_version_update) def unregister(): """ Unregister all addon classes from Blender """ log("unregister") bpy.app.handlers.version_update.remove(on_version_update) bpy.app.handlers.load_pre.remove(on_load_pre) bpy.app.handlers.save_pre.remove(on_save_pre) ui.unregister() nodes.unregister() operators.unregister() properties.unregister() material_library.unregister() bpy.utils.unregister_class(RPREngine)
[ 2, 17174, 17174, 2466, 1174, 198, 2, 15069, 12131, 13435, 4527, 29362, 11, 3457, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 3...
2.99561
1,139
""" Plots reconstruction error vs semantic space dimensionality Usage: python metric_err_vs_dim.py Author(s): Wei Chen (wchen459@umd.edu) """ import matplotlib.pyplot as plt import numpy as np plt.rc("font", size=18) examples = ['glass', 'sf_linear', 'sf_s_nonlinear', 'sf_v_nonlinear'] titles = {'glass': 'Glass', 'sf_linear': 'Superformula (linear)', 'sf_s_nonlinear': 'Superformula (slightly nonlinear)', 'sf_v_nonlinear': 'Superformula (very nonlinear)'} n = len(examples) x = range(1, 6) for i in range(n): plt.figure() plt.xticks(np.arange(min(x), max(x)+1, dtype=np.int)) plt.xlabel('Semantic space dimensionality') plt.ylabel('Reconstruction error') plt.xlim(0.5, 5.5) errs = np.zeros((3,5)) for j in x: # Read reconstruction errors in rec_err.txt txtfile = open('./results/'+examples[i]+'/n_samples = 115/n_control_points = 20/semantic_dim = ' +str(j)+'/rec_err.txt', 'r') k = 0 for line in txtfile: errs[k, j-1] = float(line) k += 1 line_pca, = plt.plot(x, errs[0], '-ob', label='PCA') line_kpca, = plt.plot(x, errs[1], '-vg', label='Kernel PCA') line_ae, = plt.plot(x, errs[2], '-sr', label='Autoencoder') plt.legend(handles=[line_pca, line_kpca, line_ae], fontsize=16) plt.title(titles[examples[i]]) fig_name = 'err_vs_dim_'+examples[i]+'.png' plt.tight_layout() plt.savefig('./results/'+fig_name, dpi=300) print fig_name+' saved!'
[ 37811, 198, 3646, 1747, 25056, 4049, 3691, 37865, 2272, 15793, 1483, 198, 198, 28350, 25, 21015, 18663, 62, 8056, 62, 14259, 62, 27740, 13, 9078, 198, 198, 13838, 7, 82, 2599, 29341, 12555, 357, 86, 6607, 33459, 31, 388, 67, 13, 15532...
2.040523
765
# Copyright (C) 2020-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \ check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern
[ 2, 15069, 357, 34, 8, 12131, 12, 1238, 1828, 8180, 10501, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 198, 6738, 764, 33279, 62, 26791, 1330, 2198, 62, 69, 1484, 62, 9888, 62, 30846, 62, 33279, ...
2.793814
97
#!/usr/bin/python import random import os import errno for i in range(100): s=set() g=set() while len(s) < 50: s.add((random.randint(0,7),random.randint(0,7))) while len(g) < 50: g.add((random.randint(0,7),random.randint(0,7))) start=list(s) goal=list(g) for size in range(21,50): if not os.path.exists("./%d"%size): try: os.makedirs("./%d"%size) except OSError as exc: if exc.errno != errno.EEXIST: raise with open("./%d/%d.csv"%(size,i), "w") as f: for j in range(size): f.write("%d,%d %d,%d\n"%(start[j][0],start[j][1],goal[j][0],goal[j][1]))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 11748, 4738, 198, 11748, 28686, 198, 11748, 11454, 3919, 198, 198, 1640, 1312, 287, 2837, 7, 3064, 2599, 198, 220, 264, 28, 2617, 3419, 198, 220, 308, 28, 2617, 3419, 198, 220, 981, 18896, ...
1.893175
337
# -*- coding: utf-8 -*- import argparse import cv2 as cv import mediapipe as mp import sys import time if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--video_device", type=int, default=0) parser.add_argument("--video_file", type=str, default="") args = parser.parse_args() mp_pose = mp.solutions.pose mp_drawing = mp.solutions.drawing_utils with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose: cap = object() if args.video_file != "": cap = cv.VideoCapture(args.video_file) else: cap = cv.VideoCapture(args.video_device) if not cap.isOpened(): print("Cannot open camera device-0") sys.exit(-1) else: print("Video <width: {}, height: {}, fps: {}>".format( cap.get(cv.CAP_PROP_FRAME_WIDTH), cap.get(cv.CAP_PROP_FRAME_HEIGHT), cap.get(cv.CAP_PROP_FPS) )) fps = int(cap.get(cv.CAP_PROP_FPS)) frame_idx = 0 while 1: ret, frame = cap.read() if not ret: print("Cannot receive frame, exiting ...") break frame_idx += 1 st = time.time() # flip the frame horizontally for a later selfie-view display frame = cv.cvtColor(cv.flip(frame, 1), cv.COLOR_BGR2RGB) # to improve performance, optionally mark the frame as not writeable to pass by reference frame.flags.writeable = False results = pose.process(frame) frame.flags.writeable = True frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR) # draw the pose annotation on the frame mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS) ed = time.time() print("Used {:.3f} secs to process frame-{:05}".format(ed - st, frame_idx)) gap = 1000//fps - int(1000 * (ed - st)) if gap < 5: gap = 5 cv.imshow("pose_recognition_from_camera_demo", frame) if cv.waitKey(gap) & 0xFF == 27: break cap.release() cv.destroyAllWindows()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 1822, 29572, 198, 11748, 269, 85, 17, 355, 269, 85, 198, 11748, 16957, 499, 3757, 355, 29034, 198, 11748, 25064, 198, 11748, 640, 628, 198, 361, 11593, 3672, 834,...
1.981116
1,165
# Program name: atomic1D/reference/build_json.py # Author: Thomas Body # Author email: tajb500@york.ac.uk # Date of creation: 14 July 2017 # # # Makes data_dict and copies it into a .json file 'sd1d-case-05.json' filename = 'sd1d-case-05' from boutdata.collect import collect data_dict = {} # Normalisation factor for temperature - T * Tnorm returns in eV data_dict["Tnorm"] = collect("Tnorm") # Normalisation factor for density - N * Nnorm returns in m^-3 data_dict["Nnorm"] = collect("Nnorm") # Plasma pressure (normalised). Pe = 2 Ne Te => P/Ne = Te (and assume Ti=Te) data_dict["P"] = collect("P") # Electron density (normalised) data_dict["Ne"] = collect("Ne") # Neutral density (normalised) data_dict["Nn"] = collect("Nn") # Help for user data_dict["help"] = "Contains outputs from Boutprojects/SD1D/case-05 example. Created with data_dict_export.py - stored in Github.com/TBody/atomic1D/reference" from copy import deepcopy import numpy as np import json # Need to 'jsonify' the numpy arrays (i.e. convert to nested lists) so that they can be stored in plain-text # Deep-copy data to a new dictionary and then edit that one (i.e. break the data pointer association - keep data_dict unchanged in case you want to run a copy-verify on it) data_dict_jsonified = deepcopy(data_dict) numpy_ndarrays = []; for key, element in data_dict.items(): if type(element) == np.ndarray: # Store which keys correspond to numpy.ndarray, so that you can de-jsonify the arrays when reading numpy_ndarrays.append(key) data_dict_jsonified[key] = data_dict_jsonified[key].tolist() data_dict_jsonified['numpy_ndarrays'] = numpy_ndarrays # Encode help # >> data_dict['help'] = 'help string' # <<Use original filename, except with .json instead of .dat extension>> with open('{}.json'.format(filename),'w') as fp: json.dump(data_dict_jsonified, fp, sort_keys=True, indent=4)
[ 2, 6118, 1438, 25, 17226, 16, 35, 14, 35790, 14, 11249, 62, 17752, 13, 9078, 198, 2, 6434, 25, 5658, 12290, 198, 2, 6434, 3053, 25, 256, 1228, 65, 4059, 31, 88, 967, 13, 330, 13, 2724, 198, 2, 7536, 286, 6282, 25, 1478, 2901, ...
2.956454
643
from docx import Document def CompositeTwoDocs(srcDocFullName, dstDocFullName, compositeName): ''' srcDocFullName: dstDocFullName: compositeName: return: ->True->False ''' try: srcDoc = Document(srcDocFullName) dstDoc = Document(dstDocFullName) srcParasMap = {} # Heading 2 => [paras list] dstParasMap = {} # Heading 2 => [paras list] firstPage = False secondPage = False currentLabelStyleContent = None # # for srcPara in srcDoc.paragraphs: if (srcPara.style.name.find('Heading 2') >= 0 and srcPara.text.find(compositeName) >= 0): print('find {0}'.format(srcPara)) firstPage = True elif (srcPara.style.name.find('Heading 2') >= 0 and firstPage): secondPage = True break else: if (firstPage and not secondPage): if (srcPara.style.name.find('Heading 3') >= 0): srcParasMap[srcPara.text] = [] currentLabelStyleContent = srcPara.text else: if currentLabelStyleContent is None: raise ValueError('word') srcParasMap[currentLabelStyleContent].append(srcPara) firstPage = False secondPage = False currentLabelStyleContent = None # # for dstPara in dstDoc.paragraphs: if (dstPara.style.name.find('Heading 2') >= 0 and dstPara.text.find(compositeName) >= 0): print('find {0}'.format(dstPara)) firstPage = True elif (dstPara.style.name.find('Heading 2') >= 0 and firstPage): secondPage = True break else: if (firstPage and not secondPage): if (dstPara.style.name.find('Heading 3') >= 0): dstParasMap[dstPara.text] = [] currentLabelStyleContent = dstPara.text else: if currentLabelStyleContent is None: raise ValueError('word') dstParasMap[currentLabelStyleContent].append(dstPara) # for key, dstParas in dstParasMap.items(): srcParas = srcParasMap[key] if len(srcParas) <= 0: print('--{0}--'.format(key)) continue else: for index, item in enumerate(dstParas): if (index <= len(srcParas)): dstParas[index].text = srcParas[index].text else: print('{0}--{1}----{2}'.format(key, index, len(srcParas))) dstDoc.save(dstDocFullName) except Exception as e: print('...') print(e) return False return True if __name__ 
== '__main__': srcDocFullName = r'D:\\20208\-111\-111.docx' dstDocFullName = r'D:\\20208\-456\-456.docx' CompositeTwoDocs(srcDocFullName, dstDocFullName, '')
[ 6738, 2205, 87, 1330, 16854, 628, 198, 4299, 49355, 7571, 23579, 82, 7, 10677, 23579, 13295, 5376, 11, 29636, 23579, 13295, 5376, 11, 24185, 5376, 2599, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 12351, 23579, 13295, 5376, 25, ...
1.867225
1,672
import json
import time
from datetime import datetime

from sensors import sensor

# MQTT topic templates for tydom roller-shutter ("cover") devices.
# "{id}" is substituted with the tydom device id at runtime.

# Topic on which position commands are received.
cover_command_topic = "cover/tydom/{id}/set_positionCmd"
# Home Assistant MQTT discovery config topic (per the "homeassistant/.../config" convention).
cover_config_topic = "homeassistant/cover/tydom/{id}/config"
# State topic carrying the cover's current position.
cover_position_topic = "cover/tydom/{id}/current_position"
# Topic for setting a target position.
# NOTE(review): the variable name "postion" looks like a typo ("position"),
# but renaming it would break importers elsewhere — confirm before fixing.
cover_set_postion_topic = "cover/tydom/{id}/set_position"
# Topic publishing the device's attribute payload.
cover_attributes_topic = "cover/tydom/{id}/attributes"
[ 11748, 33918, 198, 11748, 640, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 15736, 1330, 12694, 198, 198, 9631, 62, 21812, 62, 26652, 796, 366, 9631, 14, 774, 3438, 14, 90, 312, 92, 14, 2617, 62, 9150, 40109, 1, 198, 9631, 62...
2.869231
130
""" This is the default template for our main set of servers. This does NOT cover the content machines, which use content.py Common traits: * Use memcached, and cache-backed sessions * Use a MySQL 5.1 database """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import # Pylint gets confused by path.py instances, which report themselves as class # objects. As a result, pylint applies the wrong regex in validating names, # and throws spurious errors. Therefore, we disable invalid-name checking. # pylint: disable=invalid-name import datetime import dateutil from glob import glob import json import os from path import Path as path import pkgutil import platform from django.utils.translation import ugettext_lazy from django.conf import global_settings from celery_redis_sentinel import register from openedx.core.lib.logsettings import get_logger_config from path import Path as path from xmodule.modulestore.modulestore_settings import ( convert_module_store_setting_if_needed, update_module_store_settings, ) from ..common import * from .utils import Configuration, prefer_fun_video # Load custom configuration parameters from yaml files config = Configuration(os.path.dirname(__file__)) # edX has now started using "settings.ENV_TOKENS" and "settings.AUTH_TOKENS" everywhere in the # project, not just in the settings. Let's make sure our settings still work in this case ENV_TOKENS = config AUTH_TOKENS = config # SERVICE_VARIANT specifies name of the variant used, which decides what JSON # configuration files are read during startup. SERVICE_VARIANT = config("SERVICE_VARIANT", default=None) # CONFIG_ROOT specifies the directory where the JSON configuration # files are expected to be found. If not specified, use the project # directory. 
CONFIG_ROOT = path(config("CONFIG_ROOT", default=ENV_ROOT)) # CONFIG_PREFIX specifies the prefix of the JSON configuration files, # based on the service variant. If no variant is use, don't use a # prefix. CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else "" ################################ ALWAYS THE SAME ############################## RELEASE = config("RELEASE", default=None) DEBUG = False DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["debug"] = False # IMPORTANT: With this enabled, the server must always be behind a proxy that # strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise, # a user can fool our server into thinking it was an https connection. # See # https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header # for other warnings. SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") ###################################### CELERY ################################ CELERY_ALWAYS_EAGER = config("CELERY_ALWAYS_EAGER", default=False, formatter=bool) # Don't use a connection pool, since connections are dropped by ELB. BROKER_POOL_LIMIT = 0 BROKER_CONNECTION_TIMEOUT = 1 # For the Result Store, use the django cache named 'celery' CELERY_RESULT_BACKEND = config( "CELERY_RESULT_BACKEND", default="djcelery.backends.cache:CacheBackend" ) # When the broker is behind an ELB, use a heartbeat to refresh the # connection and to detect if it has been dropped. 
BROKER_HEARTBEAT = 60.0 BROKER_HEARTBEAT_CHECKRATE = 2 # Each worker should only fetch one message at a time CELERYD_PREFETCH_MULTIPLIER = 1 # Celery queues DEFAULT_PRIORITY_QUEUE = config( "DEFAULT_PRIORITY_QUEUE", default="edx.lms.core.default" ) HIGH_PRIORITY_QUEUE = config("HIGH_PRIORITY_QUEUE", default="edx.lms.core.high") LOW_PRIORITY_QUEUE = config("LOW_PRIORITY_QUEUE", default="edx.lms.core.low") HIGH_MEM_QUEUE = config("HIGH_MEM_QUEUE", default="edx.lms.core.high_mem") CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE CELERY_QUEUES = config( "CELERY_QUEUES", default={ DEFAULT_PRIORITY_QUEUE: {}, HIGH_PRIORITY_QUEUE: {}, LOW_PRIORITY_QUEUE: {}, HIGH_MEM_QUEUE: {}, }, formatter=json.loads, ) CELERY_ROUTES = "lms.celery.Router" # Force accepted content to "json" only. If we also accept pickle-serialized # messages, the worker will crash when it's running with a privileged user (even # if it's not the root user but a user belonging to the root group, which is our # case with OpenShift). CELERY_ACCEPT_CONTENT = ["json"] CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict ########################## NON-SECURE ENV CONFIG ############################## # Things like server locations, ports, etc. STATIC_ROOT_BASE = path("/edx/app/edxapp/staticfiles") STATIC_ROOT = STATIC_ROOT_BASE STATIC_URL = "/static/" STATICFILES_STORAGE = config( "STATICFILES_STORAGE", default="lms.envs.fun.storage.CDNProductionStorage" ) CDN_BASE_URL = config("CDN_BASE_URL", default=None) MEDIA_ROOT = path("/edx/var/edxapp/media/") MEDIA_URL = "/media/" # DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one DEFAULT_COURSE_ABOUT_IMAGE_URL = config( "DEFAULT_COURSE_ABOUT_IMAGE_URL", default=DEFAULT_COURSE_ABOUT_IMAGE_URL ) PLATFORM_NAME = config("PLATFORM_NAME", default=PLATFORM_NAME) # For displaying on the receipt. 
At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default PLATFORM_TWITTER_ACCOUNT = config( "PLATFORM_TWITTER_ACCOUNT", default=PLATFORM_TWITTER_ACCOUNT ) PLATFORM_FACEBOOK_ACCOUNT = config( "PLATFORM_FACEBOOK_ACCOUNT", default=PLATFORM_FACEBOOK_ACCOUNT ) SOCIAL_SHARING_SETTINGS = config( "SOCIAL_SHARING_SETTINGS", default=SOCIAL_SHARING_SETTINGS, formatter=json.loads ) # Social media links for the page footer SOCIAL_MEDIA_FOOTER_URLS = config( "SOCIAL_MEDIA_FOOTER_URLS", default=SOCIAL_MEDIA_FOOTER_URLS, formatter=json.loads ) CC_MERCHANT_NAME = config("CC_MERCHANT_NAME", default=PLATFORM_NAME) EMAIL_BACKEND = config( "EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend" ) EMAIL_FILE_PATH = config("EMAIL_FILE_PATH", default=None) EMAIL_HOST = config("EMAIL_HOST", default="localhost") EMAIL_PORT = config("EMAIL_PORT", default=25) # django default is 25 EMAIL_USE_TLS = config("EMAIL_USE_TLS", default=False) # django default is False HTTPS = config("HTTPS", default=HTTPS) SESSION_COOKIE_DOMAIN = config("SESSION_COOKIE_DOMAIN", default=None) SESSION_COOKIE_HTTPONLY = config( "SESSION_COOKIE_HTTPONLY", default=True, formatter=bool ) SESSION_COOKIE_SECURE = config( "SESSION_COOKIE_SECURE", default=SESSION_COOKIE_SECURE, formatter=bool ) SESSION_ENGINE = config("SESSION_ENGINE", default="redis_sessions.session") SESSION_SAVE_EVERY_REQUEST = config( "SESSION_SAVE_EVERY_REQUEST", default=SESSION_SAVE_EVERY_REQUEST, formatter=bool ) # Configuration to use session with redis # To use redis, change SESSION_ENGINE to "redis_sessions.session" SESSION_REDIS_HOST = config("SESSION_REDIS_HOST", default="redis") SESSION_REDIS_PORT = config("SESSION_REDIS_PORT", default=6379, formatter=int) SESSION_REDIS_DB = config("SESSION_REDIS_DB", default=1, formatter=int) SESSION_REDIS_PASSWORD = config("SESSION_REDIS_PASSWORD", default=None) SESSION_REDIS_PREFIX = config("SESSION_REDIS_PREFIX", default="session") SESSION_REDIS_SOCKET_TIMEOUT = config( 
"SESSION_REDIS_SOCKET_TIMEOUT", default=1, formatter=int ) SESSION_REDIS_RETRY_ON_TIMEOUT = config( "SESSION_REDIS_RETRY_ON_TIMEOUT", default=False, formatter=bool ) SESSION_REDIS = config( "SESSION_REDIS", default={ "host": SESSION_REDIS_HOST, "port": SESSION_REDIS_PORT, "db": SESSION_REDIS_DB, # db 0 is used for Celery Broker "password": SESSION_REDIS_PASSWORD, "prefix": SESSION_REDIS_PREFIX, "socket_timeout": SESSION_REDIS_SOCKET_TIMEOUT, "retry_on_timeout": SESSION_REDIS_RETRY_ON_TIMEOUT, }, formatter=json.loads, ) SESSION_REDIS_SENTINEL_LIST = config( "SESSION_REDIS_SENTINEL_LIST", default=None, formatter=json.loads ) SESSION_REDIS_SENTINEL_MASTER_ALIAS = config( "SESSION_REDIS_SENTINEL_MASTER_ALIAS", default=None ) REGISTRATION_EXTRA_FIELDS = config( "REGISTRATION_EXTRA_FIELDS", default=REGISTRATION_EXTRA_FIELDS, formatter=json.loads ) # Set the names of cookies shared with the marketing site # These have the same cookie domain as the session, which in production # usually includes subdomains. 
EDXMKTG_LOGGED_IN_COOKIE_NAME = config( "EDXMKTG_LOGGED_IN_COOKIE_NAME", default=EDXMKTG_LOGGED_IN_COOKIE_NAME ) EDXMKTG_USER_INFO_COOKIE_NAME = config( "EDXMKTG_USER_INFO_COOKIE_NAME", default=EDXMKTG_USER_INFO_COOKIE_NAME ) # Override feature by feature by whatever is being redefined in the settings.yaml file CONFIG_FEATURES = config("FEATURES", default={}, formatter=json.loads) FEATURES.update(CONFIG_FEATURES) LMS_BASE = config("LMS_BASE", default="localhost:8072") CMS_BASE = config("CMS_BASE", default="localhost:8082") LMS_ROOT_URL = config("LMS_ROOT_URL", default="http://{:s}".format(LMS_BASE)) LMS_INTERNAL_ROOT_URL = config("LMS_INTERNAL_ROOT_URL", default=LMS_ROOT_URL) SITE_NAME = config("SITE_NAME", default=LMS_BASE) ALLOWED_HOSTS = config( "ALLOWED_HOSTS", default=[LMS_BASE.split(":")[0]], formatter=json.loads ) if FEATURES.get("PREVIEW_LMS_BASE"): ALLOWED_HOSTS.append(FEATURES["PREVIEW_LMS_BASE"]) # allow for environments to specify what cookie name our login subsystem should use # this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can # happen with some browsers (e.g. 
Firefox) if config("SESSION_COOKIE_NAME", default=None): # NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this # being a str() SESSION_COOKIE_NAME = str(config("SESSION_COOKIE_NAME")) CACHE_REDIS_HOST = config("CACHE_REDIS_HOST", default="redis") CACHE_REDIS_PORT = config("CACHE_REDIS_PORT", default=6379, formatter=int) CACHE_REDIS_DB = config("CACHE_REDIS_DB", default=1, formatter=int) CACHE_REDIS_BACKEND = config( "CACHE_REDIS_BACKEND", default="django_redis.cache.RedisCache" ) CACHE_REDIS_URI = "redis://{}:{}/{}".format( CACHE_REDIS_HOST, CACHE_REDIS_PORT, CACHE_REDIS_DB ) CACHE_REDIS_CLIENT = config( "CACHE_REDIS_CLIENT", default="django_redis.client.DefaultClient" ) CACHES_DEFAULT_CONFIG = { "BACKEND": CACHE_REDIS_BACKEND, "LOCATION": CACHE_REDIS_URI, "OPTIONS": {"CLIENT_CLASS": CACHE_REDIS_CLIENT}, } if "Sentinel" in CACHE_REDIS_BACKEND: CACHES_DEFAULT_CONFIG["LOCATION"] = [(CACHE_REDIS_HOST, CACHE_REDIS_PORT)] CACHES_DEFAULT_CONFIG["OPTIONS"]["SENTINEL_SERVICE_NAME"] = config( "CACHE_REDIS_SENTINEL_SERVICE_NAME", default="mymaster" ) CACHES_DEFAULT_CONFIG["OPTIONS"]["REDIS_CLIENT_KWARGS"] = {"db": CACHE_REDIS_DB} CACHES = config( "CACHES", default={ "default": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "default"}), "general": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "general"}), "celery": dict(CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "celery"}), "mongo_metadata_inheritance": dict( CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "mongo_metadata_inheritance"} ), "openassessment_submissions": dict( CACHES_DEFAULT_CONFIG, **{"KEY_PREFIX": "openassessment_submissions"} ), "loc_cache": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "edx_location_mem_cache", }, # Cache backend used by Django 1.8 storage backend while processing static files "staticfiles": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "edx_location_mem_cache", }, }, formatter=json.loads, ) # Email overrides 
DEFAULT_FROM_EMAIL = config("DEFAULT_FROM_EMAIL", default=DEFAULT_FROM_EMAIL) DEFAULT_FEEDBACK_EMAIL = config( "DEFAULT_FEEDBACK_EMAIL", default=DEFAULT_FEEDBACK_EMAIL ) ADMINS = config("ADMINS", default=ADMINS, formatter=json.loads) SERVER_EMAIL = config("SERVER_EMAIL", default=SERVER_EMAIL) TECH_SUPPORT_EMAIL = config("TECH_SUPPORT_EMAIL", default=TECH_SUPPORT_EMAIL) CONTACT_EMAIL = config("CONTACT_EMAIL", default=CONTACT_EMAIL) BUGS_EMAIL = config("BUGS_EMAIL", default=BUGS_EMAIL) PAYMENT_SUPPORT_EMAIL = config("PAYMENT_SUPPORT_EMAIL", default=PAYMENT_SUPPORT_EMAIL) FINANCE_EMAIL = config("FINANCE_EMAIL", default=FINANCE_EMAIL) UNIVERSITY_EMAIL = config("UNIVERSITY_EMAIL", default=UNIVERSITY_EMAIL) PRESS_EMAIL = config("PRESS_EMAIL", default=PRESS_EMAIL) # Currency PAID_COURSE_REGISTRATION_CURRENCY = config( "PAID_COURSE_REGISTRATION_CURRENCY", default=["EUR", u"\N{euro sign}"] ) # Payment Report Settings PAYMENT_REPORT_GENERATOR_GROUP = config( "PAYMENT_REPORT_GENERATOR_GROUP", default=PAYMENT_REPORT_GENERATOR_GROUP ) # Bulk Email overrides BULK_EMAIL_DEFAULT_FROM_EMAIL = config( "BULK_EMAIL_DEFAULT_FROM_EMAIL", default=BULK_EMAIL_DEFAULT_FROM_EMAIL ) BULK_EMAIL_EMAILS_PER_TASK = config( "BULK_EMAIL_EMAILS_PER_TASK", default=BULK_EMAIL_EMAILS_PER_TASK, formatter=int ) BULK_EMAIL_DEFAULT_RETRY_DELAY = config( "BULK_EMAIL_DEFAULT_RETRY_DELAY", default=BULK_EMAIL_DEFAULT_RETRY_DELAY, formatter=int, ) BULK_EMAIL_MAX_RETRIES = config( "BULK_EMAIL_MAX_RETRIES", default=BULK_EMAIL_MAX_RETRIES, formatter=int ) BULK_EMAIL_INFINITE_RETRY_CAP = config( "BULK_EMAIL_INFINITE_RETRY_CAP", default=BULK_EMAIL_INFINITE_RETRY_CAP, formatter=int, ) BULK_EMAIL_LOG_SENT_EMAILS = config( "BULK_EMAIL_LOG_SENT_EMAILS", default=BULK_EMAIL_LOG_SENT_EMAILS, formatter=bool ) BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = config( "BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS", default=BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS, formatter=int, ) # We want Bulk Email running on the high-priority queue, so we 
define the # routing key that points to it. At the moment, the name is the same. # We have to reset the value here, since we have changed the value of the queue name. BULK_EMAIL_ROUTING_KEY = config("BULK_EMAIL_ROUTING_KEY", default=HIGH_PRIORITY_QUEUE) # We can run smaller jobs on the low priority queue. See note above for why # we have to reset the value here. BULK_EMAIL_ROUTING_KEY_SMALL_JOBS = LOW_PRIORITY_QUEUE # Theme overrides THEME_NAME = config("THEME_NAME", default=None) COMPREHENSIVE_THEME_DIR = path( config("COMPREHENSIVE_THEME_DIR", default=COMPREHENSIVE_THEME_DIR) ) # Marketing link overrides MKTG_URL_LINK_MAP = config("MKTG_URL_LINK_MAP", default={}, formatter=json.loads) SUPPORT_SITE_LINK = config("SUPPORT_SITE_LINK", default=SUPPORT_SITE_LINK) # Mobile store URL overrides MOBILE_STORE_URLS = config("MOBILE_STORE_URLS", default=MOBILE_STORE_URLS) # Timezone overrides TIME_ZONE = config("TIME_ZONE", default=TIME_ZONE) # Translation overrides LANGUAGES = config("LANGUAGES", default=LANGUAGES, formatter=json.loads) LANGUAGE_DICT = dict(LANGUAGES) LANGUAGE_CODE = config("LANGUAGE_CODE", default=LANGUAGE_CODE) USE_I18N = config("USE_I18N", default=USE_I18N) # Additional installed apps for app in config("ADDL_INSTALLED_APPS", default=[], formatter=json.loads): INSTALLED_APPS.append(app) WIKI_ENABLED = config("WIKI_ENABLED", default=WIKI_ENABLED, formatter=bool) local_loglevel = config("LOCAL_LOGLEVEL", default="INFO") # Configure Logging LOG_DIR = config("LOG_DIR", default=path("/edx/var/logs/edx"), formatter=path) DATA_DIR = config("DATA_DIR", default=path("/edx/app/edxapp/data"), formatter=path) # Default format for syslog logging standard_format = "%(asctime)s %(levelname)s %(process)d [%(name)s] %(filename)s:%(lineno)d - %(message)s" syslog_format = ( "[variant:lms][%(name)s][env:sandbox] %(levelname)s " "[{hostname} %(process)d] [%(filename)s:%(lineno)d] - %(message)s" ).format(hostname=platform.node().split(".")[0]) LOGGING = { "version": 1, 
"disable_existing_loggers": False, "handlers": { "local": { "formatter": "syslog_format", "class": "logging.StreamHandler", "level": "INFO", }, "tracking": { "formatter": "raw", "class": "logging.StreamHandler", "level": "DEBUG", }, "console": { "formatter": "standard", "class": "logging.StreamHandler", "level": "INFO", }, }, "formatters": { "raw": {"format": "%(message)s"}, "syslog_format": {"format": syslog_format}, "standard": {"format": standard_format}, }, "filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}}, "loggers": { "": {"level": "INFO", "propagate": False, "handlers": ["console", "local"]}, "tracking": {"level": "DEBUG", "propagate": False, "handlers": ["tracking"]}, }, } SENTRY_DSN = config("SENTRY_DSN", default=None) if SENTRY_DSN: LOGGING["loggers"][""]["handlers"].append("sentry") LOGGING["handlers"]["sentry"] = { "class": "raven.handlers.logging.SentryHandler", "dsn": SENTRY_DSN, "level": "ERROR", "environment": "production", "release": RELEASE, } COURSE_LISTINGS = config("COURSE_LISTINGS", default={}, formatter=json.loads) SUBDOMAIN_BRANDING = config("SUBDOMAIN_BRANDING", default={}, formatter=json.loads) VIRTUAL_UNIVERSITIES = config("VIRTUAL_UNIVERSITIES", default=[]) META_UNIVERSITIES = config("META_UNIVERSITIES", default={}, formatter=json.loads) COMMENTS_SERVICE_URL = config("COMMENTS_SERVICE_URL", default="") COMMENTS_SERVICE_KEY = config("COMMENTS_SERVICE_KEY", default="") CERT_NAME_SHORT = config("CERT_NAME_SHORT", default=CERT_NAME_SHORT) CERT_NAME_LONG = config("CERT_NAME_LONG", default=CERT_NAME_LONG) CERT_QUEUE = config("CERT_QUEUE", default="test-pull") ZENDESK_URL = config("ZENDESK_URL", default=None) FEEDBACK_SUBMISSION_EMAIL = config("FEEDBACK_SUBMISSION_EMAIL", default=None) MKTG_URLS = config("MKTG_URLS", default=MKTG_URLS, formatter=json.loads) # Badgr API BADGR_API_TOKEN = config("BADGR_API_TOKEN", default=BADGR_API_TOKEN) BADGR_BASE_URL = config("BADGR_BASE_URL", default=BADGR_BASE_URL) 
BADGR_ISSUER_SLUG = config("BADGR_ISSUER_SLUG", default=BADGR_ISSUER_SLUG) # git repo loading environment GIT_REPO_DIR = config( "GIT_REPO_DIR", default=path("/edx/var/edxapp/course_repos"), formatter=path ) GIT_IMPORT_STATIC = config("GIT_IMPORT_STATIC", default=True) for name, value in config("CODE_JAIL", default={}, formatter=json.loads).items(): oldvalue = CODE_JAIL.get(name) if isinstance(oldvalue, dict): for subname, subvalue in value.items(): oldvalue[subname] = subvalue else: CODE_JAIL[name] = value COURSES_WITH_UNSAFE_CODE = config( "COURSES_WITH_UNSAFE_CODE", default=[], formatter=json.loads ) ASSET_IGNORE_REGEX = config("ASSET_IGNORE_REGEX", default=ASSET_IGNORE_REGEX) # Event Tracking TRACKING_IGNORE_URL_PATTERNS = config( "TRACKING_IGNORE_URL_PATTERNS", default=TRACKING_IGNORE_URL_PATTERNS, formatter=json.loads, ) # SSL external authentication settings SSL_AUTH_EMAIL_DOMAIN = config("SSL_AUTH_EMAIL_DOMAIN", default="MIT.EDU") SSL_AUTH_DN_FORMAT_STRING = config("SSL_AUTH_DN_FORMAT_STRING", default=None) # Django CAS external authentication settings CAS_EXTRA_LOGIN_PARAMS = config( "CAS_EXTRA_LOGIN_PARAMS", default=None, formatter=json.loads ) if FEATURES.get("AUTH_USE_CAS"): CAS_SERVER_URL = config("CAS_SERVER_URL", default=None) INSTALLED_APPS.append("django_cas") MIDDLEWARE_CLASSES.append("django_cas.middleware.CASMiddleware") CAS_ATTRIBUTE_CALLBACK = config( "CAS_ATTRIBUTE_CALLBACK", default=None, formatter=json.loads ) if CAS_ATTRIBUTE_CALLBACK: import importlib CAS_USER_DETAILS_RESOLVER = getattr( importlib.import_module(CAS_ATTRIBUTE_CALLBACK["module"]), CAS_ATTRIBUTE_CALLBACK["function"], ) # Video Caching. Pairing country codes with CDN URLs. 
# Example: {'CN': 'http://api.xuetangx.com/edx/video?s3_url='} VIDEO_CDN_URL = config("VIDEO_CDN_URL", default={}, formatter=json.loads) # Branded footer FOOTER_OPENEDX_URL = config("FOOTER_OPENEDX_URL", default=FOOTER_OPENEDX_URL) FOOTER_OPENEDX_LOGO_IMAGE = config( "FOOTER_OPENEDX_LOGO_IMAGE", default=FOOTER_OPENEDX_LOGO_IMAGE ) FOOTER_ORGANIZATION_IMAGE = config( "FOOTER_ORGANIZATION_IMAGE", default=FOOTER_ORGANIZATION_IMAGE ) FOOTER_CACHE_TIMEOUT = config( "FOOTER_CACHE_TIMEOUT", default=FOOTER_CACHE_TIMEOUT, formatter=int ) FOOTER_BROWSER_CACHE_MAX_AGE = config( "FOOTER_BROWSER_CACHE_MAX_AGE", default=FOOTER_BROWSER_CACHE_MAX_AGE, formatter=int ) # Credit notifications settings NOTIFICATION_EMAIL_CSS = config( "NOTIFICATION_EMAIL_CSS", default=NOTIFICATION_EMAIL_CSS ) NOTIFICATION_EMAIL_EDX_LOGO = config( "NOTIFICATION_EMAIL_EDX_LOGO", default=NOTIFICATION_EMAIL_EDX_LOGO ) ############# CORS headers for cross-domain requests ################# if FEATURES.get("ENABLE_CORS_HEADERS") or FEATURES.get( "ENABLE_CROSS_DOMAIN_CSRF_COOKIE" ): CORS_ALLOW_CREDENTIALS = True CORS_ORIGIN_WHITELIST = config( "CORS_ORIGIN_WHITELIST", default=(), formatter=json.loads ) CORS_ORIGIN_ALLOW_ALL = config( "CORS_ORIGIN_ALLOW_ALL", default=False, formatter=bool ) CORS_ALLOW_INSECURE = config("CORS_ALLOW_INSECURE", default=False, formatter=bool) # If setting a cross-domain cookie, it's really important to choose # a name for the cookie that is DIFFERENT than the cookies used # by each subdomain. For example, suppose the applications # at these subdomains are configured to use the following cookie names: # # 1) foo.example.com --> "csrftoken" # 2) baz.example.com --> "csrftoken" # 3) bar.example.com --> "csrftoken" # # For the cross-domain version of the CSRF cookie, you need to choose # a name DIFFERENT than "csrftoken"; otherwise, the new token configured # for ".example.com" could conflict with the other cookies, # non-deterministically causing 403 responses. 
# # Because of the way Django stores cookies, the cookie name MUST # be a `str`, not unicode. Otherwise there will `TypeError`s will be raised # when Django tries to call the unicode `translate()` method with the wrong # number of parameters. CROSS_DOMAIN_CSRF_COOKIE_NAME = str(config("CROSS_DOMAIN_CSRF_COOKIE_NAME")) # When setting the domain for the "cross-domain" version of the CSRF # cookie, you should choose something like: ".example.com" # (note the leading dot), where both the referer and the host # are subdomains of "example.com". # # Browser security rules require that # the cookie domain matches the domain of the server; otherwise # the cookie won't get set. And once the cookie gets set, the client # needs to be on a domain that matches the cookie domain, otherwise # the client won't be able to read the cookie. CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = config("CROSS_DOMAIN_CSRF_COOKIE_DOMAIN") # Field overrides. To use the IDDE feature, add # 'courseware.student_field_overrides.IndividualStudentOverrideProvider'. FIELD_OVERRIDE_PROVIDERS = tuple( config("FIELD_OVERRIDE_PROVIDERS", default=[], formatter=json.loads) ) ############################## SECURE AUTH ITEMS ############### # Secret things: passwords, access keys, etc. 
############### XBlock filesystem field config ########## DJFS = config( "DJFS", default={ "directory_root": "/edx/var/edxapp/django-pyfs/static/django-pyfs", "type": "osfs", "url_root": "/static/django-pyfs", }, formatter=json.loads, ) ############### Module Store Items ########## HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = config( "HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS", default={}, formatter=json.loads ) # PREVIEW DOMAIN must be present in HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS for the preview to show draft changes if "PREVIEW_LMS_BASE" in FEATURES and FEATURES["PREVIEW_LMS_BASE"] != "": PREVIEW_DOMAIN = FEATURES["PREVIEW_LMS_BASE"].split(":")[0] # update dictionary with preview domain regex HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS.update({PREVIEW_DOMAIN: "draft-preferred"}) ############### Mixed Related(Secure/Not-Secure) Items ########## LMS_SEGMENT_KEY = config("LMS_SEGMENT_KEY", default=None) CC_PROCESSOR_NAME = config("CC_PROCESSOR_NAME", default=CC_PROCESSOR_NAME) CC_PROCESSOR = config("CC_PROCESSOR", default=CC_PROCESSOR) SECRET_KEY = config("SECRET_KEY", default="ThisisAnExampleKeyForDevPurposeOnly") # Authentication backends # - behind a proxy, use: "lms.envs.fun.backends.ProxyRateLimitModelBackend" # - for LTI provider, add: "lti_provider.users.LtiBackend" # - for CAS, add: "django_cas.backends.CASBackend" AUTHENTICATION_BACKENDS = config( "AUTHENTICATION_BACKENDS", default=("lms.envs.fun.backends.ProxyRateLimitModelBackend",), ) DEFAULT_FILE_STORAGE = config( "DEFAULT_FILE_STORAGE", default="django.core.files.storage.FileSystemStorage" ) # Specific setting for the File Upload Service to store media in a bucket. 
FILE_UPLOAD_STORAGE_BUCKET_NAME = config( "FILE_UPLOAD_STORAGE_BUCKET_NAME", default="uploads" ) FILE_UPLOAD_STORAGE_PREFIX = config( "FILE_UPLOAD_STORAGE_PREFIX", default=FILE_UPLOAD_STORAGE_PREFIX ) # If there is a database called 'read_replica', you can use the use_read_replica_if_available # function in util/query.py, which is useful for very large database reads DATABASE_ENGINE = config("DATABASE_ENGINE", default="django.db.backends.mysql") DATABASE_HOST = config("DATABASE_HOST", default="mysql") DATABASE_PORT = config("DATABASE_PORT", default=3306, formatter=int) DATABASE_NAME = config("DATABASE_NAME", default="edxapp") DATABASE_USER = config("DATABASE_USER", default="edxapp_user") DATABASE_PASSWORD = config("DATABASE_PASSWORD", default="password") DATABASES = config( "DATABASES", default={ "default": { "ENGINE": DATABASE_ENGINE, "HOST": DATABASE_HOST, "PORT": DATABASE_PORT, "NAME": DATABASE_NAME, "USER": DATABASE_USER, "PASSWORD": DATABASE_PASSWORD, } }, formatter=json.loads, ) # Enable automatic transaction management on all databases # https://docs.djangoproject.com/en/1.8/topics/db/transactions/#tying-transactions-to-http-requests # This needs to be true for all databases for database_name in DATABASES: DATABASES[database_name]["ATOMIC_REQUESTS"] = True XQUEUE_INTERFACE = config( "XQUEUE_INTERFACE", default={"url": None, "basic_auth": None, "django_auth": None}, formatter=json.loads, ) # Configure the MODULESTORE MODULESTORE = convert_module_store_setting_if_needed( config("MODULESTORE", default=MODULESTORE, formatter=json.loads) ) MONGODB_PASSWORD = config("MONGODB_PASSWORD", default="") MONGODB_HOST = config("MONGODB_HOST", default="mongodb") MONGODB_PORT = config("MONGODB_PORT", default=27017, formatter=int) MONGODB_NAME = config("MONGODB_NAME", default="edxapp") MONGODB_USER = config("MONGODB_USER", default=None) MONGODB_SSL = config("MONGODB_SSL", default=False, formatter=bool) MONGODB_REPLICASET = config("MONGODB_REPLICASET", default=None) # 
Accepted read_preference value can be found here https://github.com/mongodb/mongo-python-driver/blob/2.9.1/pymongo/read_preferences.py#L54 MONGODB_READ_PREFERENCE = config("MONGODB_READ_PREFERENCE", default="PRIMARY") DOC_STORE_CONFIG = config( "DOC_STORE_CONFIG", default={ "collection": "modulestore", "host": MONGODB_HOST, "port": MONGODB_PORT, "db": MONGODB_NAME, "user": MONGODB_USER, "password": MONGODB_PASSWORD, "ssl": MONGODB_SSL, "replicaSet": MONGODB_REPLICASET, "read_preference": MONGODB_READ_PREFERENCE, }, formatter=json.loads, ) update_module_store_settings(MODULESTORE, doc_store_settings=DOC_STORE_CONFIG) MONGODB_LOG = config("MONGODB_LOG", default={}, formatter=json.loads) CONTENTSTORE = config( "CONTENTSTORE", default={ "DOC_STORE_CONFIG": DOC_STORE_CONFIG, "ENGINE": "xmodule.contentstore.mongo.MongoContentStore", }, formatter=json.loads, ) EMAIL_HOST_USER = config("EMAIL_HOST_USER", default="") # django default is '' EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD", default="") # django default is '' # Datadog for events! 
DATADOG = config("DATADOG", default={}, formatter=json.loads) # TODO: deprecated (compatibility with previous settings) DATADOG_API = config("DATADOG_API", default=None) # Analytics dashboard server ANALYTICS_SERVER_URL = config("ANALYTICS_SERVER_URL", default=None) ANALYTICS_API_KEY = config("ANALYTICS_API_KEY", default="") # Analytics data source ANALYTICS_DATA_URL = config("ANALYTICS_DATA_URL", default=ANALYTICS_DATA_URL) ANALYTICS_DATA_TOKEN = config("ANALYTICS_DATA_TOKEN", default=ANALYTICS_DATA_TOKEN) # Analytics Dashboard # when True this setting add a link in instructor dashbord to analytics insight service ANALYTICS_DASHBOARD_URL = config( "ANALYTICS_DASHBOARD_URL", default=False, formatter=bool ) ANALYTICS_DASHBOARD_NAME = config( "ANALYTICS_DASHBOARD_NAME", default=PLATFORM_NAME + " Insights" ) # Mailchimp New User List MAILCHIMP_NEW_USER_LIST_ID = config("MAILCHIMP_NEW_USER_LIST_ID", default=None) # Zendesk ZENDESK_USER = config("ZENDESK_USER", default=None) ZENDESK_API_KEY = config("ZENDESK_API_KEY", default=None) # API Key for inbound requests from Notifier service EDX_API_KEY = config("EDX_API_KEY", default=None) # Celery Broker # For redis sentinel use the redis-sentinel transport CELERY_BROKER_TRANSPORT = config("CELERY_BROKER_TRANSPORT", default="redis") CELERY_BROKER_USER = config("CELERY_BROKER_USER", default="") CELERY_BROKER_PASSWORD = config("CELERY_BROKER_PASSWORD", default="") CELERY_BROKER_HOST = config("CELERY_BROKER_HOST", default="redis") CELERY_BROKER_PORT = config("CELERY_BROKER_PORT", default=6379, formatter=int) CELERY_BROKER_VHOST = config("CELERY_BROKER_VHOST", default=0, formatter=int) if CELERY_BROKER_TRANSPORT == "redis-sentinel": # register redis sentinel schema in celery register() BROKER_URL = "{transport}://{user}:{password}@{host}:{port}/{vhost}".format( transport=CELERY_BROKER_TRANSPORT, user=CELERY_BROKER_USER, password=CELERY_BROKER_PASSWORD, host=CELERY_BROKER_HOST, port=CELERY_BROKER_PORT, vhost=CELERY_BROKER_VHOST, ) 
# To use redis-sentinel, refer to the documenation here # https://celery-redis-sentinel.readthedocs.io/en/latest/ BROKER_TRANSPORT_OPTIONS = config( "BROKER_TRANSPORT_OPTIONS", default={}, formatter=json.loads ) # upload limits STUDENT_FILEUPLOAD_MAX_SIZE = config( "STUDENT_FILEUPLOAD_MAX_SIZE", default=STUDENT_FILEUPLOAD_MAX_SIZE, formatter=int ) # Event tracking TRACKING_BACKENDS.update(config("TRACKING_BACKENDS", default={}, formatter=json.loads)) EVENT_TRACKING_BACKENDS["tracking_logs"]["OPTIONS"]["backends"].update( config("EVENT_TRACKING_BACKENDS", default={}, formatter=json.loads) ) EVENT_TRACKING_BACKENDS["segmentio"]["OPTIONS"]["processors"][0]["OPTIONS"][ "whitelist" ].extend( config("EVENT_TRACKING_SEGMENTIO_EMIT_WHITELIST", default=[], formatter=json.loads) ) TRACKING_SEGMENTIO_WEBHOOK_SECRET = config( "TRACKING_SEGMENTIO_WEBHOOK_SECRET", default=TRACKING_SEGMENTIO_WEBHOOK_SECRET ) TRACKING_SEGMENTIO_ALLOWED_TYPES = config( "TRACKING_SEGMENTIO_ALLOWED_TYPES", default=TRACKING_SEGMENTIO_ALLOWED_TYPES, formatter=json.loads, ) TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES = config( "TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES", default=TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES, formatter=json.loads, ) TRACKING_SEGMENTIO_SOURCE_MAP = config( "TRACKING_SEGMENTIO_SOURCE_MAP", default=TRACKING_SEGMENTIO_SOURCE_MAP, formatter=json.loads, ) # Student identity verification settings VERIFY_STUDENT = config("VERIFY_STUDENT", default=VERIFY_STUDENT, formatter=json.loads) # Grades download GRADES_DOWNLOAD_ROUTING_KEY = config( "GRADES_DOWNLOAD_ROUTING_KEY", default=HIGH_MEM_QUEUE ) GRADES_DOWNLOAD = config( "GRADES_DOWNLOAD", default=GRADES_DOWNLOAD, formatter=json.loads ) GRADES_DOWNLOAD = config("GRADES_DOWNLOAD", default=GRADES_DOWNLOAD) # financial reports FINANCIAL_REPORTS = config( "FINANCIAL_REPORTS", default=FINANCIAL_REPORTS, formatter=json.loads ) ##### ACCOUNT LOCKOUT DEFAULT PARAMETERS ##### MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = config( 
"MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED", default=5, formatter=int ) MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = config( "MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS", default=15 * 60, formatter=int ) MICROSITE_CONFIGURATION = config( "MICROSITE_CONFIGURATION", default={}, formatter=json.loads ) MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default="")) #### PASSWORD POLICY SETTINGS ##### PASSWORD_MIN_LENGTH = config("PASSWORD_MIN_LENGTH", default=12, formatter=int) PASSWORD_MAX_LENGTH = config("PASSWORD_MAX_LENGTH", default=None, formatter=int) PASSWORD_COMPLEXITY = config( "PASSWORD_COMPLEXITY", default={"UPPER": 1, "LOWER": 1, "DIGITS": 1}, formatter=json.loads, ) PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = config( "PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD", default=PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD, formatter=int, ) PASSWORD_DICTIONARY = config("PASSWORD_DICTIONARY", default=[], formatter=json.loads) ### INACTIVITY SETTINGS #### SESSION_INACTIVITY_TIMEOUT_IN_SECONDS = config( "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", default=None, formatter=int ) ##### LMS DEADLINE DISPLAY TIME_ZONE ####### TIME_ZONE_DISPLAYED_FOR_DEADLINES = config( "TIME_ZONE_DISPLAYED_FOR_DEADLINES", default=TIME_ZONE_DISPLAYED_FOR_DEADLINES ) ##### X-Frame-Options response header settings ##### X_FRAME_OPTIONS = config("X_FRAME_OPTIONS", default=X_FRAME_OPTIONS) ##### Third-party auth options ################################################ if FEATURES.get("ENABLE_THIRD_PARTY_AUTH"): # The reduced session expiry time during the third party login pipeline. (Value in seconds) SOCIAL_AUTH_PIPELINE_TIMEOUT = config("SOCIAL_AUTH_PIPELINE_TIMEOUT", default=600) # The SAML private/public key values do not need the delimiter lines (such as # "-----BEGIN PRIVATE KEY-----", default="-----END PRIVATE KEY-----" etc.) but they may be included # if you want (though it's easier to format the key values as JSON without the delimiters). 
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = config( "SOCIAL_AUTH_SAML_SP_PRIVATE_KEY", default="" ) SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = config( "SOCIAL_AUTH_SAML_SP_PUBLIC_CERT", default="" ) SOCIAL_AUTH_OAUTH_SECRETS = config( "SOCIAL_AUTH_OAUTH_SECRETS", default={}, formatter=json.loads ) SOCIAL_AUTH_LTI_CONSUMER_SECRETS = config( "SOCIAL_AUTH_LTI_CONSUMER_SECRETS", default={}, formatter=json.loads ) # third_party_auth config moved to ConfigurationModels. This is for data migration only: THIRD_PARTY_AUTH_OLD_CONFIG = config("THIRD_PARTY_AUTH", default=None) if ( config("THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int) is not None ): CELERYBEAT_SCHEDULE["refresh-saml-metadata"] = { "task": "third_party_auth.fetch_saml_metadata", "schedule": datetime.timedelta( hours=config( "THIRD_PARTY_AUTH_SAML_FETCH_PERIOD_HOURS", default=24, formatter=int, ) ), } # The following can be used to integrate a custom login form with third_party_auth. # It should be a dict where the key is a word passed via ?auth_entry=, and the value is a # dict with an arbitrary 'secret_key' and a 'url'. 
THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = config( "THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS", default={}, formatter=json.loads ) ##### OAUTH2 Provider ############## if FEATURES.get("ENABLE_OAUTH2_PROVIDER"): OAUTH_OIDC_ISSUER = config("OAUTH_OIDC_ISSUER", default=None) OAUTH_ENFORCE_SECURE = config("OAUTH_ENFORCE_SECURE", default=True, formatter=bool) OAUTH_ENFORCE_CLIENT_SECURE = config( "OAUTH_ENFORCE_CLIENT_SECURE", default=True, formatter=bool ) ##### ADVANCED_SECURITY_CONFIG ##### ADVANCED_SECURITY_CONFIG = config( "ADVANCED_SECURITY_CONFIG", default={}, formatter=json.loads ) ##### GOOGLE ANALYTICS IDS ##### GOOGLE_ANALYTICS_ACCOUNT = config("GOOGLE_ANALYTICS_ACCOUNT", default=None) GOOGLE_ANALYTICS_LINKEDIN = config("GOOGLE_ANALYTICS_LINKEDIN", default=None) ##### OPTIMIZELY PROJECT ID ##### OPTIMIZELY_PROJECT_ID = config("OPTIMIZELY_PROJECT_ID", default=OPTIMIZELY_PROJECT_ID) #### Course Registration Code length #### REGISTRATION_CODE_LENGTH = config("REGISTRATION_CODE_LENGTH", default=8, formatter=int) # REGISTRATION CODES DISPLAY INFORMATION INVOICE_CORP_ADDRESS = config("INVOICE_CORP_ADDRESS", default=INVOICE_CORP_ADDRESS) INVOICE_PAYMENT_INSTRUCTIONS = config( "INVOICE_PAYMENT_INSTRUCTIONS", default=INVOICE_PAYMENT_INSTRUCTIONS ) # Which access.py permission names to check; # We default this to the legacy permission 'see_exists'. 
COURSE_CATALOG_VISIBILITY_PERMISSION = config( "COURSE_CATALOG_VISIBILITY_PERMISSION", default=COURSE_CATALOG_VISIBILITY_PERMISSION ) COURSE_ABOUT_VISIBILITY_PERMISSION = config( "COURSE_ABOUT_VISIBILITY_PERMISSION", default=COURSE_ABOUT_VISIBILITY_PERMISSION ) # Enrollment API Cache Timeout ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT = config( "ENROLLMENT_COURSE_DETAILS_CACHE_TIMEOUT", default=60, formatter=int ) # PDF RECEIPT/INVOICE OVERRIDES PDF_RECEIPT_TAX_ID = config("PDF_RECEIPT_TAX_ID", default=PDF_RECEIPT_TAX_ID) PDF_RECEIPT_FOOTER_TEXT = config( "PDF_RECEIPT_FOOTER_TEXT", default=PDF_RECEIPT_FOOTER_TEXT ) PDF_RECEIPT_DISCLAIMER_TEXT = config( "PDF_RECEIPT_DISCLAIMER_TEXT", default=PDF_RECEIPT_DISCLAIMER_TEXT ) PDF_RECEIPT_BILLING_ADDRESS = config( "PDF_RECEIPT_BILLING_ADDRESS", default=PDF_RECEIPT_BILLING_ADDRESS ) PDF_RECEIPT_TERMS_AND_CONDITIONS = config( "PDF_RECEIPT_TERMS_AND_CONDITIONS", default=PDF_RECEIPT_TERMS_AND_CONDITIONS ) PDF_RECEIPT_TAX_ID_LABEL = config( "PDF_RECEIPT_TAX_ID_LABEL", default=PDF_RECEIPT_TAX_ID_LABEL ) PDF_RECEIPT_LOGO_PATH = config("PDF_RECEIPT_LOGO_PATH", default=PDF_RECEIPT_LOGO_PATH) PDF_RECEIPT_COBRAND_LOGO_PATH = config( "PDF_RECEIPT_COBRAND_LOGO_PATH", default=PDF_RECEIPT_COBRAND_LOGO_PATH ) PDF_RECEIPT_LOGO_HEIGHT_MM = config( "PDF_RECEIPT_LOGO_HEIGHT_MM", default=PDF_RECEIPT_LOGO_HEIGHT_MM, formatter=int ) PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM = config( "PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM", default=PDF_RECEIPT_COBRAND_LOGO_HEIGHT_MM, formatter=int, ) if ( FEATURES.get("ENABLE_COURSEWARE_SEARCH") or FEATURES.get("ENABLE_DASHBOARD_SEARCH") or FEATURES.get("ENABLE_COURSE_DISCOVERY") or FEATURES.get("ENABLE_TEAMS") ): # Use ElasticSearch as the search engine herein SEARCH_ENGINE = "search.elastic.ElasticSearchEngine" ELASTIC_SEARCH_CONFIG = config( "ELASTIC_SEARCH_CONFIG", default=[{}], formatter=json.loads ) # Facebook app FACEBOOK_API_VERSION = config("FACEBOOK_API_VERSION", default=None) FACEBOOK_APP_SECRET = 
config("FACEBOOK_APP_SECRET", default=None) FACEBOOK_APP_ID = config("FACEBOOK_APP_ID", default=None) XBLOCK_SETTINGS = config("XBLOCK_SETTINGS", default={}, formatter=json.loads) XBLOCK_SETTINGS.setdefault("VideoDescriptor", {})["licensing_enabled"] = FEATURES.get( "LICENSING", False ) XBLOCK_SETTINGS.setdefault("VideoModule", {})["YOUTUBE_API_KEY"] = config( "YOUTUBE_API_KEY", default=YOUTUBE_API_KEY ) ##### CDN EXPERIMENT/MONITORING FLAGS ##### CDN_VIDEO_URLS = config("CDN_VIDEO_URLS", default=CDN_VIDEO_URLS) ONLOAD_BEACON_SAMPLE_RATE = config( "ONLOAD_BEACON_SAMPLE_RATE", default=ONLOAD_BEACON_SAMPLE_RATE ) ##### ECOMMERCE API CONFIGURATION SETTINGS ##### ECOMMERCE_PUBLIC_URL_ROOT = config( "ECOMMERCE_PUBLIC_URL_ROOT", default=ECOMMERCE_PUBLIC_URL_ROOT ) ECOMMERCE_API_URL = config("ECOMMERCE_API_URL", default=ECOMMERCE_API_URL) ECOMMERCE_API_TIMEOUT = config( "ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT, formatter=int ) ECOMMERCE_SERVICE_WORKER_USERNAME = config( "ECOMMERCE_SERVICE_WORKER_USERNAME", default=ECOMMERCE_SERVICE_WORKER_USERNAME ) ECOMMERCE_API_TIMEOUT = config("ECOMMERCE_API_TIMEOUT", default=ECOMMERCE_API_TIMEOUT) ECOMMERCE_API_SIGNING_KEY = config( "ECOMMERCE_API_SIGNING_KEY", default=ECOMMERCE_API_SIGNING_KEY ) ##### Custom Courses for EdX ##### if FEATURES.get("CUSTOM_COURSES_EDX"): INSTALLED_APPS += ("lms.djangoapps.ccx",) FIELD_OVERRIDE_PROVIDERS += ( "lms.djangoapps.ccx.overrides.CustomCoursesForEdxOverrideProvider", ) CCX_MAX_STUDENTS_ALLOWED = config( "CCX_MAX_STUDENTS_ALLOWED", default=CCX_MAX_STUDENTS_ALLOWED ) ##### Individual Due Date Extensions ##### if FEATURES.get("INDIVIDUAL_DUE_DATES"): FIELD_OVERRIDE_PROVIDERS += ( "courseware.student_field_overrides.IndividualStudentOverrideProvider", ) ##### Self-Paced Course Due Dates ##### FIELD_OVERRIDE_PROVIDERS += ( "courseware.self_paced_overrides.SelfPacedDateOverrideProvider", ) # PROFILE IMAGE CONFIG PROFILE_IMAGE_BACKEND = config("PROFILE_IMAGE_BACKEND", 
default=PROFILE_IMAGE_BACKEND) PROFILE_IMAGE_SECRET_KEY = config( "PROFILE_IMAGE_SECRET_KEY", default=PROFILE_IMAGE_SECRET_KEY ) PROFILE_IMAGE_MAX_BYTES = config( "PROFILE_IMAGE_MAX_BYTES", default=PROFILE_IMAGE_MAX_BYTES, formatter=int ) PROFILE_IMAGE_MIN_BYTES = config( "PROFILE_IMAGE_MIN_BYTES", default=PROFILE_IMAGE_MIN_BYTES, formatter=int ) PROFILE_IMAGE_DEFAULT_FILENAME = "images/profiles/default" # EdxNotes config EDXNOTES_PUBLIC_API = config("EDXNOTES_PUBLIC_API", default=EDXNOTES_PUBLIC_API) EDXNOTES_INTERNAL_API = config("EDXNOTES_INTERNAL_API", default=EDXNOTES_INTERNAL_API) ##### Credit Provider Integration ##### CREDIT_PROVIDER_SECRET_KEYS = config( "CREDIT_PROVIDER_SECRET_KEYS", default={}, formatter=json.loads ) ##################### LTI Provider ##################### if FEATURES.get("ENABLE_LTI_PROVIDER"): INSTALLED_APPS += ("lti_provider",) LTI_USER_EMAIL_DOMAIN = config("LTI_USER_EMAIL_DOMAIN", default="lti.example.com") # For more info on this, see the notes in common.py LTI_AGGREGATE_SCORE_PASSBACK_DELAY = config( "LTI_AGGREGATE_SCORE_PASSBACK_DELAY", default=LTI_AGGREGATE_SCORE_PASSBACK_DELAY ) ##################### Credit Provider help link #################### CREDIT_HELP_LINK_URL = config("CREDIT_HELP_LINK_URL", default=CREDIT_HELP_LINK_URL) #### JWT configuration #### JWT_ISSUER = config("JWT_ISSUER", default=JWT_ISSUER) JWT_EXPIRATION = config("JWT_EXPIRATION", default=JWT_EXPIRATION) ################# PROCTORING CONFIGURATION ################## PROCTORING_BACKEND_PROVIDER = config( "PROCTORING_BACKEND_PROVIDER", default=PROCTORING_BACKEND_PROVIDER ) PROCTORING_SETTINGS = config( "PROCTORING_SETTINGS", default=PROCTORING_SETTINGS, formatter=json.loads ) ################# MICROSITE #################### MICROSITE_CONFIGURATION = config( "MICROSITE_CONFIGURATION", default={}, formatter=json.loads ) MICROSITE_ROOT_DIR = path(config("MICROSITE_ROOT_DIR", default="")) # Cutoff date for granting audit certificates if 
config("AUDIT_CERT_CUTOFF_DATE", default=None): AUDIT_CERT_CUTOFF_DATE = dateutil.parser.parse( config("AUDIT_CERT_CUTOFF_DATE", default=AUDIT_CERT_CUTOFF_DATE) ) ################ CONFIGURABLE LTI CONSUMER ############### # Add just the standard LTI consumer by default, forcing it to open in a new window and ask # the user before sending email and username: LTI_XBLOCK_CONFIGURATIONS = config( "LTI_XBLOCK_CONFIGURATIONS", default=[ { "display_name": "LTI consumer", "pattern": ".*", "hidden_fields": [ "ask_to_send_email", "ask_to_send_username", "new_window", ], "defaults": { "ask_to_send_email": True, "ask_to_send_username": True, "launch_target": "new_window", }, } ], formatter=json.loads, ) LTI_XBLOCK_SECRETS = config("LTI_XBLOCK_SECRETS", default={}, formatter=json.loads) ################################ FUN stuff ################################ SITE_VARIANT = "lms" # Environment's name displayed in FUN's backoffice ENVIRONMENT = config("ENVIRONMENT", default="no set") BASE_ROOT = path("/edx/app/edxapp/") # Fun-apps configuration INSTALLED_APPS += ( "backoffice", "bootstrapform", "ckeditor", "course_dashboard", "course_pages", "courses_api", "courses", "easy_thumbnails", "edx_gea", "forum_contributors", "fun_api", "fun_certificates", "fun_instructor", "fun", "funsite", "haystack", "masquerade", "newsfeed", "password_container", "payment_api", "payment", "pure_pagination", "raven.contrib.django.raven_compat", "rest_framework.authtoken", "teachers", "universities", "videoproviders", ) ROOT_URLCONF = "fun.lms.urls" # Related Richie platform url PLATFORM_RICHIE_URL = config("PLATFORM_RICHIE_URL", default=None) # Haystack configuration (default is minimal working configuration) HAYSTACK_CONNECTIONS = config( "HAYSTACK_CONNECTIONS", default={ "default": {"ENGINE": "courses.search_indexes.ConfigurableElasticSearchEngine"} }, formatter=json.loads, ) CKEDITOR_UPLOAD_PATH = "./" CKEDITOR_CONFIGS = { "default": { "toolbar": [ [ "Undo", "Redo", "-", "Bold", "Italic", 
"Underline", "-", "Link", "Unlink", "Anchor", "-", "Format", "-", "SpellChecker", "Scayt", "-", "Maximize", ], [ "HorizontalRule", "-", "Table", "-", "BulletedList", "NumberedList", "-", "Cut", "Copy", "Paste", "PasteText", "PasteFromWord", "-", "SpecialChar", "-", "Source", ], ], "toolbarCanCollapse": False, "entities": False, "width": 955, "uiColor": "#9AB8F3", }, "news": { # Redefine path where the news images/files are uploaded. This would # better be done at runtime with the 'reverse' function, but # unfortunately there is no way around defining this in the settings # file. "filebrowserUploadUrl": "/news/ckeditor/upload/", "filebrowserBrowseUrl": "/news/ckeditor/browse/", "toolbar_Full": [ [ "Styles", "Format", "Bold", "Italic", "Underline", "Strike", "SpellChecker", "Undo", "Redo", ], ["Image", "Flash", "Table", "HorizontalRule"], ["NumberedList", "BulletedList", "Blockquote", "TextColor", "BGColor"], ["Smiley", "SpecialChar"], ["Source"], ], }, } # ### FUN-APPS SETTINGS ### # This is dist-packages path where all fun-apps are FUN_BASE_ROOT = path(os.path.dirname(pkgutil.get_loader("funsite").filename)) SHARED_ROOT = DATA_DIR / "shared" # Add FUN applications templates directories to MAKO template finder before edX's ones MAKO_TEMPLATES["main"] = [ # overrides template in edx-platform/lms/templates FUN_BASE_ROOT / "funsite/templates/lms", FUN_BASE_ROOT / "funsite/templates", FUN_BASE_ROOT / "course_pages/templates", FUN_BASE_ROOT / "payment/templates", FUN_BASE_ROOT / "course_dashboard/templates", FUN_BASE_ROOT / "newsfeed/templates", FUN_BASE_ROOT / "fun_certificates/templates", ] + MAKO_TEMPLATES["main"] # JS static override DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms") FUN_SMALL_LOGO_RELATIVE_PATH = "funsite/images/logos/funmooc173.png" FUN_BIG_LOGO_RELATIVE_PATH = "funsite/images/logos/funmoocfp.png" FAVICON_PATH = "fun/images/favicon.ico" # Locale paths # Here we rewrite LOCAL_PATHS to give precedence to our applications 
above edx-platform's ones, # then we add xblocks which provide translations as there is no native mechanism to handle this # See Xblock i18n: http://www.libremente.eu/2017/12/06/edx-translation/ LOCALIZED_FUN_APPS = [ "backoffice", "course_dashboard", "course_pages", "courses", "fun_api", "fun_certificates", "funsite", "newsfeed", "payment", "universities", "videoproviders", ] LOCALE_PATHS = [FUN_BASE_ROOT / app / "locale" for app in LOCALIZED_FUN_APPS] LOCALE_PATHS.append(REPO_ROOT / "conf/locale") # edx-platform locales LOCALE_PATHS.append(path(pkgutil.get_loader("proctor_exam").filename) / "locale") # -- Certificates CERTIFICATES_DIRECTORY_NAME = "attestations" FUN_LOGO_PATH = FUN_BASE_ROOT / "funsite/static" / FUN_BIG_LOGO_RELATIVE_PATH FUN_ATTESTATION_LOGO_PATH = ( FUN_BASE_ROOT / "funsite/static" / "funsite/images/logos/funmoocattest.png" ) STUDENT_NAME_FOR_TEST_CERTIFICATE = "Test User" # Videofront subtitles cache CACHES["video_subtitles"] = { "BACKEND": "django.core.cache.backends.filebased.FileBasedCache", "KEY_PREFIX": "video_subtitles", "LOCATION": DATA_DIR / "video_subtitles_cache", } # Course image thumbnails FUN_THUMBNAIL_OPTIONS = { "small": {"size": (270, 152), "crop": "smart"}, "big": {"size": (337, 191), "crop": "smart"}, "about": {"size": (730, 412), "crop": "scale"}, "facebook": { "size": (600, 315), "crop": "smart", }, # https://developers.facebook.com/docs/sharing/best-practices } THUMBNAIL_PRESERVE_EXTENSIONS = True THUMBNAIL_EXTENSION = "png" ##### ORA2 ###### ORA2_FILEUPLOAD_BACKEND = "swift" ORA2_SWIFT_KEY = config("ORA2_SWIFT_KEY", default="") ORA2_SWIFT_URL = config("ORA2_SWIFT_URL", default="") # Prefix for uploads of example-based assessment AI classifiers # This can be used to separate uploads for different environments ORA2_FILE_PREFIX = config("ORA2_FILE_PREFIX", default=ORA2_FILE_PREFIX) # Profile image upload PROFILE_IMAGE_BACKEND = { "class": "storages.backends.overwrite.OverwriteStorage", "options": { "location": 
os.path.join(MEDIA_ROOT, "profile-images/"), "base_url": os.path.join(MEDIA_URL, "profile-images/"), }, } ENABLE_ADWAYS_FOR_COURSES = config( "ENABLE_ADWAYS_FOR_COURSES", default=[], formatter=json.loads ) # Add our v3 CSS and JS files to assets compilation pipeline to make them available in courseware. # On FUN v3 frontend, which do not use edX's templates, those files are loaded # by funsite/templates/funsite/parts/base.html and css/lms-main.css PIPELINE_CSS["style-vendor"]["source_filenames"].append("fun/css/cookie-banner.css") PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/header.css") PIPELINE_CSS["style-vendor"]["source_filenames"].append("funsite/css/footer.css") # can't find any common group for group in ["base_vendor", "main_vendor"]: PIPELINE_JS[group]["source_filenames"].append("funsite/js/header.js") PIPELINE_JS[group]["source_filenames"].append("fun/js/cookie-banner.js") # Glowbl GLOWBL_LTI_ENDPOINT = config( "GLOWBL_LTI_ENDPOINT", default="http://ltiapps.net/test/tp.php" ) GLOWBL_LTI_KEY = config("GLOWBL_LTI_KEY", default="jisc.ac.uk") GLOWBL_LTI_SECRET = config("GLOWBL_LTI_SECRET", default="secret") GLOWBL_LTI_ID = config("GLOWBL_LTI_ID", default="testtoolconsumer") GLOWBL_LAUNCH_URL = config( "GLOWBL_LAUNCH_URL", default="http://ltiapps.net/test/tp.php" ) GLOWBL_COLL_OPT = config("GLOWBL_COLL_OPT", default="FunMoocJdR") DEFAULT_TEMPLATE_ENGINE["DIRS"].append(FUN_BASE_ROOT / "funsite/templates/lms") DEFAULT_TEMPLATE_ENGINE["OPTIONS"]["context_processors"].append( "fun.context_processor.fun_settings" ) TEMPLATES = [DEFAULT_TEMPLATE_ENGINE] # This force Edx Studio to use our own video provider Xblock on default button FUN_DEFAULT_VIDEO_PLAYER = "libcast_xblock" MIDDLEWARE_CLASSES += ( "fun.middleware.LegalAcceptance", "backoffice.middleware.PathLimitedMasqueradeMiddleware", ) # These are the allowed subtitle languages, we have the same list on Videofront server # We remove 2 deprecated chinese language codes which do not exist on 
Django 1.10 VideoFront SUBTITLE_SUPPORTED_LANGUAGES = LazyChoicesSorter( (code, ugettext_lazy(lang)) for code, lang in global_settings.LANGUAGES if code not in ("zh-cn", "zh-tw") ) ANONYMIZATION_KEY = config("ANONYMIZATION_KEY", default="") RAVEN_CONFIG = config("RAVEN_CONFIG", default={"dsn": ""}, formatter=json.loads) ELASTICSEARCH_INDEX_SETTINGS = { "settings": { "analysis": { "filter": { "elision": { "type": "elision", "articles": ["l", "m", "t", "qu", "n", "s", "j", "d"], } }, "analyzer": { "custom_french_analyzer": { "tokenizer": "letter", "filter": [ "asciifolding", "lowercase", "french_stem", "elision", "stop", "word_delimiter", ], } }, } } } FUN_MKTG_URLS = config("FUN_MKTG_URLS", default={}, formatter=json.loads) # Default visibility of student's profile to other students ACCOUNT_VISIBILITY_CONFIGURATION["default_visibility"] = "private" # A user is verified if he has an approved SoftwareSecurePhotoVerification entry # this setting will create a dummy SoftwareSecurePhotoVerification for user in # paybox success callback view. A this point, we think it's better to create a # dummy one than to remove verifying process in edX FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION = config( "FUN_ECOMMERCE_DEBUG_NO_NOTIFICATION", default=False, formatter=bool ) ECOMMERCE_NOTIFICATION_URL = config("ECOMMERCE_NOTIFICATION_URL", default=None) PAYMENT_ADMIN = "paybox@fun-mooc.fr" # List of pattern definitions to automatically add verified users to a cohort # If value is [] this feature is disabled # Otherwise this setting is a list of # tuple values (r"<course id regex>", "<cohort name>"). 
# e.g: if you want to enable this feature for a particular course you can set # this setting to # [ # (r"<course id>", "cohort name"), # ] VERIFIED_COHORTS = config("VERIFIED_COHORTS", default=[]) # Force Edx to use `libcast_xblock` as default video player # in the studio (big green button) and if any xblock is called `video` XBLOCK_SELECT_FUNCTION = prefer_fun_video if "sentry" in LOGGING.get("handlers"): LOGGING["handlers"]["sentry"]["environment"] = "development" # Configure gelf handler to listen on graylog server LOGGING["loggers"][""]["handlers"].append("gelf") LOGGING["loggers"]["tracking"]["handlers"].append("gelf") LOGGING["handlers"]["gelf"] = { "level": "DEBUG", "class": "djehouty.libgelf.handlers.GELFTCPSocketHandler", "host": "graylog", "port": 12201, "null_character": True, } DEBUG = True REQUIRE_DEBUG = True EMAIL_BACKEND = config( "EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend" ) PIPELINE_ENABLED = False STATICFILES_STORAGE = "openedx.core.storage.DevelopmentStorage" ALLOWED_HOSTS = ["*"] FEATURES["AUTOMATIC_AUTH_FOR_TESTING"] = True # ORA2 fileupload ORA2_FILEUPLOAD_BACKEND = "filesystem" ORA2_FILEUPLOAD_ROOT = os.path.join(SHARED_ROOT, "openassessment_submissions") ORA2_FILEUPLOAD_CACHE_ROOT = os.path.join( SHARED_ROOT, "openassessment_submissions_cache" ) AUTHENTICATION_BACKENDS = config( "AUTHENTICATION_BACKENDS", default=["django.contrib.auth.backends.ModelBackend"], formatter=json.loads )
[ 37811, 198, 1212, 318, 262, 4277, 11055, 329, 674, 1388, 900, 286, 9597, 13, 770, 857, 5626, 198, 9631, 262, 2695, 8217, 11, 543, 779, 2695, 13, 9078, 198, 198, 17227, 12796, 25, 198, 9, 5765, 1066, 66, 2317, 11, 290, 12940, 12, 1...
2.394661
23,638
# The MIT License # # Copyright (c) 2008 William T. Katz # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. __author__ = 'Kyle Conroy' import datetime import calendar import logging import os import re import string import urllib import urlparse from google.appengine.api import memcache from google.appengine.api import users from google.appengine.ext import webapp from google.appengine.ext import db from datetime import date, timedelta from django.conf import settings from django.template.loader import render_to_string from django.utils import simplejson as json from time import mktime from models import List, Status, Service, Event, Profile import xml.etree.ElementTree as et from utils import authorized from wsgiref.handlers import format_date_time
[ 2, 383, 17168, 13789, 198, 2, 198, 2, 15069, 357, 66, 8, 3648, 3977, 309, 13, 36290, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 290, 3917, 103...
3.77707
471
from wtforms import Form, StringField, PasswordField, DecimalField, IntegerField, SelectField, validators from wtforms.fields.html5 import DateField
[ 6738, 266, 83, 23914, 1330, 5178, 11, 10903, 15878, 11, 30275, 15878, 11, 4280, 4402, 15878, 11, 34142, 15878, 11, 9683, 15878, 11, 4938, 2024, 198, 6738, 266, 83, 23914, 13, 25747, 13, 6494, 20, 1330, 7536, 15878, 628, 628, 198 ]
3.731707
41
# -*- coding: utf-8 -*- """ Created on Wed Sep 15 08:32:03 2021 @author: User """ import numpy as np import matplotlib.pyplot as plt a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print(a) print(a[0]) print(a.ndim) #te dice la cantidad de ejes (o dimensiones) del arreglo print(a.shape) #Te va a dar una tupla de enteros que indican la cantidad de elementos en cada eje. print(a.size) #%% vec_fila = a[np.newaxis, :] print(vec_fila.shape, a.shape) #%% print(a.sum()) print(a.min()) print(a.max()) #%% print(a) print(a.max(axis=1)) print(a.max(axis=0)) #%% print(np.random.random(3))
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 3300, 8621, 1315, 8487, 25, 2624, 25, 3070, 33448, 198, 198, 31, 9800, 25, 11787, 198, 37811, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 260...
2.211111
270
"""Correlation functions for matter and halos. """ import cluster_toolkit from cluster_toolkit import _ArrayWrapper, _handle_gsl_error import numpy as np def xi_nfw_at_r(r, M, c, Omega_m, delta=200): """NFW halo profile correlation function. Args: r (float or array like): 3d distances from halo center in Mpc/h comoving M (float): Mass in Msun/h c (float): Concentration Omega_m (float): Omega_matter, matter fraction of the density delta (int; optional): Overdensity, default is 200 Returns: float or array like: NFW halo profile. """ r = _ArrayWrapper(r, 'r') xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_nfw(r.cast(), len(r), M, c, delta, Omega_m, xi.cast()) return xi.finish() def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.): """Einasto halo profile. Args: r (float or array like): 3d distances from halo center in Mpc/h comoving M (float): Mass in Msun/h; not used if rhos is specified conc (float): Concentration alpha (float): Profile exponent om (float): Omega_matter, matter fraction of the density delta (int): Overdensity, default is 200 rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional Returns: float or array like: Einasto halo profile. """ r = _ArrayWrapper(r, 'r') xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos, conc, alpha, delta, om, xi.cast()) return xi.finish() def xi_mm_at_r(r, k, P, N=500, step=0.005, exact=False): """Matter-matter correlation function. 
Args: r (float or array like): 3d distances from halo center in Mpc/h comoving k (array like): Wavenumbers of power spectrum in h/Mpc comoving P (array like): Matter power spectrum in (Mpc/h)^3 comoving N (int; optional): Quadrature step count, default is 500 step (float; optional): Quadrature step size, default is 5e-3 exact (boolean): Use the slow, exact calculation; default is False Returns: float or array like: Matter-matter correlation function """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi = _ArrayWrapper.zeros_like(r) if not exact: rc = cluster_toolkit._lib.calc_xi_mm(r.cast(), len(r), k.cast(), P.cast(), len(k), xi.cast(), N, step) _handle_gsl_error(rc, xi_mm_at_r) else: if r.arr.max() > 1e3: raise Exception("max(r) cannot be >1e3 for numerical stability.") rc = cluster_toolkit._lib.calc_xi_mm_exact(r.cast(), len(r), k.cast(), P.cast(), len(k), xi.cast()) _handle_gsl_error(rc, xi_mm_at_r) return xi.finish() def xi_2halo(bias, xi_mm): """2-halo term in halo-matter correlation function Args: bias (float): Halo bias xi_mm (float or array like): Matter-matter correlation function Returns: float or array like: 2-halo term in halo-matter correlation function """ xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True) xi = _ArrayWrapper.zeros_like(xi_mm) cluster_toolkit._lib.calc_xi_2halo(len(xi_mm), bias, xi_mm.cast(), xi.cast()) return xi.finish() def xi_hm(xi_1halo, xi_2halo, combination="max"): """Halo-matter correlation function Note: at the moment you can combine the 1-halo and 2-halo terms by either taking the max of the two or the sum of the two. The 'combination' field must be set to either 'max' (default) or 'sum'. 
Args: xi_1halo (float or array like): 1-halo term xi_2halo (float or array like, same size as xi_1halo): 2-halo term combination (string; optional): specifies how the 1-halo and 2-halo terms are combined, default is 'max' which takes the max of the two Returns: float or array like: Halo-matter correlation function """ if combination == "max": switch = 0 elif combination == 'sum': switch = 1 else: raise Exception("Combinations other than maximum not implemented yet") xi_1halo = _ArrayWrapper(xi_1halo, allow_multidim=True) xi_2halo = _ArrayWrapper(xi_2halo, allow_multidim=True) xi = _ArrayWrapper.zeros_like(xi_1halo) cluster_toolkit._lib.calc_xi_hm(len(xi_1halo), xi_1halo.cast(), xi_2halo.cast(), xi.cast(), switch) return xi.finish() def xi_DK(r, M, conc, be, se, k, P, om, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile. Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. 
Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_DK(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, xi.cast()) return xi.finish() def xi_DK_appendix1(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile, first form from the appendix, eq. A3. Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction bias (float): halo bias xi_mm (float or array like): matter correlation function at r delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k, allow_multidim=True) P = _ArrayWrapper(P, allow_multidim=True) xi_mm = _ArrayWrapper(xi_mm, allow_multidim=True) xi = np.zeros_like(r) cluster_toolkit._lib.calc_xi_DK_app1(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast()) return xi.finish() def xi_DK_appendix2(r, M, conc, be, se, k, P, om, bias, xi_mm, delta=200, rhos=-1., alpha=-1., beta=-1., gamma=-1.): """Diemer-Kravtsov 2014 profile, second form from the appendix, eq. A4. 
Args: r (float or array like): radii in Mpc/h comoving M (float): mass in Msun/h conc (float): Einasto concentration be (float): DK transition parameter se (float): DK transition parameter k (array like): wavenumbers in h/Mpc P (array like): matter power spectrum in [Mpc/h]^3 Omega_m (float): matter density fraction bias (float): halo bias xi_mm (float or array like): matter correlation function at r delta (float): overdensity of matter. Optional, default is 200 rhos (float): Einasto density. Optional, default is compute from the mass alpha (float): Einasto parameter. Optional, default is computed from peak height beta (float): DK 2-halo parameter. Optional, default is 4 gamma (float): DK 2-halo parameter. Optional, default is 8 Returns: float or array like: DK profile evaluated at the input radii """ r = _ArrayWrapper(r, 'r') k = _ArrayWrapper(k) P = _ArrayWrapper(P) xi_mm = _ArrayWrapper(xi_mm) xi = _ArrayWrapper.zeros_like(r) cluster_toolkit._lib.calc_xi_DK_app2(r.cast(), len(r), M, rhos, conc, be, se, alpha, beta, gamma, delta, k.cast(), P.cast(), len(k), om, bias, xi_mm.cast(), xi.cast()) return xi.finish()
[ 37811, 10606, 49501, 5499, 329, 2300, 290, 10284, 418, 13, 198, 198, 37811, 198, 11748, 13946, 62, 25981, 15813, 198, 6738, 13946, 62, 25981, 15813, 1330, 4808, 19182, 36918, 2848, 11, 4808, 28144, 62, 70, 6649, 62, 18224, 198, 11748, 2...
2.283887
4,065
#! /usr/bin/env python """Reverse grep. Usage: rgrep [-i] pattern file """ import sys import re import getopt if __name__ == '__main__': main()
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 49, 964, 325, 42717, 13, 198, 198, 28350, 25, 48670, 7856, 25915, 72, 60, 3912, 2393, 198, 37811, 198, 198, 11748, 25064, 198, 11748, 302, 198, 11748, 651, 8738, 198, 1...
2.533333
60
import struct f = open("c0610400.102200", 'rb') def read_dataset(file): ch = file.read(1) buf = [] while True: if chr(ch[0]) == '\n' and chr(buf[-1]) == '\r': break buf.append(ch[0]) ch = file.read(1) buf.append(ch[0]) return bytes(buf) h = Header() print(h)
[ 11748, 2878, 198, 198, 69, 796, 1280, 7203, 66, 3312, 940, 7029, 13, 940, 34294, 1600, 705, 26145, 11537, 628, 628, 628, 198, 4299, 1100, 62, 19608, 292, 316, 7, 7753, 2599, 198, 220, 220, 220, 442, 796, 2393, 13, 961, 7, 16, 8, ...
1.901163
172
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Modified optimizer_v2 implementation enabling XLA across variable updates.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx from tensorflow.python.distribute import parameter_server_strategy from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.keras import backend from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables as tf_variables
[ 2, 15069, 2864, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.96978
364
import os import errno import requests import glob import os import json from tqdm import tqdm from selenium import webdriver from selenium.webdriver.firefox.options import Options
[ 11748, 28686, 198, 11748, 11454, 3919, 198, 11748, 7007, 198, 11748, 15095, 198, 11748, 28686, 198, 11748, 33918, 198, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 628, 198, 6738, 384, 11925, 1505, 1330, 3992, 26230, 198, 198, 6738, ...
3.357143
56
''' Handles calibration library and calibration of subs. ''' import os.path import numpy as np from scipy.stats import trimboth from kivy.app import App from loguru import logger from kivy.properties import BooleanProperty, DictProperty, NumericProperty from kivy.core.window import Window from jocular.table import Table from jocular.utils import make_unique_filename from jocular.component import Component from jocular.settingsmanager import Settings from jocular.image import Image, save_image, fits_in_dir date_time_format = '%d %b %y %H:%M'
[ 7061, 6, 7157, 829, 36537, 5888, 290, 36537, 286, 6352, 13, 198, 7061, 6, 198, 198, 11748, 28686, 13, 6978, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 34242, 1330, 491, 14107, 849, 198, 198, 6738, 479, 452, 88,...
3.44375
160
from test.office_model import Headquarters, Office from marshmallow import fields from pynamodb.attributes import DiscriminatorAttribute from marshmallow_pynamodb import ModelSchema
[ 6738, 1332, 13, 31810, 62, 19849, 1330, 32193, 11, 4452, 198, 198, 6738, 22397, 42725, 1330, 7032, 198, 6738, 279, 4989, 375, 65, 13, 1078, 7657, 1330, 8444, 3036, 20900, 33682, 198, 198, 6738, 22397, 42725, 62, 79, 4989, 375, 65, 133...
3.978723
47
"""Helper initialising functions """ #pylint: disable=I0011, C0321, C0301, C0103, C0325, R0902, R0913, no-member, E0213 def init_fuel_tech_p_by(all_enduses_with_fuels, nr_of_fueltypes): """Helper function to define stocks for all enduse and fueltype Parameters ---------- all_enduses_with_fuels : dict Provided fuels nr_of_fueltypes : int Nr of fueltypes Returns ------- fuel_tech_p_by : dict """ fuel_tech_p_by = {} for enduse in all_enduses_with_fuels: fuel_tech_p_by[enduse] = dict.fromkeys(range(nr_of_fueltypes), {}) return fuel_tech_p_by def dict_zero(first_level_keys): """Initialise a dictionary with one level Parameters ---------- first_level_keys : list First level data Returns ------- one_level_dict : dict dictionary """ one_level_dict = dict.fromkeys(first_level_keys, 0) # set zero as argument return one_level_dict def service_type_tech_by_p(lu_fueltypes, fuel_tech_p_by): """Initialise dict and fill with zeros Parameters ---------- lu_fueltypes : dict Look-up dictionary fuel_tech_p_by : dict Fuel fraction per technology for base year Return ------- service_fueltype_tech_by_p : dict Fraction of service per fueltype and technology for base year """ service_fueltype_tech_by_p = {} for fueltype_int in lu_fueltypes.values(): service_fueltype_tech_by_p[fueltype_int] = dict.fromkeys(fuel_tech_p_by[fueltype_int].keys(), 0) return service_fueltype_tech_by_p
[ 37811, 47429, 4238, 1710, 5499, 198, 37811, 198, 2, 79, 2645, 600, 25, 15560, 28, 40, 405, 1157, 11, 327, 3070, 2481, 11, 327, 3070, 486, 11, 327, 486, 3070, 11, 327, 3070, 1495, 11, 371, 2931, 2999, 11, 371, 2931, 1485, 11, 645, ...
2.467593
648
from .npd import parse_wellbore_name
[ 6738, 764, 77, 30094, 1330, 21136, 62, 4053, 65, 382, 62, 3672, 198 ]
2.846154
13
############################################################################### # Name: util.py # # Purpose: Misc utility functions used through out Editra # # Author: Cody Precord <cprecord@editra.org> # # Copyright: (c) 2008 Cody Precord <staff@editra.org> # # License: wxWindows License # ############################################################################### """ This file contains various helper functions and utilities that the program uses. """ __author__ = "Cody Precord <cprecord@editra.org>" __svnid__ = "$Id: util.py 72623 2012-10-06 19:33:06Z CJP $" __revision__ = "$Revision: 72623 $" #--------------------------------------------------------------------------# # Imports import os import sys import mimetypes import encodings import codecs import urllib2 import wx # Editra Libraries import ed_glob import ed_event import ed_crypt import dev_tool import syntax.syntax as syntax import syntax.synglob as synglob import ebmlib _ = wx.GetTranslation #--------------------------------------------------------------------------# #---- End FileDropTarget ----# #---- Misc Common Function Library ----# # Used for holding the primary selection on mac/msw FAKE_CLIPBOARD = None def GetClipboardText(primary=False): """Get the primary selection from the clipboard if there is one @return: str or None """ if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(True) elif primary: # Fake the primary selection on mac/msw global FAKE_CLIPBOARD return FAKE_CLIPBOARD else: pass text_obj = wx.TextDataObject() rtxt = None if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open(): if wx.TheClipboard.GetData(text_obj): rtxt = text_obj.GetText() wx.TheClipboard.Close() if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(False) return rtxt def SetClipboardText(txt, primary=False): """Copies text to the clipboard @param txt: text to put in clipboard @keyword primary: Set txt as primary selection (x11) """ # Check if using primary 
selection if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(True) elif primary: # Fake the primary selection on mac/msw global FAKE_CLIPBOARD FAKE_CLIPBOARD = txt return True else: pass data_o = wx.TextDataObject() data_o.SetText(txt) if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open(): wx.TheClipboard.SetData(data_o) wx.TheClipboard.Close() if primary and wx.Platform == '__WXGTK__': wx.TheClipboard.UsePrimarySelection(False) return True else: return False def FilterFiles(file_list): """Filters a list of paths and returns a list of paths that can probably be opened in the editor. @param file_list: list of files/folders to filter for good files in """ good = list() checker = ebmlib.FileTypeChecker() for path in file_list: if not checker.IsBinary(path): good.append(path) return good def GetFileType(fname): """Get what the type of the file is as Editra sees it in a formatted string. @param fname: file path @return: string (formatted/translated filetype) """ if os.path.isdir(fname): return _("Folder") eguess = syntax.GetTypeFromExt(fname.split('.')[-1]) if eguess == synglob.LANG_TXT and fname.split('.')[-1] == 'txt': return _("Text Document") elif eguess == synglob.LANG_TXT: mtype = mimetypes.guess_type(fname)[0] if mtype is not None: return mtype else: return _("Unknown") else: return _("%s Source File") % eguess def GetFileReader(file_name, enc='utf-8'): """Returns a file stream reader object for reading the supplied file name. It returns a file reader using the encoding (enc) which defaults to utf-8. If lookup of the reader fails on the host system it will return an ascii reader. If there is an error in creating the file reader the function will return a negative number. @param file_name: name of file to get a reader for @keyword enc: encoding to use for reading the file @return file reader, or int if error. 
""" try: file_h = file(file_name, "rb") except (IOError, OSError): dev_tool.DEBUGP("[file_reader] Failed to open file %s" % file_name) return -1 try: reader = codecs.getreader(enc)(file_h) except (LookupError, IndexError, ValueError): dev_tool.DEBUGP('[file_reader] Failed to get %s Reader' % enc) reader = file_h return reader def GetFileWriter(file_name, enc='utf-8'): """Returns a file stream writer object for reading the supplied file name. It returns a file writer in the supplied encoding if the host system supports it other wise it will return an ascii reader. The default will try and return a utf-8 reader. If there is an error in creating the file reader the function will return a negative number. @param file_name: path of file to get writer for @keyword enc: encoding to write text to file with """ try: file_h = open(file_name, "wb") except IOError: dev_tool.DEBUGP("[file_writer][err] Failed to open file %s" % file_name) return -1 try: writer = codecs.getwriter(enc)(file_h) except (LookupError, IndexError, ValueError): dev_tool.DEBUGP('[file_writer][err] Failed to get %s Writer' % enc) writer = file_h return writer # TODO: DEPRECATED - remove once callers migrate to ebmlib GetFileManagerCmd = ebmlib.GetFileManagerCmd def GetUserConfigBase(): """Get the base user configuration directory path""" cbase = ed_glob.CONFIG['CONFIG_BASE'] if cbase is None: cbase = wx.StandardPaths_Get().GetUserDataDir() if wx.Platform == '__WXGTK__': if u'.config' not in cbase and not os.path.exists(cbase): # If no existing configuration return xdg config path base, cfgdir = os.path.split(cbase) tmp_path = os.path.join(base, '.config') if os.path.exists(tmp_path): cbase = os.path.join(tmp_path, cfgdir.lstrip(u'.')) return cbase + os.sep def HasConfigDir(loc=u""): """ Checks if the user has a config directory and returns True if the config directory exists or False if it does not. 
@return: whether config dir in question exists on an expected path """ cbase = GetUserConfigBase() to_check = os.path.join(cbase, loc) return os.path.exists(to_check) def MakeConfigDir(name): """Makes a user config directory @param name: name of config directory to make in user config dir """ cbase = GetUserConfigBase() try: os.mkdir(cbase + name) except (OSError, IOError): pass def RepairConfigState(path): """Repair the state of profile path, updating and creating it it does not exist. @param path: path of profile """ if os.path.isabs(path) and os.path.exists(path): return path else: # Need to fix some stuff up CreateConfigDir() import profiler return profiler.Profile_Get("MYPROFILE") def CreateConfigDir(): """ Creates the user config directory its default sub directories and any of the default config files. @postcondition: all default configuration files/folders are created """ #---- Resolve Paths ----# config_dir = GetUserConfigBase() profile_dir = os.path.join(config_dir, u"profiles") dest_file = os.path.join(profile_dir, u"default.ppb") ext_cfg = [u"cache", u"styles", u"plugins"] #---- Create Directories ----# if not os.path.exists(config_dir): os.mkdir(config_dir) if not os.path.exists(profile_dir): os.mkdir(profile_dir) for cfg in ext_cfg: if not HasConfigDir(cfg): MakeConfigDir(cfg) import profiler profiler.TheProfile.LoadDefaults() profiler.Profile_Set("MYPROFILE", dest_file) profiler.TheProfile.Write(dest_file) profiler.UpdateProfileLoader() def ResolvConfigDir(config_dir, sys_only=False): """Checks for a user config directory and if it is not found it then resolves the absolute path of the executables directory from the relative execution path. This is then used to find the location of the specified directory as it relates to the executable directory, and returns that path as a string. 
@param config_dir: name of config directory to resolve @keyword sys_only: only get paths of system config directory or user one @note: This method is probably much more complex than it needs to be but the code has proven itself. """ # Try to get a User config directory if not sys_only: user_config = GetUserConfigBase() user_config = os.path.join(user_config, config_dir) if os.path.exists(user_config): return user_config + os.sep # Check if the system install path has already been resolved once before if ed_glob.CONFIG['INSTALL_DIR'] != u"": tmp = os.path.join(ed_glob.CONFIG['INSTALL_DIR'], config_dir) tmp = os.path.normpath(tmp) + os.sep if os.path.exists(tmp): return tmp else: del tmp # The following lines are used only when Editra is being run as a # source package. If the found path does not exist then Editra is # running as as a built package. if not hasattr(sys, 'frozen'): path = __file__ if not ebmlib.IsUnicode(path): path = path.decode(sys.getfilesystemencoding()) path = os.sep.join(path.split(os.sep)[:-2]) path = path + os.sep + config_dir + os.sep if os.path.exists(path): if not ebmlib.IsUnicode(path): path = unicode(path, sys.getfilesystemencoding()) return path # If we get here we need to do some platform dependent lookup # to find everything. 
path = sys.argv[0] if not ebmlib.IsUnicode(path): path = unicode(path, sys.getfilesystemencoding()) # If it is a link get the real path if os.path.islink(path): path = os.path.realpath(path) # Tokenize path pieces = path.split(os.sep) if wx.Platform == u'__WXMSW__': # On Windows the exe is in same dir as config directories pro_path = os.sep.join(pieces[:-1]) if os.path.isabs(pro_path): pass elif pro_path == u"": pro_path = os.getcwd() pieces = pro_path.split(os.sep) pro_path = os.sep.join(pieces[:-1]) else: pro_path = os.path.abspath(pro_path) elif wx.Platform == u'__WXMAC__': # On OS X the config directories are in the applet under Resources stdpath = wx.StandardPaths_Get() pro_path = stdpath.GetResourcesDir() pro_path = os.path.join(pro_path, config_dir) else: pro_path = os.sep.join(pieces[:-2]) if pro_path.startswith(os.sep): pass elif pro_path == u"": pro_path = os.getcwd() pieces = pro_path.split(os.sep) if pieces[-1] not in [ed_glob.PROG_NAME.lower(), ed_glob.PROG_NAME]: pro_path = os.sep.join(pieces[:-1]) else: pro_path = os.path.abspath(pro_path) if wx.Platform != u'__WXMAC__': pro_path = pro_path + os.sep + config_dir + os.sep path = os.path.normpath(pro_path) + os.sep # Make sure path is unicode if not ebmlib.IsUnicode(path): path = unicode(path, sys.getdefaultencoding()) return path def GetResources(resource): """Returns a list of resource directories from a given toplevel config dir @param resource: config directory name @return: list of resource directory that exist under the given resource path """ rec_dir = ResolvConfigDir(resource) if os.path.exists(rec_dir): rec_lst = [ rec.title() for rec in os.listdir(rec_dir) if os.path.isdir(rec_dir + rec) and rec[0] != u"." ] return rec_lst else: return -1 def GetResourceFiles(resource, trim=True, get_all=False, suffix=None, title=True): """Gets a list of resource files from a directory and trims the file extentions from the names if trim is set to True (default). 
If the get_all parameter is set to True the function will return a set of unique items by looking up both the user and system level files and combining them, the default behavior returns the user level files if they exist or the system level files if the user ones do not exist. @param resource: name of config directory to look in (i.e cache) @keyword trim: trim file extensions or not @keyword get_all: get a set of both system/user files or just user level @keyword suffix: Get files that have the specified suffix or all (default) @keyword title: Titlize the results """ rec_dir = ResolvConfigDir(resource) if get_all: rec_dir2 = ResolvConfigDir(resource, True) rec_list = list() if not os.path.exists(rec_dir): return -1 else: recs = os.listdir(rec_dir) if get_all and os.path.exists(rec_dir2): recs.extend(os.listdir(rec_dir2)) for rec in recs: if os.path.isfile(rec_dir + rec) or \ (get_all and os.path.isfile(rec_dir2 + rec)): # If a suffix was specified only keep files that match if suffix is not None: if not rec.endswith(suffix): continue # Trim the last part of an extension if one exists if trim: rec = ".".join(rec.split(u".")[:-1]).strip() # Make the resource name a title if requested if title and len(rec): rec = rec[0].upper() + rec[1:] if len(rec): rec_list.append(rec) rec_list.sort() return list(set(rec_list)) def GetAllEncodings(): """Get all encodings found on the system @return: list of strings """ elist = encodings.aliases.aliases.values() elist = list(set(elist)) elist.sort() elist = [ enc for enc in elist if not enc.endswith('codec') ] return elist def Log(msg, *args): """Push the message to the apps log @param msg: message string to log @param args: optional positional arguments to use as a printf formatting to the message. 
""" try: wx.GetApp().GetLog()(msg, args) except: pass def GetProxyOpener(proxy_set): """Get a urlopener for use with a proxy @param proxy_set: proxy settings to use """ Log("[util][info] Making proxy opener with %s" % str(proxy_set)) proxy_info = dict(proxy_set) auth_str = "%(uname)s:%(passwd)s@%(url)s" url = proxy_info['url'] if url.startswith('http://'): auth_str = "http://" + auth_str proxy_info['url'] = url.replace('http://', '') else: pass if len(proxy_info.get('port', '')): auth_str = auth_str + ":%(port)s" proxy_info['passwd'] = ed_crypt.Decrypt(proxy_info['passwd'], proxy_info['pid']) Log("[util][info] Formatted proxy request: %s" % \ (auth_str.replace('%(passwd)s', '****') % proxy_info)) proxy = urllib2.ProxyHandler({"http" : auth_str % proxy_info}) opener = urllib2.build_opener(proxy, urllib2.HTTPHandler) return opener #---- GUI helper functions ----# def SetWindowIcon(window): """Sets the given windows icon to be the programs application icon. @param window: window to set app icon for """ try: if wx.Platform == "__WXMSW__": ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.ico" window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_ICO)) else: ed_icon = ed_glob.CONFIG['SYSPIX_DIR'] + u"editra.png" window.SetIcon(wx.Icon(ed_icon, wx.BITMAP_TYPE_PNG)) finally: pass #-----------------------------------------------------------------------------#
[ 29113, 29113, 7804, 4242, 21017, 198, 2, 6530, 25, 7736, 13, 9078, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 2...
2.405929
6,915
from typing import Tuple import torch
[ 6738, 19720, 1330, 309, 29291, 198, 198, 11748, 28034, 628 ]
4
10
import mtoa.ui.ae.templates as templates import pymel.core as pm import maya.cmds as cmds import mtoa.ui.ae.utils as aeUtils templates.registerTranslatorUI(aiPotaTemplate, "camera", "pota")
[ 11748, 285, 1462, 64, 13, 9019, 13, 3609, 13, 11498, 17041, 355, 24019, 198, 11748, 279, 4948, 417, 13, 7295, 355, 9114, 198, 11748, 743, 64, 13, 28758, 82, 355, 23991, 82, 198, 11748, 285, 1462, 64, 13, 9019, 13, 3609, 13, 26791, ...
2.704225
71
import unittest from app import create_app import json from tests.basetest import BaseTest
[ 11748, 555, 715, 395, 198, 6738, 598, 1330, 2251, 62, 1324, 198, 11748, 33918, 198, 6738, 5254, 13, 12093, 316, 395, 1330, 7308, 14402, 628 ]
3.68
25
# Generated by Django 2.1.4 on 2019-01-24 12:22 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 16, 13, 19, 319, 13130, 12, 486, 12, 1731, 1105, 25, 1828, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Implementation of Shell Sort algorithm in Python l = [4, 1, 2, 5, 3] print("Initial list: " + str(l)) shellSort(l) print("Sorted list: " + str(l))
[ 2, 46333, 286, 17537, 33947, 11862, 287, 11361, 198, 220, 220, 220, 220, 198, 75, 796, 685, 19, 11, 352, 11, 362, 11, 642, 11, 513, 60, 198, 4798, 7203, 24243, 1351, 25, 366, 1343, 965, 7, 75, 4008, 198, 29149, 42758, 7, 75, 8, ...
2.610169
59
from django.db import models # Create your models here.
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 198, 2, 13610, 534, 4981, 994, 13 ]
3.733333
15
import base64 import json from webhook import post_webhook from datetime import datetime def hello_pubsub(event, context): """Triggered from a message on a Cloud Pub/Sub topic. Args: event (dict): Event payload. context (google.cloud.functions.Context): Metadata for the event. """ pubsub_message = base64.b64decode(event['data']).decode('utf-8') #post_webhook(message=f'{pubsub_message}', timestamp='now', status='status', title='title') message = json.loads(pubsub_message) message = message['incident'] #post_webhook(message, timestamp, status, title='Monitoring'): null = None status = 'Status' log_message = '' title = 'Monitoring Alert' status = message['state'].title() timestamp = datetime.utcfromtimestamp(message["started_at"]).isoformat() log_message += f'Started: {timestamp} UTC' color = 16772608 if message['ended_at'] is not None: timestamp = datetime.utcfromtimestamp(message["ended_at"]).isoformat() log_message += f'\nEnded: {timestamp} UTC' color = 65297 title = message['policy_name'] log_message += f'\n{message["summary"]}' log_message += f'\n[Monitor Event]({message["url"]})' post_webhook(message=log_message, timestamp=timestamp, status=status, title=title, color=color)
[ 11748, 2779, 2414, 198, 11748, 33918, 198, 6738, 3992, 25480, 1330, 1281, 62, 12384, 25480, 198, 6738, 4818, 8079, 1330, 4818, 8079, 628, 198, 4299, 23748, 62, 12984, 7266, 7, 15596, 11, 4732, 2599, 198, 220, 220, 220, 37227, 2898, 328,...
2.720165
486
#!/usr/bin/env waf ''' This is a wafit tool for using zyre ''' import util
[ 2, 48443, 14629, 14, 8800, 14, 24330, 266, 1878, 198, 7061, 6, 198, 1212, 318, 257, 266, 1878, 270, 2891, 329, 1262, 1976, 35759, 198, 7061, 6, 198, 11748, 7736, 198, 220, 220, 220, 220, 628 ]
2.25
36
import argparse, os, fnmatch, json, joblib import pandas as pd from sklearn.mixture import GaussianMixture from sklearn.metrics import adjusted_rand_score # Reference paper - https://arxiv.org/abs/1906.11373 # "Unsupervised Methods for Identifying Pass Coverage Among Defensive Backs with NFL Player Tracking Data" STATS_PREFIX = "week" SKIP_COLS_KEY = "global_skip_cols" ONLY_CLOSEST_KEY = "only_closest" CLOSE_TO_BR_KEY = "close_to_br" SELECT_GROUP_KEY = "select_group_by" GROUP_BY = ["gameId", "playId"] MAX_COL = "closest_frames" main()
[ 11748, 1822, 29572, 11, 28686, 11, 24714, 15699, 11, 33918, 11, 1693, 8019, 198, 11748, 19798, 292, 355, 279, 67, 198, 6738, 1341, 35720, 13, 76, 9602, 1330, 12822, 31562, 44, 9602, 198, 6738, 1341, 35720, 13, 4164, 10466, 1330, 12328, ...
2.804124
194
from style import blue, none n = input(f'Type {blue}something{none}: ') print(f'{blue}{n.isnumeric()}')
[ 6738, 3918, 1330, 4171, 11, 4844, 198, 198, 77, 796, 5128, 7, 69, 6, 6030, 1391, 17585, 92, 18927, 90, 23108, 38362, 705, 8, 198, 198, 4798, 7, 69, 6, 90, 17585, 18477, 77, 13, 271, 77, 39223, 3419, 92, 11537, 198 ]
2.52381
42
"""Platform for the Panasonic Comfort Cloud.""" from datetime import timedelta import logging from typing import Any, Dict import asyncio from async_timeout import timeout import voluptuous as vol from homeassistant.core import HomeAssistant from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry from homeassistant.const import ( CONF_USERNAME, CONF_PASSWORD) from homeassistant.exceptions import ConfigEntryNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import HomeAssistantType from homeassistant.helpers import discovery from .const import TIMEOUT from .panasonic import PanasonicApiDevice _LOGGER = logging.getLogger(__name__) DOMAIN = "panasonic_cc" CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) PANASONIC_DEVICES = "panasonic_devices" COMPONENT_TYPES = ["climate", "sensor", "switch"]
[ 37811, 37148, 329, 262, 46049, 45769, 10130, 526, 15931, 198, 6738, 4818, 8079, 1330, 28805, 12514, 198, 11748, 18931, 198, 6738, 19720, 1330, 4377, 11, 360, 713, 198, 198, 11748, 30351, 952, 198, 6738, 30351, 62, 48678, 1330, 26827, 198,...
2.707809
397
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from unittest.mock import patch, MagicMock from bossingest.ingest_manager import IngestManager from bossingest.models import IngestJob from bossingest.test.setup import SetupTests from bosscore.test.setup_db import SetupTestDB from bosscore.error import ErrorCodes from bosscore.lookup import LookUpKey import bossutils.aws from django.contrib.auth.models import User from ndingest.ndqueue.uploadqueue import UploadQueue from rest_framework.test import APITestCase
[ 2, 15069, 1584, 383, 25824, 21183, 2059, 27684, 23123, 18643, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13...
3.799308
289
import json import logging import logging.config import platform import re import traceback from io import StringIO import pygments from devtools import pformat from devtools.ansi import isatty, sformat from pygments.formatters import Terminal256Formatter from pygments.lexers import Python3TracebackLexer rs_dft_logger = logging.getLogger('sdev.server.dft') rs_aux_logger = logging.getLogger('sdev.server.aux') tools_logger = logging.getLogger('sdev.tools') main_logger = logging.getLogger('sdev.main') LOG_FORMATS = { logging.DEBUG: sformat.dim, logging.INFO: sformat.green, logging.WARN: sformat.yellow, } pyg_lexer = Python3TracebackLexer() pyg_formatter = Terminal256Formatter(style='vim') split_log = re.compile(r'^(\[.*?\])') def log_config(verbose: bool) -> dict: """ Setup default config. for dictConfig. :param verbose: level: DEBUG if True, INFO if False :return: dict suitable for ``logging.config.dictConfig`` """ log_level = 'DEBUG' if verbose else 'INFO' return { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'default': { 'format': '[%(asctime)s] %(message)s', 'datefmt': '%H:%M:%S', 'class': 'sanic_devtools.log.DefaultFormatter', }, 'no_ts': { 'format': '%(message)s', 'class': 'sanic_devtools.log.DefaultFormatter', }, 'sanic': { 'format': '%(message)s', 'class': 'sanic_devtools.log.AccessFormatter', }, }, 'handlers': { 'default': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'default' }, 'no_ts': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'no_ts' }, 'sanic_access': { 'level': log_level, 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'sanic' }, 'sanic_server': { 'class': 'sanic_devtools.log.HighlightStreamHandler', 'formatter': 'sanic' }, }, 'loggers': { rs_dft_logger.name: { 'handlers': ['default'], 'level': log_level, }, rs_aux_logger.name: { 'handlers': ['default'], 'level': log_level, }, tools_logger.name: { 'handlers': ['default'], 
'level': log_level, }, main_logger.name: { 'handlers': ['no_ts'], 'level': log_level, }, 'sanic.access': { 'handlers': ['sanic_access'], 'level': log_level, 'propagate': False, }, 'sanic.server': { 'handlers': ['sanic_server'], 'level': log_level, }, }, } def setup_logging(verbose): config = log_config(verbose) logging.config.dictConfig(config)
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 18931, 13, 11250, 198, 11748, 3859, 198, 11748, 302, 198, 11748, 12854, 1891, 198, 6738, 33245, 1330, 10903, 9399, 198, 198, 11748, 12972, 11726, 198, 6738, 1614, 31391, 1330, 279, 18982, 198, ...
1.888249
1,736
import calendar from typing import Union import dateutil.parser from rest_framework import status from rest_framework.response import Response from django.utils.cache import get_conditional_response from django.utils.http import http_date from ..models import Resource, ResourceVersion FhirResource = Union[Resource, ResourceVersion]
[ 11748, 11845, 198, 6738, 19720, 1330, 4479, 198, 198, 11748, 3128, 22602, 13, 48610, 198, 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 26209, 1330, 18261, 198, 198, 6738, 42625, 14208, 13, 26791, 13, 23870, 1330, 6...
4.084337
83
''' n 01 2 : : : [2,0,2,1,1,0] : [0,0,1,1,2,2] 01 2 012 LeetCode https://leetcode-cn.com/problems/sort-colors '''
[ 7061, 6, 198, 77, 220, 628, 5534, 220, 362, 220, 198, 198, 25, 628, 198, 25, 198, 198, 25, 685, 17, 11, 15, 11, 17, 11, 16, 11, 16, 11, 15, 60, 198, 25, 685, 15, 11, 15, 11, 16, 11, 16, 11, 17, 11, 17, 60, 628, 198, 19...
1.585366
82
from .solution import lcaDeepestLeaves from ..utils import TreeNode print('Enter tree, e.g. [2,3,1,3,1,null,1]:', end=' ') nodes = [int(node) if node != 'null' else None for node in input().strip().split(',')] root = TreeNode.fromList(nodes) lowestCommonAncestor = lcaDeepestLeaves(root) print(f'The lowest common ancestor is: {lowestCommonAncestor.toList()}')
[ 6738, 764, 82, 2122, 1330, 300, 6888, 29744, 395, 3123, 3080, 198, 6738, 11485, 26791, 1330, 12200, 19667, 198, 198, 4798, 10786, 17469, 5509, 11, 304, 13, 70, 13, 685, 17, 11, 18, 11, 16, 11, 18, 11, 16, 11, 8423, 11, 16, 5974, ...
2.729323
133
#!/usr/bin/env python import json from support import parse_states import sys import xapian if len(sys.argv) != 3: print("Usage: %s DATAPATH DBPATH" % sys.argv[0]) sys.exit(1) index(datapath = sys.argv[1], dbpath = sys.argv[2])
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 33918, 198, 6738, 1104, 1330, 21136, 62, 27219, 198, 11748, 25064, 198, 11748, 2124, 499, 666, 198, 198, 361, 18896, 7, 17597, 13, 853, 85, 8, 14512, 513, 25, 198, 220, 2...
2.366337
101
import constants
[ 11748, 38491, 628, 628, 198 ]
4.2
5
#!/usr/bin/env python2.7 # Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates the appropriate build.json data for all the end2end tests.""" import collections import yaml TestOptions = collections.namedtuple('TestOptions', 'flaky cpu_cost') default_test_options = TestOptions(False, 1.0) # maps test names to options BAD_CLIENT_TESTS = { 'cert': default_test_options._replace(cpu_cost=0.1), # Disabling this test because it does not link correctly as written # 'alpn': default_test_options._replace(cpu_cost=0.1), } if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 13, 22, 198, 2, 15069, 1853, 308, 49, 5662, 7035, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 7...
3.33033
333
# -*- coding: utf-8 -*- from six.moves.urllib.parse import urlencode from django.contrib import messages from django.contrib.messages.api import get_messages from django.contrib.messages.middleware import MessageMiddleware from django.contrib.messages.storage.base import Message from django.contrib.sessions.middleware import SessionMiddleware from django.test import RequestFactory, TestCase, override_settings from allauth.socialaccount.providers import registry from allauth_cas.views import AuthAction from .example.provider import ExampleCASProvider
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 2237, 13, 76, 5241, 13, 333, 297, 571, 13, 29572, 1330, 2956, 11925, 8189, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 6218, 198, 6738, 42625, 14208, 13, ...
3.484472
161
from ..remote import RemoteModel from infoblox_netmri.utils.utils import check_api_availability
[ 6738, 11485, 47960, 1330, 21520, 17633, 198, 6738, 1167, 45292, 1140, 62, 3262, 76, 380, 13, 26791, 13, 26791, 1330, 2198, 62, 15042, 62, 47274, 628, 220, 220, 220, 220, 198, 220, 220, 220, 220 ]
3.028571
35
import logging import time from selenium.common import exceptions from selenium.webdriver.remote.webdriver import WebDriver from selenium.webdriver.common import action_chains from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium_utils import exception logger = logging.getLogger(__name__) def hover_over_element(driver: WebDriver, element): """Moves the mouse pointer to the element and hovers""" action_chains.ActionChains(driver).move_to_element(element).perform() def wait_until_stops_moving(element, wait_seconds=1): """Waits until the element stops moving Args: selenium.webdriver.remote.webelement.WebElement """ prev_location = None timer_begin = time.time() while prev_location != element.location: prev_location = element.location time.sleep(0.1) if time.time() - timer_begin > wait_seconds: raise exception.ElementMovingTimeout def get_when_visible(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.presence_of_element_located(locator)) def wait_until_condition(driver: WebDriver, condition, wait_seconds=1): """Wait until given expected condition is met""" WebDriverWait( driver, wait_seconds).until(condition) def wait_until_not_present(driver: WebDriver, locator): """Wait until no element(-s) for locator given are present in the DOM.""" wait_until_condition(driver, lambda d: len(d.find_elements(*locator)) == 0) def get_when_all_visible(driver: WebDriver, locator, wait_seconds=1): """Return WebElements by locator when all of them are visible. 
Args: locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElements """ return WebDriverWait( driver, wait_seconds) \ .until(EC.visibility_of_any_elements_located(locator)) def get_when_clickable(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.element_to_be_clickable(locator)) def get_when_invisible(driver: WebDriver, locator, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) Returns: selenium.webdriver.remote.webelement.WebElement """ return WebDriverWait( driver, wait_seconds) \ .until(EC.invisibility_of_element_located(locator)) def wait_for_element_text(driver: WebDriver, locator, text, wait_seconds=1): """ Args: driver (base.CustomDriver) locator (tuple) text (str) """ return WebDriverWait( driver, wait_seconds) \ .until(EC.text_to_be_present_in_element(locator, text)) def is_value_in_attr(element, attr="class", value="active"): """Checks if the attribute value is present for given attribute Args: element (selenium.webdriver.remote.webelement.WebElement) attr (basestring): attribute name e.g. 
"class" value (basestring): value in the class attribute that indicates the element is now active/opened Returns: bool """ attributes = element.get_attribute(attr) return value in attributes.split() def click_on_staleable_element(driver: WebDriver, el_locator, wait_seconds=1): """Clicks an element that can be modified between the time we find it and when we click on it""" time_start = time.time() while time.time() - time_start < wait_seconds: try: driver.find_element(*el_locator).click() break except exceptions.StaleElementReferenceException as e: logger.error(str(e)) time.sleep(0.1) else: raise exception.ElementNotFound(el_locator) def scroll_into_view(driver: WebDriver, element, offset_pixels=0): """Scrolls page to element using JS""" driver.execute_script("return arguments[0].scrollIntoView();", element) # compensate for the header driver.execute_script("window.scrollBy(0, -{});".format(offset_pixels)) return element
[ 11748, 18931, 198, 11748, 640, 198, 198, 6738, 384, 11925, 1505, 13, 11321, 1330, 13269, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 47960, 13, 12384, 26230, 1330, 5313, 32103, 198, 6738, 384, 11925, 1505, 13, 12384, 26230, 13, 1...
2.586366
1,731
# -*- coding: utf-8 -*- """Various functions for ib applications.""" from modules.iib_api import get_status def get_metric_name(metric_label): """Returns pushgateway formatted metric name.""" return 'ib_application_{0}'.format(metric_label) def get_metric_annotation(): """Returns dictionary with annotations 'HELP' and 'TYPE' for metrics.""" annotations = { 'status': '# HELP {0} Current status of IB application.\n\ # TYPE {0} gauge\n'.format(get_metric_name('status'))} return annotations def format_applications(applications, broker_name): """Returns string with all metrics for all applications which ready to push to pushgateway.""" metrics_annotation = get_metric_annotation() app_metric_data = str() for app in applications: app_list = app.split() egname, app_name, status = app_list[6], app_list[2], app_list[8].replace(".","") template_string = 'egname="{0}", brokername="{1}", appname="{2}"'.format( egname.replace("'", ""), broker_name, app_name.replace("'", "")) app_metric = '{0}{{{1}}} {2}\n'.format( get_metric_name(metric_label='status'), template_string, get_status(status=status)) app_metric_data += app_metric app_metric_data = '{0}{1}'.format( metrics_annotation['status'], app_metric_data) return app_metric_data
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 40009, 5499, 329, 24283, 5479, 526, 15931, 198, 6738, 13103, 13, 72, 571, 62, 15042, 1330, 651, 62, 13376, 628, 198, 4299, 651, 62, 4164, 1173, 62, 3672, 7, 416...
2.454231
579
import os import requests import psycopg2 import db_lib as db from app import send_message, log from apscheduler.schedulers.blocking import BlockingScheduler DATABASE_URL = os.environ['DATABASE_URL'] conn = psycopg2.connect(DATABASE_URL, sslmode='require') sched = BlockingScheduler() sched.add_job(kitchen_reminder, 'cron', hour=0, minute=0) sched.add_job(rent_reminder, 'cron', day=1) sched.start()
[ 11748, 28686, 198, 11748, 7007, 198, 11748, 17331, 22163, 70, 17, 198, 11748, 20613, 62, 8019, 355, 20613, 198, 198, 6738, 598, 1330, 3758, 62, 20500, 11, 2604, 198, 6738, 257, 862, 1740, 18173, 13, 1416, 704, 377, 364, 13, 41938, 133...
2.589744
156
#!/usr/bin/env python import rospy import smach import gate import pole if __name__ == '__main__': rospy.init_node('hippo_sm') sm = SubStates() outcome = sm.tasks.execute() rospy.spin()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 686, 2777, 88, 198, 11748, 895, 620, 198, 198, 11748, 8946, 198, 11748, 16825, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 220, ...
2.225806
93
from networkapi.test.test_case import NetworkApiTestCase from networkapi.plugins.SDN.ODL.flows.acl import AclFlowBuilder
[ 198, 6738, 3127, 15042, 13, 9288, 13, 9288, 62, 7442, 1330, 7311, 32, 14415, 14402, 20448, 198, 6738, 3127, 15042, 13, 37390, 13, 10305, 45, 13, 3727, 43, 13, 44041, 13, 37779, 1330, 317, 565, 37535, 32875, 628 ]
3.236842
38
from picamera.array import PiRGBArray from picamera import PiCamera import cv2 import numpy as np import time from fractions import Fraction from PIL import Image #cap = cv2.VideoCapture(0) camera = PiCamera() camera.resolution = (426, 240) camera.framerate = 24 camera.exposure_mode = 'off' camera.exposure_compensation = -3 camera.drc_strength = 'off' camera.still_stats = False camera.awb_mode = 'off' camera.awb_gains = (Fraction(25, 16), Fraction(25,16)) rawCapture = PiRGBArray(camera, size=(426, 240)) # allow the camera to warmup time.sleep(0.1) # lower = [135, 130, 50] # upper = [180, 200, 255] # lower = [160, 100, 100] # upper = [180, 255, 255] # lower2 = [0, 100, 100] # upper2 = [10, 255, 255] #lower1 = [0, 50, 50] #upper1 = [5, 255, 255] out = cv2.VideoWriter(str(time.time()) + ".avi",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (426, 240)) # lower = np.array(lower, dtype = "uint8") # upper = np.array(upper, dtype = "uint8") # lower2 = np.array(lower2, dtype = "uint8") # upper2 = np.array(upper2, dtype = "uint8") #lower1 = np.array(lower1, dtype = "uint8") #upper1 = np.array(upper1, dtype = "uint8") for img in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): #print(camera.awb_gains) #r, frame = cap.read() for i in range(5): # Clears the 5 frame buffer frame = img.array height, width = frame.shape[:2] centre = (int(width/2), int(height/2)) #frame = cv2.GaussianBlur(frame, (9, 9), 0) #frame = cv2.medianBlur(frame,3) #frame = cv2.GaussianBlur(frame, (9, 9), 0) #mask = cv2.inRange(frame, lower, upper) #mask2 = cv2.inRange(frame, lower2, upper2) #mask2 = cv2.inRange(frame, lower1, upper1) #mask = mask1 + mask2 #img_rec_red = cv2.bitwise_and(frame, frame, mask = mask) #img_rec_redo = cv2.bitwise_and(frame, frame, mask = mask2) #cv2.imshow("pre or1", img_rec_red) #cv2.imshow("pre or2", img_rec_redo) #img_rec_red = cv2.bitwise_or(img_rec_red, img_rec_redo) b_channel = np.array(frame[:,:,0]).astype('float') g_channel = 
np.array(frame[:,:,1]).astype('float') r_channel = np.array(frame[:,:,2]).astype('float') # #cv2.imshow('b_chan', b_channel) # # cv2.imshow('g_chan', g_channel) # # cv2.imshow('r_chan', r_channel) bgr_channel = np.add((np.add(b_channel, g_channel)), r_channel) img_rec_red2 = np.subtract(r_channel,((b_channel + g_channel)/ 2)) #img_rec_red2 = np.divide(r_channel, 255) img_rec_red2 = np.divide(img_rec_red2,255) #img_rec_red2 = np.square(img_rec_red2) img_rec_red2[img_rec_red2 < 0.3] = 0 img_rec_red2 = img_rec_red2 * 255 img_rec_red2 = np.floor(img_rec_red2).astype('uint8') #img_rec_red = cv2.cvtColor(img_rec_red, cv2.COLOR_BGR2GRAY) #cv2.imshow('recred2', img_rec_red2) ret, th = cv2.threshold(img_rec_red2,10,255,cv2.THRESH_BINARY) #ret, th = cv2.threshold(r_channel.astype('uint8'),110,255,cv2.THRESH_BINARY) #th = cv2.bitwise_not(th, th) kernel = np.ones((5,5),np.uint8) #th = cv2.erode(th, kernel) th = cv2.dilate(th, kernel) th = cv2.GaussianBlur(th, (5,5), 0) try: M = cv2.moments(th) # calculate x,y coordinate of center cX = int(M["m10"] / M["m00"]) cY = int(M["m01"] / M["m00"]) # put text and highlight the center cv2.circle(frame, (cX, cY), 5, (255, 255, 255), -1) #cv2.putText(frame, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) cv2.line(frame, centre, (cX, cY), (255,0,0), 2) dX = cX - centre[0] dY = centre[1] - cY cv2.putText(frame, ("(" + str(dX) + ", " + str(dY) + " )"), (centre[0] - 20, centre[1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2) print('Velocities: ' + str(dX) + "," + str(dY)) except: print("No centre detected") #kernel2 = np.ones((15,15),np.uint8) #eroded_th = cv2.erode(dilated_th, kernel2) #blurred_th = cv2.GaussianBlur(eroded_th.copy(), (9, 9), 0) #eroded_th = cv2.bitwise_not(eroded_th,eroded_th) #dilated_th = cv2.bitwise_not(dilated_th, dilated_th) # circles = cv2.HoughCircles(th,cv2.HOUGH_GRADIENT, 1,1000, # param1=40,param2=23,minRadius=20,maxRadius=0) # try: # circles = 
np.uint16(np.around(circles)) # for i in circles[0,:]: # # draw the outer circle # cv2.circle(frame,(i[0],i[1]),i[2],(0,255,0),2) # # draw the center of the circle # cv2.circle(frame,(i[0],i[1]),2,(0,0,255),3) # except: # pass cv2.imshow('original', frame) #cv2.imshow('rec_red',img_rec_red) cv2.imshow('detected circles',th) out.write(frame) k = cv2.waitKey(1) rawCapture.truncate(0) if k == 0xFF & ord("q"): break #cv2.destroyAllWindows() #cap.release() out.release()
[ 198, 198, 6738, 8301, 18144, 13, 18747, 1330, 13993, 36982, 19182, 198, 6738, 8301, 18144, 1330, 13993, 35632, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 640, 198, 6738, 49876, 1330, 376, 7861, 198, 6738, 3...
2.038795
2,423
# -*- coding: utf-8 -*- # Module: default # Author: asciidisco # Created on: 24.07.2017 # License: MIT https://goo.gl/WA1kby """Setup""" from __future__ import unicode_literals from os.path import abspath, dirname, join from re import search from sys import exit, version, version_info from setuptools import find_packages, setup REQUIRED_PYTHON_VERSION = (2, 7) PACKAGES = find_packages() INSTALL_DEPENDENCIES = [] SETUP_DEPENDENCIES = [] TEST_DEPENDENCIES = [ 'nose', 'Kodistubs', 'httpretty', 'mock', ] EXTRA_DEPENDENCIES = { 'dev': [ 'nose', 'flake8', 'codeclimate-test-reporter', 'pylint', 'mccabe', 'pycodestyle', 'pyflakes', 'Kodistubs', 'httpretty', 'mock', 'requests', 'beautifulsoup4', 'pyDes', 'radon', 'Sphinx', 'sphinx_rtd_theme', 'm2r', 'kodi-release-helper', 'dennis', 'blessings', 'demjson', 'restructuredtext_lint', 'yamllint', ] } def get_addon_data(): """Loads the Kodi plugin data from addon.xml""" root_dir = dirname(abspath(__file__)) pathname = join(root_dir, 'addon.xml') with open(pathname, 'rb') as addon_xml: addon_xml_contents = addon_xml.read() _id = search( r'(?<!xml )id="(.+?)"', addon_xml_contents).group(1) author = search( r'(?<!xml )provider-name="(.+?)"', addon_xml_contents).group(1) name = search( r'(?<!xml )name="(.+?)"', addon_xml_contents).group(1) version = search( r'(?<!xml )version="(.+?)"', addon_xml_contents).group(1) desc = search( r'(?<!xml )description lang="en_GB">(.+?)<', addon_xml_contents).group(1) email = search( r'(?<!xml )email>(.+?)<', addon_xml_contents).group(1) source = search( r'(?<!xml )email>(.+?)<', addon_xml_contents).group(1) return { 'id': _id, 'author': author, 'name': name, 'version': version, 'desc': desc, 'email': email, 'source': source, } if version_info < REQUIRED_PYTHON_VERSION: exit('Python >= 2.7 is required. 
Your version:\n{0}'.format(version)) if __name__ == '__main__': ADDON_DATA = get_addon_data() setup( name=ADDON_DATA.get('name'), version=ADDON_DATA.get('version'), author=ADDON_DATA.get('author'), author_email=ADDON_DATA.get('email'), description=ADDON_DATA.get('desc'), packages=PACKAGES, include_package_data=True, install_requires=INSTALL_DEPENDENCIES, setup_requires=SETUP_DEPENDENCIES, tests_require=TEST_DEPENDENCIES, extras_require=EXTRA_DEPENDENCIES, test_suite='nose.collector', )
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 19937, 25, 4277, 198, 2, 6434, 25, 355, 979, 312, 4861, 198, 2, 15622, 319, 25, 1987, 13, 2998, 13, 5539, 198, 2, 13789, 25, 17168, 3740, 1378, 42469, 13, 4743, ...
1.932855
1,534
import numpy as np import os, sys os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.keras.models import Model import tensorflow as tf from PIL import Image from utils_rtp import ProMP if __name__ == "__main__": ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions" PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1" image = np.load( "/home/arshad/catkin_ws/image_xy_rtp.npy" ) predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL) traj = predictor.predict(image) np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj) print ("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 11, 25064, 220, 198, 418, 13, 268, 2268, 17816, 10234, 62, 8697, 47, 62, 23678, 62, 25294, 62, 2538, 18697, 20520, 796, 705, 18, 6, 220, 198, 6738, 11192, 273, 11125, 13, 6122, 292, ...
2.539185
319
"""Wrapper around CTGAN model.""" from sdv.tabular.base import BaseTabularModel
[ 37811, 36918, 2848, 1088, 16356, 45028, 2746, 526, 15931, 198, 198, 6738, 45647, 85, 13, 8658, 934, 13, 8692, 1330, 7308, 33349, 934, 17633, 628 ]
3.28
25
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os import logging import logging.config if (sys.version_info > (3, 0)): # Python 3 code in this block import configparser else: # Python 2 code in this block import ConfigParser as configparser DEFAULT_CONFIG = ".cointrader.ini"
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 18931, 198, 11748, 18931, 13, 11250, 198, 361, 357, 17597, 13, 9641, 62, 1...
2.810811
111
"""Classes implementing the descriptor protocol.""" __all__ = ("classproperty",)
[ 37811, 9487, 274, 15427, 262, 43087, 8435, 526, 15931, 198, 198, 834, 439, 834, 796, 5855, 4871, 26745, 1600, 8, 628 ]
3.952381
21
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import torch import omegaconf import mbrl.env.continuous_dubins as dubins_env import mbrl.env.reward_fns as reward_fns import mbrl.env.termination_fns as termination_fns import mbrl.models as models import mbrl.planning as planning import mbrl.util.common as common_util import mbrl.util as util if __name__ == "__main__": mpl.rcParams.update({"font.size": 16}) device = 'cuda:0' if torch.cuda.is_available() else 'cpu' noisy = False seed = 0 env = dubins_env.ContinuousDubinsEnv(noisy) env.seed(seed) rng = np.random.default_rng(seed=seed) generator = torch.Generator(device=device) generator.manual_seed(seed) obs_shape = env.observation_space.shape act_shape = env.action_space.shape # This functions allows the model to evaluate the true rewards given an observation reward_fn = reward_fns.continuous_dubins # This function allows the model to know if an observation should make the episode end term_fn = termination_fns.continuous_dubins trial_length = 200 num_trials = 10 ensemble_size = 5 # Everything with "???" indicates an option with a missing value. 
# Our utility functions will fill in these details using the # environment information cfg_dict = { # dynamics model configuration "dynamics_model": { "model": { "_target_": "mbrl.models.GaussianMLP", "device": device, "num_layers": 3, "ensemble_size": ensemble_size, "hid_size": 200, "use_silu": True, "in_size": "???", "out_size": "???", "deterministic": False, "propagation_method": "fixed_model" } }, # options for training the dynamics model "algorithm": { "learned_rewards": False, "target_is_delta": True, "normalize": True, }, # these are experiment specific options "overrides": { "trial_length": trial_length, "num_steps": num_trials * trial_length, "model_batch_size": 32, "validation_ratio": 0.05 } } cfg = omegaconf.OmegaConf.create(cfg_dict) # Create a 1-D dynamics model for this environment dynamics_model = common_util.create_one_dim_tr_model(cfg, obs_shape, act_shape) # Create a gym-like environment to encapsulate the model model_env = models.ModelEnv(env, dynamics_model, term_fn, reward_fn, generator=generator) replay_buffer = common_util.create_replay_buffer(cfg, obs_shape, act_shape, rng=rng) common_util.rollout_agent_trajectories( env, trial_length, # initial exploration steps planning.RandomAgent(env), {}, # keyword arguments to pass to agent.act() replay_buffer=replay_buffer, trial_length=trial_length ) print("# samples stored", replay_buffer.num_stored) agent_cfg = omegaconf.OmegaConf.create({ # this class evaluates many trajectories and picks the best one "_target_": "mbrl.planning.TrajectoryOptimizerAgent", "planning_horizon": 15, "replan_freq": 1, "verbose": False, "action_lb": "???", "action_ub": "???", # this is the optimizer to generate and choose a trajectory "optimizer_cfg": { "_target_": "mbrl.planning.CEMOptimizer", "num_iterations": 5, "elite_ratio": 0.1, "population_size": 500, "alpha": 0.1, "device": device, "lower_bound": "???", "upper_bound": "???", "return_mean_elites": True } }) agent = planning.create_trajectory_optim_agent_for_model( 
model_env, agent_cfg, num_particles=20 ) train_losses = [] val_scores = [] # Create a trainer for the model model_trainer = models.ModelTrainer(dynamics_model, optim_lr=1e-3, weight_decay=5e-5) # Create visualization objects fig, axs = plt.subplots(1, 1, figsize=(14, 3.75)) ax_text = axs.text(300, 50, "") # Main PETS loop all_rewards = [0] for trial in range(num_trials): obs = env.reset() agent.reset() done = False total_reward = 0.0 steps_trial = 0 while not done: # --------------- Model Training ----------------- if steps_trial == 0: dynamics_model.update_normalizer(replay_buffer.get_all()) # update normalizer stats dataset_train, dataset_val = replay_buffer.get_iterators( batch_size=cfg.overrides.model_batch_size, val_ratio=cfg.overrides.validation_ratio, train_ensemble=True, ensemble_size=ensemble_size, shuffle_each_epoch=True, bootstrap_permutes=False, # build bootstrap dataset using sampling with replacement ) model_trainer.train( dataset_train, dataset_val=dataset_val, num_epochs=50, patience=50, callback=train_callback) # --- Doing env step using the agent and adding to model dataset --- next_obs, reward, done, _ = common_util.step_env_and_add_to_buffer(env, obs, agent, {}, replay_buffer) obs = next_obs total_reward += reward steps_trial += 1 if steps_trial == trial_length: break all_rewards.append(total_reward) env.save_trajectory(f"dubins_{trial}.png") print(all_rewards) plot_graph(axs, None, ax_text, trial, steps_trial, all_rewards, force_update=True) # fig.savefig("dubins.png")
[ 11748, 2603, 29487, 8019, 355, 285, 489, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 267, 28917, 7807, 69, 198, 198, 11748, 285, 1671, 75, 13, 24330, ...
2.200075
2,664
# from pixiv_web_crawler import Getters
[ 2, 422, 279, 844, 452, 62, 12384, 62, 66, 39464, 1330, 3497, 1010 ]
3
13
# # # # : # : MIT License # from time import sleep import requests def get_htmls(urls): """ URL- HTML :param urls: URL- :type urls: list :return: HTML- """ htmls = [] # for url in urls: # URL html = get_html(url) # HTML URL htmls.append(html) # HTML sleep(1) return htmls # - HTML def get_html(url): """ URL- HTML :param url: URL- :type url: str :return: HTML- """ print(f"""get_html url={url}""") r = requests.get(url, headers={'User-Agent': 'Custom'}) # web- url print(r) # <Response [200]> return r.text # HTML if __name__ == '__main__': pass
[ 2, 198, 2, 220, 220, 220, 220, 220, 220, 220, 198, 2, 198, 2, 220, 220, 1058, 220, 220, 198, 2, 220, 220, 1058, 17168, 13789, 198, 2, 198, 198, 6738, 640, 1330, 3993, 198, 11748, 7007, 628, 198, 4299, 651, 62, 6494, 82, 7, 637...
1.843972
423
#!/usr/bin/python # # This post-exploitation script can be used to spider numerous systems # to identify sensitive and/or confidential data. A good scenario to # use this script is when you have admin credentials to tons of # Windows systems, and you want to look for files containing data such # as PII, network password documents, etc. For the most part, # this script uses smbclient, parses the results, and prints # out the results in a nice format for you. # # Author: Alton Johnson <alton@vonahi.io # Version: 2.4 # Updated: 01/23/2014 # import commands, time, getopt, re, os from sys import argv start_time = time.time() banner = "\n " + "*" * 56 banner += "\n * _ *" banner += "\n * | | // \\\\ *" banner += "\n * ___ _ __ ___ | |__ _\\\\()//_ *" banner += "\n * / __| '_ ` _ \| '_ \ / // \\\\ \ *" banner += "\n * \__ \ | | | | | |_) | |\__/| *" banner += "\n * |___/_| |_| |_|_.__/ *" banner += "\n * *" banner += "\n * SMB Spider v2.4, Alton Johnson (alton@vonahi.io) *" banner += "\n " + "*" * 56 + "\n" if __name__ == "__main__": try: start(argv[1:]) except KeyboardInterrupt: print "\nExiting. Interrupted by user (ctrl-c)." exit() except Exception, err: print err exit() print "\n-----" print "Completed in: %.1fs" % (time.time() - start_time)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 198, 2, 770, 1281, 12, 20676, 78, 3780, 4226, 460, 307, 973, 284, 19230, 6409, 3341, 198, 2, 284, 5911, 8564, 290, 14, 273, 15279, 1366, 13, 317, 922, 8883, 284, 220, 198, 2, 77...
2.381282
577
import os import subprocess import sys from typing import Dict, List from app.pipelines import Pipeline
[ 11748, 28686, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 198, 198, 6738, 598, 13, 79, 541, 20655, 1330, 37709, 628 ]
3.785714
28
from unittest.mock import MagicMock, patch import os import sys import unittest import json import copy import io import gzip sys.modules["trace_forwarder.connection"] = MagicMock() sys.modules["datadog_lambda.wrapper"] = MagicMock() sys.modules["datadog_lambda.metric"] = MagicMock() sys.modules["datadog"] = MagicMock() sys.modules["requests"] = MagicMock() sys.modules["requests_futures.sessions"] = MagicMock() env_patch = patch.dict( os.environ, { "DD_API_KEY": "11111111111111111111111111111111", "DD_ADDITIONAL_TARGET_LAMBDAS": "ironmaiden,megadeth", }, ) env_patch.start() import lambda_function import parsing env_patch.stop() test_data = { "Records": [ { "eventVersion": "1.08", "userIdentity": { "type": "AssumedRole", "principalId": "AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration", "arn": "arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d", "accountId": "601427279990", "accessKeyId": "ASIAYYB64AB3DWOY7JNT", "sessionContext": { "sessionIssuer": { "type": "Role", "principalId": "AROAYYB64AB3HGPQO2EPR", "arn": "arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole", "accountId": "601427279990", "userName": "Siti_DatadogAWSIntegrationRole", }, "attributes": { "creationDate": "2021-05-02T23:49:01Z", "mfaAuthenticated": "false", }, }, }, "eventTime": "2021-05-02T23:53:28Z", "eventSource": "dynamodb.amazonaws.com", "eventName": "DescribeTable", "awsRegion": "us-east-1", "sourceIPAddress": "54.162.201.161", "userAgent": "Datadog", "requestParameters": {"tableName": "KinesisClientLibraryLocal"}, "responseElements": None, "requestID": "A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG", "eventID": "a5dd11f9-f616-4ea8-8030-0b3eef554352", "readOnly": True, "resources": [ { "accountId": "601427279990", "type": "AWS::DynamoDB::Table", "ARN": "arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal", } ], "eventType": "AwsApiCall", "apiVersion": "2012-08-10", "managementEvent": True, "recipientAccountId": 
"601427279990", "eventCategory": "Management", } ] } if __name__ == "__main__": unittest.main()
[ 6738, 555, 715, 395, 13, 76, 735, 1330, 6139, 44, 735, 11, 8529, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 11748, 33918, 198, 11748, 4866, 198, 11748, 33245, 198, 11748, 308, 13344, 198, 198, 17597, 13, 181...
1.795726
1,591
""" Python 3 Object-Oriented Programming Chapter 6, Abstract Base Classes and Operator Overloading """ from lookup_mapping import Lookup
[ 37811, 198, 37906, 513, 9515, 12, 46, 380, 4714, 30297, 198, 198, 14126, 718, 11, 27741, 7308, 38884, 290, 35946, 3827, 25138, 198, 37811, 198, 6738, 35847, 62, 76, 5912, 1330, 6803, 929, 198 ]
4.058824
34
TASK_STATUS = [ ('TD', 'To Do'), ('IP', 'In Progress'), ('QA', 'Testing'), ('DO', 'Done'), ] TASK_PRIORITY = [ ('ME', 'Medium'), ('HI', 'Highest'), ('HG', 'High'), ('LO', 'Lowest'), ]
[ 51, 1921, 42, 62, 35744, 2937, 796, 685, 198, 220, 220, 220, 19203, 21016, 3256, 705, 2514, 2141, 33809, 198, 220, 220, 220, 19203, 4061, 3256, 705, 818, 18387, 33809, 198, 220, 220, 220, 19203, 48, 32, 3256, 705, 44154, 33809, 198, ...
1.972727
110
from __future__ import absolute_import from __future__ import division from __future__ import print_function import mxnet as mx import numpy as np from config import config #def lin(data, num_filter, workspace, name, binarize, dcn): # bit = 1 # ACT_BIT = config.ACT_BIT # bn_mom = config.bn_mom # workspace = config.workspace # if not binarize: # if not dcn: # conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), # no_bias=True, workspace=workspace, name=name + '_conv') # bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # return act1 # else: # bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1, # num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1)) # conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset, # num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False) # #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1), # # no_bias=False, workspace=workspace, name=name + '_conv') # return conv1 # else: # bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn') # act1 = Act(data=bn1, act_type='relu', name=name + '_relu') # conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), # no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit) # conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2') # return conv1
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 285, 87, 3262, 355, 285, 87, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 4566, ...
2.074331
1,009
''' --- I M P O R T S T A T E M E N T S --- ''' import coloredlogs, logging coloredlogs.install() import numpy as np ''' === S T A R T O F C L A S S E V A L M E T R I C === [About] Object class for calculating average values. [Init Args] - name: String for the variable name to calculate average value for. [Methods] - __init__ : Class initialiser - update : Function to be implemented by the children sub-classes. - reset : Function for resetting the number of instances and the sum of the metric. - get : Calculation of the average value based on the number of instances and the provided sum. - get_name_value : Function for returning the name(s) and the value(s). - check_label_shapes : Function responsible for type and shape checking. ''' ''' === E N D O F C L A S S E V A L M E T R I C === ''' ''' === S T A R T O F C L A S S M E T R I C L I S T === [About] EvalMetric class for creating a list containing Evalmetric objects. [Init Args] - name: String for the variable name. [Methods] - __init__ : Class initialiser - update : Function to update the list of EvalMetric objects. - reset : Function for resetting the list. - get : Function for getting each of the EvalMetric objects in the list. - get_name_value : Function for getting the name of the list items. ''' ''' === E N D O F C L A S S M E T R I C L I S T === ''' ''' === S T A R T O F C L A S S A C C U R A C Y === [About] EvalMetric class for creating an accuracy estimate. [Init Args] - name: String for the variable name. Defaults to `accuracy`. - topk: Number of top predictions to be used of the score (top-1, top-5 etc.). Defaults to 1. [Methods] - __init__ : Class initialiser - update : Function to update scores. ''' ''' === E N D O F C L A S S A C C U R A C Y === ''' ''' === S T A R T O F C L A S S L O S S === [About] EvalMetric class for creating a loss score. The class acts a a `dummy estimate` as no further calculations are required for the loss. Instead it is primarily used to easily/directly print the loss. 
[Init Args] - name: String for the variable name. Defaults to `loss`. [Methods] - __init__ : Class initialiser - update : Function to update scores. ''' ''' === E N D O F C L A S S L O S S === ''' ''' === S T A R T O F C L A S S L O S S === [About] EvalMetric class for batch-size used. The class acts a a `dummy estimate` as no further calculations are required for the size of the batch. Instead it is primarily used to easily/directly print the batch size. [Init Args] - name: String for the variable name. Defaults to `batch-size`. [Methods] - __init__ : Class initialiser - update : Function used for updates. ''' ''' === E N D O F C L A S S L O S S === ''' ''' === S T A R T O F C L A S S L E A R N I N G R A T E === [About] EvalMetric class for learning rate used. The class acts a a `dummy estimate` as no further calculations are required for the size of the lr. Instead it is primarily used to easily/directly print the learning rate. [Init Args] - name: String for the variable name. Defaults to `lr`. [Methods] - __init__ : Class initialiser - update : Function used for updates. ''' ''' === E N D O F C L A S S L E A R N I N G R A T E === ''' if __name__ == "__main__": import torch # Test Accuracy predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))] labels = [torch.from_numpy(np.array([ 0, 1, 1 ]))] losses = [torch.from_numpy(np.array([ 0.3, 0.4, 0.5 ]))] logging.getLogger().setLevel(logging.DEBUG) logging.debug("input pred: {}".format(predicts)) logging.debug("input label: {}".format(labels)) logging.debug("input loss: {}".format(labels)) acc = Accuracy() acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1) logging.info(acc.get()) # Test MetricList metrics = MetricList(Loss(name="ce-loss"), Accuracy(topk=1, name="acc-top1"), Accuracy(topk=2, name="acc-top2"), ) metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1) logging.info("------------") logging.info(metrics.get()) acc.get_name_value()
[ 7061, 6, 198, 6329, 220, 314, 337, 350, 440, 371, 309, 220, 311, 309, 317, 309, 412, 337, 412, 399, 309, 311, 220, 11420, 198, 7061, 6, 198, 11748, 16396, 6404, 82, 11, 18931, 198, 25717, 6404, 82, 13, 17350, 3419, 198, 11748, 299...
2.439355
1,921
import sys from string import whitespace from clint.textui import puts, indent, colored from poly.common import * from poly.node import * def empty_space(s): if len(s) == 0: return True for c in s: if s in whitespace: return True return False if __name__ == "__main__": repl_main(sys.argv[1:])
[ 11748, 25064, 198, 6738, 4731, 1330, 13216, 10223, 198, 198, 6738, 537, 600, 13, 5239, 9019, 1330, 7584, 11, 33793, 11, 16396, 198, 198, 6738, 7514, 13, 11321, 1330, 1635, 198, 6738, 7514, 13, 17440, 1330, 1635, 198, 198, 4299, 6565, ...
2.457143
140
title = 'Pmw.SelectionDialog demonstration' # Import Pmw from this directory tree. import sys sys.path[:0] = ['../../..'] import Tkinter import Pmw ###################################################################### # Create demo in root window for testing. if __name__ == '__main__': root = Tkinter.Tk() Pmw.initialise(root) root.title(title) exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy) exitButton.pack(side = 'bottom') widget = Demo(root) root.mainloop()
[ 7839, 796, 705, 47, 76, 86, 13, 4653, 1564, 44204, 13646, 6, 198, 198, 2, 17267, 350, 76, 86, 422, 428, 8619, 5509, 13, 198, 11748, 25064, 198, 17597, 13, 6978, 58, 25, 15, 60, 796, 37250, 40720, 40720, 492, 20520, 198, 198, 11748...
2.965909
176
from infomap import infomap infomapWrapper = infomap.Infomap("--two-level") # Add weight as an optional third argument infomapWrapper.addLink(0, 1) infomapWrapper.addLink(0, 2) infomapWrapper.addLink(0, 3) infomapWrapper.addLink(1, 0) infomapWrapper.addLink(1, 2) infomapWrapper.addLink(2, 1) infomapWrapper.addLink(2, 0) infomapWrapper.addLink(3, 0) infomapWrapper.addLink(3, 4) infomapWrapper.addLink(3, 5) infomapWrapper.addLink(4, 3) infomapWrapper.addLink(4, 5) infomapWrapper.addLink(5, 4) infomapWrapper.addLink(5, 3) infomapWrapper.run() tree = infomapWrapper.tree print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength())) print("\n#node module") for node in tree.leafIter(): print("%d %d" % (node.physIndex, node.moduleIndex()))
[ 6738, 1167, 296, 499, 1330, 1167, 296, 499, 198, 198, 10745, 296, 499, 36918, 2848, 796, 1167, 296, 499, 13, 18943, 296, 499, 7203, 438, 11545, 12, 5715, 4943, 198, 198, 2, 3060, 3463, 355, 281, 11902, 2368, 4578, 198, 10745, 296, 4...
2.357576
330
import unittest if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12417, 3419, 198 ]
2.407407
27
# ------------------------------------------------------------------------ # Copyright 2020, 2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ """ Verify settings in configuration YAML file (helper functions) """ # Global modules # None # Local modules from modules.command import ( CmdShell, CmdSsh ) from modules.constants import getConstants from modules.exceptions import RpmFileNotFoundException from modules.ocp import ocLogin from modules.tools import ( refSystemIsStandard, areContainerMemResourcesValid, getRpmFileForPackage, strBold, getHdbCopySshCommand ) # Functions for formatting the output def showMsgOk(text): """ print text with header """ print("[Ok ] " + text) def showMsgErr(text): """ print text with header """ print('[' + strBold('Error') + '] ' + text) def showMsgInd(text): """ print text with header """ print("[.....] " + text) # Classes
[ 2, 16529, 982, 198, 2, 15069, 12131, 11, 33448, 19764, 11421, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2...
3.495516
446
import time import board import adafruit_dotstar import atexit import signal kill_now = False DOTSTAR_DATA = board.D5 DOTSTAR_CLOCK = board.D6 dots = adafruit_dotstar.DotStar(DOTSTAR_CLOCK, DOTSTAR_DATA, 3, brightness=0.5) atexit.register(exit_handler) signal.signal(signal.SIGINT, exit_handler) signal.signal(signal.SIGTERM, exit_handler) while not kill_now: for j in range(255): for i in range(3): rc_index = (i * 256 // 3) + j * 5 dots[i] = wheel(rc_index & 255) dots.show() time.sleep(0.01)
[ 11748, 640, 198, 11748, 3096, 198, 11748, 512, 1878, 4872, 62, 26518, 7364, 198, 11748, 379, 37023, 198, 11748, 6737, 198, 198, 12728, 62, 2197, 796, 10352, 198, 35, 2394, 46678, 62, 26947, 796, 3096, 13, 35, 20, 198, 35, 2394, 46678,...
2.212851
249
from .corpus import Corpus, DirectoryCorpus from .loaders import OweLoader, YorubaBlogCorpus, BBCCorpus, BibeliCorpus
[ 6738, 764, 10215, 79, 385, 1330, 44874, 11, 27387, 45680, 385, 198, 6738, 764, 2220, 364, 1330, 440, 732, 17401, 11, 42453, 22013, 42383, 45680, 385, 11, 7823, 45680, 385, 11, 43278, 43733, 45680, 385 ]
3.342857
35
import torch import torch.nn as nn import torch.nn.functional as F import math import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np from masked_cross_entropy import * from preprocess import * from parameter import * import time # # Training # # Evaluating the network # def evaluate(input_seq, max_length=MAX_LENGTH):
[ 11748, 28034, 201, 198, 11748, 28034, 13, 20471, 355, 299, 77, 201, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 201, 198, 201, 198, 11748, 10688, 201, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 1174...
2.816176
136
from .api import * __version__ = "5.0.0"
[ 6738, 764, 15042, 1330, 1635, 198, 198, 834, 9641, 834, 796, 366, 20, 13, 15, 13, 15, 1 ]
2.277778
18
import unittest import pycqed as pq import os import matplotlib.pyplot as plt from pycqed.analysis_v2 import measurement_analysis as ma
[ 11748, 555, 715, 395, 198, 11748, 12972, 66, 80, 276, 355, 279, 80, 198, 11748, 28686, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 12972, 66, 80, 276, 13, 20930, 62, 85, 17, 1330, 15558, 62, 20930, 355, ...
3.044444
45
import turtle '''http://www.algorithm.co.il/blogs/computer-science/fractals-in-10-minutes-no-6-turtle-snowflake/ This would be a good introduction to recursion. I don't see how students would invent this on their own, but they could modify it and see what other fractals they could generate. ''' pen = turtle.Turtle() pen.penup() pen.goto(-200,0) pen.pendown() pen.speed(0) #Draw the fractal fractal(pen, 500, 4) turtle.done()
[ 11748, 28699, 198, 198, 7061, 6, 4023, 1378, 2503, 13, 282, 42289, 13, 1073, 13, 346, 14, 49096, 14, 33215, 12, 16801, 14, 69, 974, 874, 12, 259, 12, 940, 12, 1084, 1769, 12, 3919, 12, 21, 12, 83, 17964, 12, 82, 2197, 47597, 14,...
2.84106
151
import unittest from spatial import Edge, Vector3
[ 11748, 555, 715, 395, 198, 198, 6738, 21739, 1330, 13113, 11, 20650, 18, 628 ]
3.714286
14
from django.urls import re_path from user_queries.views import UserQuerySaveView, UserQueryCollectView urlpatterns = [ re_path(r"^/save/?$", UserQuerySaveView.as_view(), name="user-save-query"), re_path( r"^/collect/?$", UserQueryCollectView.as_view(), name="user-collect-queries", ), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 302, 62, 6978, 198, 6738, 2836, 62, 421, 10640, 13, 33571, 1330, 11787, 20746, 16928, 7680, 11, 11787, 20746, 31337, 7680, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 302, 62, 6978,...
2.41791
134
# Generated by Django 2.0.7 on 2019-10-06 11:46 import datetime from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 15, 13, 22, 319, 13130, 12, 940, 12, 3312, 1367, 25, 3510, 198, 198, 11748, 4818, 8079, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.972222
36
import pandas as pd from math import log
[ 11748, 19798, 292, 355, 279, 67, 198, 6738, 10688, 1330, 2604, 628 ]
3.5
12
strings = { "en": { "error_no_reply": "This command must be sent as a reply to one's message!", "error_report_admin": "Whoa! Don't report admins ", "error_restrict_admin": "You cannot restrict an admin.", "report_date_format": "%d.%m.%Y at %H:%M", "report_message": ' Sent {time} (server time)\n' '<a href="{msg_url}">Go to message</a>', "report_note": "\n\nNote: {note}", "report_sent": "<i>Report sent</i>", "action_del_msg": "Delete message", "action_del_and_ban": "Delete and ban", "action_deleted": "\n\n <b>Deleted</b>", "action_deleted_banned": "\n\n <b>Deleted, user banned</b>", "action_deleted_partially": "Some messages couldn't be found or deleted", "readonly_forever": " <i>User set to read-only mode forever</i>", "readonly_temporary": " <i>User set to read-only mode until {time} (server time)</i>", "nomedia_forever": " <i>User set to text-only mode forever</i>", "nomedia_temporary": " <i>User set to text-only mode until {time} (server time)</i>", "need_admins_attention": 'Dear admins, your presence in chat is needed!\n\n' '<a href="{msg_url}">Go to chat</a>', }, "ru": { "error_no_reply": " - !", "error_report_admin": " ? -- ", "error_restrict_admin": " .", "report_date_format": "%d.%m.%Y %H:%M", "report_message": ' {time} ( )\n' '<a href="{msg_url}"> </a>', "report_note": "\n\n: {note}", "report_sent": "<i> </i>", "action_del_msg": " ", "action_del_and_ban": " ", "action_deleted": "\n\n <b></b>", "action_deleted_banned": "\n\n <b>, </b>", "action_deleted_partially": " ", "readonly_forever": " <i> </i>", "readonly_temporary": " <i> {time} ( )</i>", "nomedia_forever": " <i> </i>", "nomedia_temporary": " <i> {time} ( )</i>", "need_admins_attention": ' , !\n\n' '<a href="{msg_url}"> </a>', }, "de": { "error_no_reply": "Dieser Befehl kann nur als Antwort gesendet werden!", "error_report_admin": "Whoa! 
Du kannst Admins nicht melden ", "error_restrict_admin": "Du kannst keine Admins einschrnken.", "report_date_format": "%d.%m.%Y um %H:%M Uhr", "report_message": ' Gesendet {time} (server time)\n' '<a href="{msg_url}">Zur Nachricht</a>', "report_note": "\n\nNotiz: {note}", "report_sent": "<i>Gemeldet</i>", "action_del_msg": "Nachricht lschen", "action_del_and_ban": "Lschen und Sperren", "action_deleted": "\n\n <b>Lschen</b>", "action_deleted_banned": "\n\n <b>Gelscht, Nutzer gesperrt!</b>", "action_deleted_partially": "Einige Nachrichten wurden nicht gefunden zum lschen", "readonly_forever": " <i>Nutzer ist fr immer stumm</i>", "readonly_temporary": " <i>Nutzer bis {time} stumm. (server time)</i>", "nomedia_forever": " <i>Nutzer fr immer im Nur-Text-Modus.</i>", "nomedia_temporary": " <i>Nutzer bis {time} im nur Text-Modus. (server time)</i>", "need_admins_attention": 'Liebe Admins, ich sehne euch herbei!\n\n' '<a href="{msg_url}">Zum Chat</a>', } @@ -64,7 +89,7 @@ def get_string(lang: str, key: str): lang = strings.get(lang) if not lang: if not strings.get("en"): raise KeyError(f'Neither "{lang}" nor "en" locales found') raise KeyError(f'Weder "{lang}" noch "en" gefunden.') else: lang = strings.get("en") try: return lang[key] except KeyError: return strings.get("en").get(key, "ERR_NO_STRING")
[ 37336, 796, 1391, 198, 220, 220, 220, 366, 268, 1298, 1391, 198, 220, 220, 220, 220, 220, 220, 220, 366, 18224, 62, 3919, 62, 47768, 1298, 366, 1212, 3141, 1276, 307, 1908, 355, 257, 10971, 284, 530, 338, 3275, 40754, 198, 220, 220,...
1.973617
1,971
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from swagger_server.models.base_model_ import Model from swagger_server.models.impedance import Impedance # noqa: F401,E501 from swagger_server import util
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 4818, 8079, 1330, 3128, 11, 4818, 8079, 220, 1303, 645, 20402, 25, 376, 21844, 198, 198, 6738, 19720, 1330, 7343, 11, 360, 713, 220...
3.1
100
from rest_framework import serializers from .models import Register
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 764, 27530, 1330, 17296, 628 ]
4.928571
14
# Created by Hansi at 3/16/2020 import os from algo.data_process.data_preprocessor import data_cleaning_flow from algo.utils.file_utils import delete_create_folder def extract_gt_tokens(text): """ Given GT string, method to extract GT labels. GT string should be formatted as Twitter-Event-Data-2019. parameters ----------- :param text: str :return: list List of GT labels corresponding to a single event Since there can be duplicate definitions for a single event, this list contains separate label lists for each duplicate definition. """ duplicates = [] for element in text.split("|"): labels = [] for subelement in element.split("["): if subelement: subelement = subelement.replace("\n", "") subelement = subelement.replace("]", "") tokens = subelement.split(",") labels.append(tokens) duplicates.append(labels) return duplicates def load_gt(folder_path): """ Method to read GT data into a dictionary formatted as {time-window: labels} parameters ----------- :param folder_path: str Path to folder which contains GT data :return: object Dictionary of GT data """ gt = dict() for root, dirs, files in os.walk(folder_path): for file in files: file_name = os.path.splitext(file)[0] f = open(os.path.join(folder_path, file), 'r', encoding='utf-8') events = [] for line in f: tokens = extract_gt_tokens(line) events.append(tokens) gt[file_name] = events f.close() return gt def generate_gt_string(tokens): """ Given a list of GT labels corresponding to a single event, convert them to a string formatted according to Twitter-Event-Data-2019 GT format. parameters ----------- :param tokens: list :return: str """ str = "" for duplicate in tokens: if str and str[-1] == "]": str = str + "|" for label in duplicate: str = str + "[" for element in label: if str[-1] == "[": str = str + element else: str = str + "," + element str = str + "]" return str def get_combined_gt(gt): """ Combine the GT labels of multiple events available at a time frame into single event representation. 
parameters ----------- :param gt: object Dictionary of GT returned by load_GT :return: object Dictionary of combined GT """ combined_gt = dict() for time_frame in gt.keys(): gt_events = gt[time_frame] combined_gt_event = gt_events[0] for event in gt_events[1:]: temp = [] for duplicate in event: for combined_event in combined_gt_event: temp.append(combined_event + duplicate) combined_gt_event = temp # even though there is 1 event, it is added to a list to preserve consistency with general evaluation_v2 methods events = [combined_gt_event] combined_gt[time_frame] = events return combined_gt def preprocess_gt(input_filepath, output_filepath): """ Preprocess ground truth data in input_file and save to the output_file parameters ----------- :param input_filepath: str (.txt file path) Ground truth file formatted as Twitter-Event-Data-2019 :param output_filepath: str (.txt file path) :return: """ input_file = open(input_filepath, 'r') output_file = open(output_filepath, 'a', encoding='utf-8') events = [] for line in input_file: tokens = extract_gt_tokens(line) events.append(tokens) # update tokens new_events = [] for event in events: new_duplicates = [] for duplicate in event: new_labels = [] for label in duplicate: new_elements = [] for element in label: new_label = data_cleaning_flow(element) new_elements.append(new_label) new_labels.append(new_elements) new_duplicates.append(new_labels) new_events.append(new_duplicates) for event in new_events: str = generate_gt_string(event) output_file.write(str) output_file.write("\n") output_file.close() def preprocess_gt_bulk(input_folder_path, output_folder_path): """ Preprocess ground truth data in all files in input_folder and save to the output_folder parameters ----------- :param input_folder_path: str Path to folder which contains GT data files :param output_folder_path: str Path to folder to save preprocessed GT data :return: """ # delete if there already exist a folder and create new folder 
delete_create_folder(output_folder_path) for root, dirs, files in os.walk(input_folder_path): for file in files: input_filepath = os.path.join(input_folder_path, file) output_filepath = os.path.join(output_folder_path, file) preprocess_gt(input_filepath, output_filepath)
[ 2, 15622, 416, 13071, 72, 379, 513, 14, 1433, 14, 42334, 198, 198, 11748, 28686, 198, 198, 6738, 435, 2188, 13, 7890, 62, 14681, 13, 7890, 62, 3866, 41341, 1330, 1366, 62, 2375, 7574, 62, 11125, 198, 6738, 435, 2188, 13, 26791, 13, ...
2.360036
2,247
import functools def test_func(*args, **kwargs): return "In test func" test_value = 1
[ 11748, 1257, 310, 10141, 628, 628, 628, 198, 198, 4299, 1332, 62, 20786, 46491, 22046, 11, 12429, 46265, 22046, 2599, 198, 220, 220, 220, 1441, 366, 818, 1332, 25439, 1, 628, 198, 9288, 62, 8367, 796, 352, 198 ]
2.605263
38
# -*- coding: utf-8 -*- # Copyright (c) 2019, Brandon Nielsen # All rights reserved. # # This software may be modified and distributed under the terms # of the BSD license. See the LICENSE file for details. import unittest import aniso8601 from aniso8601.exceptions import ISOFormatError from aniso8601.interval import (_parse_interval, parse_interval, parse_repeating_interval) from aniso8601.tests.compat import mock
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 15069, 357, 66, 8, 13130, 11, 14328, 31154, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 770, 3788, 743, 307, 9518, 290, 9387, 739, 262, 2846, 198, 2, 28...
2.808642
162
# encoding: utf-8 import operator from functools import reduce from django.core.exceptions import FieldDoesNotExist from django.db.models import Q from django.db.models.constants import LOOKUP_SEP def split_camel_name(name, fall=False): """ GenerateURLs => [Generate, URLs] generateURLsLite => [generate, URLs, Lite] """ if not name: return [] lastest_upper = name[0].isupper() idx_list = [] for idx, char in enumerate(name): upper = char.isupper() # rising if upper and not lastest_upper: idx_list.append(idx) # falling elif fall and not upper and lastest_upper: idx_list.append(idx-1) lastest_upper = upper l_idx = 0 name_items = [] for r_idx in idx_list: if name[l_idx:r_idx]: name_items.append(name[l_idx:r_idx]) l_idx = r_idx if name[l_idx:]: name_items.append(name[l_idx:]) return name_items def construct_search(queryset, field_name): """ """ if field_name.startswith('^'): return "%s__istartswith" % field_name[1:] elif field_name.startswith('='): return "%s__iexact" % field_name[1:] # Use field_name if it includes a lookup. opts = queryset.model._meta lookup_fields = field_name.split(LOOKUP_SEP) # Go through the fields, following all relations. prev_field = None for path_part in lookup_fields: if path_part == 'pk': path_part = opts.pk.name try: field = opts.get_field(path_part) except FieldDoesNotExist: # Use valid query lookups. if prev_field and prev_field.get_lookup(path_part): return field_name else: prev_field = field if hasattr(field, 'get_path_info'): # Update opts to follow the relation. opts = field.get_path_info()[-1].to_opts # Otherwise, use the field with icontains. return "%s__icontains" % field_name def get_search_results(queryset, search_term, search_fields, model): """ Return a tuple containing a queryset to implement the search and a boolean indicating if the results may contain duplicates. 
""" try: from django.contrib.admin.utils import ( lookup_needs_distinct as lookup_spawns_duplicates, ) except ImportError: from django.contrib.admin.utils import lookup_spawns_duplicates use_distinct = False if search_fields and search_term: orm_lookups = [construct_search(queryset, str(search_field)) for search_field in search_fields] for bit in search_term.split(): or_queries = [Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups] queryset = queryset.filter(reduce(operator.or_, or_queries)) use_distinct |= any(lookup_spawns_duplicates(model._meta, search_spec) for search_spec in orm_lookups) return queryset, use_distinct
[ 2, 21004, 25, 3384, 69, 12, 23, 198, 11748, 10088, 198, 6738, 1257, 310, 10141, 1330, 4646, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 7663, 13921, 3673, 3109, 396, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 133...
2.241199
1,335
from setuptools import setup setup(name='lygadgets', version='0.1.31', description='Tools to make klayout, the standalone, and python environments work better together', long_description=readme(), long_description_content_type='text/markdown', author='Alex Tait', author_email='alexander.tait@nist.gov', license='MIT', packages=['lygadgets'], install_requires=['future', 'xmltodict'], package_data={'': ['*.lym']}, include_package_data=True, entry_points={'console_scripts': [ 'lygadgets_link=lygadgets.command_line:cm_link_any', 'lygadgets_unlink=lygadgets.command_line:cm_unlink_any', ]}, )
[ 6738, 900, 37623, 10141, 1330, 9058, 628, 198, 198, 40406, 7, 3672, 11639, 306, 70, 324, 11407, 3256, 198, 220, 220, 220, 220, 220, 2196, 11639, 15, 13, 16, 13, 3132, 3256, 198, 220, 220, 220, 220, 220, 6764, 11639, 33637, 284, 787,...
2.351351
296
# inputDialog.py """ Provides a window to get input values from the user to animate a cannonball.""" from graphics import GraphWin, Entry, Text, Point from button import Button
[ 2, 5128, 44204, 13, 9078, 198, 37811, 47081, 257, 4324, 284, 651, 5128, 3815, 220, 198, 6738, 262, 2836, 284, 43828, 257, 21202, 1894, 526, 15931, 198, 198, 6738, 9382, 1330, 29681, 16643, 11, 21617, 11, 8255, 11, 6252, 198, 6738, 493...
4.045455
44
import gym from gym import error, spaces, utils from gym.utils import seeding from gym.envs.mujoco.reacher import ReacherEnv import numpy as np # def reset(self): # super().reset() # def render(self, mode='human'): # ... # def close(self): # ...
[ 11748, 11550, 198, 6738, 11550, 1330, 4049, 11, 9029, 11, 3384, 4487, 198, 6738, 11550, 13, 26791, 1330, 384, 8228, 198, 6738, 11550, 13, 268, 14259, 13, 76, 23577, 25634, 13, 260, 3493, 1330, 797, 3493, 4834, 85, 198, 11748, 299, 321...
2.607843
102
#/usr/bin/env python import httplib2 import json import sys from prettytable import PrettyTable from config import field def main(): company_statistics = {} engineer_statistics = {} stackalytics = Stackalytics("http://stackalytics.com") for project_type in field['project_type']: company_statistics[project_type] = {} for company in field['company']: company_statistics[project_type][company] = {} for metric in field['metric']: company_statistics[project_type][company][metric] = {} url = "/api/1.0/stats/companies?release={}&metric={}&project_type={}&company={}".format(field['release'], metric, project_type, company) resp, content = stackalytics.get_metrics(url) stats = json.loads(content)['stats'] try: metric_dict = stats[0] except IndexError: metric_dict = {'id': company, 'metric': 0} company_statistics[project_type][company][metric] = metric_dict for project_type in field['project_type']: engineer_statistics[project_type] = {} for engineer in field['engineers']['ids']: engineer_statistics[project_type][engineer] = {} for metric in field['metric']: engineer_statistics[project_type][engineer][metric] = {} engineers_url = "/api/1.0/stats/engineers?&release={}&metric={}"\ "&project_type={}&company={}&user_id={}".format(field['release'], metric, project_type, field['engineers']['owercompany'], engineer) engineers_resp, engineers_content = stackalytics.get_metrics(engineers_url) engineers_stats = json.loads(engineers_content)['stats'] try: engineers_metric_dict = engineers_stats[0] except IndexError: engineers_metric_dict = {'id': engineer, 'metric': 0} engineer_statistics[project_type][engineer][metric] = engineers_metric_dict engineer_table_field = ['metric'] + [engineer for engineer in field['engineers']['ids']] for project_type in field['project_type']: print "{} {} project by tencent individual:".format(field['release'], project_type) table = PrettyTable(engineer_table_field) for metric in field['metric']: table.add_row([metric] + 
[engineer_statistics[project_type][engineer][metric]['metric'] for engineer in field['engineers']['ids']]) print table table_field = ['metric'] + [company.replace('%20', ' ') for company in field['company']] for project_type in field['project_type']: print "{} {} project by company:".format(field['release'], project_type) table = PrettyTable(table_field) for metric in field['metric']: table.add_row([metric] + [company_statistics[project_type][company][metric]['metric'] for company in field['company']]) print table # print company_statistics if __name__ == '__main__': sys.exit(main())
[ 2, 14, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 1841, 489, 571, 17, 198, 11748, 33918, 198, 11748, 25064, 198, 6738, 2495, 11487, 1330, 20090, 10962, 198, 6738, 4566, 1330, 2214, 628, 628, 198, 4299, 1388, 33529, 628, 220, ...
1.913354
1,962