content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import matplotlib
from libs import GeneratorCNN
matplotlib.interactive(False)
from importlib import reload
import gc
from libs import model_zoo
reload(model_zoo)
import BaseLineModel as approach
import numpy as np
from importlib import reload
from libs import utils
from libs.utils import Bunch
from libs import Multi_Class_Metrics as mcm
from tensorflow.keras import callbacks
reload(GeneratorCNN)
network = model_zoo.EEGNetLawhern#SpatialTemporalMultiClass#ConnectivityModel#Channel_Decision_Small#Channel_Decision_Model#ShallowConvNetMultiClass#SpatialTemporalMultiClass#ShallowConvNetMultiClassLinear##SpatialAverageModel# ConnectivityModel#
gc.collect()
use_stable=True
subjects = {'dementia':[],'control':[],'mci':[]}
utils.get_subjects_wrt_site(subjects,30,30,'dementia')
utils.get_subjects_wrt_site(subjects,100,100,'control')
utils.get_subjects_wrt_site(subjects,1,15,'mci')
use_stable='all'
if use_stable=='order0':
channel_matches = np.load('pareto_opt_matches.npy')
channel_matches = utils.get_stable_channels(channel_matches,0).T#[:,44:46]
elif use_stable=='all':
import json
with open('A_B_bestpositional_hungarian.json') as f:
match = json.load(f)
match['matching0'].pop('info')
channel_matches = np.array([list(match['matching0'].keys()),list(match['matching0'].values())]).astype(int)
elif use_stable is None:
channel_matches = np.stack([np.arange(160),np.arange(160)])
site_as_label = False
if site_as_label:
num_classes = 2
else:
num_classes = 3
sensitivity_mci_dem = mcm.MultiClassRecall(num_classes=num_classes,
pos_ind=[1,2],
average='macro',
name="mci_dem_sensitivity")
specificity_mci_dem = mcm.MultiClassSpecificity(num_classes=num_classes,
pos_ind=[1,2],
average='macro',
name="mci_dem_Specificity")
f1_mci_dem = mcm.MultiClassF1(num_classes=num_classes,
pos_ind=[1,2],average='macro',
name="mci_dem_F1")
metrics = ['acc', sensitivity_mci_dem, specificity_mci_dem,f1_mci_dem]
callbacks = [callbacks.EarlyStopping(patience=5,
restore_best_weights=True,
monitor="val_mci_dem_F1",
mode='max',
verbose=1)]
frame_size = utils.Bunch(value=2,unit='s')
fs = 256
network_params = {
'batch_size':32,
'use_bn':True,
'do_ratio':0.2,
'numTrainEpochs':15,
'optimizer':'adam',
'frame_size':frame_size,
'cross_subjects':True,
'num_classes':num_classes,
'n_folds':5,
'use_class_weights':True,
'workers':1,
'multiprocessing':False,
'monitor':"val_mci_dem_F1"
}
data_params={
'standardize':'em_astd',#'ema',#'look_at_time',# look_at_time
'fs':fs,
'channel_matches':channel_matches,
'subjects':subjects,
'test_ratio':0.2,
'readSubjects_params':{
'site_as_label':site_as_label,
'l_freq':1,
'h_freq':30,
'frame_length':utils.time2samples(time_container=frame_size,fs=fs),
'bad_samples_path':r'../BioMagData/badsamples1000Hz'
},
}
SA = np.load('SA.npy')[:,:80]
SB = np.load('SB.npy')[:,:80]
pSA = np.load('pSA.npy')[:80]
pSB = np.load('pSB.npy')[:80]
# Build augmentation pipeline for Generator
function_params = {}
function_params['draw_random_time_frame']={
'frame_length':utils.time2samples(fs=data_params['fs'],time_container=network_params['frame_size']),
'trial_length':5*60*data_params['fs']}
function_params['additive_correlated_noise']={'sigma_noise':5e-2}
function_params['transform_recording_site']={'sensor_inv':[pSA,pSB],
'sensor_fwd':[SA,SB],
'map_to':'random'}
function_params['apply_channel_matches']={'matches':data_params['channel_matches']}
functions=[GeneratorCNN.draw_random_time_frame,
GeneratorCNN.additive_correlated_noise,
GeneratorCNN.transform_recording_site,
GeneratorCNN.apply_channel_matches
]
prob = {'apply_channel_matches':1.0,
'draw_random_time_frame':1.0,
'transform_recording_site':1.0,
'additive_correlated_noise':0.5,
}
train_aug_pipe=GeneratorCNN.Augment_Pipeline(functions=functions,
params=function_params,
p=prob)
func = [
GeneratorCNN.draw_random_time_frame,
GeneratorCNN.transform_recording_site,
GeneratorCNN.apply_channel_matches]
p = {'transform_recording_site':1,'apply_channel_matches':1,'draw_random_time_frame':1}
aug_params = {
'transform_recording_site':
{'map_to':'identity',
'sensor_inv':[pSA,pSB],
'sensor_fwd':[SA,SB]},
'apply_channel_matches':
{'matches':data_params['channel_matches']},
'draw_random_time_frame':
{'frame_length':utils.time2samples(fs=data_params['fs'],time_container=network_params['frame_size']),
'trial_length':5*60*data_params['fs']}
}
valid_pipe = GeneratorCNN.Augment_Pipeline(func,aug_params,p) | [
198,
11748,
2603,
29487,
8019,
198,
6738,
9195,
82,
1330,
35986,
18474,
198,
6759,
29487,
8019,
13,
3849,
5275,
7,
25101,
8,
198,
6738,
1330,
8019,
1330,
18126,
198,
11748,
308,
66,
198,
6738,
9195,
82,
1330,
2746,
62,
89,
2238,
198,
... | 1.999272 | 2,749 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.about import __author__, __name__, __version__
__all__ = ("__version__", "__name__", "__author__")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
201,
198,
201,
198,
6738,
17481,
859,
16366,
13,
10755,
1330,
... | 2.7875 | 80 |
from django.shortcuts import render
import datetime
from time import ctime
from fixture import models
from fixture.models import schedule
#time = ctime()[11:16] == "01:00"
if(True):
rows = schedule.objects.all()
for row in rows:
row.delete()
from fixture import fixtureScraping
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
11748,
4818,
8079,
198,
6738,
640,
1330,
269,
2435,
198,
6738,
29220,
1330,
4981,
198,
6738,
29220,
13,
27530,
1330,
7269,
198,
198,
2,
2435,
796,
269,
2435,
3419,
58,
1157,
25,
1... | 3.2 | 90 |
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from stat import S_IRUSR, S_IWUSR
import yaml
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
''' Class to storing and retrieving token in ~/.ansible_galaxy '''
| [
29113,
29113,
7804,
198,
2,
198,
2,
357,
34,
8,
1853,
11,
5180,
2097,
74,
710,
21474,
1279,
354,
1076,
31,
504,
856,
13,
785,
29,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
28038,
856,
198,
2,
198,
2,
28038,
856,
318,
1479,
378... | 3.844884 | 303 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Manqala Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import get_url
import requests
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2864,
11,
1869,
80,
6081,
12052,
13,
290,
20420,
198,
2,
1114,
5964,
1321,
11,
3387,
766,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1... | 3.373494 | 83 |
"""chppL.
C/C++ package management system.
created by @nocotan
"""
class ChpplData:
"""main data"""
def __init__(self):
"""initialize
@param: __url
@param: __name
@param: __description
@param: __creator
@param: __package
@param: __confilm
"""
self.__url = ""
self.__name = ""
self.__description = ""
self.__creator = ""
self.__package = ""
self.__confilm = ""
def get_url(self):
"""url getter
@return: self.__url
"""
return self.__url
def set_url(self, url):
"""url setter"""
self.__url = url
def get_name(self):
"""name getter
@return: self.__name
"""
return self.__name
def set_name(self, name):
"""name setter"""
self.__name = name
def get_description(self):
"""description getter
@return: self.__description
"""
return self.__description
def set_description(self, description):
"""description setter"""
self.__description = description
def get_creator(self):
"""creator getter
@return: self.__creator
"""
return self.__creator
def set_creator(self, creator):
"""creator setter"""
self.__creator = creator
def get_package(self):
"""package getter
@return: self.__package
"""
return self.__package
def set_package(self, package):
"""package setter"""
self.__package = package
def get_confilm(self):
"""confilm getter
@return: confilm
"""
return self.__confilm
def set_confilm(self, confilm):
"""confilm setter"""
self.__confilm = confilm
| [
37811,
354,
381,
43,
13,
198,
34,
14,
34,
4880,
5301,
4542,
1080,
13,
198,
25598,
416,
2488,
77,
420,
313,
272,
198,
37811,
628,
198,
4871,
609,
381,
75,
6601,
25,
198,
220,
220,
220,
37227,
12417,
1366,
37811,
198,
220,
220,
220,... | 2.142012 | 845 |
#!/usr/bin/python
"""
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from command_utils_base import FormattedParameter
from command_utils import ExecutableCommand
class DaosPerfCommand(ExecutableCommand):
"""Defines a object representing the daos_perf command.
The daos_perf utility benchmarks point-to-point I/O performance of different
layers of the DAOS stack.
"""
def __init__(self, path):
"""Create a daos_perf command object.
Args:
path (str): path to the daos_perf command
"""
super(DaosPerfCommand, self).__init__(
"/run/daos_perf/*", "daos_perf", path)
# daos_perf command line options:
#
# -P <number/string>
# Pool SCM partition size, which can have M(megatbytes) or
# G(gigabytes) as postfix of number. E.g. -P 512M, -P 8G.
self.pool_scm_size = FormattedParameter("-P {}")
# -N <number/string>
# Pool NVMe partition size.
self.pool_nvme_size = FormattedParameter("-N {}")
# -T <vos|echo|daos>
# Type of test, it can be 'vos' and 'daos'.
# vos : run directly on top of Versioning Object Store (VOS).
# echo : I/O traffic generated by the utility only goes
# through the network stack and never lands to storage.
# daos : I/O traffic goes through the full DAOS stack,
# including both network and storage.
# The default value is 'vos'.
self.test_type = FormattedParameter("-T {}", "vos")
# -C <number>
# Credits for concurrently asynchronous I/O. It can be value
# between 1 and 64. The utility runs in synchronous mode if
# credits is set to 0. This option is ignored for mode 'vos'.
self.credits = FormattedParameter("-C {}")
# -c <TINY|LARGE|R2S|R3S|R4S|EC2P2|EC4P2|EC8P2>
# Object class for DAOS full stack test.
self.object_class = FormattedParameter("-c {}")
# -o <number>
# Number of objects are used by the utility.
self.objects = FormattedParameter("-o {}")
# -d <number/string>
# Number of dkeys per object. The number can have 'k' or 'm' as
# postfix which stands for kilo or million.
self.dkeys = FormattedParameter("-d {}")
# -a <number/string>
# Number of akeys per dkey. The number can have 'k' or 'm' as
# postfix which stands for kilo or million.
self.akeys = FormattedParameter("-a {}")
# -r <number/string>
# Number of records per akey. The number can have 'k' or 'm' as
# postfix which stands for kilo or million.
self.records = FormattedParameter("-r {}")
# -A
# Use array value of akey, single value is selected by default.
self.akey_use_array = FormattedParameter("-A", False)
# -s <number/string>
# Size of single value, or extent size of array value. The number
# can have 'K' or 'M' as postfix which stands for kilobyte or
# megabytes.
self.value_size = FormattedParameter("-s {}")
# -z
# Use zero copy API, this option is only valid for 'vos'
self.zero_copy_api = FormattedParameter("-z", False)
# -t
# Instead of using different indices and epochs, all I/Os land to
# the same extent in the same epoch. This option can reduce usage
# of storage space.
self.same_extent = FormattedParameter("-t", False)
# -U
# Only run update performance test.
self.update_test_only = FormattedParameter("-U", False)
# -F
# Only run fetch performance test. This does an update first, but
# only measures the time for the fetch portion.
self.fetch_test_only = FormattedParameter("-F", False)
# -v
# Verify fetch. Checks that what was read from the filesystem is
# what was written to it. This verifcation is not part of timed
# performance measurement. This is turned off by default.
self.verify_fetch = FormattedParameter("-v", False)
# -R
# Only run rebuild performance test.
self.rebuild_test_only = FormattedParameter("-R", False)
# -B
# Profile performance of both update and fetch.
self.profile_performance = FormattedParameter("-B", False)
# -I
# Only run iterate performance test. Only runs in vos mode.
self.iterate_test_only = FormattedParameter("-I", False)
# -n
# Only run iterate performance test but with nesting iterator
# enable. This can only run in vos mode.
self.nesting_iterate_test_only = FormattedParameter("-n", False)
# -f <pathname>
# Full path name of the VOS file.
self.pathname = FormattedParameter("-f {}")
# -w
# Pause after initialization for attaching debugger or analysis
# tool.
self.pause_after_init = FormattedParameter("-w", False)
# Environment variable names to export when running daos_perf
self._env_names = ["D_LOG_FILE"]
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
220,
357,
34,
8,
15069,
13130,
12,
1238,
2481,
8180,
10501,
13,
628,
220,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
17,
12,
2601,
682,
12,
12130,
298,
198,
37... | 2.277548 | 2,414 |
from negotiator_base import BaseNegotiator
from random import random, shuffle
# def receive_utility(self, utility):
# self.utility_history[self.current_iter] = (utility, self.utility())
#
# def compare_offers(self, opponent_offer):
# pass
#
# def counter_offer(self, opponent_offer):
# pass | [
6738,
49194,
62,
8692,
1330,
7308,
32863,
5092,
1352,
198,
6738,
4738,
1330,
4738,
11,
36273,
628,
198,
220,
220,
220,
1303,
825,
3328,
62,
315,
879,
7,
944,
11,
10361,
2599,
198,
220,
220,
220,
1303,
220,
220,
220,
220,
220,
220,
... | 2.59542 | 131 |
import sys
import pygame as pg
import logging
import Project_GameState
from settings import *
# What each module does
# sys - This will set the recursion limit so that algorithms won't run on forever.
# settings - This will import the settings file in the current directory.
# Importing the GameState which will be used purely as the GUI for the application. As it
# As it stands right now, we draw the GUI information from a mix of this file and
# the GameState. In the next update the DisplayState will have more of that responsibility.
from Project_GameState import GameState as DisplayState
# set which version of the GameState you will use for each Player in the game
from Project_GameState import GameState as P1GameState
from Project_GameState import GameState as P2GameState
# set which Player object you will use for each Player in the game
P1Player = Project_GameState.Player_AlphaBeta(1, 0)
P2Player = Project_GameState.Player_AlphaBeta(2, 0) # Project_GameState.Player_AlphaBeta(2, 0)
# The basic Checkers class.
# The init function where we initalize important information about pygame and checkers.
# The main game update loop of the application
# This will draw everything on the screen.
# This will draw the checkered background of the checkers screen.
# This will draw a list of pieces on a board using a list of tuples.
# draw some text with the given arguments
# reset the game to a the default state board
# This will execute a move when passed a new row/column location.
# This function will do a basic move
# Returns the tile (r,c) on the grid underneath a given mouse position in pixels
# This function will handle all user input handling.
# This is the main executable part of the program.
sys.setrecursionlimit(10000) # Can't go past 10000 recursive depth.
# This is the basic game object
game_object = Checkers()
# This is the "game loop" of the program, it is an infinite loop that runs the game.
while True:
game_object.update()
| [
11748,
25064,
198,
11748,
12972,
6057,
355,
23241,
198,
11748,
18931,
198,
11748,
4935,
62,
8777,
9012,
198,
6738,
6460,
1330,
1635,
198,
198,
2,
1867,
1123,
8265,
857,
198,
2,
25064,
532,
770,
481,
900,
262,
664,
24197,
4179,
523,
32... | 3.503367 | 594 |
import random
import jasmin
from .test_jcli import jCliWithAuthTestCases
from hashlib import md5
from twisted.internet import defer
| [
11748,
4738,
198,
11748,
474,
292,
1084,
198,
6738,
764,
9288,
62,
73,
44506,
1330,
474,
2601,
72,
3152,
30515,
14402,
34,
1386,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
6738,
19074,
13,
37675,
1330,
29135,
628
] | 3.5 | 38 |
# Francis Adepoju, 2018-02-16
#Project Euler problem #1
summ = 0
i = 1
while i < 1000:
# For all integers either divisible by 3 or by 5, sum them up into variable called summ
if i % 3 == 0 or i % 5 == 0:
summ += i
i += 1
print("Sum of divisible of 3 or 5 les than 1000 equals: ", summ)
| [
2,
12155,
1215,
538,
13210,
84,
11,
2864,
12,
2999,
12,
1433,
198,
2,
16775,
412,
18173,
1917,
1303,
16,
198,
198,
82,
13929,
796,
657,
198,
72,
796,
352,
628,
198,
4514,
1312,
1279,
8576,
25,
198,
220,
220,
220,
1303,
1114,
477,
... | 2.649573 | 117 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from google.appengine.ext import ndb
import guestbook
import pytest
import webtest
@pytest.fixture
| [
2,
15069,
1584,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.742105 | 190 |
import logging
import pytest
import six
if six.PY2:
pytestmark = pytest.mark.skip("Python 2 non compatible code") # py2 styling
else:
from dbnd_examples.tutorial_syntax import T23_task_with_mutliple_outputs_py3 as T23
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
11748,
12972,
9288,
198,
11748,
2237,
628,
198,
361,
2237,
13,
47,
56,
17,
25,
198,
220,
220,
220,
12972,
9288,
4102,
796,
12972,
9288,
13,
4102,
13,
48267,
7203,
37906,
362,
1729,
11670,
2438,
4943,
220,
1303,... | 2.69 | 100 |
import docx
import subprocess
if __name__ == "__main__":
filee = 'test_restore/Informare Sisteme noi - sindicat și angajați_v1 (1).docx'
tmp_file = "test_restore/tmp.txt"
tmp_res = 'test_restore/tmp_res.txt'
#print(get_docx_text(filee))
doc = docx.Document(filee)
#output = open("output_"+cur_date+"_.txt", "w")
for i in range(len(doc.paragraphs)):
#if 'sea' in paragraph.text:
par = doc.paragraphs[i]
with open(tmp_file, 'w') as f:
f.write(par.text)
subprocess.call(["python3", "model_diacritice.py", "-restore", tmp_file, "-load" ,"only_chars_win31_256_64_55","-classes" , "5"])
txt_res = ""
print('x')
with open(tmp_res, 'r') as f:
for line in f:
txt_res += line
par.text = txt_res
doc.save('test_restore/modified.docx')
| [
11748,
2205,
87,
198,
11748,
850,
14681,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2393,
68,
796,
705,
9288,
62,
2118,
382,
14,
818,
687,
533,
311,
396,
34755,
645,
72,
532,
264,
521,
2... | 1.99768 | 431 |
import os
import pytest
from conda_build import api
from .utils import thisdir
@pytest.fixture()
@pytest.mark.sanity
@pytest.mark.serial
| [
11748,
28686,
198,
11748,
12972,
9288,
198,
198,
6738,
1779,
64,
62,
11249,
1330,
40391,
198,
198,
6738,
764,
26791,
1330,
428,
15908,
628,
198,
31,
9078,
9288,
13,
69,
9602,
3419,
628,
198,
31,
9078,
9288,
13,
4102,
13,
12807,
414,
... | 2.803922 | 51 |
from configparser import ConfigParser
config = ConfigParser()
config.read('data/channels.ini')
| [
6738,
4566,
48610,
1330,
17056,
46677,
198,
198,
11250,
796,
17056,
46677,
3419,
198,
11250,
13,
961,
10786,
7890,
14,
354,
8961,
13,
5362,
11537,
198
] | 3.692308 | 26 |
from absl import logging
import json
import tornado.web
from icubam.www.handlers import base
| [
6738,
2352,
75,
1330,
18931,
198,
11748,
33918,
198,
11748,
33718,
13,
12384,
198,
198,
6738,
14158,
549,
321,
13,
2503,
13,
4993,
8116,
1330,
2779,
628
] | 3.518519 | 27 |
import time
from copy import deepcopy
import torch
import torch.cuda.amp as amp
from .default_trainer import DefaultTrainer
# TODO over-ride from_checkpoint, load_checkpoint to load the model
| [
11748,
640,
198,
6738,
4866,
1330,
2769,
30073,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
66,
15339,
13,
696,
355,
20766,
198,
6738,
764,
12286,
62,
2213,
10613,
1330,
15161,
2898,
10613,
628,
198,
2,
16926,
46,
625,
12,
13154,
4... | 3.545455 | 55 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-01 10:27
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
3070,
12,
486,
838,
25,
1983,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
19... | 2.736842 | 57 |
# # -*- coding:utf-8 -*-
# Author:wancong
# Date: 2018-04-30
from pyhanlp import *
def demo_occurrence():
""" 演示词共现统计
>>> demo_occurrence()
信息=1
先进=1
图形图像=1
处理=2
技术=1
方面=1
比较=1
目前=1
算法=2
视频=1
计算机=1
音视频=1
<BLANKLINE>
信息→算法= tf=1 mi=8.856243954648566 le=0.0 re=0.0 score=NaN
先进→视频= tf=1 mi=6.594180024229758 le=0.0 re=0.0 score=NaN
图形图像→技术= tf=1 mi=20.46090157247892 le=0.0 re=0.0 score=NaN
处理→方面= tf=1 mi=4.04319404601706 le=0.0 re=0.0 score=NaN
处理→算法= tf=1 mi=9.247593120777918 le=0.0 re=0.0 score=NaN
技术→信息= tf=1 mi=4.012478779454232 le=0.0 re=0.0 score=NaN
方面→目前= tf=1 mi=12.825210015738996 le=0.0 re=0.0 score=NaN
比较→先进= tf=1 mi=6.050081533887511 le=0.0 re=0.0 score=NaN
目前→比较= tf=1 mi=13.377862072309142 le=0.0 re=0.0 score=NaN
算法→处理= tf=1 mi=9.247593120777918 le=0.0 re=0.0 score=NaN
视频→处理= tf=1 mi=5.139944592929454 le=0.0 re=0.0 score=NaN
计算机→音视频= tf=1 mi=20.46090157247892 le=0.0 re=0.0 score=NaN
音视频→图形图像= tf=1 mi=20.46090157247892 le=0.0 re=0.0 score=NaN
<BLANKLINE>
信息→算法→处理= tf=1 mi=0.0 le=0.0 re=0.0
先进→视频→处理= tf=1 mi=0.0 le=0.0 re=0.0
图形图像→技术→信息= tf=1 mi=0.0 le=0.0 re=0.0
处理→方面→目前= tf=1 mi=0.0 le=0.0 re=0.0
技术→信息→算法= tf=1 mi=0.0 le=0.0 re=0.0
方面→目前→比较= tf=1 mi=0.0 le=0.0 re=0.0
比较→先进→视频= tf=1 mi=0.0 le=0.0 re=0.0
目前→比较→先进= tf=1 mi=0.0 le=0.0 re=0.0
算法→处理→方面= tf=1 mi=0.0 le=0.0 re=0.0
视频→处理→算法= tf=1 mi=0.0 le=0.0 re=0.0
计算机→音视频→图形图像= tf=1 mi=0.0 le=0.0 re=0.0
音视频→图形图像→技术= tf=1 mi=0.0 le=0.0 re=0.0
"""
Occurrence = JClass("com.hankcs.hanlp.corpus.occurrence.Occurrence")
PairFrequency = JClass("com.hankcs.hanlp.corpus.occurrence.PairFrequency")
TermFrequency = JClass("com.hankcs.hanlp.corpus.occurrence.TermFrequency")
TriaFrequency = JClass("com.hankcs.hanlp.corpus.occurrence.TriaFrequency")
occurrence = Occurrence()
occurrence.addAll("在计算机音视频和图形图像技术等二维信息算法处理方面目前比较先进的视频处理算法")
occurrence.compute()
unigram = occurrence.getUniGram()
for entry in unigram.iterator():
term_frequency = entry.getValue()
print(term_frequency)
print()
bigram = occurrence.getBiGram()
for entry in bigram.iterator():
pair_frequency = entry.getValue()
if pair_frequency.isRight():
print(pair_frequency)
print()
trigram = occurrence.getTriGram()
for entry in trigram.iterator():
tria_frequency = entry.getValue()
if tria_frequency.isRight():
print(tria_frequency)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| [
2,
1303,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
6434,
171,
120,
248,
86,
1192,
506,
198,
2,
7536,
25,
2864,
12,
3023,
12,
1270,
198,
6738,
12972,
7637,
34431,
1330,
1635,
628,
198,
4299,
13605,
62,
13966,
3392... | 1.43794 | 1,845 |
"""
Commandline interface
"""
import argparse
import enum
import functools
import sys
from typing import Dict, Iterator, List, Sequence, Set, TextIO, Tuple, Union
from . import __version__
from ._depinfo import DependencyInfo
from ._dotbuilder import export_to_dot
from ._htmlbuilder import export_to_html
from ._modulegraph import ModuleGraph
from ._nodes import BaseNode
from ._utilities import saved_sys_path
# --- Helper code for the Graphviz builder
# Mapping from node class name to Graphviz attributes for the
# node.
NODE_ATTR = {
"Script": {"shape": "note"},
"Package": {"shape": "folder"},
"SourceModule": {"shape": "rectangle"},
"BytecodeModule": {"shape": "rectangle"},
"ExtensionModule": {"shape": "parallelogram"},
"BuiltinModule": {"shape": "hexagon"},
"MissingModule": {"shape": "rectangle", "color": "red"},
}
def format_node(node: BaseNode, mg: ModuleGraph) -> Dict[str, Union[str, int]]:
"""
Return a dict of Graphviz attributes for *node*
Args:
node: The node to format
mg: The graph containing the node
Returns:
Graphviz attributes for the node
"""
results: Dict[str, Union[str, int]] = {}
if node in mg.roots():
results["penwidth"] = 2
results["root"] = "true"
results.update(NODE_ATTR.get(type(node).__name__, {}))
return results
def format_edge(
source: BaseNode, target: BaseNode, edge: Set[DependencyInfo]
) -> Dict[str, Union[str, int]]:
"""
Return a dict of Graphviz attributes for an edge
Args:
source: Source node for the edge
target: Target node for the edge
edge: Set of edge attributes
Returns:
Graphviz attributes for the edge
"""
results: Dict[str, Union[str, int]] = {}
if all(e.is_optional for e in edge):
results["style"] = "dashed"
if source.identifier.startswith(target.identifier + "."):
results["weight"] = 10
results["arrowhead"] = "none"
return results
def group_nodes(graph: ModuleGraph) -> Iterator[Tuple[str, str, Sequence[BaseNode]]]:
"""
Detect groups of reachable nodes in the graph.
This function groups nodes in two ways:
- Group all nodes related to a particular distribution
- Group all nodes in the same stdlib package
Args:
graph: The dependency graph
Returns:
A list of ``(groupname, shape, nodes)`` for the
groupings.
"""
clusters: Dict[str, Tuple[str, str, List[BaseNode]]] = {}
for node in graph.iter_graph():
if not isinstance(node, BaseNode):
continue
if node.distribution is not None:
dist = node.distribution.name
if dist not in clusters:
clusters[dist] = (dist, "tab", [])
clusters[dist][-1].append(node)
return iter(clusters.values())
# ----
@enum.unique
class NodeType(enum.Enum):
"""
The types of nodes that can be added to
a dependency graph
"""
SCRIPT = enum.auto()
MODULE = enum.auto()
DISTRIBUTION = enum.auto()
@enum.unique
class OutputFormat(enum.Enum):
"""
The file formats that can be used for
output.
"""
HTML = "html"
GRAPHVIZ = "dot"
def parse_arguments(argv: List[str]) -> argparse.Namespace:
"""
Parse command-line arguments for the module.
The result namespace contains the following attributes:
- **node_type (NodeType)**: The type of node that should be added.
- **output_format (OutputFormat)**: File type for outputting the graph.
- **excludes (List[str])**: List of modules to exclude from the graph.
- **path (List[str])**: Directories to add to :data:`sys.path`.
- **output_file** (Optional[str])**: Filename to output to.
Args:
argv: The script arguments, usually ``sys.argv[1:]``
Returns:
The parsed options.
Raises:
SystemExit: On usage errors or when the user has requested help
"""
parser = argparse.ArgumentParser(
prog=f"{sys.executable.rsplit('/')[-1]} -mmodulegraph2",
description=f"Graph builder from modulegraph2 {__version__}",
)
parser.add_argument(
"-m",
"--module",
action="store_const",
const=NodeType.MODULE,
dest="node_type",
default=NodeType.MODULE,
help="The positional arguments are modules (the default)",
)
parser.add_argument(
"-s",
"--script",
action="store_const",
const=NodeType.SCRIPT,
dest="node_type",
help="The positional arguments are scripts",
)
parser.add_argument(
"-d",
"--distribution",
action="store_const",
const=NodeType.DISTRIBUTION,
dest="node_type",
help="The positional arguments are distributions",
)
parser.add_argument(
"-f",
"--format",
dest="output_format",
choices=[v.value for v in OutputFormat],
default=OutputFormat.HTML.value,
help="The output format (default: %(default)s)",
)
parser.add_argument(
"-x",
"--exclude",
dest="excludes",
action="append",
metavar="NAME",
default=[],
help="Add NAME to the list of module excludes",
)
parser.add_argument(
"-p",
"--path",
dest="path",
action="append",
metavar="PATH",
default=[],
help="Add PATH to the module search path",
)
parser.add_argument(
"-o",
"--output",
dest="output_file",
metavar="FILE",
default=None,
help="Write output to path (defaults to stdout)",
)
parser.add_argument("name", nargs="+", help="Names to add to the graph")
args = parser.parse_args(argv)
# Not sure if this can be done cleaner...
args.output_format = OutputFormat(args.output_format)
return args
def make_graph(args: argparse.Namespace) -> ModuleGraph:
"""
Build a dependency graph based on the command-line arguments.
Args:
args: The result of :func:`parse_arguments`.
Returns:
The generated graph
"""
with saved_sys_path():
for p in args.path[::-1]:
sys.path.insert(0, p)
mg = ModuleGraph()
mg.add_excludes(args.excludes)
if args.node_type == NodeType.MODULE:
for name in args.name:
mg.add_module(name)
elif args.node_type == NodeType.SCRIPT:
for name in args.name:
mg.add_script(name)
elif args.node_type == NodeType.DISTRIBUTION:
for name in args.name:
mg.add_distribution(name)
else: # pragma: nocover
raise AssertionError("Invalid NodeType")
return mg
def print_graph(file: TextIO, output_format: OutputFormat, mg: ModuleGraph) -> None:
"""
Output the graph in the given output format to a text stream.
Args:
file: The text stream to data should be written to
output_format: The format to use
mg: The graph to write
"""
if output_format == OutputFormat.HTML:
export_to_html(file, mg)
elif output_format == OutputFormat.GRAPHVIZ:
export_to_dot(
file, mg, functools.partial(format_node, mg=mg), format_edge, group_nodes
)
else: # pragma: nocover
raise AssertionError("Invalid OutputFormat")
def format_graph(args: argparse.Namespace, mg: ModuleGraph) -> None:
"""
Output the graph as specified in *args*.
Args:
args: Command-line arguments
mg: The graph to output.
"""
if args.output_file is None:
print_graph(sys.stdout, args.output_format, mg)
else:
try:
with open(args.output_file, "w") as fp:
print_graph(fp, args.output_format, mg)
except OSError as exc:
print(exc, file=sys.stderr)
raise SystemExit(1)
def main(argv: List[str]) -> None:
"""
Entry point for the module.
Args:
argv: Command-line arguments, should be ``sys.path[1:]``.
"""
args = parse_arguments(argv)
mg = make_graph(args)
format_graph(args, mg)
if __name__ == "__main__": # pragma: nocover
main(sys.argv[1:])
| [
37811,
198,
21575,
1370,
7071,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
33829,
198,
11748,
1257,
310,
10141,
198,
11748,
25064,
198,
6738,
19720,
1330,
360,
713,
11,
40806,
1352,
11,
7343,
11,
45835,
11,
5345,
11,
8255,
9399,
11... | 2.433138 | 3,410 |
import sys

# Redirect stdin so input() reads the bundled sample file.
sys.stdin = open("card_count_sample_input.txt", "r")

T = int(input())
# Several test cases are given; process each one in turn.
for test_case in range(1, T + 1):
    num = int(input())
    card_num = input()

    # Tally how many times each digit 0-9 appears among the first `num` cards.
    tally = [0] * 10
    for ch in card_num[:num]:
        tally[int(ch)] += 1

    best = max(tally)
    # The highest digit holding the maximum count wins ties
    # (digit 0 is the answer only when it alone has the maximum).
    winner = 0
    for digit in range(9, 0, -1):
        if tally[digit] == best:
            winner = digit
            break

    print(f'#{test_case} {winner} {best}')
| [
11748,
25064,
198,
17597,
13,
19282,
259,
796,
1280,
7203,
9517,
62,
9127,
62,
39873,
62,
15414,
13,
14116,
1600,
366,
81,
4943,
198,
198,
51,
796,
493,
7,
15414,
28955,
198,
198,
2,
23821,
245,
105,
167,
253,
105,
166,
108,
250,
... | 1.581602 | 337 |
# Generated by Django 2.1.4 on 2018-12-09 17:22
import django.core.validators
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
2864,
12,
1065,
12,
2931,
1596,
25,
1828,
198,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2024,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.95122 | 41 |
import numpy as np
import pandas as pd
from collections import defaultdict
from difflib import SequenceMatcher
from .settings import FILE_claims_count, FILE_qualifiers_count, FILE_total_count
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
814,
8019,
1330,
45835,
19044,
2044,
198,
6738,
764,
33692,
1330,
45811,
62,
6604,
82,
62,
9127,
11,
45811,
62,
13255,
... | 3.784314 | 51 |
import time
from colored_logs.logger import Logger, LogType#, LogEnvironmeent
# Demo of every log level provided by colored_logs.Logger.
log = Logger(
    ID='Test-id-1'
    # environment=LogEnvironmeent.HTML, # Override to print html logs
    # console_line_char_len=90 # Optionally provide how many chars does fir in one consolee line
)

log.info('This is an info log')
time.sleep(0.1)

# Changing the ID mid-run tags subsequent messages with the new ID.
log.ID = 'Test-id-2'
log.info('This is an info log with a new id')
log.ID = 'Test-id-1'
time.sleep(0.1)

# Walk through the remaining levels, pausing briefly after each message.
for emit, message in (
    (log.success, 'This is a success log'),
    (log.warning, 'This is a warning log'),
    (log.error, 'This is an error log'),
    (log.fail, 'This is a fail log'),
    (log.critical, 'This is a critical log'),
    (log.subtle, 'This is a subtle log'),
):
    emit(message)
    time.sleep(0.1)

# Long-running task indicator: start it, log while active, then stop it.
log.start_process('This will take a while')
time.sleep(2.0)
log.info('This is an info log while also logging the active process')
time.sleep(2.0)
duration_float_seconds = log.stop_process(
    log_type=LogType.Success,
    values='Successfully finished task'
)
11748,
640,
198,
198,
6738,
16396,
62,
6404,
82,
13,
6404,
1362,
1330,
5972,
1362,
11,
5972,
6030,
2,
11,
5972,
4834,
2268,
1326,
298,
198,
198,
6404,
796,
5972,
1362,
7,
198,
220,
220,
220,
4522,
11639,
14402,
12,
312,
12,
16,
6,... | 2.621134 | 388 |
import FWCore.ParameterSet.Config as cms
# NOTE(review): appears to be an EDProducer that chooses between the Kalman
# and legacy BMTF muon track-finder payloads based on the unpacked FED data —
# confirm against the L1TBMTFAlgoSelector plugin implementation.
l1tBmtfAlgoSelector = cms.EDProducer(
    'L1TBMTFAlgoSelector',
    # verbose = cms.untracked.bool(False),
    bmtfKalman = cms.InputTag("simKBmtfDigis:BMTF"),
    bmtfLegacy = cms.InputTag("simBmtfDigis:BMTF"),
    feds = cms.InputTag("rawDataCollector")
)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
75,
16,
83,
33,
16762,
69,
2348,
2188,
17563,
273,
796,
269,
907,
13,
1961,
11547,
2189,
7,
198,
220,
220,
220,
705,
43,
16,
51,
12261,
10234,
2348,
2188,
1... | 2.185714 | 140 |
# -*- coding: UTF-8 -*-
import MySQLdb
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
33476,
9945,
198
] | 2.222222 | 18 |
import sys
import pydart
import math
import controller
# Demo setup: load a full-body skeleton, pose it, and attach a controller.
print('Example: bipedJump')
pydart.init()
print('pydart initialization OK')
data_dir = pydart.misc.example_data_dir(__file__)
print('data_dir = ' + data_dir)
# 1/2000 s simulation timestep; geometry comes from the bundled skel file.
world = pydart.create_world(1.0 / 2000.0, data_dir + '/skel/fullbody1.skel')
print('pydart create_world OK')
# assumes skels[0] is the ground and skels[1] the biped — TODO confirm
skel = world.skels[1]
# Initialize the pose
q = skel.q
# Nudge generalized coordinates 2, 4 and 5; their meaning depends on the
# skeleton's DOF layout — verify against fullbody1.skel before changing.
q[(2, 4, 5)] = [0.02 * math.pi, -0.02, 0]
skel.set_positions(q)
print('skeleton position OK')
# Initialize the controller
skel.controller = controller.Controller(skel, world.dt)
print('create controller OK')
def keyboard_callback(world, key):
    """Programmable interactions: pressing 'S' snapshots the world to disk."""
    if key != 'S':
        return
    print('save world')
    world.save('test_world.txt')
print("'1'--'4': programmed interaction")
# Run the application
# With 'qt' on the command line use the Qt GUI (with a preset camera
# trackball); otherwise fall back to the GLUT viewer.
if 'qt' in sys.argv:
    tb = pydart.qtgui.Trackball(phi=-1.4, theta=-6.2, zoom=1.0,
                                rot=[-0.05, 0.07, -0.01, 1.00],
                                trans=[0.02, 0.09, -3.69])
    pydart.qtgui.run(title='bipedStand', simulation=world, trackball=tb,
                     keyboard_callback=keyboard_callback)
else:
    pydart.glutgui.run(title='bipedStand', simulation=world, trans=[0, 0, -3],
                       keyboard_callback=keyboard_callback)
| [
11748,
25064,
198,
11748,
279,
5173,
433,
198,
11748,
10688,
198,
11748,
10444,
198,
4798,
10786,
16281,
25,
14141,
276,
36046,
11537,
628,
198,
79,
5173,
433,
13,
15003,
3419,
198,
4798,
10786,
79,
5173,
433,
37588,
7477,
11537,
198,
1... | 2.248705 | 579 |
from orm import *
from orm import base
| [
6738,
393,
76,
1330,
1635,
198,
6738,
393,
76,
1330,
2779,
628
] | 3.333333 | 12 |
from chromosome import Chromosome as BaseChromosome
import population
from nodes import Deport, Customer, CustomerDistanceTable
from csv_reader import csv_read
import ga_params
# Wire the imported Chromosome implementation into the population module.
# BUG FIX: this line previously read ``population.Chromosome = Chromosome``,
# but the only import aliases that class as ``BaseChromosome``, so the bare
# name raised NameError at runtime. If a local subclass was intended here,
# restore it; otherwise this fix stands.
population.Chromosome = BaseChromosome

# Ask which run file to use, falling back to the configured default.
run_file_name = input("Enter run file name [default: C101_200]: ")
if not run_file_name:
    run_file_name = ga_params.run_file['name']
print('Running: "' + run_file_name + '"')

customers_input_read = csv_read(run_file_name, header_map=ga_params.run_file['header_map'])
# Row 0 doubles as the depot. NOTE(review): it is also re-added as a regular
# Customer by the comprehension below — confirm that is intentional.
customers = [Deport(**customers_input_read[0])]
customers += [Customer(**customer_dict) for customer_dict in customers_input_read]

# Pairwise distances between all nodes, used by the GA fitness evaluation.
customers_distance_table = CustomerDistanceTable(customers)

# Build the population and evolve it; report the best chromosome found.
ga_pop = population.Population(chromosome_width=len(customers), run_file_name=run_file_name, **ga_params.population)
best_chrome = ga_pop.evolve()
print(best_chrome)
| [
6738,
34348,
1330,
18255,
418,
462,
355,
7308,
1925,
398,
418,
462,
198,
11748,
3265,
198,
6738,
13760,
1330,
2129,
419,
11,
22092,
11,
22092,
45767,
10962,
198,
6738,
269,
21370,
62,
46862,
1330,
269,
21370,
62,
961,
198,
11748,
31986,... | 2.930303 | 330 |
import glfw
import compushady.config
import compushady.formats
import compushady
from compushady.shaders import hlsl
import struct
import platform
# Enable the backend's validation/debug layer before any device work happens.
compushady.config.set_debug(True)
print('Using device', compushady.get_current_device().name)
# Upload-heap staging buffer filled with one constant texel pattern
# (4 bytes per texel for a 512x512 texture).
buffer = compushady.Buffer(512 * 512 * 4, compushady.HEAP_UPLOAD)
buffer.upload(b'\xFF\x00\x00\x00' * 512 * 512)
texture = compushady.Texture2D(512, 512, compushady.formats.R8G8B8A8_UNORM)
buffer.copy_to(texture)
# Render target the compute shader rasterizes into.
target = compushady.Texture2D(512, 512, compushady.formats.B8G8R8A8_UNORM)
# Two triangles, three uint2 (x, y) vertices each: 12 uints in total.
staging_buffer = compushady.Buffer(4 * 2 * 3 * 2, compushady.HEAP_UPLOAD)
staging_buffer.upload(struct.pack('IIIIIIIIIIII', 10, 10,
                      200, 5, 50, 100, 10, 110, 200, 105, 50, 200))
vertices = compushady.Buffer(
    4 * 2 * 3 * 2, format=compushady.formats.R32G32_UINT)
staging_buffer.copy_to(vertices)
# Compute-shader rasterizer: one thread per pixel, one Z slice per triangle.
shader = hlsl.compile("""
Buffer<uint2> vertices : register(t0);
RWTexture2D<float4> target : register(u0);
float3 barycentric(float2 a, float2 b, float2 c, float2 p)
{
    float3 x = float3(c.x - a.x, b.x - a.x, a.x - p.x);
    float3 y = float3(c.y - a.y, b.y - a.y, a.y - p.y);
    float3 u = cross(x, y);
    if (abs(u.z) < 1.0)
    {
        return float3(-1, 1, 1);
    }
    return float3(1.0 - (u.x+u.y)/u.z, u.y/u.z, u.x/u.z);
}
void draw_triangle(uint2 a, uint2 b, uint2 c, uint2 p)
{
    float3 bc = barycentric(a, b, c, p);
    if (bc.x < 0 || bc.y < 0 || bc.z < 0)
    {
        return;
    }
    target[p] = float4(bc.x, bc.y, bc.z, 1);
}
[numthreads(8,8,1)]
void main(int3 tid : SV_DispatchThreadID)
{
    uint2 a = vertices[tid.z * 3];
    uint2 b = vertices[tid.z * 3 + 1];
    uint2 c = vertices[tid.z * 3 + 2];
    draw_triangle(a, b, c, uint2(tid.x, tid.y));
}
""")
compute = compushady.Compute(shader, srv=[vertices], uav=[target])
glfw.init()
# we do not want implicit OpenGL!
glfw.window_hint(glfw.CLIENT_API, glfw.NO_API)
window = glfw.create_window(target.width, target.height, "Rasterizer", None, None)
# Pick the native surface per platform; 3 = swapchain buffer count.
if platform.system() == 'Windows':
    swapchain = compushady.Swapchain(glfw.get_win32_window(
        window), compushady.formats.B8G8R8A8_UNORM, 3)
elif platform.system() == 'Darwin':
    from compushady.backends.metal import create_metal_layer
    ca_metal_layer = create_metal_layer(glfw.get_cocoa_window(window), compushady.formats.B8G8R8A8_UNORM)
    swapchain = compushady.Swapchain(ca_metal_layer, compushady.formats.B8G8R8A8_UNORM, 3)
else:
    swapchain = compushady.Swapchain((glfw.get_x11_display(), glfw.get_x11_window(
        window)), compushady.formats.B8G8R8A8_UNORM, 3)
# (x, y) is the presentation offset, shifting the image one pixel per frame.
x = 0
y = 0
while not glfw.window_should_close(window):
    glfw.poll_events()
    # One thread per pixel (8x8 groups); Z dimension covers the 2 triangles.
    compute.dispatch(target.width // 8, target.height // 8, 2)
    swapchain.present(target, x, y)
    x += 1
    y += 1
swapchain = None  # this ensures the swapchain is destroyed before the window
glfw.terminate()
| [
11748,
1278,
44482,
198,
11748,
552,
1530,
4597,
13,
11250,
198,
11748,
552,
1530,
4597,
13,
687,
1381,
198,
11748,
552,
1530,
4597,
198,
6738,
552,
1530,
4597,
13,
1477,
9972,
1330,
289,
75,
6649,
198,
11748,
2878,
198,
11748,
3859,
... | 2.195437 | 1,315 |
#
# Tutt library III location codes
#
# Maps III/Millennium location codes to full human-readable location names
# (long form, including room/floor detail for Special Collections).
# FIXES: removed duplicate literal keys 'xmr' and 'xfb' (identical values;
# duplicate keys in a dict display silently overwrite each other) and
# corrected the display-string typo 'Periodcals' -> 'Periodicals'.
FULL_CODE_MAP = {
    'dacc': 'Digital Archives of Colorado College',
    'ewww': 'Online',
    'ewwwd': 'Online Government Documents',
    'ewwwp': 'Online Periodicals',
    'ewwwn': 'Online',
    'tarf': 'Tutt Reference',
    'tarfa': 'Tutt 1st Floor South',
    'tarfc': 'Tutt Reference',
    'tarfd': 'Tutt Reference Desk',
    'tarfg': 'Tutt Reference Desk',
    'tarfi': 'Tutt Reference',
    'tarfm': 'Tutt Reference North 2nd Floor',
    'tarfo': 'Tutt Reference North 2nd Floor',
    'tban': 'Art Reading Room Tutt 2nd Floor South',
    'tbanf': 'Art Reading Room Tutt 2nd Floor South',
    'tb': 'Tutt 3rd Floor',
    'tbnc': 'Tutt 3rd Floor',
    'tbndp': 'Tutt Display',
    'tbnew': 'Tutt New Arrivals',
    'tbp': 'Tutt North Basement',
    'tbpnc': 'Tutt North Basement',
    'tcat': 'Tutt Cataloging Office',
    'tcas': 'CDROM/Cassette Tutt Circulation Desk',
    'tcbs': 'Tutt Leisure Reading',
    'tcurr': 'Tutt 3rd Floor',
    'tdacd': 'Government Documents CD-ROM index',
    'tfly': 'On The Fly',
    'tgr': 'Tutt North Basement',
    'tor': 'Technical Services',
    'tr': 'On Order',
    'tre': 'Electronic Reserves',
    'trp': 'Reserves Tutt 1st Floor',
    'trrm': 'Reserves Tutt 1st Floor',
    'trs': 'Reserves Tutt 1st Floor',
    'trsdo': 'Reserves Tutt 1st Floor',
    'trstv': 'Reserves Tutt 1st Floor',
    'td': 'Tutt South Basement',
    'tdo': 'Tutt South Basement',
    'tdcol': 'Tutt South Basement',
    'tde': 'Tutt South Basement',
    'tdea': 'Tutt South Basement',
    'tdem': 'Tutt South Basement',
    'tdemc': 'Tutt South Basement',
    'tdn': 'Tutt South Basement',
    'tdi': 'Tutt 1st Floor South',
    'tdm': 'Tutt 1st Floor South',
    'tdmt': 'Tutt 1st Floor South',
    'tdmf': 'Tutt 1st Floor South',
    'tdmi': 'Microforms Tutt 2nd Floor',
    'tdof': 'Tutt 1st Floor South',
    'tdscs': 'Tutt 1st Floor South',
    'tmaps': 'Tutt North 2nd Floor Map File',
    'tmi': 'Microforms Tutt 2nd Floor',
    'tmic': 'Microforms Tutt 2nd Floor',
    'tmico': 'Microforms Tutt 2nd Floor',
    'tmics': 'Microforms Tutt 2nd Floor',
    'tmifs': 'Microforms Tutt 2nd Floor',
    'tt': 'Tutt North Basement Theses',
    'ttlc': 'Teaching Learning Center',
    'ttla': 'Tutt North Basement Lit Award',
    'xfb': 'Fine Arts Center',
    'xm': 'Music Library Books and Scores',
    'xmcas': 'Music Library Cassette',
    'xmcat': 'Music Library Catalog Office',
    'xmcd': 'Music Library CD-ROM',
    'xmcir': 'Music Library Circ Desk',
    'xmdvd': 'Music Library DVD',
    'xmedd': 'Music Library Education DVD',
    'xmh': 'Music Library -NC Books and Scores',
    'xmhs': 'Music Library -NC Storage',
    'xmhsm': 'Music Library -NC Music Mini Scores',
    'xmins': 'Music Lib. -Instrument Storage',
    'xmld': 'Music Library',
    'xmlp1': 'Music Library LP/SLP Room',
    'xmmcd': 'Music Library Music CD',
    'xmmi': 'Music Library Microforms',
    'xmo': 'Music Library Oversize',
    'xmr': 'Music Library Reference',
    'xmper': 'Music Library Periodicals',
    'xms': 'Music Library Storage',
    'xmrt': 'Music Library Reel Tape',
    'xmrs': 'Music Library Reserves',
    'xmscm': 'Music Library MiniScores',
    'xmv': 'Music Library Video',
    'tper': 'Periodicals Tutt 2nd Floor',
    'tpero': 'Periodicals Tutt 2nd Floor',
    'tdmo': 'Oversize Tutt 2nd Floor',
    'tf': 'Folio Tutt 3rd Floor',
    'to': 'Oversize Tutt 3rd Floor',
    'tsa': 'Special Collections Audio',
    'tlr': 'Special Collections Lincoln Room',
    'tlrc': 'Special Collections Lincoln Room Small',
    'tlrf': 'Special Collections Lincoln Room Folio',
    'tlro': 'Special Collections Lincoln Room Oversize',
    'tlrp': 'Special Collections Lincoln Room Periodical',
    'tlrpa': 'Special Collections Lincoln Room Pamphlet',
    'tscc': 'Special Collections CC Room',
    'tsccf': 'Special Collections CC Room Folio',
    'tscco': 'Special Collections CC Room Oversize',
    'tsccp': 'Special Collections CC Room Periodical ',
    'tsnv': 'Special Collections CC Room Pamphlet ',
    'tsco': 'Special Collections Colorado Room',
    'tscof': 'Special Collections Colorado Room Folio',
    'tscoo': 'Special Collections Colorado Room Oversize',
    'tscop': 'Special Collections Colorado Room Periodical',
    'tsm': 'Special Collections Maps',
    'tsmi': 'Special Collections Microform',
    'tsmf': 'Special Collections Manuscripts File',
    'tsms': 'Special Collections Manuscripts',
    'tsof': 'Special Collections Offices',
    'tsra': 'Special Collections Rare',
    'tsraf': 'Special Collections Rare Folio',
    'tsrao': 'Special Collections Rare Oversize',
    'tsrat': 'Special Collections Rare Small',
    'tsse': 'Special Collections Special Editions',
    'tssef': 'Special Collections Special Editions Folio',
    'tsseo': 'Special Collections Special Editions Oversize',
    'tsset': 'Special Collections Special Editions Small',
    'tssto': 'Special Collections Storage Basement',
    'tsv': 'Special Collections Videos',
    'tv': 'Videos-Tutt 2nd Floor North',
    'tvc': 'Video-Tutt Circulation Desk',
    'tvdvd': 'DVD-Tutt Circulation Desk',
    'xbaca': 'Baca Campus',
    'xfcab': 'Fine Arts Center',
    'xfdsk': 'Fine Arts Center',
    'xfdvd': 'Fine Arts Center',
    'xffil': 'Fine Arts Center',
    'xfup': 'Fine Arts Center',
    'xfo': 'Fine Arts Center',
    'xfv': 'Fine Arts Center',
    'xsan': 'Anthropology Seminar-Barnes',
    'xsby': 'Biology Seminar-Olin',
    'xsbyo': 'Biology Seminar-Olin',
    'xsch': 'Barnes Chemistry Library',
    'xscho': 'Barnes Chemistry Library',
    'xsed': 'Education Dept. (Mierow)',
    'xsedc': 'Education Dept. (Mierow)',
    'xsedl': 'Education Dept. Computer Lab',
    'xsedp': 'Education Dept. (Mierow)',
    'xserc': 'Environmental Science Seminar',
    'xsetv': 'Environmental Science Seminar',
    'xsgeo': 'Geology Map Room',
    'xsgix': 'Keck GIS Commons',
    'xsgm': 'Geology Map Room',
    'xsmat': 'Math Seminar-Palmer',
    'xsph': 'Physics Seminar-Barnes',
    'xspho': 'Physics Seminar-Barnes',
    'xsps': 'Political Science Seminar-Palmer',
    'xsrus': 'Russian Seminar-Armstrong',
    'xwebb': 'Penrose Hospital',
}
# Maps III/Millennium location codes to short display names (same codes as
# FULL_CODE_MAP minus 'dacc', with Special Collections detail collapsed to
# the room level).
# FIX: removed duplicate literal keys 'xmr' and 'xfb' (identical values;
# duplicate keys in a dict display silently overwrite each other).
LOCATION_CODE_MAP = {
    'ewww': 'Online',
    'ewwwd': 'Online',
    'ewwwp': 'Online',
    'ewwwn': 'Online',
    'tarf': 'Tutt Reference',
    'tarfa': 'Tutt 1st Floor South',
    'tarfc': 'Tutt Reference',
    'tarfd': 'Tutt Reference Desk',
    'tarfg': 'Tutt Reference Desk',
    'tarfi': 'Tutt Reference',
    'tarfm': 'Tutt Reference North 2nd Floor',
    'tarfo': 'Tutt Reference North 2nd Floor',
    'tban': 'Art Reading Room Tutt 2nd Floor South',
    'tbanf': 'Art Reading Room Tutt 2nd Floor South',
    'tb': 'Tutt 3rd Floor',
    'tbnc': 'Tutt 3rd Floor',
    'tbndp': 'Tutt Display',
    'tbnew': 'Tutt New Arrivals',
    'tbp': 'Tutt North Basement',
    'tbpnc': 'Tutt North Basement',
    'tcat': 'Tutt Cataloging Office',
    'tcas': 'CDROM/Cassette Tutt Circulation Desk',
    'tcbs': 'Tutt Leisure Reading',
    'tcurr': 'Tutt 3rd Floor',
    'tdacd': 'Government Documents CD-ROM index',
    'tfly': 'On The Fly',
    'tgr': 'Tutt North Basement',
    'tor': 'Technical Services',
    'tr': 'On Order',
    'tre': 'Electronic Reserves',
    'trp': 'Reserves Tutt 1st Floor',
    'trrm': 'Reserves Tutt 1st Floor',
    'trs': 'Reserves Tutt 1st Floor',
    'trsdo': 'Reserves Tutt 1st Floor',
    'trstv': 'Reserves Tutt 1st Floor',
    'td': 'Tutt South Basement',
    'tdo': 'Tutt South Basement',
    'tdcol': 'Tutt South Basement',
    'tde': 'Tutt South Basement',
    'tdea': 'Tutt South Basement',
    'tdem': 'Tutt South Basement',
    'tdemc': 'Tutt South Basement',
    'tdn': 'Tutt South Basement',
    'tdi': 'Tutt 1st Floor South',
    'tdm': 'Tutt 1st Floor South',
    'tdmt': 'Tutt 1st Floor South',
    'tdmf': 'Tutt 1st Floor South',
    'tdmi': 'Microforms Tutt 2nd Floor',
    'tdof': 'Tutt 1st Floor South',
    'tdscs': 'Tutt 1st Floor South',
    'tmaps': 'Tutt North 2nd Floor Map File',
    'tmi': 'Microforms Tutt 2nd Floor',
    'tmic': 'Microforms Tutt 2nd Floor',
    'tmico': 'Microforms Tutt 2nd Floor',
    'tmics': 'Microforms Tutt 2nd Floor',
    'tmifs': 'Microforms Tutt 2nd Floor',
    'tt': 'Tutt North Basement Theses',
    'ttlc': 'Teaching Learning Center',
    'ttla': 'Tutt North Basement Lit Award',
    'xfb': 'Fine Arts Center',
    'xm': 'Music Library Books and Scores',
    'xmcas': 'Music Library Cassette',
    'xmcat': 'Music Library Catalog Office',
    'xmcd': 'Music Library CD-ROM',
    'xmcir': 'Music Library Circ Desk',
    'xmdvd': 'Music Library DVD',
    'xmedd': 'Music Library Education DVD',
    'xmh': 'Music Library -NC Books and Scores',
    'xmhs': 'Music Library -NC Storage',
    'xmhsm': 'Music Library -NC Music Mini Scores',
    'xmins': 'Music Lib. -Instrument Storage',
    'xmld': 'Music Library',
    'xmlp1': 'Music Library LP/SLP Room',
    'xmmcd': 'Music Library Music CD',
    'xmmi': 'Music Library Microforms',
    'xmo': 'Music Library Oversize',
    'xmr': 'Music Library Reference',
    'xmper': 'Music Library Periodicals',
    'xms': 'Music Library Storage',
    'xmrt': 'Music Library Reel Tape',
    'xmrs': 'Music Library Reserves',
    'xmscm': 'Music Library MiniScores',
    'xmv': 'Music Library Video',
    'tper': 'Periodicals Tutt 2nd Floor',
    'tpero': 'Periodicals Tutt 2nd Floor',
    'tdmo': 'Oversize Tutt 2nd Floor',
    'tf': 'Folio Tutt 3rd Floor',
    'to': 'Oversize Tutt 3rd Floor',
    'tsa': 'Special Collections Audio',
    'tlr': 'Special Collections Lincoln Room',
    'tlrc': 'Special Collections Lincoln Room',
    'tlrf': 'Special Collections Lincoln Room',
    'tlro': 'Special Collections Lincoln Room',
    'tlrp': 'Special Collections Lincoln Room',
    'tlrpa': 'Special Collections Lincoln Room',
    'tscc': 'Special Collections CC Room',
    'tsccf': 'Special Collections CC Room',
    'tscco': 'Special Collections CC Room',
    'tsccp': 'Special Collections CC Room',
    'tsnv': 'Special Collections CC Room',
    'tsco': 'Special Collections Colorado Room',
    'tscof': 'Special Collections Colorado Room',
    'tscoo': 'Special Collections Colorado Room',
    'tscop': 'Special Collections Colorado Room',
    'tsm': 'Special Collections Maps',
    'tsmi': 'Special Collections Microform',
    'tsmf': 'Special Collections Manuscripts',
    'tsms': 'Special Collections Manuscripts',
    'tsof': 'Special Collections Offices',
    'tsra': 'Special Collections Rare',
    'tsraf': 'Special Collections Rare',
    'tsrao': 'Special Collections Rare',
    'tsrat': 'Special Collections Rare',
    'tsse': 'Special Collections Special Editions',
    'tssef': 'Special Collections Special Editions',
    'tsseo': 'Special Collections Special Editions',
    'tsset': 'Special Collections Special Editions',
    'tssto': 'Special Collections Storage Basement',
    'tsv': 'Special Collections Videos',
    'tv': 'Videos-Tutt 2nd Floor North',
    'tvc': 'Video-Tutt Circulation Desk',
    'tvdvd': 'DVD-Tutt Circulation Desk',
    'xbaca': 'Baca Campus',
    'xfcab': 'Fine Arts Center',
    'xfdsk': 'Fine Arts Center',
    'xfdvd': 'Fine Arts Center',
    'xffil': 'Fine Arts Center',
    'xfup': 'Fine Arts Center',
    'xfo': 'Fine Arts Center',
    'xfv': 'Fine Arts Center',
    'xsan': 'Anthropology Seminar-Barnes',
    'xsby': 'Biology Seminar-Olin',
    'xsbyo': 'Biology Seminar-Olin',
    'xsch': 'Barnes Chemistry Library',
    'xscho': 'Barnes Chemistry Library',
    'xsed': 'Education Dept. (Mierow)',
    'xsedc': 'Education Dept. (Mierow)',
    'xsedl': 'Education Dept. Computer Lab',
    'xsedp': 'Education Dept. (Mierow)',
    'xserc': 'Environmental Science Seminar',
    'xsetv': 'Environmental Science Seminar',
    'xsgeo': 'Geology Map Room',
    'xsgix': 'Keck GIS Commons',
    'xsgm': 'Geology Map Room',
    'xsmat': 'Math Seminar-Palmer',
    'xsph': 'Physics Seminar-Barnes',
    'xspho': 'Physics Seminar-Barnes',
    'xsps': 'Political Science Seminar-Palmer',
    'xsrus': 'Russian Seminar-Armstrong',
    'xwebb': 'Penrose Hospital',
}
# Government-documents shelving locations (subset of the Tutt codes above).
GOVDOCS_COLLECTIONS = dict(
    tdacd='Government Documents CD-ROM index',
    td='Tutt South Basement',
    tdi='Tutt 1st Floor South',
    tdm='Tutt 1st Floor South',
    tdmf='Tutt 1st Floor South',
    tdmi='Microforms Tutt 2nd Floor',
    tdmo='Oversize Tutt 2nd Floor',
    tdmt='Tutt 1st Floor South',
    tdn='Tutt South Basement',
    tdof='Tutt 1st Floor South',
    tdo='Tutt South Basement',
    tdcol='Tutt South Basement',
    tde='Tutt South Basement',
    tdea='Tutt South Basement',
    ewwwd='Online',
    tdem='Tutt South Basement',
    tdemc='Tutt South Basement',
)
# Special Collections shelving locations, collapsed to room-level names.
SPECIAL_COLLECTIONS = dict(
    tsa='Special Collections Audio',
    tlr='Special Collections Lincoln Room',
    tlrc='Special Collections Lincoln Room',
    tlrf='Special Collections Lincoln Room',
    tlro='Special Collections Lincoln Room',
    tlrp='Special Collections Lincoln Room',
    tlrpa='Special Collections Lincoln Room',
    tscc='Special Collections CC Room',
    tsccf='Special Collections CC Room',
    tscco='Special Collections CC Room',
    tsccp='Special Collections CC Room',
    tsnv='Special Collections CC Room',
    tsco='Special Collections Colorado Room',
    tscof='Special Collections Colorado Room',
    tscoo='Special Collections Colorado Room',
    tscop='Special Collections Colorado Room',
    tsm='Special Collections Maps',
    tsmi='Special Collections Microform',
    tsmf='Special Collections Manuscripts',
    tsms='Special Collections Manuscripts',
    tsof='Special Collections Offices',
    tsra='Special Collections Rare',
    tsraf='Special Collections Rare',
    tsrao='Special Collections Rare',
    tsrat='Special Collections Rare',
    tsse='Special Collections Special Editions',
    tssef='Special Collections Special Editions',
    tsseo='Special Collections Special Editions',
    tsset='Special Collections Special Editions',
    tssto='Special Collections Storage Basement',
    tsv='Special Collections Videos',
)
| [
2,
198,
2,
309,
15318,
5888,
6711,
4067,
12416,
198,
2,
220,
198,
37,
9994,
62,
34,
16820,
62,
33767,
796,
1391,
198,
220,
220,
220,
705,
67,
4134,
10354,
705,
27640,
22275,
286,
7492,
5535,
3256,
198,
220,
220,
220,
705,
413,
138... | 2.335166 | 6,367 |
from ...app import cache
from ... import requests as r
from . import main
from flask import render_template
from datetime import datetime
import dart.common
import dart.common.html
@main.route("/")
@main.route("/hosts")
@main.route("/host/<string:fqdn>")
@main.route("/processes")
@main.route("/process/<string:name>")
@main.route("/register")
| [
6738,
2644,
1324,
1330,
12940,
198,
6738,
2644,
1330,
7007,
355,
374,
198,
6738,
764,
1330,
1388,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
35970,
13,
11321,
198,
11748,
35970,
13,
113... | 2.991597 | 119 |
from django.core.management import BaseCommand
from declarations.models import Declaration
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
7308,
21575,
198,
6738,
31713,
13,
27530,
1330,
24720,
628
] | 5.111111 | 18 |
import sys
import yaml
import pkgutil
import requests
import logging
import re
import os
from datetime import date
def percent_list(part_list, whole_list):
    """Return (len(whole_list), percentage of *whole_list* not covered by
    *part_list*, rounded to 2 decimals); (0, 0) when *whole_list* is empty."""
    total = len(whole_list)
    if not total:
        return (total, 0)
    covered = 100 * float(len(part_list)) / float(total)
    return (total, round(100 - covered, 2))
def inspect_source(pattern, string):
    """Extract the unique domains in *string* matching *pattern* (multiline
    regex); logs how many raw matches there were and the duplication rate."""
    logging.debug("*** Searching valid domains...")
    # All matches, duplicates included.
    hits = re.findall(pattern, string, re.M)
    # Deduplicate (order is unspecified, as with the original set()).
    unique_domains = list(set(hits))
    total = len(hits)
    dup_pct = 0 if not total else round(
        100 - 100 * float(len(unique_domains)) / float(total), 2)
    logging.debug("*** domains=%s duplicated=%s%%" % (total, dup_pct))
    return unique_domains
def fetch(ext_cfg=None):
    """Fetch all configured sources and return the unique blocked domains.

    Args:
        ext_cfg: optional YAML document whose top-level keys override the
            packaged ``blocklist.conf`` defaults.

    Returns:
        list: deduplicated domains, minus anything in cfg["whitelist"].

    Exits the process (status 1) if the default or external config is invalid.
    """
    # read default config
    try:
        conf = pkgutil.get_data(__package__, 'blocklist.conf')
        cfg = yaml.safe_load(conf)
    except Exception as e:
        logging.error("invalid config: %s" % e)
        sys.exit(1)
    # overwrite config with external config ?
    if ext_cfg is not None:
        try:
            cfg.update( yaml.safe_load(ext_cfg) )
        except Exception as e:
            logging.error("invalid external config: %s" % e)
            sys.exit(1)
    # init logger
    level = logging.INFO
    if cfg["verbose"]: level = logging.DEBUG
    logging.basicConfig(format='%(asctime)s %(message)s',
                        stream=sys.stdout, level=level)
    domains_bl = []
    # feching all sources; a failed download is logged and skipped, not fatal
    for s in cfg["sources"]:
        for u in s["urls"]:
            try:
                r = requests.get(u, timeout=float(cfg["timeout"]), verify=cfg["tlsverify"])
            except requests.exceptions.RequestException as e:
                logging.error("requests exception: %s" % e)
            else:
                if r.status_code != 200:
                    logging.error("http error: %s" % r.status_code)
                else:
                    # harvest domains from the body with the per-source regex
                    domains_bl.extend(inspect_source(s["pattern"], r.text))
    # add more domains to the blocklist ?
    if cfg["blacklist"] is not None:
        domains_bl.extend(cfg["blacklist"])
    # remove duplicated domains
    domains_unified = list(set(d for d in domains_bl))
    w,p = percent_list(domains_unified,domains_bl)
    logging.debug("blocklist total=%s duplicated=%s%%" % (len(domains_unified),p))
    # remove domains from the whilelist
    set_domains = set(domains_unified)
    set_whitelist = set(cfg["whitelist"])
    set_domains.difference_update(set_whitelist)
    domains_unified = list(set_domains)
    logging.debug("blocklist without domains from whitelist total=%s" % len(domains_unified))
    return domains_unified
def save(filename, data):
    """Write *data* to *filename*, replacing any previous content."""
    with open(filename, 'w') as out:
        out.write(data)
def save_raw(filename, ext_cfg=None):
    """Fetch the blocklist and save it as one bare domain per line."""
    # feching bad domains
    blocked = fetch(ext_cfg=ext_cfg)
    lines = [
        "# Generated with blocklist-aggregator",
        "# Updated: %s" % date.today(),
        "",
    ]
    lines.extend(blocked)
    save(filename, "\n".join(lines))
def save_hosts(filename, ip="0.0.0.0", ext_cfg=None):
    """Fetch the blocklist and save it in hosts-file format (``<ip> <domain>``)."""
    # feching bad domains
    blocked = fetch(ext_cfg=ext_cfg)
    lines = [
        "# Generated with blocklist-aggregator",
        "# Updated: %s" % date.today(),
        "",
    ]
    # Prefix every domain with the sink IP, e.g. "0.0.0.0 example.com".
    lines.extend("%s " % ip + domain for domain in blocked)
    # save-it in a file
    save(filename, "\n".join(lines))
198,
11748,
25064,
198,
11748,
331,
43695,
198,
11748,
279,
10025,
22602,
198,
11748,
7007,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
4299,
1411,
62,
4868,
7,
3911,
62,
4868,
11,... | 2.31537 | 1,633 |
import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from django.db.models import Q
from osf.models import OSFUser
from website.app import init_app
from framework.database import paginated
from scripts.analytics.base import EventAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
    init_app()
    # NOTE(review): `UserDomainEvents` is neither imported nor defined in this
    # file as shown — this raises NameError as written. It presumably lives in
    # scripts.analytics alongside EventAnalytics; verify against the repo.
    user_domain_events = UserDomainEvents()
    args = user_domain_events.parse_args()
    # Optional --date argument selects the reporting day; None = default.
    date = parse(args.date).date() if args.date else None
    events = user_domain_events.get_events(date)
    user_domain_events.send_events(events)
11748,
12972,
22877,
198,
11748,
18931,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
198,
6738,
267,
28202,
... | 3.1 | 210 |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
from django.contrib import admin
from front.views import (
teacherprofile,
studentprofile,
recieve_comment,
searchoffer,
searchneed,
home,
comment_posted,
SignupTeacherView,
newteacherprofile,
editteacherprofile,
ajax_new_skill,
)
# URL routing table. Order matters: Django uses the first regex that matches.
urlpatterns = [
    url(r"^admin/", include(admin.site.urls)),
    url(r"^$", TemplateView.as_view(template_name="homepage.html"), name="home"),
    url(r"^account/signup/teacher/$", SignupTeacherView.as_view(), name="teacher_signup"),
    url(r"^newteacherprofile/$", newteacherprofile, name="new_teacher_profile"),
    url(r"^account/", include("account.urls")), #pinax urls
    url(r"^recieve_data/", recieve_comment, name="recieve_comment"),
    # Teacher profiles are addressed by numeric id captured as `teacher_id`.
    url(r"^teacherprofile/(?P<teacher_id>\d+)/$", teacherprofile, name="teacherprofile"),
    url(r'^editteacherprofile/$', editteacherprofile, name="editteacherprofile"),
    url(r"^studentprofile/$", studentprofile, name="studentprofile"),
    url(r"^searchoffer/$", searchoffer, name="searchoffer"),
    url(r"^searchneed/$", searchneed, name="searchneed"),
    url(r"^home/$", home, name="homesite"),
    url(r'^comments/posted/$', comment_posted, name="commentposted"),
    url(r'^comments/', include("django_comments_xtd.urls")),
    url(r'^ajax/skills/new/$', ajax_new_skill, name="ajax-new-skill"),
]
# Serve user-uploaded media through Django (development setups).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12708,
1330,
9037,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
... | 2.633058 | 605 |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="olsq",
version="0.0.4",
license = "BSD",
author="Daniel Bochen Tan",
author_email="bctan@cs.ucla.edu",
description="Optimal Layout Synthesis for Quantum Computing (OLSQ) for mapping and scheduling quantum circuits",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tbcdebug/OLSQ",
project_urls={
"Bug Tracker": "https://github.com/tbcdebug/OLSQ/issues",
},
install_requires=[
"networkx>=2.5",
"z3-solver>=4.8.9.0",
],
packages=setuptools.find_packages(),
package_data={ "olsq": ["devices/*", "benchmarks/*"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
... | 2.431762 | 403 |
import argparse
import bpy
import sys
if __name__ == "__main__":
    # Blender passes its own CLI options first; only the arguments after the
    # "--" separator belong to this script.
    if "--" not in sys.argv:
        argv = []  # as if no args are passed
    else:
        argv = sys.argv[sys.argv.index("--") + 1:]  # get all args after "--"
    # NOTE(review): `main` is not defined in this file as shown — presumably
    # defined elsewhere in the module; verify before running standalone.
    main(argv)
| [
11748,
1822,
29572,
198,
11748,
275,
9078,
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
366,
438,
1,
407,
287,
25064,
13,
853,
85,
25,
198,
220,
220,
220,
220,
... | 2.216216 | 111 |
# ANSI SGR codes 7;30 render the greeting in reverse video with a black
# foreground; the trailing \033[m resets the terminal attributes.
message = '\033[7;30mOlá Mundo!\033[m'
print(message)
| [
4798,
10786,
59,
44427,
58,
22,
26,
1270,
76,
30098,
6557,
33324,
78,
0,
59,
44427,
58,
76,
11537,
198
] | 1.8 | 20 |
'''An example to show how to set up an pommerman game programmatically'''
import pommerman
from pommerman import agents
import sys
sys.path.append("../pommerman")
import fight
def main():
    '''Simple function to bootstrap a game.

    Use this as an example to set up your training env.

    BUG FIX: ``info`` was printed unconditionally after every episode, but it
    is only bound by ``env.step()``; on episodes handled by ``fight.run`` the
    original code raised NameError (already on episode 0). The result print
    now lives in the branch that actually produces it.
    '''
    # Print all possible environments in the Pommerman registry
    print(pommerman.REGISTRY)

    # Generate a json every `json_check` episodes
    json_check = 5

    # Create a set of agents (exactly four)
    agent_list = [
        agents.SimpleAgent(),
        agents.RandomAgent(),
        agents.SimpleAgent(),
        agents.RandomAgent(),
    ]
    deep_agents = 'test::agents.SimpleAgent,test::agents.RandomAgent,test::agents.RandomAgent,test::agents.SimpleAgent'

    # Make the "Free-For-All" environment using the agent list
    config = 'PommeFFACompetition-v0'
    env = pommerman.make(config, agent_list)

    # Run the episodes just like OpenAI Gym
    for i_episode in range(20):
        if i_episode % json_check == 0:
            # Recorded episode: fight.run drives the env and dumps JSON.
            fight.run(config, deep_agents, record_json_dir="test_json/test_json" + str(i_episode))
            print('Episode {} finished'.format(i_episode))
        else:
            state = env.reset()
            done = False
            info = None
            while not done:
                actions = env.act(state)
                state, reward, done, info = env.step(actions)
            print('Episode {} finished'.format(i_episode))
            print("Final Result: ", info)
    env.close()
if __name__ == '__main__':
main()
| [
7061,
6,
2025,
1672,
284,
905,
703,
284,
900,
510,
281,
279,
296,
647,
805,
983,
1430,
49454,
7061,
6,
198,
11748,
279,
296,
647,
805,
198,
6738,
279,
296,
647,
805,
1330,
6554,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
... | 2.43832 | 762 |
from reclaimer.h2.constants import HALO2_MAP_TYPES
| [
6738,
302,
17111,
13,
71,
17,
13,
9979,
1187,
1330,
42968,
46,
17,
62,
33767,
62,
9936,
47,
1546,
201,
198,
201,
198
] | 2.347826 | 23 |
# Generated by Django 2.2.6 on 2019-10-28 20:09
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
21,
319,
13130,
12,
940,
12,
2078,
1160,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from __future__ import annotations
from typing import Any
from .abc import ABCEncoder
class Message:
"""Message returned from queue. Should not be created manually."""
__slots__ = frozenset(("_priority", "_data"))
@classmethod
@classmethod
@property
def priority(self) -> int:
"""Message priority."""
return self._priority
@property
def data(self) -> Any:
"""Message data."""
return self._data
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
764,
39305,
1330,
9738,
27195,
12342,
628,
198,
4871,
16000,
25,
198,
220,
220,
220,
37227,
12837,
4504,
422,
16834,
13,
10358,
407,
307,
2727,
145... | 2.859756 | 164 |
import math
import concurrent.futures
from functools import partial
MAX_VALUE = 2_000_000
THREADS = 50
size = MAX_VALUE // THREADS
childrens = []
if __name__ == "__main__":
main() | [
11748,
10688,
198,
11748,
24580,
13,
69,
315,
942,
198,
6738,
1257,
310,
10141,
1330,
13027,
628,
628,
198,
22921,
62,
39488,
796,
362,
62,
830,
62,
830,
198,
4221,
15675,
50,
796,
2026,
198,
198,
7857,
796,
25882,
62,
39488,
3373,
... | 2.768116 | 69 |
import matplotlib
matplotlib.use("TkAgg")
import tkinter as tk
from tkinter import ttk
import cv2
from tkinter.filedialog import *
import tkinter.font as tkFont
from tkinter.ttk import Treeview
import time
from PIL import Image, ImageTk
import threading
from test import *
LARGE_FONT = ("Verdana", 12)
import random
class Main(tk.Tk):
'''主页'''
pic_path = ""
viewhigh = 770
viewwide = 800
update_time = 0
thread = None
thread_run = False
camera = None
if __name__ == '__main__':
app = Main()
app.mainloop()
| [
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
7203,
51,
74,
46384,
4943,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
11748,
269,
85,
17,
198,
6738,
256,
74,
3849,
13,
69,
... | 2.488688 | 221 |
from enum import Enum
from keras.preprocessing.image import ImageDataGenerator
from PIL import Image
import io
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
41927,
292,
13,
3866,
36948,
13,
9060,
1330,
7412,
6601,
8645,
1352,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
33245,
628
] | 3.862069 | 29 |
#This program gets the input from the DSB server and processes, then saves it so they can be used by Bot.js
print("STARTING SERVICE....")
from PIL import Image
import urllib.request
import requests
import base64
import filecmp
import time
import json
import os
print("LOADING COMPLETE")
print("GETTING DATA")
#PATH OF FILE TO OCR
subscription_key = ""
FILEPATH = "/Users/leonardo/Desktop/index.jpg"
OLDFILE = "/Users/leonardo/Documents/Files/Vertretungsplan/Vplaene/index.jpg"
#get data from server (PNG)
#make get request
#get images from get request
#process data with OCR (if the last image != the new one)
#-------------------------------------------------------------------------------
# Replace <Subscription Key> with your valid subscription key.
assert subscription_key
# You must use the same region in your REST call as you used to get your
# subscription keys. For example, if you got your subscription keys from
# westus, replace "westcentralus" in the URI below with "westus".
#
# Free trial subscription keys are generated in the westcentralus region.
# If you use a free trial subscription key, you shouldn't need to change
# this region.
vision_base_url = "https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/"
analyze_url = vision_base_url + "analyze"
# Set image_path to the local path of an image that you want to analyze.
image_path = FILEPATH
# Read the image into a byte array
image_data = open(image_path, "rb").read()
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'}
params = {'visualFeatures': 'Categories,Description,Color'}
response = requests.post(
analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()
# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
Vertretungsplan = response.json()
print(Vertretungsplan)
#-------------------------------------------------------------------------------
#save data as JSON
with open(os.getcwd() + "/Vplaene/aktuekllerplan.json", 'w') as outfile:
json.dump(Vertretungsplan, outfile)
print("File saved")
#only check every 10 minutes and only between 5:00 AM and 12:AM
currenttime = time.localtime(time.time())
if(currenttime.tm_hour < 5):
print("it is between 12:00 AM and 5:00 AM. Going to sleep")
time.sleep((((5 - currenttime.tm_hour) - 1)* 60 * 60) + ((61 - currenttime.tm_min)* 60))
else:
time.sleep(10 * 60)
| [
2,
1212,
1430,
3011,
262,
5128,
422,
262,
360,
16811,
4382,
290,
7767,
11,
788,
16031,
340,
523,
484,
460,
307,
973,
416,
18579,
13,
8457,
198,
4798,
7203,
2257,
7227,
2751,
47453,
1106,
4943,
198,
6738,
350,
4146,
1330,
7412,
198,
... | 3.352243 | 758 |
from rest_framework import serializers
from sales.models import Sale, SaleProduct
from seller_products.models import SellerProductArchive
from sellers.models import Seller
from users.models import User
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
4200,
13,
27530,
1330,
16467,
11,
16467,
15667,
198,
6738,
18583,
62,
29498,
13,
27530,
1330,
46555,
15667,
19895,
425,
198,
6738,
23531,
13,
27530,
1330,
46555,
198,
6738,
2985,
... | 4.354167 | 48 |
"""
Repository of various documents
"""
# Third party imports
from flask import Blueprint
from flask import Response
from flask import redirect
from flask import current_app
from flask_cas import login_required
# Local application imports
from app.users import User
from app.utils.jinja_filters import file_name, file_type
from app.logger import DynamoAccessLogger
from app.errors.handlers import NotFoundError, ForbiddenError
bp = Blueprint('facgov', __name__, url_prefix='/faculty_governance')
@bp.route('/<path:key>')
@login_required
def download(key):
"""
Downloads a file from S3 based on the key in the path
"""
logger = DynamoAccessLogger('facgov_download')
current_user = User()
# Check access
if current_user.has_facgov_access():
client = current_app.config['S3_RESOURCE']
bucket = client.Bucket(current_app.config['FACGOV_BUCKET'])
# Redirect to base url for keys that end with '/' which are valid S3 keys but are not files
if key.endswith('/'):
return redirect(bp.url_prefix)
try:
file_obj = bucket.Object(key).get()
except client.meta.client.exceptions.NoSuchKey: # per boto3 docs
logger.log_access(has_access=False, downloaded_object=key)
raise NotFoundError(f'File {file_name(key)} not found.')
logger.log_access(has_access=True, downloaded_object=key)
return Response(
file_obj['Body'].read(),
mimetype=file_type(key),
headers={'Content-Disposition': 'inline; filename={}'.format(file_name(key))}
)
else:
logger.log_access(has_access=False, downloaded_object=key)
raise ForbiddenError('You do not have access to this page. \
Please reach out to Timur Gulyamov (tg2648) to get access.')
| [
37811,
198,
6207,
13264,
286,
2972,
4963,
198,
37811,
198,
198,
2,
10467,
2151,
17944,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
1330,
18261,
198,
6738,
42903,
1330,
18941,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
42903,
... | 2.61898 | 706 |
# https://raw.githubusercontent.com/raghakot/keras-resnet/master/resnet.py
import os
from keras.optimizers import SGD
from keras.models import Model, model_from_json
from keras.layers import (
Input,
Activation,
merge,
Dense,
Flatten
)
from keras.layers.convolutional import (
Convolution2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.normalization import BatchNormalization
from generate_data import *
import sys
import mahotas
import multiprocessing
import matplotlib
import matplotlib.pyplot as plt
# Helper to build a conv -> BN -> relu block
# Helper to build a BN -> relu -> conv block
# This is an improved scheme proposed in http://arxiv.org/pdf/1603.05027v2.pdf
# Bottleneck architecture for > 34 layer resnet.
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
# Returns a final conv layer of nb_filters * 4
# Basic 3 X 3 convolution blocks.
# Use for resnet with layers <= 34
# Follows improved proposed scheme in http://arxiv.org/pdf/1603.05027v2.pdf
# Basic 3 X 3 convolution blocks.
# Use for resnet with layers <= 34
# Follows old scheme from original paper
# Adds a shortcut between input and residual block and merges them with "sum"
# Builds a residual block with repeating bottleneck blocks.
# http://arxiv.org/pdf/1512.03385v1.pdf
# 34 Layer resnet from figure 3
# uses old scheme
if __name__ == '__main__':
train_samples = 50000
val_samples = 10000
learning_rate = 0.1
rng = np.random.RandomState(7)
doTrain = int(sys.argv[1])
if doTrain:
import time
start = time.time()
model = resnet_old()
duration = time.time() - start
print "{} s to make model".format(duration)
start = time.time()
model.output
duration = time.time() - start
print "{} s to get output".format(duration)
start = time.time()
sgd = SGD(lr=0.0001, decay=0, momentum=0.0, nesterov=False)
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd)
duration = time.time() - start
print "{} s to get compile".format(duration)
data_val = generate_experiment_data_supervised(purpose='validate', nsamples=val_samples, patchSize=65, balanceRate=0.5, rng=rng)
data_x_val = data_val[0].astype(np.float32)
data_x_val = np.reshape(data_x_val, [-1, 1, 65, 65])
data_y_val = data_val[1].astype(np.float32)
# start pool for data
print "Starting worker."
pool = multiprocessing.Pool(processes=1)
futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_supervised,'train', train_samples, 65, 0.5, rng]])
best_val_loss_so_far = 100
for epoch in xrange(10000):
print "Waiting for data."
data = futureData.get()
data_x = data[0].astype(np.float32)
data_x = np.reshape(data_x, [-1, 1, 65, 65])
data_y = data[1].astype(np.float32)
print "got new data"
futureData = pool.apply_async(stupid_map_wrapper, [[generate_experiment_data_supervised, 'train', train_samples, 65, 0.5, rng]])
model.fit(data_x, data_y, batch_size=100, nb_epoch=1)
validation_loss = model.evaluate(data_x_val, data_y_val, batch_size=100)
print "validation loss ", validation_loss
json_string = model.to_json()
open('resnet_keras.json', 'w').write(json_string)
model.save_weights('resnet_keras_weights.h5', overwrite=True)
if validation_loss < best_val_loss_so_far:
best_val_loss_so_far = validation_loss
print "NEW BEST MODEL"
json_string = model.to_json()
open('resnet_keras_best.json', 'w').write(json_string)
model.save_weights('resnet_keras_best_weights.h5', overwrite=True)
else:
model = model_from_json(open('resnet_keras.json').read())
model.load_weights('resnet_keras_weights.h5')
sgd = SGD(lr=learning_rate, decay=0, momentum=0.0, nesterov=False)
# this is summed, not averaged loss => need to adjust learning rate with batch size
model.compile(loss='sparse_categorical_crossentropy', optimizer=sgd)
image = mahotas.imread('ac3_input_0141.tif')
image = image[:512,:512]
prob_img = np.zeros(image.shape)
start_time = time.clock()
for rows in xrange(image.shape[0]):
patch_data = generate_image_data(image, patchSize=65, rows=[rows]).astype(np.float32)
patch_data = np.reshape(patch_data, [-1, 1, 65, 65])
probs = model.predict(x=patch_data, batch_size = image.shape[0])[:,0]
prob_img[rows,:] = probs
if rows%10==0:
print rows
print "time so far: ", time.clock()-start_time
mahotas.imsave('keras_prediction_resnet_08.png', np.uint8(prob_img*255))
plt.imshow(prob_img)
plt.show()
| [
2,
3740,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
81,
10471,
461,
313,
14,
6122,
292,
12,
411,
3262,
14,
9866,
14,
411,
3262,
13,
9078,
198,
198,
11748,
28686,
198,
6738,
41927,
292,
13,
40085,
11341,
1330,
26147,
35,
198,
6738,
... | 2.172742 | 2,414 |
import requests
from pwn import *
url = "https://ac141f861ea930df80a3036f00f10064.web-security-academy.net/filter?category=Accessories"
if __name__ == '__main__':
col_no = get_cols_no(url, 1, 10) | [
11748,
7007,
201,
198,
6738,
279,
675,
1330,
1635,
201,
198,
6371,
796,
366,
5450,
1378,
330,
23756,
69,
4521,
16,
18213,
45418,
7568,
1795,
64,
1270,
2623,
69,
405,
69,
3064,
2414,
13,
12384,
12,
12961,
12,
330,
324,
3065,
13,
3262... | 2.356322 | 87 |
import numpy as np
from torch.utils.data import Dataset
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
628
] | 3.222222 | 18 |
'''
K.I.S.T.I.E (Keep, It, Simple, Take, It, Easy)
Created on 1 Jan 2013
@author: Leonardo Bruni, leo.b2003@gmail.com
Kistie Maya Module Library
This Kistie implementation i's part of project 'Kistie_Autorig' by Leonardo Bruni, leo.b2003@gmail.com
'''
#ToDo: implement a debug mode for print or not
import pymel as pm # import pymel lib
import maya.cmds as cmds # import maya cmds lib
import maya.OpenMaya as om | [
7061,
6,
198,
42,
13,
40,
13,
50,
13,
51,
13,
40,
13,
36,
357,
15597,
11,
632,
11,
17427,
11,
7214,
11,
632,
11,
16789,
8,
198,
41972,
319,
352,
2365,
2211,
198,
31,
9800,
25,
38083,
15700,
72,
11,
443,
78,
13,
65,
16088,
31... | 2.183036 | 224 |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import scipy as scipy
import scipy.integrate as integrate
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
629,
541,
88,
198,
11748,
629,
541,
88,
13,
18908,
4873,
3... | 2.891304 | 92 |
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
| [
2,
46111,
4770,
25609,
18604,
198,
2,
17168,
13789,
198,
2,
15069,
357,
66,
8,
33448,
1225,
904,
5948,
13415,
648,
357,
5450,
1378,
12567,
13,
785,
14,
276,
904,
5948,
13415,
648,
8,
198,
2,
46111,
4770,
25609,
18604,
198,
198,
1174... | 4.979592 | 49 |
#! /usr/bin/python
#===============================================================================
# File Name : app.py
# Date : 12-02-2015
# Input Files : Nil
# Author : Satheesh <sathishsms@gmail.com>
# Description :
# How to run :twit_test.py -l info
# :twit_test.py -h
#===============================================================================
from flask_restful import fields, marshal_with, reqparse, Resource, Api
from flask import Flask
import time
import sys
#import util
import argparse
sys.path.append('common')
sys.path.append('resources')
import globalS
import generic
import loggerRecord
import intercom
import json
#from bson import ObjectId
#
#class JSONEncoder(json.JSONEncoder):
# def default(self, o):
# if isinstance(o, ObjectId):
# return str(o)
# return json.JSONEncoder.default(self, o)
#parse the run-time args passed
parser = argparse.ArgumentParser(description=' To get the mra.log,rc.log,\
qpTraces & tcpdump to the log viewer machine or any user defined server\
all in one place with a single click. Works among multiple Active Pairs \
(MPE\'s, MRA\'s)..................................................\
Example: ./app CAM-92410 -c serverRack_C6GRsetup.cfg or ./loggy \
CAM-92410 or ./app -v ',add_help=True)
#parser.add_argument('testName',help='Name suffixed to log file name generated')
#if the def file is not passed as arg thn take the default file.
parser.add_argument('-c', '--config',default='serverRack.def',help='definition file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.3')
parser.add_argument("-l", "--logLevel",default='error',help="Enable standard output verbosity")
args = parser.parse_args()
#create a flask app
app = Flask(__name__)
FlaskRestApi = Api(app) #creating a flask-restfull api
############################################################################
#Function Name : compileFileName #
#Input : Nil #
#Return Value : just sets the suffix fileName for logs #
############################################################################
############################################################################
globalS.init()#intialize the global variables
generic = generic.generic()
#==============================================================================#
# Opening log file to record all the cli outputs #
#==============================================================================#
sufFileName = compileFileName()
logFileName = "/tmp/" + sufFileName + ".log"
logger = loggerRecord.loggerInit(logFileName,args.logLevel)
logger.debug('Log file# %s & TestBed file ',logFileName)
intercom=intercom.intercom()
FlaskRestApi.add_resource(Departmental_Salary, '/dept/<string:department_name>')
FlaskRestApi.add_resource(FbUserDetails, '/facebook')
#FlaskRestApi.add_resource(Foo, '/Foo', '/Foo/<str:id>')
#FlaskRestApi.add_resource(Bar, '/Bar', '/Bar/<str:id>')
#FlaskRestApi.add_resource(Baz, '/Baz', '/Baz/<str:id>')
@app.route('/')
@app.route('/authorize-instagram')
@app.route('/handle-instagram-authorization')
if 'debug' in args.logLevel:
app.debug = True
if __name__ == '__main__':
# Get the environment information we need to start the server
#ip = os.environ['OPENSHIFT_PYTHON_IP']
#port = int(os.environ['OPENSHIFT_PYTHON_PORT'])
#host_name = os.environ['OPENSHIFT_GEAR_DNS']
#app.run(host=ip,port=port)
app.run()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
2,
23926,
25609,
18604,
198,
2,
9220,
6530,
220,
220,
220,
220,
220,
1058,
598,
13,
9078,
198,
2,
7536,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
1105,
12,
2999,
12,
462... | 2.811828 | 1,302 |
import json
import logging
import logging.handlers
import pytest
from ethereum import slogging
@pytest.mark.parametrize('level_name', ['critical', 'error', 'warning', 'info', 'debug', 'trace'])
def test_lazy_log():
"""
test lacy evaluation of json log data
e.g.
class LogState
class LogMemory
"""
called_print = []
slogging.configure(log_json=True)
log = slogging.get_logger()
log.trace('no', data=Expensive())
assert not called_print
log.info('yes', data=Expensive()) # !!!!!!!!!!!!!
assert called_print.pop()
def test_how_to_use_as_vm_logger():
"""
don't log until there was an error
"""
slogging.configure(':DEBUG,eth.vm:INFO')
log = slogging.get_logger('eth.vm')
# record all logs
recorder = slogging.LogRecorder()
try:
run_vm(raise_error=True)
except:
log = slogging.get_logger('eth.vm')
for x in recorder.pop_records():
log.info(x.pop('event'), **x)
@pytest.mark.parametrize(
('logger_name', 'filter', 'should_log'),
[
('a', None, True),
('a.a', 'a', True),
('a.a', 'a.a', True),
('a.a', 'b', False),
])
def test_bound_logger_isolation(caplog):
"""
Ensure bound loggers don't "contaminate" their parent
"""
slogging.configure(config_string=':trace')
real_log = slogging.getLogger()
bound_log_1 = real_log.bind(key1="value1")
with caplog.at_level(slogging.TRACE):
bound_log_1.info("test1")
records = caplog.records()
assert len(records) == 1
assert "test1" in records[0].msg
assert "key1=value1" in records[0].msg
with caplog.at_level(slogging.TRACE):
real_log.info("test2")
records = caplog.records()
assert len(records) == 2
assert "test2" in records[1].msg
assert "key1=value1" not in records[1].msg
@pytest.mark.parametrize(
('config', 'logger', 'level'), (
(":WARNING", "", "WARNING"),
(":DEBUG,eth:INFO", "", "DEBUG"),
(":DEBUG,eth:INFO", "eth", "INFO"),
(":DEBUG,eth:INFO,devp2p:INFO", "devp2p", "INFO"),))
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
18931,
13,
4993,
8116,
198,
11748,
12972,
9288,
198,
6738,
304,
17733,
1330,
25801,
2667,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
5715,
62,
3672,
3256,
37250,
34... | 2.248705 | 965 |
"""Test fetch queries."""
from backend.lib.database.postgres import connect
from backend.lib.fetch import census, providers, representative_points
import mock
engine = connect.create_db_engine()
class TestFetchRepresentativePoints():
"""Test fetch representative_points."""
@staticmethod
def test_fetch_representative_points_one_service_area():
"""Test fetch_representative_points."""
service_areas = ['ca_los_angeles_county_00000']
results = representative_points.fetch_representative_points(
service_areas, include_census_data=False, engine=engine
)
assert len(results) > 1000
@staticmethod
def test_fetch_representative_points_two_service_areas():
"""Test fetch_representative_points."""
service_areas = ['ca_los_angeles_county_00000', 'ca_los_angeles_county_00000']
results = representative_points.fetch_representative_points(
service_areas, include_census_data=False, engine=engine
)
assert len(results) > 1000
@staticmethod
def test_fetch_representative_points_no_service_area():
"""Test fetch_representative_points."""
service_areas = []
results = representative_points.fetch_representative_points(
service_areas, include_census_data=False, engine=engine
)
assert len(results) == 0
@staticmethod
def test_fetch_representative_points_no_valid_service_area():
"""Test fetch_representative_points."""
service_areas = ['not_valid']
results = representative_points.fetch_representative_points(
service_areas, include_census_data=False, engine=engine
)
assert len(results) == 0
@staticmethod
def test_minimal_fetch_representative_points_one_service_area():
"""Test fetch_representative_points as used internally by the backend."""
service_areas = ['ca_los_angeles_county_00000']
results = representative_points.minimal_fetch_representative_points(
service_areas, engine=engine
)
assert len(results) > 1000
class TestFetchProviders():
"""Test fetch providers."""
@staticmethod
def test_geocode_providers_existing_address():
"""Test geocode_providers when the address exists."""
# Note - This address should exist in the database.
providers_input = ['1000 E DOMINGUEZ ST, CARSON, CA 90746']
results = providers.geocode_providers(providers_input, engine=engine)
assert results[0]['status'] == 'success'
@staticmethod
def test_geocode_providers_address_does_not_exist():
"""Test fetch_representative_points."""
providers_input = ['I DO NOT EXIST']
results = providers.geocode_providers(providers_input, engine=engine)
assert len(results) == 1
assert results[0]['status'] == 'error'
@staticmethod
def test_geocode_providers_address_multiple_input():
"""Test fetch_representative_points."""
providers_input = ['1000 E DOMINGUEZ ST, CARSON, CA 90746', 'I DO NOT EXIST']
results = providers.geocode_providers(providers_input, engine=engine)
assert len(results) == 2
assert results[0]['status'] == 'success'
assert results[1]['status'] == 'error'
class TestFetchServiceAreas():
"""Test methods to fetch service areas."""
@staticmethod
def test_fetch_all_service_areas():
"""Test fetch_all_service_areas."""
results = representative_points.fetch_all_service_areas(engine=engine)
assert len(results) > 0
class TestFetchCensus(object):
"""Test fetching of census data for service areas."""
def setup(self):
"""Initialize a mock representative point dictionary with census data."""
self.mock_point = {
'id': 17323,
'service_area_id': 'ca_los_angeles_county_00000',
'lat': 74.38732,
'lng': -122.323331,
'county': 'Los Angeles',
'population': 2000,
'zip': '94105',
'census_tract': 304,
'demographics': {
'age': {
'0-18 Years': 24.0,
'19-25 Years': 9.0,
'26-34 Years': 14.0,
'35-54 Years': 30.0,
'55-64 Years': 10.0,
'65+ Years': 10.0
},
'income': {
'$100k - $150k': 17.0,
'$150k - $200k': 9.0,
'$15k - $50k': 24.0,
'$50k - $100k': 26.0,
'< $15k': 8.0,
'> $200k': 13.0
},
'insurance': {
'No Health Insurance': 8.0,
'Private Health Insurance': 71.0,
'Public Health Insurance': 29.0
},
'race': {
'American Indian & Alaska Native': 0.0,
'Asian': 28.0,
'Black': 11.0,
'Hispanic or Latino (any race)': 22.0,
'Multiracial or Other': 4.0,
'Native Hawaiian & other Pacific Islander': 0.0,
'White': 31.0
},
'sex': {
'Female': 51.0, 'Male': 48.0
}
}
}
@mock.patch('backend.lib.fetch.representative_points.fetch_representative_points')
def test_fetch_census_info_by_service_area(self, mock_fetch_rps):
"""Test fetch_census_info_by_service_area."""
mock_fetch_rps.return_value = [self.mock_point] * 10
output = census.fetch_census_info_by_service_area(['ca_los_angeles_county_00000'], engine)
assert output['ca_los_angeles_county_00000'] == self.mock_point['demographics']
@staticmethod
@mock.patch('backend.lib.fetch.representative_points.fetch_representative_points')
def test_fetch_census_info_by_service_area_missing_service_area(mock_fetch_rps):
"""Test fetch_census_info_by_service_area for a non-existent service area."""
mock_fetch_rps.return_value = []
output = census.fetch_census_info_by_service_area(['i_am_not_a_valid_service_area'], engine)
assert output == {}
| [
37811,
14402,
21207,
20743,
526,
15931,
198,
6738,
30203,
13,
8019,
13,
48806,
13,
7353,
34239,
1330,
2018,
198,
6738,
30203,
13,
8019,
13,
69,
7569,
1330,
21649,
11,
9549,
11,
8852,
62,
13033,
198,
198,
11748,
15290,
198,
198,
18392,
... | 2.173255 | 2,909 |
# encoding: utf-8
# module PySide.QtNetwork
# from C:\Python27\lib\site-packages\PySide\QtNetwork.pyd
# by generator 1.147
# no doc
# imports
import PySide.QtCore as __PySide_QtCore
import Shiboken as __Shiboken
from QAbstractNetworkCache import QAbstractNetworkCache
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
2,
8265,
9485,
24819,
13,
48,
83,
26245,
198,
2,
422,
327,
7479,
37906,
1983,
59,
8019,
59,
15654,
12,
43789,
59,
20519,
24819,
59,
48,
83,
26245,
13,
79,
5173,
198,
2,
416,
17301,
352,
13,
... | 2.935484 | 93 |
# PyImageCompare
# Copyright © 2020 by Paul Wilhelm <anfrage@paulwilhelm.de>
# https://github.com/shredEngineer/PyImageCompare
# Compares all JPEG images in a folder and renames (enumerates) duplicates. Differences in resolution and quality don't matter!
# This program loads all images in a folder, generates grayscale thumbnails and calculates the cross-image power.
# It then renames (enumerates) similar image pairs, such that you can examine and delete the duplicates afterwards.
# The "DUP_xxxx_A_…" files should always be the bigger ones, so you are probably safe to delete the "DUP_xxxx_B_…" files.
# Tested with Python 3.8 in Ubuntu 20.04
# Note: You should upgrade pillow if you get metadata errors: python3 -m pip install pillow --upgrade
import os
import glob
from tqdm import tqdm
from si_prefix import si_format
from PIL import Image, ImageChops, ImageOps
# Set this to your image folder path
path = "/media/pw/EXTERN/FOTOS/2020 - Unsortiert"
# These settings worked very well for me
thumb_size = (128, 128)
power_threshold = 50
if __name__ == "__main__":
main()
| [
2,
9485,
5159,
41488,
198,
2,
15069,
10673,
12131,
416,
3362,
50031,
1279,
272,
8310,
496,
31,
79,
2518,
86,
346,
33485,
13,
2934,
29,
198,
2,
3740,
1378,
12567,
13,
785,
14,
1477,
445,
13798,
263,
14,
20519,
5159,
41488,
198,
198,
... | 3.390093 | 323 |
#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class lbvserver_binding(base_resource):
""" Binding class showing the resources that can be bound to lbvserver_binding.
"""
@property
def name(self) :
r"""Name of the virtual server. If no name is provided, statistical data of all configured virtual servers is displayed.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the virtual server. If no name is provided, statistical data of all configured virtual servers is displayed.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def lbvserver_auditsyslogpolicy_bindings(self) :
r"""auditsyslogpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_auditsyslogpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_pqpolicy_bindings(self) :
r"""pqpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_pqpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_dnspolicy64_bindings(self) :
r"""dnspolicy64 that can be bound to lbvserver.
"""
try :
return self._lbvserver_dnspolicy64_binding
except Exception as e:
raise e
@property
def lbvserver_appfwpolicy_bindings(self) :
r"""appfwpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_appfwpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_rewritepolicy_bindings(self) :
r"""rewritepolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_rewritepolicy_binding
except Exception as e:
raise e
@property
def lbvserver_videooptimizationpacingpolicy_bindings(self) :
r"""videooptimizationpacingpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_videooptimizationpacingpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_contentinspectionpolicy_bindings(self) :
r"""contentinspectionpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_contentinspectionpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_spilloverpolicy_bindings(self) :
r"""spilloverpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_spilloverpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_auditnslogpolicy_bindings(self) :
r"""auditnslogpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_auditnslogpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_appqoepolicy_bindings(self) :
r"""appqoepolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_appqoepolicy_binding
except Exception as e:
raise e
@property
def lbvserver_transformpolicy_bindings(self) :
r"""transformpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_transformpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_filterpolicy_bindings(self) :
r"""filterpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_filterpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_scpolicy_bindings(self) :
r"""scpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_scpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_feopolicy_bindings(self) :
r"""feopolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_feopolicy_binding
except Exception as e:
raise e
@property
def lbvserver_csvserver_bindings(self) :
r"""csvserver that can be bound to lbvserver.
"""
try :
return self._lbvserver_csvserver_binding
except Exception as e:
raise e
@property
def lbvserver_appflowpolicy_bindings(self) :
r"""appflowpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_appflowpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_analyticsprofile_bindings(self) :
r"""analyticsprofile that can be bound to lbvserver.
"""
try :
return self._lbvserver_analyticsprofile_binding
except Exception as e:
raise e
@property
def lbvserver_videooptimizationdetectionpolicy_bindings(self) :
r"""videooptimizationdetectionpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_videooptimizationdetectionpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_authorizationpolicy_bindings(self) :
r"""authorizationpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_authorizationpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_servicegroup_bindings(self) :
r"""servicegroup that can be bound to lbvserver.
"""
try :
return self._lbvserver_servicegroup_binding
except Exception as e:
raise e
@property
def lbvserver_cachepolicy_bindings(self) :
r"""cachepolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_cachepolicy_binding
except Exception as e:
raise e
@property
def lbvserver_botpolicy_bindings(self) :
r"""botpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_botpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_service_bindings(self) :
r"""service that can be bound to lbvserver.
"""
try :
return self._lbvserver_service_binding
except Exception as e:
raise e
@property
def lbvserver_servicegroupmember_bindings(self) :
r"""servicegroupmember that can be bound to lbvserver.
"""
try :
return self._lbvserver_servicegroupmember_binding
except Exception as e:
raise e
@property
def lbvserver_responderpolicy_bindings(self) :
r"""responderpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_responderpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_dospolicy_bindings(self) :
r"""dospolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_dospolicy_binding
except Exception as e:
raise e
@property
def lbvserver_tmtrafficpolicy_bindings(self) :
r"""tmtrafficpolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_tmtrafficpolicy_binding
except Exception as e:
raise e
@property
def lbvserver_cmppolicy_bindings(self) :
r"""cmppolicy that can be bound to lbvserver.
"""
try :
return self._lbvserver_cmppolicy_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(lbvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.lbvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name="", option_="") :
r""" Use this API to fetch lbvserver_binding resource.
"""
try :
if not name :
obj = lbvserver_binding()
response = obj.get_resources(service, option_)
elif type(name) is not list :
obj = lbvserver_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
obj = [lbvserver_binding() for _ in range(len(name))]
for i in range(len(name)) :
obj[i].name = name[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
15792,
8609,
11998,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393... | 2.760546 | 3,295 |
import tarfile
from multiprocessing import Pool
import numpy as np
import subprocess
import os
from pathlib import Path
import math
import time
from datetime import datetime
from functools import partial
import argparse
from collections import defaultdict, Counter
import json
import pickle
import csv
import shutil
from tqdm import tqdm
import pandas as pd
if __name__ == "__main__":
args = parse_args()
print(args)
print("datset feature dir: {}".format(args.feat_dir))
feats_path = sorted(Path(args.feat_dir).glob("*.pkl"))
print("dataset indexing start...")
if args.num_workers > 1:
with Pool(args.num_workers) as pool:
results = list(
tqdm(
pool.imap(indexing, feats_path),
ncols=80,
total=len(feats_path),
)
)
else:
results = []
for pkl_path in tqdm(feats_path, total=len(feats_path), ncols=80):
results.append(indexing(pkl_path))
dataset_dict = {}
for shard_name, filenames in results:
dataset_dict[shard_name] = filenames
print("dataset indexing done...")
input_shard_names = sorted(list(dataset_dict.keys()))
duplicate_files = defaultdict(list)
print("duplicate checking...")
for shard_name in tqdm(input_shard_names, ncols=80):
if len(set(dataset_dict[shard_name])) != len(dataset_dict[shard_name]):
filename_counter = Counter(dataset_dict[shard_name])
for filename in set(dataset_dict[shard_name]):
if filename_counter[filename] > 1:
duplicate_files[shard_name] += [(filename, filename_counter[filename])]
num_duplicate_files = sum([len(duplicate_files[shard_name]) for shard_name in duplicate_files])
print(f"# of duplicate files: {num_duplicate_files}")
with open("dulicate_files.pkl", "wb") as f:
pickle.dump(duplicate_files, f)
non_matching_files = defaultdict(list)
feat_dir = Path(args.feat_dir)
new_feat_dir = Path(args.new_feat_dir)
for shard_name in tqdm(input_shard_names, ncols=80):
with open(os.path.join(args.input_dir, f"{shard_name}.json"), "r") as j:
meta_shard = json.load(j)
filenames = [meta['filename'] for meta in meta_shard]
for filename in dataset_dict[shard_name]:
if filename not in filenames:
non_matching_files[shard_name] += [filename]
num_non_matching_files = sum([len(non_matching_files[shard_name]) for shard_name in non_matching_files])
print(f"# of non matching files: {num_non_matching_files}")
with open("non_matching_files.pkl", "wb") as f:
pickle.dump(non_matching_files, f)
'''
if len(non_matching_files) > 0:
print(f"deleting non matching files")
new_feat_dir.mkdir(exist_ok=True, parents=True)
for shard_name in tqdm(input_shard_names, ncols=80):
pkl_path = feat_dir.joinpath(f"{shard_name}.pkl")
new_pkl_path = new_feat_dir.joinpath(f"{shard_name}.pkl")
if shard_name in non_matching_files:
with open(pkl_path, "rb") as f:
feats = pickle.load(f)
new_feats = [feat for feat in feats if feat['filename'] not in non_matching_files[shard_name]]
with open(new_pkl_path, "wb") as f:
pickle.dump(new_feats, f)
else:
shutil.copy(str(pkl_path), new_pkl_path)
'''
| [
11748,
13422,
7753,
198,
6738,
18540,
305,
919,
278,
1330,
19850,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
850,
14681,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
10688,
198,
11748,
640,
198,
6738,
4818,
8079... | 2.155733 | 1,631 |
import torch.nn as nn
import torch
import math
| [
11748,
220,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
198,
11748,
10688,
628
] | 3.266667 | 15 |
import tsensor
import numpy as np
| [
11748,
40379,
22854,
198,
11748,
299,
32152,
355,
45941,
628
] | 3.5 | 10 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 21:43:38 2018
@author: Gireesh Sundaram
"""
#Importing the packages
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
from textblob import TextBlob
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
#%%
tweets_df = pd.read_pickle("...\\Files\\IPLFinals.pk1")
#%%
#Building a word cloud:
words = pd.Series(tweets_df["Translated"].tolist()).astype(str)
stop_words = ["https", "co", "rt"]
stop = set(stopwords.words('english'))
for i in range(0, len(words)):
words[i] = " ".join([x for x in words[i].lower().split() if x not in stop_words])
words[i] = " ".join([x for x in words[i].lower().split() if x not in stop])
stop = set(stopwords.words('english'))
cloud = WordCloud(width=900, height=900,
stopwords=(stop),
colormap='hsv').generate(''.join(words.astype(str)))
plt.figure(figsize=(15, 15))
plt.imshow(cloud)
plt.axis('off')
plt.show()
#%%
#using textblob
tweet_text = tweets_df["Tweet"]
polarity = []
for i in tweet_text:
txt = TextBlob(i)
polarity.append( (txt.sentiment.polarity)*10 )
columns = ['Tweet','Polarity', 'DateTime']
data = pd.DataFrame(tweets_df, columns=columns)
data.head()
data['Polarity'] = pd.DataFrame(polarity)
data_by_polarity = data.sort_values(by='Polarity',ascending=False)
data_by_polarity = data_by_polarity.dropna()
dt = data_by_polarity['Polarity']
fig, ax = plt.subplots(figsize=(10,7))
ax.set_title("Frequency of tweet sentiment!")
ax.set_xlabel("Sentiment amount")
ax.set_ylabel("Amount of tweets")
mean = np.mean(dt)
ax.hist(dt)
fig.tight_layout()
plt.show()
#%%
sent = SentimentIntensityAnalyzer()
tweets_df["Compounded_polarity"] = tweets_df.Translated.apply(lambda x: sent.polarity_scores(x)['compound']*10)
tweets_df["Neutral"] = tweets_df.Translated.apply(lambda x: sent.polarity_scores(x)['neu']*10)
tweets_df["Negative"] = tweets_df.Translated.apply(lambda x: sent.polarity_scores(x)['neg']*10)
tweets_df["Positive"] = tweets_df.Tweet.apply(lambda x: sent.polarity_scores(x)['pos']*10)
tweets_df["Sentiment"] = ""
tweets_df.loc[tweets_df.Compounded_polarity > 0, "Sentiment"] = "Positive"
tweets_df.loc[tweets_df.Compounded_polarity == 0, "Sentiment"] = "Neutral"
tweets_df.loc[tweets_df.Compounded_polarity < 0, "Sentiment"] = "Negative"
#%%
tweets_df.Sentiment.value_counts().plot(kind = 'bar')
#%%
tweets_df.to_pickle("...\\Files\\IPLFinals.pk1")
#%%
tweets_df["tweet_lower"] = tweets_df['Translated'].str.lower()
PlayerSentiment = pd.DataFrame(columns= ["Player", "Translated", "Compounded_polarity", "Sentiment"])
#%%
PlayerSentimentDef("MS Dhoni", "dhoni")
PlayerSentimentDef("S Watson", "watson")
PlayerSentimentDef("L Ngidi", "ngidi")
PlayerSentimentDef("SK Raina", "raina")
PlayerSentimentDef("R Jadeja", "jadeja")
PlayerSentimentDef("S Dhawan", "dhawan")
PlayerSentimentDef("KS Williamson", "williamson")
PlayerSentimentDef("YK Pathan", "pathan")
PlayerSentimentDef("B Kumar", "bhuvneshwar")
PlayerSentimentDef("Rashid Khan", "rashid")
PlayerSentimentDef("V Kohli", "kohli")
PlayerSentimentDef("SR Tendulkar", "sachin")
PlayerSentimentDef("R Ashwin", "ashwin")
PlayerSentimentDef("Sunil Narine", "narine")
PlayerSentimentDef("Sanju Samson", "sanju")
PlayerSentiment = PlayerSentiment.drop_duplicates()
PlayerSentiment = PlayerSentiment.loc[PlayerSentiment.Compounded_polarity != 0]
#%%
tweets_df["geo_lower"] = tweets_df['Geo'].str.lower()
city_not_null = tweets_df.dropna(subset = ["Geo"])
CitySentiment = pd.DataFrame(columns= ["City", "Translated", "Compounded_polarity", "Sentiment"])
CitySentimentDef("Chennai", "chennai")
CitySentimentDef("Kolkata", "kolkata")
CitySentimentDef("Mumbai", "mumbai")
CitySentimentDef("Hyderabad", "hyderabad")
CitySentimentDef("Bangalore", "bangalore")
CitySentimentDef("Bangalore", "Bengaluru")
CitySentimentDef("Delhi", "delhi")
CitySentimentDef("Jaipur", "jaipur")
CitySentimentDef("Pune", "pune")
CitySentimentDef("Coimbatore", "coimbatore")
CitySentimentDef("Dubai", "dubai")
CitySentimentDef("Paris", "paris")
CitySentimentDef("Dhaka", "dhaka")
CitySentimentDef("Kabul", "kabul")
CitySentiment = CitySentiment.drop_duplicates()
CitySentiment = CitySentiment.loc[CitySentiment.Compounded_polarity != 0]
#%%
dhoni = city_not_null.loc[city_not_null.tweet_lower.str.contains('dhoni')]
dhoni = dhoni[["Geo", "Translated", "Compounded_polarity", "Sentiment"]]
dhoni = dhoni.drop_duplicates()
dhoni = dhoni.loc[dhoni.Compounded_polarity != 0]
#%%
dhoni["geo_lower"] = dhoni['Geo'].str.lower()
DhoniSentiment = pd.DataFrame(columns= ["City", "Translated", "Compounded_polarity", "Sentiment"])
CitySentimentDef("Chennai", "chennai")
CitySentimentDef("Kolkata", "kolkata")
CitySentimentDef("Mumbai", "mumbai")
CitySentimentDef("Hyderabad", "hyderabad")
CitySentimentDef("Bangalore", "bangalore")
CitySentimentDef("Bangalore", "Bengaluru")
CitySentimentDef("Delhi", "delhi")
CitySentimentDef("Jaipur", "jaipur")
CitySentimentDef("Pune", "pune")
CitySentimentDef("Coimbatore", "coimbatore")
CitySentimentDef("Dubai", "dubai")
CitySentimentDef("Paris", "paris")
CitySentimentDef("Dhaka", "dhaka")
CitySentimentDef("Kabul", "kabul")
DhoniSentiment = DhoniSentiment.drop_duplicates()
DhoniSentiment = DhoniSentiment.loc[DhoniSentiment.Compounded_polarity != 0]
#%%
PlayerSentiment.to_excel("...\\Files\\PlayerSentiments.xlsx")
CitySentiment.to_excel("...\\Files\\CitySentiment.xlsx")
DhoniSentiment.to_excel("...\\Files\\DhoniSentiment.xlsx")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3825,
7653,
838,
2310,
25,
3559,
25,
2548,
2864,
201,
198,
201,
198,
31,
9800,
25,
402,
557,
5069,
3309,
41158,
201,
198,
37811,
201,... | 2.42652 | 2,368 |
from __future__ import print_function
import pathlib
from builtins import object
from builtins import str
from typing import Dict
from empire.server.common import helpers
from empire.server.common.module_models import PydanticModule
from empire.server.database.models import Credential
from empire.server.utils import data_util
from empire.server.utils.module_util import handle_error_message
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
3108,
8019,
198,
6738,
3170,
1040,
1330,
2134,
198,
6738,
3170,
1040,
1330,
965,
198,
6738,
19720,
1330,
360,
713,
198,
198,
6738,
13735,
13,
15388,
13,
11321,
1330,
49385,... | 4 | 99 |
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end tests of Timesketch client functionality."""
from timesketch_api_client import search
from . import interface
from . import manager
class ClientTest(interface.BaseEndToEndTest):
"""End to end tests for client functionality."""
NAME = 'client_test'
def test_client(self):
"""Client tests."""
expected_user = 'test'
user = self.api.current_user
self.assertions.assertEqual(user.username, expected_user)
self.assertions.assertEqual(user.is_admin, False)
self.assertions.assertEqual(user.is_active, True)
sketches = list(self.api.list_sketches())
number_of_sketches = len(sketches)
sketch_name = 'Testing'
sketch_description = 'This is truly a foobar'
new_sketch = self.api.create_sketch(
name=sketch_name, description=sketch_description)
self.assertions.assertEqual(new_sketch.name, sketch_name)
self.assertions.assertEqual(
new_sketch.description, sketch_description)
sketches = list(self.api.list_sketches())
self.assertions.assertEqual(len(sketches), number_of_sketches + 1)
for index in self.api.list_searchindices():
if index is None:
continue
self.assertions.assertTrue(bool(index.index_name))
def test_direct_es(self):
"""Test injecting data into Elastic and make it acccessible in TS."""
index_name = 'direct_testing'
self.import_directly_to_elastic(
filename='evtx_direct.csv', index_name=index_name)
new_sketch = self.api.create_sketch(
name='Testing Direct', description='Adding data directly from ES')
context = 'e2e - > test_direct_es'
timeline_name = 'Ingested Via Mechanism'
timeline = new_sketch.generate_timeline_from_es_index(
es_index_name=index_name, name=timeline_name,
provider='end_to_end_testing_platform', context=context)
_ = new_sketch.lazyload_data(refresh_cache=True)
self.assertions.assertEqual(len(new_sketch.list_timelines()), 1)
self.assertions.assertEqual(timeline.name, timeline_name)
data_sources = timeline.data_sources
self.assertions.assertEqual(len(data_sources), 1)
data_source = data_sources[0]
self.assertions.assertEqual(data_source.get('context', ''), context)
def test_sigma_list(self):
"""Client Sigma list tests."""
rules = self.api.list_sigma_rules()
self.assertions.assertGreaterEqual(len(rules), 1)
rule = rules[0]
self.assertions.assertIn('b793-11ea-b3de-0242ac130004', rule.id)
self.assertions.assertIn('b793-11ea-b3de-0242ac130004', rule.rule_uuid)
self.assertions.assertIn('Installation of ZMap', rule.title)
self.assertions.assertIn('zmap', rule.es_query)
self.assertions.assertIn('Alexander', rule.author)
self.assertions.assertIn('2020/06/26',rule.date)
self.assertions.assertIn('installation of ZMap', rule.description)
self.assertions.assertEqual(len(rule.detection), 2)
self.assertions.assertIn('zmap*', rule.es_query)
self.assertions.assertIn('shell:zsh:history', rule.es_query)
self.assertions.assertIn('Unknown', rule.falsepositives[0])
self.assertions.assertEqual(len(rule.logsource), 2)
self.assertions.assertIn('2020/06/26', rule.modified)
self.assertions.assertIn('lnx_susp_zmap.yml', rule.file_relpath)
self.assertions.assertIn('lnx_susp_zmap', rule.file_name)
self.assertions.assertIn('high', rule.level)
self.assertions.assertIn('rmusser.net', rule.references[0])
def test_get_sigma_rule(self):
"""Client Sigma object tests."""
rule = self.api.get_sigma_rule(
rule_uuid='5266a592-b793-11ea-b3de-0242ac130004')
rule.from_rule_uuid('5266a592-b793-11ea-b3de-0242ac130004')
self.assertions.assertGreater(len(rule.attributes),5)
self.assertions.assertIsNotNone(rule)
self.assertions.assertIn('Alexander', rule.author)
self.assertions.assertIn('Alexander', rule.get_attribute('author'))
self.assertions.assertIn('b793-11ea-b3de-0242ac130004', rule.id)
self.assertions.assertIn('Installation of ZMap', rule.title)
self.assertions.assertIn('zmap', rule.es_query)
self.assertions.assertIn('shell:zsh:history', rule.es_query)
self.assertions.assertIn('lnx_susp_zmap.yml', rule.file_relpath)
self.assertions.assertIn('sigma/rule/5266a592', rule.resource_uri)
self.assertions.assertIn('installation of ZMap', rule.description)
self.assertions.assertIn('high', rule.level)
self.assertions.assertEqual(len(rule.falsepositives), 1)
self.assertions.assertIn('Unknown', rule.falsepositives[0])
self.assertions.assertIn('susp_zmap', rule.file_name)
self.assertions.assertIn('2020/06/26', rule.date)
self.assertions.assertIn('2020/06/26', rule.modified)
self.assertions.assertIn('high', rule.level)
self.assertions.assertIn('rmusser.net', rule.references[0])
self.assertions.assertEqual(len(rule.detection), 2)
self.assertions.assertEqual(len(rule.logsource), 2)
# Test an actual query
self.import_timeline('sigma_events.csv')
search_obj = search.Search(self.sketch)
search_obj.query_string = rule.es_query
data_frame = search_obj.table
count = len(data_frame)
self.assertions.assertEqual(count, 1)
manager.EndToEndTestManager.register_test(ClientTest)
| [
2,
15069,
12131,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.435767 | 2,561 |
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from f5.bigip import ManagementRoot
from f5.bigip.tm.security.protocol_inspection import Compliance
from f5.bigip.tm.security.protocol_inspection import Compliances
from f5.bigip.tm.security.protocol_inspection import Profile
from f5.bigip.tm.security.protocol_inspection import Signature
from f5.sdk_exception import MissingRequiredCreationParameter
from six import iterkeys
@pytest.fixture
@pytest.fixture
| [
2,
15069,
2177,
376,
20,
27862,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
... | 3.541379 | 290 |
"""Test pipeline.py
..note: These tests depend on global state and are therefore not reentrant.
"""
import os
import sys
import types
import shutil
import tempfile
from mindbender import pipeline
from nose.tools import (
with_setup,
assert_equals,
assert_raises
)
self = sys.modules[__name__]
@with_setup(clear)
def test_loaders():
"""Registering a path of loaders imports them appropriately"""
tempdir = tempfile.mkdtemp()
loader = """
from mindbender import api
class DemoLoader(api.Loader):
def process(self, asset, subset, version, representation):
pass
"""
with open(os.path.join(tempdir, "my_loader.py"), "w") as f:
f.write(loader)
try:
pipeline.register_loader_path(tempdir)
loaders = pipeline.discover_loaders()
assert "DemoLoader" in list(
L.__name__ for L in loaders
), "Loader not found in %s" % ", ".join(
l.__name__ for l in loaders)
finally:
shutil.rmtree(tempdir)
| [
37811,
14402,
11523,
13,
9078,
198,
198,
492,
11295,
25,
2312,
5254,
4745,
319,
3298,
1181,
290,
389,
4361,
407,
302,
298,
5250,
13,
198,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
3858,
198,
11748,
4423,
346,
... | 2.577215 | 395 |
from abc import ABCMeta, abstractmethod
class PacketObserver:
"""
An abstract implementation of the packet observer
"""
__metaclass__ = ABCMeta
@abstractmethod
def update(self, handler):
"""
:param PacketHandler handler:
:return:
"""
pass
| [
6738,
450,
66,
1330,
9738,
48526,
11,
12531,
24396,
628,
198,
4871,
6400,
316,
31310,
18497,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1052,
12531,
7822,
286,
262,
19638,
22890,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
... | 2.487805 | 123 |
import pytorch_lightning as pl
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torchvision.utils as vutils
import numpy as np
import glob
from lib.utils import *
def label_wavepr(arr, T1=0.30, T2=0.15):
"""Label the wave profit retract to discrete labels.
Args:
arr: (N, 2) array, denoting profit and retract.
T1: The threshold for minimum profit.
T2: The threshold for maximum retract.
"""
P, R = arr[:, 0], arr[:, 1]
buy_labels = (P > T1) & (-R < P / 2) # label 1
# sell point: retracts is nonnegligiable and is larger than profits
sell_labels = (-R > T2) & (-R > P / 2) # label 2
# hold point: other cases. label 0
label = np.zeros((arr.shape[0],), dtype="uint8")
label[buy_labels] = 1
label[sell_labels] = 2
buy_count = buy_labels.sum()
sell_count = sell_labels.sum()
N = arr.shape[0]
return label, [N - buy_count - sell_count, buy_count, sell_count]
def label_wavepr_dataset(year_symbols, label_dic, info_dic):
"""Label a whole year-symbol dataset.
Args:
year_symbols: The key list.
label_dic: The target dict to be modified.
info_dic: The information dict used for labeling.
Returns:
The label count for each category.
"""
total_counts = []
for k1, k2 in year_symbols:
if k1 not in label_dic:
label_dic[k1] = {}
label, counts = label_wavepr(info_dic[k1][k2])
label_dic[k1][k2] = label
if len(total_counts) == 0:
total_counts = np.array(counts)
else:
total_counts += np.array(counts)
return total_counts
class DictListSampler(torch.utils.data.Dataset):
"""The data is organized in a dict.
The value is an array that needs to be sampled.
"""
class PointwiseDataset(pl.LightningDataModule):
"""year data and corresponding pointwise labeling."""
| [
11748,
12972,
13165,
354,
62,
2971,
768,
355,
458,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
11,
6060,
17401,
11,
309,
22854,
27354,
292,
316,
198,
11748,
28034,
10178,
13,
26791,
355,
410,
26791,
... | 2.372405 | 819 |
# Classes to model a HDL design hierarchy
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2019, Lars Asplund lars.anders.asplund@gmail.com
"""
Functionality to represent and operate on a HDL code project
"""
from os.path import join, basename, dirname, splitext, isdir, exists
from copy import copy
import traceback
import logging
from collections import OrderedDict
from vunit.hashing import hash_string
from vunit.dependency_graph import (DependencyGraph,
CircularDependencyException)
from vunit.vhdl_parser import VHDLParser, VHDLReference
from vunit.cached import file_content_hash
from vunit.parsing.verilog.parser import VerilogParser
from vunit.parsing.encodings import HDL_FILE_ENCODING
from vunit.exceptions import CompileError
from vunit.simulator_factory import SIMULATOR_FACTORY
from vunit.design_unit import DesignUnit, VHDLDesignUnit, Entity, Module
from vunit import ostools
LOGGER = logging.getLogger(__name__)
class Project(object): # pylint: disable=too-many-instance-attributes
"""
The representation of a HDL code project.
Compute lists of source files to recompile based on file contents,
timestamps and depenencies derived from the design hierarchy.
"""
def __init__(self,
depend_on_package_body=False,
database=None):
"""
depend_on_package_body - Package users depend also on package body
"""
self._database = database
self._vhdl_parser = VHDLParser(database=self._database)
self._verilog_parser = VerilogParser(database=self._database)
self._libraries = OrderedDict()
# Mapping between library lower case name and real library name
self._lower_library_names_dict = {}
self._source_files_in_order = []
self._manual_dependencies = []
self._depend_on_package_body = depend_on_package_body
self._builtin_libraries = set(["ieee", "std"])
def _validate_new_library_name(self, library_name):
"""
Check that the library_name is valid or raise RuntimeError
"""
if library_name == "work":
LOGGER.error("Cannot add library named work. work is a reference to the current library. "
"http://www.sigasi.com/content/work-not-vhdl-library")
raise RuntimeError("Illegal library name 'work'")
if library_name in self._libraries:
raise ValueError("Library %s already exists" % library_name)
lower_name = library_name.lower()
if lower_name in self._lower_library_names_dict:
raise RuntimeError(
"Library name %r not case-insensitive unique. Library name %r previously defined"
% (library_name, self._lower_library_names_dict[lower_name]))
def add_builtin_library(self, logical_name):
"""
Add a builtin library name that does not give missing dependency warnings
"""
self._builtin_libraries.add(logical_name)
def add_library(self, logical_name, directory, vhdl_standard='2008', is_external=False):
"""
Add library to project with logical_name located or to be located in directory
is_external -- Library is assumed to a black-box
"""
self._validate_new_library_name(logical_name)
if is_external:
if not exists(directory):
raise ValueError("External library %r does not exist" % directory)
if not isdir(directory):
raise ValueError("External library must be a directory. Got %r" % directory)
library = Library(logical_name, directory, vhdl_standard, is_external=is_external)
LOGGER.debug('Adding library %s with path %s', logical_name, directory)
self._libraries[logical_name] = library
self._lower_library_names_dict[logical_name.lower()] = library.name
def add_source_file(self, # pylint: disable=too-many-arguments
file_name, library_name, file_type='vhdl', include_dirs=None, defines=None,
vhdl_standard=None,
no_parse=False):
"""
Add a file_name as a source file in library_name with file_type
:param no_parse: Do not parse file contents
"""
if not ostools.file_exists(file_name):
raise ValueError("File %r does not exist" % file_name)
LOGGER.debug('Adding source file %s to library %s', file_name, library_name)
library = self._libraries[library_name]
if file_type == "vhdl":
assert include_dirs is None
source_file = VHDLSourceFile(
file_name,
library,
vhdl_parser=self._vhdl_parser,
database=self._database,
vhdl_standard=library.vhdl_standard if vhdl_standard is None else vhdl_standard,
no_parse=no_parse)
elif file_type in VERILOG_FILE_TYPES:
source_file = VerilogSourceFile(file_type,
file_name,
library,
verilog_parser=self._verilog_parser,
database=self._database,
include_dirs=include_dirs,
defines=defines,
no_parse=no_parse)
else:
raise ValueError(file_type)
old_source_file = library.add_source_file(source_file)
if id(source_file) == id(old_source_file):
self._source_files_in_order.append(source_file)
return old_source_file
def add_manual_dependency(self, source_file, depends_on):
"""
Add manual dependency where 'source_file' depends_on 'depends_on'
"""
self._manual_dependencies.append((source_file, depends_on))
@staticmethod
def _find_primary_secondary_design_unit_dependencies(source_file):
"""
Iterate over dependencies between the primary design units of the source_file
and their secondary design units
"""
library = source_file.library
for unit in source_file.design_units:
if unit.is_primary:
continue
try:
primary_unit = library.primary_design_units[unit.primary_design_unit]
except KeyError:
LOGGER.warning("%s: failed to find a primary design unit '%s' in library '%s'",
source_file.name, unit.primary_design_unit, library.name)
else:
yield primary_unit.source_file
def _find_vhdl_library_reference(self, library_name):
"""
Find a VHDL library reference that is case insensitive or raise KeyError
"""
real_library_name = self._lower_library_names_dict[library_name]
return self._libraries[real_library_name]
def _find_other_vhdl_design_unit_dependencies(self, # pylint: disable=too-many-branches
source_file,
depend_on_package_body,
implementation_dependencies):
"""
Iterate over the dependencies on other design unit of the source_file
"""
for ref in source_file.dependencies:
try:
library = self._find_vhdl_library_reference(ref.library)
except KeyError:
if ref.library not in self._builtin_libraries:
LOGGER.warning("%s: failed to find library '%s'", source_file.name, ref.library)
continue
if ref.is_entity_reference() and ref.design_unit in library.modules:
# Is a verilog module instantiation
yield library.modules[ref.design_unit].source_file
continue
try:
primary_unit = library.primary_design_units[ref.design_unit]
except KeyError:
if not library.is_external:
LOGGER.warning("%s: failed to find a primary design unit '%s' in library '%s'",
source_file.name, ref.design_unit, library.name)
continue
else:
yield primary_unit.source_file
if ref.is_entity_reference():
if ref.reference_all_names_within():
# Reference all architectures,
# We make configuration declarations implicitly reference all architectures
names = primary_unit.architecture_names.keys()
elif ref.name_within is None and implementation_dependencies:
# For implementation dependencies we add a dependency to all architectures
names = primary_unit.architecture_names.keys()
else:
names = [ref.name_within]
for name in names:
if name is None:
# Was not a reference to a specific architecture
continue
if name in primary_unit.architecture_names:
file_name = primary_unit.architecture_names[name]
yield library.get_source_file(file_name)
else:
LOGGER.warning("%s: failed to find architecture '%s' of entity '%s.%s'",
source_file.name, name, library.name, primary_unit.name)
elif ref.is_package_reference() and depend_on_package_body:
try:
yield library.get_package_body(primary_unit.name).source_file
except KeyError:
# There was no package body, which is legal in VHDL
pass
def _find_verilog_package_dependencies(self, source_file):
    """
    Yield the source files defining the Verilog packages that
    source_file imports or references, searching every library in the
    project.
    """
    for package_name in source_file.package_dependencies:
        for library in self._libraries.values():
            unit = library.verilog_packages.get(package_name)
            if unit is not None:
                yield unit.source_file
def _find_verilog_module_dependencies(self, source_file):
    """
    Yield the source files defining the Verilog modules instantiated by
    source_file.

    A module found in the file's own library takes precedence;
    otherwise every library in the project is searched.
    """
    own_modules = source_file.library.modules
    for module_name in source_file.module_dependencies:
        if module_name in own_modules:
            yield own_modules[module_name].source_file
            continue
        for library in self._libraries.values():
            unit = library.modules.get(module_name)
            if unit is not None:
                yield unit.source_file
@staticmethod
def _find_component_design_unit_dependencies(source_file):
    """
    Iterate over the dependencies on other design units of the source_file
    that are the result of component instantiations.

    A component may be matched either by a primary design unit (plus all
    of its architectures) in the file's own library, or by a Verilog
    module there; both kinds of match are yielded.  A missing match is
    only logged at debug level because a binding may be provided later.
    """
    for unit_name in source_file.depending_components:
        found_component_match = False

        try:
            primary_unit = source_file.library.primary_design_units[unit_name]
            yield primary_unit.source_file

            for file_name in primary_unit.architecture_names.values():
                yield source_file.library.get_source_file(file_name)
        except KeyError:
            pass
        else:
            found_component_match = True

        try:
            module = source_file.library.modules[unit_name]
        except KeyError:
            pass
        else:
            found_component_match = True
            yield module.source_file

        if not found_component_match:
            LOGGER.debug("failed to find a matching entity/module for component '%s' ", unit_name)
def create_dependency_graph(self, implementation_dependencies=False):
    """
    Create a DependencyGraph object of the HDL code project.

    :param implementation_dependencies: when True also add dependencies
        needed to elaborate a design (all architectures of referenced
        entities, component bindings, package bodies), not only those
        needed to compile it.
    """
    def add_dependency(start, end):
        """
        Record that end depends on start, ignoring self-dependencies.
        """
        if start.name == end.name:
            return

        is_new = dependency_graph.add_dependency(start, end)
        if is_new:
            LOGGER.debug('Adding dependency: %s depends on %s', end.name, start.name)

    def add_dependencies(dependency_function, files):
        """
        Utility to add all dependencies returned by a dependency_function
        returning an iterator of dependencies
        """
        for source_file in files:
            for dependency in dependency_function(source_file):
                add_dependency(dependency, source_file)

    dependency_graph = DependencyGraph()
    for source_file in self.get_source_files_in_order():
        dependency_graph.add_node(source_file)

    vhdl_files = [source_file
                  for source_file in self.get_source_files_in_order()
                  if source_file.file_type == 'vhdl']

    # Package bodies are pulled in either by explicit project setting or
    # implicitly when implementation dependencies are requested.
    depend_on_package_bodies = self._depend_on_package_body or implementation_dependencies
    add_dependencies(
        lambda source_file: self._find_other_vhdl_design_unit_dependencies(source_file,
                                                                           depend_on_package_bodies,
                                                                           implementation_dependencies),
        vhdl_files)
    add_dependencies(self._find_primary_secondary_design_unit_dependencies, vhdl_files)

    verilog_files = [source_file
                     for source_file in self.get_source_files_in_order()
                     if source_file.file_type in VERILOG_FILE_TYPES]

    add_dependencies(self._find_verilog_package_dependencies, verilog_files)
    add_dependencies(self._find_verilog_module_dependencies, verilog_files)

    if implementation_dependencies:
        add_dependencies(self._find_component_design_unit_dependencies, vhdl_files)

    # User-specified dependencies always apply.
    for source_file, depends_on in self._manual_dependencies:
        add_dependency(depends_on, source_file)

    return dependency_graph
@staticmethod
def _handle_circular_dependency(exception):
    """
    Log the cycle carried by a circular-dependency exception as an
    arrow-separated chain of file names.
    """
    chain = " ->\n".join(source_file.name for source_file in exception.path)
    LOGGER.error("Found circular dependency:\n%s", chain)
def _get_compile_timestamps(self, files):
    """
    Map every source file to the modification time of its hash file, or
    to None when the file was never compiled.

    Timestamps are gathered once up front to avoid repeated file system
    accesses later on.
    """
    timestamps = {}
    for source_file in files:
        hash_file = self._hash_file_name_of(source_file)
        if ostools.file_exists(hash_file):
            timestamps[source_file] = ostools.get_modification_time(hash_file)
        else:
            timestamps[source_file] = None
    return timestamps
def get_files_in_compile_order(self, incremental=True, dependency_graph=None):
    """
    Get a list of all files in compile order.

    :param incremental: when True only files needing recompilation (and
        the files affected by them) are returned
    :param dependency_graph: optional pre-built graph; created on demand
        when None
    """
    if dependency_graph is None:
        dependency_graph = self.create_dependency_graph()

    all_files = self.get_source_files_in_order()
    timestamps = self._get_compile_timestamps(all_files)
    files = []
    for source_file in all_files:
        if (not incremental) or self._needs_recompile(dependency_graph, source_file, timestamps):
            files.append(source_file)

    # Get files that are affected by recompiling the modified files
    try:
        affected_files = dependency_graph.get_dependent(files)
        compile_order = dependency_graph.toposort()
    except CircularDependencyException as exc:
        self._handle_circular_dependency(exc)
        raise CompileError

    # NOTE(review): comparison_key is not defined anywhere in this
    # excerpt; presumably it sorts files by their index in compile_order
    # (otherwise unused here) -- confirm against the full source.
    retval = sorted(affected_files, key=comparison_key)
    return retval
def get_dependencies_in_compile_order(self, target_files=None, implementation_dependencies=False):
    """
    Get a list of dependencies of target files including the
    target files themselves, in compile order.

    :param target_files: A list of SourceFiles; defaults to every file
        in the project
    :param implementation_dependencies: also follow elaboration-time
        dependencies, not just compile-time ones
    """
    if target_files is None:
        target_files = self.get_source_files_in_order()

    dependency_graph = self.create_dependency_graph(implementation_dependencies)
    try:
        affected_files = dependency_graph.get_dependencies(set(target_files))
        compile_order = dependency_graph.toposort()
    except CircularDependencyException as exc:
        self._handle_circular_dependency(exc)
        raise CompileError

    # NOTE(review): comparison_key is not defined in this excerpt;
    # presumably it sorts by index in compile_order -- confirm.
    sorted_files = sorted(affected_files, key=comparison_key)
    return sorted_files
def get_source_files_in_order(self):
    """
    Return a new list of the project's source files in the order they
    were added.

    A copy is returned so callers may mutate the result without
    affecting the project's internal bookkeeping.
    """
    # list() copies the internal sequence directly instead of an
    # element-by-element identity comprehension (same behavior).
    return list(self._source_files_in_order)
def _needs_recompile(self, dependency_graph, source_file, timestamps):
    """
    Returns True if the source_file needs to be recompiled
    given the dependency_graph, the file contents and the last modification time.

    A file is stale when it was never compiled, when its content hash
    changed since the last compile, or when a direct dependency was
    compiled after it.
    """
    timestamp = timestamps[source_file]

    content_hash_file_name = self._hash_file_name_of(source_file)
    if timestamp is None:
        LOGGER.debug("%s has no vunit_hash file at %s and must be recompiled",
                     source_file.name, content_hash_file_name)
        return True

    old_content_hash = ostools.read_file(content_hash_file_name)
    if old_content_hash != source_file.content_hash:
        LOGGER.debug("%s has different hash than last time and must be recompiled",
                     source_file.name)
        return True

    for other_file in dependency_graph.get_direct_dependencies(source_file):
        other_timestamp = timestamps[other_file]

        if other_timestamp is None:
            # Other file has not been compiled and will trigger recompile of this file
            continue

        if other_timestamp > timestamp:
            LOGGER.debug("%s has dependency compiled earlier and must be recompiled",
                         source_file.name)
            return True

    LOGGER.debug("%s has same hash file and must not be recompiled",
                 source_file.name)
    return False
def _hash_file_name_of(self, source_file):
    """
    Returns the name of the hash file associated with the source_file.

    The hash file lives in the library's output directory under a
    subdirectory named by the hash of the source file's directory, which
    avoids collisions between identically named files in different
    directories.
    """
    library = self.get_library(source_file.library.name)
    prefix = hash_string(dirname(source_file.name))
    return join(library.directory, prefix, basename(source_file.name) + ".vunit_hash")
def update(self, source_file):
    """
    Mark that source_file has been recompiled: the hash file is
    re-written so both its contents and its modification timestamp
    reflect the new compile.
    """
    new_content_hash = source_file.content_hash
    ostools.write_file(self._hash_file_name_of(source_file), new_content_hash)
    LOGGER.debug('Wrote %s content_hash=%s', source_file.name, new_content_hash)
class Library(object):  # pylint: disable=too-many-instance-attributes
    """
    Represents a VHDL library.

    NOTE(review): the constructor is not part of this excerpt; the
    methods below use attributes (_source_files, primary_design_units,
    _architectures, _entities, _package_bodies, modules,
    verilog_packages, _is_external, name, directory) presumably created
    in __init__ -- confirm against the full source.
    """

    def add_source_file(self, source_file):
        """
        Add source file to library unless it exists.

        returns The source file that was added, or the previously added
        source file when an identical file is already present.
        Raises RuntimeError when a file with the same name but different
        contents was already added.
        """
        if source_file.name in self._source_files:
            old_source_file = self._source_files[source_file.name]
            if old_source_file.content_hash != source_file.content_hash:
                raise RuntimeError("%s already added to library %s" % (
                    source_file.name, self.name))

            LOGGER.info("Ignoring duplicate file %s added to library %s due to identical contents",
                        source_file.name, self.name)
            return old_source_file

        self._source_files[source_file.name] = source_file
        source_file.add_to_library(self)
        return source_file

    def get_source_file(self, file_name):
        """
        Get source file with file name or raise KeyError
        """
        return self._source_files[file_name]

    @property
    def is_external(self):
        """
        External black box library, typically compiled outside of VUnit
        """
        return self._is_external

    @staticmethod
    def _warning_on_duplication(design_unit, old_file_name):
        """
        Utility function to give warning for design unit duplication
        """
        LOGGER.warning("%s: %s '%s' previously defined in %s",
                       design_unit.source_file.name,
                       design_unit.unit_type,
                       design_unit.name,
                       old_file_name)

    def _check_duplication(self, dictionary, design_unit):
        """
        Warn when design_unit's name is already present in dictionary.
        """
        if design_unit.name in dictionary:
            self._warning_on_duplication(design_unit, dictionary[design_unit.name].source_file.name)

    def add_vhdl_design_units(self, design_units):
        """
        Add VHDL design units to the library.

        Primary units (entities, packages, ...) are indexed by name;
        secondary units (architectures, package bodies) are attached to
        their primary unit.  Duplicates are warned about; the last one
        added wins.
        """
        for design_unit in design_units:
            if design_unit.is_primary:
                self._check_duplication(self.primary_design_units,
                                        design_unit)
                self.primary_design_units[design_unit.name] = design_unit

                if design_unit.unit_type == 'entity':
                    # Architectures may be parsed before their entity;
                    # link any already-known ones to the entity now.
                    if design_unit.name not in self._architectures:
                        self._architectures[design_unit.name] = {}
                    self._entities[design_unit.name] = design_unit

                    for architecture in self._architectures[design_unit.name].values():
                        design_unit.add_architecture(architecture)

            else:
                if design_unit.unit_type == 'architecture':
                    if design_unit.primary_design_unit not in self._architectures:
                        self._architectures[design_unit.primary_design_unit] = {}

                    if design_unit.name in self._architectures[design_unit.primary_design_unit]:
                        self._warning_on_duplication(
                            design_unit,
                            self._architectures[design_unit.primary_design_unit][design_unit.name].source_file.name)

                    self._architectures[design_unit.primary_design_unit][design_unit.name] = design_unit

                    if design_unit.primary_design_unit in self._entities:
                        self._entities[design_unit.primary_design_unit].add_architecture(design_unit)

                if design_unit.unit_type == 'package body':
                    if design_unit.primary_design_unit in self._package_bodies:
                        self._warning_on_duplication(
                            design_unit,
                            self._package_bodies[design_unit.primary_design_unit].source_file.name)
                    self._package_bodies[design_unit.primary_design_unit] = design_unit

    def add_verilog_design_units(self, design_units):
        """
        Add Verilog design units (modules and packages) to the library,
        warning about duplicates; the last one added wins.
        """
        for design_unit in design_units:
            if design_unit.unit_type == 'module':
                if design_unit.name in self.modules:
                    self._warning_on_duplication(design_unit, self.modules[design_unit.name].source_file.name)
                self.modules[design_unit.name] = design_unit
            elif design_unit.unit_type == 'package':
                if design_unit.name in self.verilog_packages:
                    self._warning_on_duplication(design_unit, self.verilog_packages[design_unit.name].source_file.name)
                self.verilog_packages[design_unit.name] = design_unit

    def get_entities(self):
        """
        Return a list of all entites in the design with their generic names and architecture names
        """
        entities = []
        for entity in self._entities.values():
            entities.append(entity)
        return entities

    def get_modules(self):
        """
        Return a list of all modules in the design
        """
        return list(self.modules.values())

    def has_entity(self, name):
        """
        Return true if entity with 'name' is in library
        """
        return name in self._entities
class SourceFile(object):
    """
    Represents a generic source file.

    NOTE(review): this excerpt appears damaged by extraction -- the
    constructor is missing, three bare @property decorators sit directly
    above set_compile_option, and get_compile_option is decorated as a
    property although it takes an argument.  The decorated methods
    cannot work as written; the property bodies were presumably lost.
    Confirm against the original source before relying on this class.
    """
    @property
    @property
    @property
    def set_compile_option(self, name, value):
        """
        Set compile option
        """
        SIMULATOR_FACTORY.check_compile_option(name, value)
        self._compile_options[name] = copy(value)

    def add_compile_option(self, name, value):
        """
        Add compile option; the value is appended when the option has
        already been set, otherwise a copy of the value is stored.
        """
        SIMULATOR_FACTORY.check_compile_option(name, value)
        if name not in self._compile_options:
            self._compile_options[name] = copy(value)
        else:
            self._compile_options[name] += value

    @property
    def get_compile_option(self, name):
        """
        Return a copy of the compile option list
        """
        SIMULATOR_FACTORY.check_compile_option_name(name)

        if name not in self._compile_options:
            self._compile_options[name] = []

        return copy(self._compile_options[name])

    def _compile_options_hash(self):
        """
        Compute hash of compile options

        Needs to be updated if there are nested dictionaries
        """
        return hash_string(repr(sorted(self._compile_options.items())))

    @property
    def content_hash(self):
        """
        Compute hash of contents and compile options
        """
        return hash_string(self._content_hash + self._compile_options_hash())
class VerilogSourceFile(SourceFile):
    """
    Represents a Verilog source file
    """

    def parse(self, parser, database, include_dirs):
        """
        Parse Verilog code, recording design units and dependencies.

        :param parser: the Verilog parser to use
        :param database: content-hash cache passed to file_content_hash
        :param include_dirs: include directories given to the parser

        Any parse failure other than KeyboardInterrupt is logged and
        swallowed so one broken file does not abort the project scan.
        """
        try:
            design_file = parser.parse(self.name, include_dirs, self.defines)

            # Included files contribute to the content hash so a change
            # in an include triggers recompilation of this file.
            for included_file_name in design_file.included_files:
                self._content_hash = hash_string(
                    self._content_hash
                    + file_content_hash(included_file_name,
                                        encoding=HDL_FILE_ENCODING,
                                        database=database))

            for module in design_file.modules:
                self.design_units.append(Module(module.name, self, module.parameters))

            for package in design_file.packages:
                self.design_units.append(DesignUnit(package.name, self, "package"))

            for package_name in design_file.imports:
                self.package_dependencies.append(package_name)

            for package_name in design_file.package_references:
                self.package_dependencies.append(package_name)

            for instance_name in design_file.instances:
                self.module_dependencies.append(instance_name)

        except KeyboardInterrupt:
            # Bare 'raise' re-raises the active exception and preserves
            # its traceback; the original 'raise KeyboardInterrupt'
            # constructed a fresh exception and discarded the traceback.
            raise
        except:  # pylint: disable=bare-except
            # Deliberate best-effort: report and continue with the scan.
            traceback.print_exc()
            LOGGER.error("Failed to parse %s", self.name)

    def add_to_library(self, library):
        """
        Add design units to the library
        """
        assert self.library == library
        library.add_verilog_design_units(self.design_units)
class VHDLSourceFile(SourceFile):
    """
    Represents a VHDL source file.

    NOTE(review): the constructor is not part of this excerpt; the
    methods rely on attributes such as _vhdl_standard, library, name,
    design_units and dependencies presumably set elsewhere.
    """

    def get_vhdl_standard(self):
        """
        Return the VHDL standard used to create this file
        """
        return self._vhdl_standard

    def _add_design_file(self, design_file):
        """
        Record the design units, dependencies and component
        instantiations found in a parsed design file.
        """
        self.design_units = self._find_design_units(design_file)
        self.dependencies = self._find_dependencies(design_file)
        self.depending_components = design_file.component_instantiations

        # Debug-log everything that was found in the file.
        for design_unit in self.design_units:
            if design_unit.is_primary:
                LOGGER.debug('Adding primary design unit (%s) %s', design_unit.unit_type, design_unit.name)
            elif design_unit.unit_type == 'package body':
                LOGGER.debug('Adding secondary design unit (package body) for package %s',
                             design_unit.primary_design_unit)
            else:
                LOGGER.debug('Adding secondary design unit (%s) %s', design_unit.unit_type, design_unit.name)

        if self.depending_components:
            LOGGER.debug("The file '%s' has the following components:", self.name)
            for component in self.depending_components:
                LOGGER.debug(component)
        else:
            LOGGER.debug("The file '%s' has no components", self.name)

    def _find_dependencies(self, design_file):
        """
        Return a list of dependencies of this source_file based on the
        use clause and entity instantiations
        """
        # Find dependencies introduced by the use clause
        result = []
        for ref in design_file.references:
            ref = ref.copy()

            if ref.library == "work":
                # Work means same library as current file
                ref.library = self.library.name

            result.append(ref)

        # A configuration implicitly depends on its entity (and, via
        # 'all', on every architecture of that entity).
        for configuration in design_file.configurations:
            result.append(VHDLReference('entity', self.library.name, configuration.entity, 'all'))

        return result

    def _find_design_units(self, design_file):
        """
        Return all design units found in the design_file
        """
        result = []
        for entity in design_file.entities:
            generic_names = [generic.identifier for generic in entity.generics]
            result.append(Entity(entity.identifier, self, generic_names))

        for context in design_file.contexts:
            result.append(VHDLDesignUnit(context.identifier, self, 'context'))

        for package in design_file.packages:
            result.append(VHDLDesignUnit(package.identifier, self, 'package'))

        for architecture in design_file.architectures:
            result.append(VHDLDesignUnit(architecture.identifier, self, 'architecture', False, architecture.entity))

        for configuration in design_file.configurations:
            result.append(VHDLDesignUnit(configuration.identifier, self, 'configuration'))

        for body in design_file.package_bodies:
            result.append(VHDLDesignUnit(body.identifier,
                                         self, 'package body', False, body.identifier))

        return result

    @property
    def content_hash(self):
        """
        Compute hash of contents, compile options and the VHDL standard,
        so changing the standard also triggers recompilation.
        """
        return hash_string(self._content_hash + self._compile_options_hash() + hash_string(self._vhdl_standard))

    def add_to_library(self, library):
        """
        Add design units to the library
        """
        assert self.library == library
        library.add_vhdl_design_units(self.design_units)
# lower case representation of supported extensions
VHDL_EXTENSIONS = (".vhd", ".vhdl", ".vho")
VERILOG_EXTENSIONS = (".v", ".vp", ".vams", ".vo")
SYSTEM_VERILOG_EXTENSIONS = (".sv",)

VERILOG_FILE_TYPES = ("verilog", "systemverilog")
FILE_TYPES = ("vhdl", ) + VERILOG_FILE_TYPES


def file_type_of(file_name):
    """
    Classify file_name as "vhdl", "verilog" or "systemverilog" from its
    extension (case-insensitive); raise RuntimeError for anything else.
    """
    ext = splitext(file_name)[1]
    lowered = ext.lower()
    if lowered in VHDL_EXTENSIONS:
        return "vhdl"
    if lowered in VERILOG_EXTENSIONS:
        return "verilog"
    if lowered in SYSTEM_VERILOG_EXTENSIONS:
        return "systemverilog"
    raise RuntimeError("Unknown file ending '%s' of %s" % (ext, file_name))
def check_vhdl_standard(vhdl_standard, from_str=None):
    """
    Raise ValueError unless vhdl_standard is a recognized VHDL revision.

    :param vhdl_standard: candidate standard string, e.g. '2008'
    :param from_str: optional origin description included in the error
        message
    """
    prefix = "" if from_str is None else from_str + " "
    valid_standards = ('93', '2002', '2008')
    if vhdl_standard not in valid_standards:
        raise ValueError("Unknown VHDL standard '%s' %snot one of %r" % (vhdl_standard, prefix, valid_standards))
| [
2,
38884,
284,
2746,
257,
48867,
1486,
18911,
201,
198,
2,
201,
198,
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
201,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
... | 2.128529 | 16,152 |
#!/usr/bin/env python3
import os
import shutil
import sys
import yaml
from my_analytics.stats import plot
config = yaml.safe_load(open("config.yml"))
def run():
    """
    Runs analytics on the places data from firefox
    """
    src = config["config"]["places_directory"]
    if not os.path.exists(src):
        print(f"Unable to find '{src}'")
        sys.exit()
    # places.sqlite is locked while Firefox runs (sqlite concurrency),
    # so the analytics operate on a copy of the database.
    shutil.copyfile(src, config["config"]["places_destination"])
    plot.plot_visit_counts()
    plot.plot_visit_times()
# Script entry point: run the analytics pipeline when executed directly.
if __name__ == "__main__":
    run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
198,
11748,
331,
43695,
198,
198,
6738,
616,
62,
38200,
14094,
13,
34242,
1330,
7110,
198,
198,
11250,
796,
331,
4... | 2.505263 | 285 |
import setuptools

# Read the long description from the README; specify UTF-8 explicitly so
# the build does not depend on the platform's default encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="dcsdata",
    version="0.0.9",
    author="Charles E. Jewers",
    author_email="charlesejewers@gmail.com",
    description="A text data tool for the University of Sheffield.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=[
        # NOTE(review): 'bs4' is a dummy PyPI package that only pulls in
        # 'beautifulsoup4'; listing both is redundant.  Kept as-is to
        # avoid changing install behavior -- consider dropping 'bs4'.
        'beautifulsoup4',
        'bs4',
        'pandas',
        'mysqlclient',
        'SQLAlchemy',
        'requests',
        'Whoosh',
        'lxml'
    ],
    packages=[
        "dcsscrapers",
        "dcsscrapers.private",
        "dcssearch"
    ],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
... | 2.231959 | 388 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-05 21:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
22,
319,
1584,
12,
3312,
12,
2713,
2310,
25,
2713,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.724638 | 69 |
"""Support for MQTT switches."""
import logging
import voluptuous as vol
from homeassistant.components import mqtt, switch
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import (
CONF_DEVICE,
CONF_ICON,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)

# Defaults applied when the user's configuration omits the key.
DEFAULT_NAME = "MQTT Switch"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_OPTIMISTIC = False

# Optional dedicated state payloads; when unset the command payloads
# (payload_on/payload_off) are reused for state matching.
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"

# Validation schema for one switch entry: the generic MQTT read/write
# schema extended with switch-specific options plus the availability and
# JSON-attributes sub-schemas.
PLATFORM_SCHEMA = (
    mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
            vol.Optional(CONF_ICON): cv.icon,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
            vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
            vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
            vol.Optional(CONF_STATE_OFF): cv.string,
            vol.Optional(CONF_STATE_ON): cv.string,
            vol.Optional(CONF_UNIQUE_ID): cv.string,
        }
    )
    .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
    .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT switch through configuration.yaml."""
    # NOTE(review): discovery_info is passed positionally into the
    # config_entry parameter of _async_setup_entity.  Harmless when it
    # is None (the YAML path), but looks unintended -- confirm.
    await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT switch dynamically through MQTT discovery."""

    async def async_discover(discovery_payload):
        """Discover and add a MQTT switch."""
        discovery_data = discovery_payload.discovery_data
        try:
            # Validate the discovered payload against the same schema as
            # YAML configuration before creating the entity.
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(
                config, async_add_entities, config_entry, discovery_data
            )
        except Exception:
            # Forget this discovery so the device can be re-discovered
            # later, then propagate the failure.
            clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
            raise

    # Listen for newly discovered MQTT switches.
    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(switch.DOMAIN, "mqtt"), async_discover
    )
async def _async_setup_entity(
    config, async_add_entities, config_entry=None, discovery_data=None
):
    """Set up the MQTT switch."""
    # Shared factory used by both the YAML and the discovery setup path.
    async_add_entities([MqttSwitch(config, config_entry, discovery_data)])
class MqttSwitch(
    MqttAttributes,
    MqttAvailability,
    MqttDiscoveryUpdate,
    MqttEntityDeviceInfo,
    SwitchEntity,
    RestoreEntity,
):
    """Representation of a switch that can be toggled using MQTT."""

    def __init__(self, config, config_entry, discovery_data):
        """Initialize the MQTT switch."""
        self._state = False        # last known on/off state
        self._sub_state = None     # handle for the active MQTT subscriptions
        self._state_on = None      # payload meaning "on" in state messages
        self._state_off = None     # payload meaning "off" in state messages
        self._optimistic = None    # True when state is assumed, not reported
        self._unique_id = config.get(CONF_UNIQUE_ID)

        # Load config
        self._setup_from_config(config)

        device_config = config.get(CONF_DEVICE)

        # Initialize each mixin with its slice of the configuration.
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)

    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()

    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate and re-apply the configuration, refresh each mixin
        # and the subscriptions, then publish the new entity state.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()

    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config
        # State payloads fall back to the command payloads when unset.
        state_on = config.get(CONF_STATE_ON)
        self._state_on = state_on if state_on else config[CONF_PAYLOAD_ON]
        state_off = config.get(CONF_STATE_OFF)
        self._state_off = state_off if state_off else config[CONF_PAYLOAD_OFF]

        self._optimistic = config[CONF_OPTIMISTIC]

    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass

        @callback
        @log_messages(self.hass, self.entity_id)
        def state_message_received(msg):
            """Handle new MQTT state messages."""
            payload = msg.payload
            if template is not None:
                payload = template.async_render_with_possible_json_value(payload)
            if payload == self._state_on:
                self._state = True
            elif payload == self._state_off:
                self._state = False
            # A payload matching neither value leaves the state as-is.
            self.async_write_ha_state()

        if self._config.get(CONF_STATE_TOPIC) is None:
            # Force into optimistic mode.
            self._optimistic = True
        else:
            self._sub_state = await subscription.async_subscribe_topics(
                self.hass,
                self._sub_state,
                {
                    CONF_STATE_TOPIC: {
                        "topic": self._config.get(CONF_STATE_TOPIC),
                        "msg_callback": state_message_received,
                        "qos": self._config[CONF_QOS],
                    }
                },
            )

        if self._optimistic:
            # No state topic: restore the state recorded before restart.
            last_state = await self.async_get_last_state()
            if last_state:
                self._state = last_state.state == STATE_ON

    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state
        )
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
        await MqttDiscoveryUpdate.async_will_remove_from_hass(self)

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    @property
    def name(self):
        """Return the name of the switch."""
        return self._config[CONF_NAME]

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id

    @property
    def icon(self):
        """Return the icon."""
        return self._config.get(CONF_ICON)

    async def async_turn_on(self, **kwargs):
        """Turn the device on.

        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config[CONF_COMMAND_TOPIC],
            self._config[CONF_PAYLOAD_ON],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = True
            self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the device off.

        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._config[CONF_COMMAND_TOPIC],
            self._config[CONF_PAYLOAD_OFF],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            # Optimistically assume that switch has changed state.
            self._state = False
            self.async_write_ha_state()
| [
37811,
15514,
329,
337,
48,
15751,
18225,
526,
15931,
198,
11748,
18931,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
1330,
285,
80,
926,
11,
5078,
198,
6738,
1363,
562,
10167,
13,
5... | 2.18027 | 3,994 |
from voximplant.apiclient import VoximplantAPI, VoximplantException

if __name__ == "__main__":
    # Authenticate against the Voximplant management API.
    voxapi = VoximplantAPI("credentials.json")

    # Bind the skills 1, 3 to the users 5, 6, 10.
    SKILL_ID = [1, 3]
    USER_ID = [5, 6, 10]

    try:
        res = voxapi.bind_skill(skill_id=SKILL_ID,
                                user_id=USER_ID)
        print(res)
    except VoximplantException as e:
        print("Error: {}".format(e.message))
6738,
410,
1140,
320,
15060,
13,
499,
291,
75,
1153,
1330,
28035,
320,
15060,
17614,
11,
28035,
320,
15060,
16922,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
410,
1140,
15042,
796,
28035,
32... | 2.084112 | 214 |
#!/usr/bin/env python3

# Bisection over the expected record time combined with a backwards
# dynamic program over the tricks.
#
# Input: one line "n r m", then m lines "t p d" per trick.
# NOTE(review): the exact problem semantics (meaning of t, p, d, r) are
# inferred from the recurrence below -- confirm against the original
# problem statement.
n,r,m = [int(i) for i in input().split()]
data = [input().split() for i in range(m)]
data = [(int(t), float(p), int(d)) for t,p,d in data]

# Upper bound on any achievable time; serves as "infinity" in the DP.
inf = n*50000

# Binary search for the fixed point where the DP value of the full run
# equals the candidate expected extra time mid.
lower,upper = 0, inf
while True:
    mid = (lower+upper)/2
    if mid == lower or mid == upper:
        # The floating-point interval can no longer be halved.
        break
    DP = [0]*(r-n) # i-th iteration of DP[j] := Expected extra time it takes to break the record compared to an optimal run just before attempting trick m-i with j margin for error remaining.
    for t,p,d in reversed(data):
        # With probability p the trick succeeds (keep DP[j]); otherwise
        # take the cheaper of restarting (mid + t) or absorbing the
        # mistake if the remaining margin j allows it (d + DP[j-d]).
        DP = [p * DP[j] + (1-p) * min(mid + t, inf if j<d else d+DP[j-d]) for j in range(r-n)]
    if DP[r-n-1] > mid: lower = mid
    else: upper = mid

# n is the baseline time; add the converged expected extra time.
print(n + (lower+upper)/2)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
77,
11,
81,
11,
76,
796,
685,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
3419,
60,
198,
7890,
796,
685,
15414,
22446,
35312,
3419,
329,
1312,
287,
2837,
7,
76,
15437,
... | 2.344371 | 302 |
"""
This module performs probabilistic model on some network of streets given by
the modified adjacency matrix (with dictionary of length, width, alpha, beta,
orientation).
"""
import numpy as np
import scipy.integrate as integrate
from collections import defaultdict
from source.junction import Junction
class Model(object):
"""
This class of methods is the core part of the Probabilistic ray model of
energy propagation.
"""
def set_adjacency(self, modified_adjacency):
    """
    Set the modified adjacency matrix of the street network and derive
    the node count and the plain neighbour graph from it.

    :param modified_adjacency: square matrix where entry [i][j] is 0
        when there is no street, otherwise a dict of street properties
        (length, width, alpha, beta, orientation per module docstring).
    """
    self.__modified_adjacency = modified_adjacency
    self.__nodes = len(modified_adjacency)
    self.__set_graph(modified_adjacency)
def __set_graph(self, modified_adjacency):
    """
    Build and store the plain neighbour graph (node -> set of adjacent
    nodes) from the modified adjacency matrix.
    """
    graph = defaultdict(set)
    for i in range(self.__nodes):
        for j in range(self.__nodes):
            if modified_adjacency[i][j] != 0:
                graph[i].add(j)
    self.__graph = graph
def set_source(self, source1, source2):
    """
    Set the source street, identified by its two end nodes.

    :raises ValueError: when the nodes are not integers, lie outside
        the network, or are not neighbours.
    """
    try:
        source1 = int(source1)
        source2 = int(source2)
    except ValueError:
        raise ValueError("Source nodes must be integers.")
    # NOTE(review): the range check allows an index equal to the node
    # count ('>' instead of '>='); valid indices are 0..nodes-1, so this
    # looks like an off-by-one -- confirm before changing, since the
    # neighbour check below still rejects such nodes.
    if source1 < 0 or source1 > self.__nodes:
        raise ValueError("First source node not in range.")
    if source2 < 0 or source2 > self.__nodes:
        raise ValueError("Second source node not in range.")
    if source2 not in self.__graph[source1]:
        raise ValueError("Sources are not neighbours.")
    self.__source = (source1, source2)
def set_receiver(self, receiver1, receiver2):
    """
    Set the receiver street, identified by its two end nodes.

    :raises ValueError: when the nodes are not integers, lie outside
        the network, or are not neighbours.
    """
    try:
        receiver1 = int(receiver1)
        receiver2 = int(receiver2)
    except ValueError:
        raise ValueError("Receiver nodes must be integers.")
    # NOTE(review): same suspected off-by-one as in set_source
    # ('>' rather than '>=') -- confirm against the full source.
    if receiver1 < 0 or receiver1 > self.__nodes:
        raise ValueError("First receiver node not in range.")
    if receiver2 < 0 or receiver2 > self.__nodes:
        raise ValueError("Second receiver node not in range.")
    if receiver2 not in self.__graph[receiver1]:
        raise ValueError("Receivers are not neighbours.")
    self.__receiver = (receiver1, receiver2)
def set_threshold(self, threshold):
    """
    Set the path-length threshold: paths up to (shortest path length +
    threshold) take part in the computation.

    :raises ValueError: when threshold is not a non-negative integer.
    """
    try:
        threshold = int(threshold)
    except ValueError:
        raise ValueError("Threshold must be an integer.")
    if threshold < 0:
        raise ValueError("Threshold must be a positive number.")
    self.__threshold = threshold
def solve(self):
    """
    Compute the total power flow from source to receiver.

    Enumerates all connecting paths within the threshold, integrates
    the contribution of each path and accumulates powers and
    integration errors.  Requires set_source, set_receiver and
    set_threshold to have been called first.

    :returns: tuple (power, error, paths)
    """
    assert self.__source is not None and self.__receiver is not None and self.__threshold is not None
    paths = self.__compute_paths()  # obtain all connecting paths
    print("Number of paths is {}".format(len(paths)))

    power = 0
    error = 0
    for path in paths:
        # __walk builds the per-path integrand; __integrate evaluates it
        # (__integrate is not part of this excerpt).
        integrand = self.__walk(path)  # obtain functions and breaking points
        (part_power, part_error) = self.__integrate(integrand)
        power += part_power
        error += part_error

    print("==========================================")
    print("Resulting power from node {0} to node {1} is {2} (error {3})".format(
        self.__source, self.__receiver, power, error))
    return (power, error, paths)  # resulting power flow
    def __compute_paths(self):
        """
        This private method computes all paths between source and receiver.

        Source and receiver are both edges (node pairs), so all four
        endpoint combinations are searched.  Paths are limited to the
        shortest source-receiver distance plus the configured threshold.
        """
        lengths = []
        paths = []
        # Find lengths of all four combinations
        for source in self.__source:
            for receiver in self.__receiver:
                lengths.append(self.__dijkstra(source)[receiver])
        # Find minimal length and compute cutoff
        shortest_length = min(lengths)
        cutoff = shortest_length + self.__threshold
        # Find all paths of lengths up to cutoff of all four combinations
        for source in self.__source:
            for receiver in self.__receiver:
                # cutoff+1 because __find_paths treats its argument as an
                # exclusive bound on the remaining path length
                paths.extend(self.__find_paths(source, receiver, cutoff+1))
        return paths
def __find_paths(self, element, receiver, n, distances=False):
"""
This private method implements an algorithm for finding all paths
between source and receiver of specified length.
"""
# Compute distances dictionary only the first time
if not distances:
distances = self.__dijkstra(receiver)
paths = []
# Recursive algorithm
if n > 0:
for neighbor in self.__graph[element]:
for path in self.__find_paths(neighbor, receiver, n-1, distances):
if distances[element] < n:
paths.append([element]+path)
# Only append path if the last element is the receiver node
if element == receiver:
paths.append([element])
return paths
    def __walk(self, path):
        """
        This private method iterates through the path and fills the functions
        and other street values at each step.

        NOTE: mutates the caller's ``path`` list in place (insert/append of
        the "apparent" source and receiver endpoints).
        Relies on a ``Junction`` class defined elsewhere in this module --
        presumably it models a street crossing; verify against the full file.
        """
        # Prepend "apparent" source: the source is an edge, so the path is
        # extended with whichever source endpoint is not already first.
        if path[0] == self.__source[0]:
            path.insert(0, self.__source[1])
            lengths = [self.__modified_adjacency[path[0]][path[1]]["length"]/2]
        else:
            path.insert(0, self.__source[0])
            lengths = [self.__modified_adjacency[path[0]][path[1]]["length"]/2]
        # Fill width, alpha and rotation of the first street
        widths = [self.__modified_adjacency[path[0]][path[1]]["width"]]
        alphas = [self.__modified_adjacency[path[0]][path[1]]["alpha"]]
        betas = [self.__modified_adjacency[path[0]][path[1]]["beta"]]
        rotations = [0]
        # Append "apparent" receiver (same edge logic as the source above)
        if path[-1] == self.__receiver[0]:
            path.append(self.__receiver[1])
        else:
            path.append(self.__receiver[0])
        # Set empty array of functions and breaking points
        functions = []
        # Iterate through the rest of the path
        for i in range(1, len(path)-1):
            previous, current, following = path[i-1], path[i], path[i+1]
            # Get widths of appropriately rotated junction
            rotated_widths = self.__rotate(previous, current, following)
            junction = Junction(rotated_widths, current)
            functions.append(junction.compute_function())
            # Add length, alpha and rotation of the following street
            lengths.append(self.__modified_adjacency[current][following]["length"])
            widths.append(self.__modified_adjacency[current][following]["width"])
            alphas.append(self.__modified_adjacency[current][following]["alpha"])
            betas.append(self.__modified_adjacency[current][following]["beta"])
            # Rotation alternates mod 2 depending on the junction orientation
            rotations.append((rotations[-1]+junction.correct_orientation())%2)
        # Last length is only half (receiver sits mid-street, like the source)
        lengths[-1] = lengths[-1]/2
        return {"path": path,
                "functions": functions,
                "rotations": rotations,
                "lengths": lengths,
                "widths": widths,
                "alphas": alphas,
                "betas": betas
                }
def __rotate(self, previous, current, following):
"""
This private method determines the orientation of the junction and
provides information on street widths and exiting street.
"""
orientation = self.__modified_adjacency[current][previous]["orientation"]
backward = orientation
right = (orientation+1)%4
forward = (orientation+2)%4
left = (orientation+3)%4
rotated = {}
for neighbor in self.__graph[current]:
if self.__modified_adjacency[current][neighbor]["orientation"] == left:
rotated["left"] = self.__modified_adjacency[current][neighbor]["width"]
if following == neighbor:
rotated["next"] = "left"
elif self.__modified_adjacency[current][neighbor]["orientation"] == forward:
rotated["forward"] = self.__modified_adjacency[current][neighbor]["width"]
if following == neighbor:
rotated["next"] = "forward"
elif self.__modified_adjacency[current][neighbor]["orientation"] == right:
rotated["right"] = self.__modified_adjacency[current][neighbor]["width"]
if following == neighbor:
rotated["next"] = "right"
elif self.__modified_adjacency[current][neighbor]["orientation"] == backward:
rotated["backward"] = self.__modified_adjacency[current][neighbor]["width"]
if following == neighbor:
rotated["next"] = "backward"
return rotated
    def __integrate(self, integrand):
        """
        This private method integrates functions.

        NOTE(review): as shown, this body references ``compose_f`` and
        ``compose_L`` which are not defined anywhere in this file --
        presumably nested helper definitions (built from the locals below)
        were lost in extraction, which is also why most of the unpacked
        locals appear unused here.  Verify against the original source.
        """
        path = integrand["path"]
        functions = integrand["functions"]
        rotations = integrand["rotations"]
        lengths = integrand["lengths"]
        widths = integrand["widths"]
        alphas = integrand["alphas"]
        betas = integrand["betas"]
        # 2D vs 3D: with a height configured, the integrand is additionally
        # normalised by compose_L(theta).
        if not self.__height: # 2D
            (energy, error) = integrate.quad(compose_f, 0, np.pi/2)
        else: # 3D
            integrand = lambda theta: compose_f(theta)/compose_L(theta)
            (energy, error) = integrate.quad(integrand, 0, np.pi/2)
        print("Contribution from path {0}: {1} (error {2})".format(
            path, energy, error))
        return (energy, error)
def solve_all(self, positions):
"""
This method performs computations of the wave propagation problem from
the source to all possible receivers and returns the result as X and Y
coordinates of the receivers along with the percentage of the power
flow.
"""
receivers = self.__get_receivers()
powers = []
for receiver in receivers:
self.set_receiver(*receiver) # * unpacks tuple
powers.append(self.solve())
receiver_positions = self.__get_positions(receivers, positions)
source_position = self.__get_positions([self.__source], positions)
X = [element[0] for element in receiver_positions]
Y = [element[1] for element in receiver_positions]
Z = [element[0] for element in powers]
X.append(source_position[0][0])
Y.append(source_position[0][1])
Z.append(1)
return (X, Y, Z)
def __get_receivers(self):
"""
This private method returns a list of tuples of all possible receivers.
"""
receivers = []
for j in range(len(self.__modified_adjacency)):
for i in range(j):
if self.__modified_adjacency[i][j] != 0:
if self.__source != (i,j) and self.__source != (j,i):
receivers.append((i,j))
return receivers
def __get_positions(self, streets, positions):
"""
This private method returns a list of tuples of (X, Y) coordinates of
middles of the given streets based on the input positions matrix.
"""
center_positions = []
for street in streets:
x1, y1 = positions[street[0]][0], positions[street[0]][1]
x2, y2 = positions[street[1]][0], positions[street[1]][1]
if x1 == x2:
center_positions.append((x1, (y1+y2)/2))
elif y1 == y2:
center_positions.append(((x1+x2)/2, y1))
return center_positions
| [
37811,
198,
1212,
8265,
17706,
1861,
14991,
2569,
2746,
319,
617,
3127,
286,
6483,
1813,
416,
198,
1169,
9518,
9224,
330,
1387,
17593,
357,
4480,
22155,
286,
4129,
11,
9647,
11,
17130,
11,
12159,
11,
198,
13989,
341,
737,
198,
37811,
... | 2.305043 | 5,255 |
from __future__ import print_function
import argparse
import yaml
from logger import Logger
from test_model.test import selfsupervised
if __name__ == "__main__":
    # Load the config file
    parser = argparse.ArgumentParser(description="Sensor fusion model")
    parser.add_argument("--config", help="YAML config file")
    parser.add_argument("--notes", default="", help="run notes")
    # NOTE(review): argparse's type=bool is a known pitfall (bool("False") is
    # True); changing it to action="store_true" would alter the CLI, so it is
    # only flagged here.
    parser.add_argument("--dev", type=bool, default=False, help="run in dev mode")
    parser.add_argument(
        "--continuation",
        type=bool,
        default=False,
        help="continue a previous run. Will continue the log file",
    )
    args = parser.parse_args()

    # Add the yaml to the config args parse
    with open(args.config) as f:
        # Bug fix: yaml.load() without an explicit Loader is unsafe on
        # untrusted input and raises TypeError on PyYAML >= 6; safe_load is
        # the correct call for plain config data.
        configs = yaml.safe_load(f)

    # Merge configs and args (command-line values win over the YAML file)
    for arg in vars(args):
        configs[arg] = getattr(args, arg)

    # Initialize the loggers
    logger = Logger(configs)

    # Initialize the trainer
    test = selfsupervised(configs, logger)
    test.tests()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
331,
43695,
198,
198,
6738,
49706,
1330,
5972,
1362,
198,
6738,
1332,
62,
19849,
13,
9288,
1330,
2116,
16668,
16149,
198,
198,
361,
11593,
3672,
834,
66... | 2.758713 | 373 |
from flask import Blueprint, request
from routers.geolocation.use_case_geolocation import get_address_from_geolocation
geolocation_router = Blueprint('geolocation_router', __name__)
# ------------------------------------------------------------------
# Routers
# ------------------------------------------------------------------
@geolocation_router.route('/address/', methods=['GET'])
| [
6738,
42903,
1330,
39932,
11,
2581,
198,
198,
6738,
41144,
13,
469,
349,
5040,
13,
1904,
62,
7442,
62,
469,
349,
5040,
1330,
651,
62,
21975,
62,
6738,
62,
469,
349,
5040,
198,
198,
469,
349,
5040,
62,
472,
353,
796,
39932,
10786,
... | 4.344444 | 90 |
# -*- coding: utf-8 -*-
# Package metadata consumed by packaging tools and runtime introspection.
__author__ = 'Weiran Huang'
__email__ = 'huangweiran1998@gmail.com'
__version__ = '1.0.0'
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
1135,
343,
272,
31663,
6,
198,
834,
12888,
834,
796,
705,
13415,
648,
732,
343,
272,
21113,
31,
14816,
13,
785,
6,
198,
834,
9641,
834... | 2.111111 | 54 |
import math
f()
| [
11748,
10688,
198,
69,
3419,
198
] | 2.666667 | 6 |
# Read the number of stairs, then one score per stair (1-indexed).
n = int(input())
score = dict([])
max_dict = dict([])
for i in range(1, n + 1):
    score[i] = int(input())
# max(n) = max(max(n-2), max(n-3) + score(n-1)) + score(n)
# Key IDEA: note the constraint that the very last stair must be stepped on.
# Given that, there are only two possible stairs to have come from.
# Solved with Dynamic Programming and recursion. => Assume a function that
# already works for every input smaller than n. (SICP)
# Base cases for 1-3 stairs are filled in directly:
if n >= 1:
    max_dict[1] = score[1]
if n >= 2:
    max_dict[2] = score[1] + score[2]
if n >= 3:
    max_dict[3] = max(score[1], score[2]) + score[3]
if n >= 4:
    # NOTE(review): get_max is not defined in this excerpt -- presumably it
    # fills max_dict recursively per the recurrence above; confirm upstream.
    get_max(n)
print(max_dict[n])
77,
796,
493,
7,
15414,
28955,
198,
198,
26675,
796,
8633,
26933,
12962,
198,
9806,
62,
11600,
796,
8633,
26933,
12962,
198,
198,
1640,
1312,
287,
2837,
7,
16,
11,
299,
1343,
352,
2599,
198,
220,
220,
220,
4776,
58,
72,
60,
796,
4... | 1.313924 | 395 |
from django.shortcuts import get_object_or_404
from apps.canvas_auth.models import User
from apps.suggest.models import get_suggested_tags
from apps.tags.models import Tag
from canvas import bgwork, models
from canvas.api_decorators import api_decorator
from canvas.exceptions import ServiceError
from canvas.metrics import Metrics
from canvas.view_guards import require_staff, require_POST, require_user
from services import Services
# urlpatterns is filled in as a side effect: the @api decorator returned by
# api_decorator registers each decorated view into this list.
urlpatterns = []
api = api_decorator(urlpatterns)
@api('follow_tag')
@require_user
@api('unfollow_tag')
@require_user
@api('update_comment_tags')
@require_user
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,
198,
6738,
6725,
13,
5171,
11017,
62,
18439,
13,
27530,
1330,
11787,
198,
6738,
6725,
13,
47811,
13,
27530,
1330,
651,
62,
47811,
276,
62,
31499,
198... | 3.302198 | 182 |
# Progress indicator for Python 3.5.1
# Version 0.1.1
# Johannes Asplund-Samuelsson
class Progress():
    """Progress indication thingamabob

    Renders a configurable progress line composed of any combination of a
    percentage, a spinner, a wget-style bar, a countdown timer and a value
    counter, selected and ordered via the ``design`` string.
    """
    # Design variables: recognised indicator codes
    __allowed_designs = set(['p','s','b','t','c'])
    # Spinner variables
    __spin_stages = ['/','-','\\','|']
    __spin_stage = 0
    __last_spin_time = 0
    # Timer variables
    __previous_progress = 0
    __last_timer_time = 0
    def __init__(self, max_val = 100, design = 'p', val = 0):
        """Initialize the Progress indicator
        ARGUMENTS
        max_val : int, float
            The expected maximum or target value.
        design : string
            The type and order of progress indicators.
                'p' : percent
                's' : spinner
                'b' : bar
                't' : timer
                'c' : counter
        val : int, float
            The current value.
        """
        # Import functions
        from os import popen
        from sys import stdout
        from time import time
        from time import sleep
        from collections import deque
        from datetime import timedelta
        # Bind imported functions to self
        self.__popen = popen
        self.__stdout = stdout
        self.__time = time
        self.__sleep = sleep
        self.__deque = deque
        self.__timedelta = timedelta
        # Initialize variables
        self.update(val, max_val, design)
        self.__speed_samples = list()
    def __call__(self, val = None):
        """Calling returns a string of the current progress"""
        # Bug fix: compare against None so a legitimate value of 0 still
        # updates the progress (the original ``if val:`` silently ignored it).
        if val is not None:
            self.val = val
        return self.to_string()
    def __format(self):
        """Format the progress indicator output string"""
        output = []
        for variant in self.design:
            if variant == 'p':
                output.append(self.percent())
            if variant == 's':
                output.append(self.spinner())
            if variant == 'b':
                output.append(self.bar())
            if variant == 't':
                output.append(self.timer())
            if variant == 'c':
                output.append(self.counter())
        return ' '.join(output)
    def __avg_speed(self):
        """Calculate the average speed over the collected samples"""
        if self.__speed_samples:
            avg_speed = sum([s[0] / s[1] for s in self.__speed_samples]) \
                        / len(self.__speed_samples)
        else:
            avg_speed = 0
        return avg_speed
    def percent(self):
        """Percent progress towards the maximum"""
        # Calculate percent as float and format as string
        if self.max_val:
            percent = float(self.val / self.max_val * 100)
            return "{0:>6}".format("%0.1f%%" % percent)
        else:
            # a zero maximum cannot yield a meaningful percentage
            percent = "INF%"
            return "{0:>6}".format(percent)
    def spinner(self):
        """A spinner that (might) indicate that something is happening"""
        # Calculate time since last spin
        time_since_spin = self.__time() - self.__last_spin_time
        # Determine whether to spin or not (rate-limited to ~10 Hz)
        # Don't spin...
        if time_since_spin < 0.1:
            return self.__spin_stages[self.__spin_stage]
        # Spin...
        # Calculate spin stage
        if self.__spin_stage < len(self.__spin_stages) - 1:
            self.__spin_stage += 1
        else:
            self.__spin_stage = 0
        # Set time of last spin
        self.__last_spin_time = self.__time()
        # Return spinner character of current spin stage
        return self.__spin_stages[self.__spin_stage]
    def bar(self):
        """Progress bar with the wget design"""
        # Get the current terminal width (fall back to 80 columns when the
        # stty call fails, e.g. when not attached to a terminal)
        try:
            rows, columns = self.__popen('stty size', 'r').read().split()
            columns = int(columns)
        except ValueError:
            columns = 80
        # Count the width of other indicators
        width = 0
        for variant in self.design:
            if variant == 'p':
                width += len(self.percent())
                width += 1
            if variant == 's':
                width += 2
            if variant == 't':
                width += len(self.timer())
                width += 1
            if variant == 'c':
                width += len(self.counter())
        # Calculate the allowed bar width
        allowed_width = min(columns - width, 55)
        # Calculate the filled width at current progress state
        full_width = allowed_width - 2 # 2 for the caps
        # If the maximum value is zero, the bar cannot be calculated
        if not self.max_val:
            return ''.join(['['] + ['='] * full_width + [']'])
        # Construct the progress bar - Example: [=====>    ]
        n_filled = round((self.val / self.max_val) * full_width)
        n_empty = full_width - n_filled
        bar = ['='] * n_filled
        if bar and n_empty > 0:
            bar[-1] = '>'
        bar = ['['] + bar + [' '] * n_empty + [']']
        return ''.join(bar)
    def timer(self):
        """Countdown timer based on average progress speed"""
        time_since_timer = self.__time() - self.__last_timer_time
        # Add a speed sample if 1 or more seconds have passed
        if time_since_timer >= 1:
            self.__speed_samples.append(
                (self.val - self.__previous_progress, time_since_timer)
            )
            self.__previous_progress = self.val
            self.__last_timer_time = self.__time()
        # If an average speed can be calculated, return a remaining time string
        if self.__avg_speed():
            remaining_time = (self.max_val - self.val) / self.__avg_speed()
            if remaining_time <= 24*3600-1:
                # under a day: render as H:MM:SS
                clock = str(self.__timedelta(seconds=int(remaining_time)))
                return "{0:>8}".format(clock)
            elif remaining_time <= 7*24*3600-1:
                days = str(self.__timedelta(seconds=int(remaining_time)).days)
                return ' >' + days + ' days'
            else:
                return ' >1 week'
        # Return a blank remaining time string if the average speed is zero
        else:
            return '--:--:--'
    def counter(self):
        """The actual value as a fraction of the maximum"""
        # width fits "max_val/max_val" for right-justification
        form = "{0:>%s}" % str(2*len(str(self.max_val)) + 2)
        return form.format(str(self.val) + '/' + str(self.max_val))
    def update(self, val = None, max_val = None, design = None):
        """Update parameters of the progress indicator"""
        if val is not None:
            self.val = val
        if max_val is not None:
            self.max_val = max_val
        if design is not None:
            if set(design) - self.__allowed_designs:
                raise Exception('Invalid progress indicator type.')
            else:
                self.design = list(design)
    def to_string(self, val = None):
        """Produce a string of the current progress"""
        # Bug fix: ``is not None`` so that val=0 is honoured (see __call__)
        if val is not None:
            self.val = val
        return self.__format()
    def write(self, val = None):
        """Write progress to standard output with carriage return and flush."""
        # Bug fix: ``is not None`` so that val=0 is honoured (see __call__)
        if val is not None:
            self.val = val
        self.__stdout.write("\r" + self.__format())
        self.__stdout.flush()
    def test(self):
        """Test the output of the active parameters"""
        val = self.val
        for i in range(1000):
            x = (i + 1) / 10
            self.write(x)
            self.__sleep(0.01)
        self.val = val
        self.__stdout.write("\n")
| [
2,
18387,
16916,
329,
11361,
513,
13,
20,
13,
16,
198,
2,
10628,
657,
13,
16,
13,
16,
198,
2,
38579,
1081,
489,
917,
12,
16305,
2731,
16528,
198,
198,
4871,
18387,
33529,
198,
220,
220,
220,
37227,
32577,
12955,
1517,
321,
397,
67... | 2.138667 | 3,512 |
"""Simple tests for the simple stack language."""
import simpleStack
def test_hello_world():
    """Run examples/hello.ss and check the captured greeting."""
    captured = []

    def capture(val, end=""):
        """Collect printed fragments instead of writing to stdout."""
        captured.append(val + end)

    with open("examples/hello.ss") as script:
        simpleStack.run_simple_stack(script.readlines(), printFun=capture)
    assert captured[0] == "Hello"
    assert captured[1] == " World!\n"
def test_fizz_buzz():
    """Run examples/fizzbuzz.ss and verify all 100 lines of output."""
    captured = []

    def capture(val, end=""):
        """Collect printed output, ignoring bare newline separators."""
        text = str(val) + end
        if text != "\n":
            captured.append(text)

    with open("examples/fizzbuzz.ss") as script:
        simpleStack.run_simple_stack(script.readlines(), printFun=capture)
    assert len(captured) == 100
    for number, line in enumerate(captured, start=1):
        if number % 3 == 0:
            assert "Fizz" in line
        if number % 5 == 0:
            assert "Buzz" in line
        if number % 3 and number % 5:
            assert str(number) in line
| [
37811,
26437,
5254,
329,
262,
2829,
8931,
3303,
526,
15931,
198,
198,
11748,
2829,
25896,
628,
198,
4299,
1332,
62,
31373,
62,
6894,
33529,
198,
220,
220,
220,
37227,
51,
3558,
23748,
995,
526,
15931,
198,
220,
220,
220,
17259,
796,
1... | 2.260246 | 488 |
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from config import BASE_URL
| [
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
1330,
2938,
62,
17561,
1756,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
17077,
1330,... | 3.603774 | 53 |
import os
from collections import defaultdict
import click
import spacy
from allennlp.common.logging import logger
from spacy.symbols import ORTH
from bs4 import BeautifulSoup, Comment
from tqdm import tqdm
from conllu import TokenList
import wiktionarifier.scrape.db as db
from wiktionarifier.format.const import VALID_POS, NON_DEFINITION_HEADINGS
from wiktionarifier.format.exceptions import FormatException
def discard_empty_elements(soup, exempt=()):
    """Remove HTML elements that have no/whitespace-only content"""
    for element in soup.find_all():
        # strip=True collapses pure-whitespace text to the empty string
        has_text = bool(element.get_text(strip=True))
        if not has_text and element.name not in exempt:
            element.extract()
    return soup
def excise_elements(soup, css_selectors=()):
    """
    For each css selector, get rid of them while preserving their children.
    E.g., if the css selector is "span":
        <p>Exact science based on <b><span><em><span>Cubics</span></em></span></b>,
        not on <span>theories</span>. Wisdom is Cubic testing of knowledge.</p>
    becomes
        <p>Exact science based on <b><em>Cubics</em></b>,
        not on theories. Wisdom is Cubic testing of knowledge.</p>
    inspired by: https://stackoverflow.com/questions/1765848/remove-a-tag-using-beautifulsoup-but-keep-its-contents

    NOTE(review): sorting relies on a ``depth`` key function that is not
    defined in this excerpt -- presumably it ranks tags deepest-first so
    nested matches are unwrapped before their ancestors; confirm upstream.
    """
    for selector in css_selectors:
        for tag in sorted(soup.select(selector), reverse=True, key=depth):
            # a tag already extracted via a deeper match has no parent
            if getattr(tag, "parent", None):
                # move the children out in front of the tag, one at a time,
                # then drop the now-empty tag itself
                while len(tag.contents) > 0:
                    c = tag.contents[0]
                    tag.insert_before(c)
                tag.extract()
    return soup
def find_entries(tokenizer, text, soup):
    """
    Given parsed HTML for a wiktionary page, use heuristics to find the dictionary
    entries for each language on the page. The heuristics assume that entries
    correspond to <li> elements under an <h3> or <h4> element with a valid POS, and
    that the corresponding "parent" <h2> or <h3> tag above the POS tag has the name
    of the language. See https://en.wiktionary.org/wiki/Wiktionary:Entry_layout
    Args:
        tokenizer: spacy tokenizer that knows how to tokenize <a> and </a>
        text: SQLite text object
        soup: parsed page HTML
    Returns:
        dict where keys are language names and values are lists of tokenized strings

    NOTE(review): depends on ``is_pos_header`` and ``remove_a_attrs`` which
    are not defined in this excerpt -- presumably module-level helpers.
    """
    entries = defaultdict(list)
    # A list of pairs, where the first is the level of the header (1 for <h1>, etc.)
    # and the second is the BeautifulSoup node reference
    headers = []
    # keep track of whether we're currently consuming entries--while we're doing so,
    # we also need to know the header level of the POS header, the name of the language,
    # and a reference to the parent of the first <li> elements we encounter that we treat
    # as definitions
    reading_entries = False
    pos_header_level = None
    language_name = None
    li_container = None
    # depth-first traversal of the page
    for node in soup.find_all():
        tag_type = node.name
        # keep track of ALL of these headers as we traverse the document
        if tag_type in ["h2", "h3", "h4"]:
            headers.append((int(tag_type[-1]), node.text))
        # if we encounter a header that looks like a POS header, begin reading entries
        # and also note the language name, which will be the last header we read on a level
        # higher than the POS header's level. E.g. if we find <h3>Noun</h3> and our last <h2>
        # element was <h2>English</h2>, the language name is English
        if is_pos_header(node):
            pos_header_level = int(tag_type[-1])
            parent_titles = [title for level, title in headers if level == pos_header_level - 1]
            if len(parent_titles) == 0:
                # no enclosing language header: skip this POS section entirely
                logger.warn(
                    f"Found a definition entry that does not appear to be nested under a language header on {text.url}"
                )
                pos_header_level = None
                continue
            reading_entries = True
            language_name = parent_titles[-1]
        # Read definitions if the flag is set and the node is <li>
        elif reading_entries and tag_type == "li":
            # If this is the first <li> we're reading, hold a ref to its parent
            if li_container is None:
                li_container = node.parent
            # if we encounter an <li> and it does NOT share a parent with the other <li>
            # items we've seen, assume we've consumed all available definitions and bail
            # out. (This can happen if there's another list e.g. for derived terms)
            elif li_container != node.parent:
                li_container = None
                # NOTE(review): None here where False is used elsewhere for
                # this flag -- falsy either way, but worth normalising.
                reading_entries = None
                pos_header_level = None
                continue
            # parse the inner html
            inner_content = BeautifulSoup(node.decode_contents(), features="html.parser")
            # discard all tags which are not <a>
            inner_content = excise_elements(inner_content, [":not(a[href])"])
            # to make tokenization simpler, remove attrs from all <a> elements and store them in a separate list
            inner_content, a_attrs = remove_a_attrs(inner_content)
            # get the tokens with the dehydrated <a> tags
            tokenized = tokenizer(str(inner_content).replace("</a>", " </a> ").replace("<a>", " <a> "))
            # build the list of final tokens
            tokens = []
            i = 0
            for t in tokenized:
                t = t.text
                # rehydrate <a> tags using the a_attrs list we got earlier
                if t == "<a>":
                    soup = BeautifulSoup("<a></a>", features="html.parser").find("a")
                    soup.attrs = a_attrs[i]
                    # keep only the opening tag: drop the trailing "</a>"
                    t = str(soup)[:-4]
                    i += 1
                tokens.append(t)
            # store list of tokens
            entries[language_name].append(tokens)
        # We're done reading entries if we run into a header that's at least as high as the
        # POS tag header (if not higher)
        elif (
            reading_entries
            and tag_type in [f"h{i}" for i in range(1, pos_header_level + 1)]
            and int(tag_type[-1]) == pos_header_level
        ):
            li_container = None
            reading_entries = False
            pos_header_level = None
    return entries
| [
11748,
28686,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
3904,
198,
11748,
599,
1590,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
6404,
2667,
1330,
49706,
198,
6738,
599,
1590,
13,
1837,
2022,
10220,
1330,
6375,
4221,
198,
67... | 2.413741 | 2,649 |
#!/usr/bin/python3
'''
Check the given file for WOPI extended attributes
Author: Giuseppe.LoPresti@cern.ch
CERN IT/ST
'''
import sys, os, getopt, configparser, logging, jwt
storage = None
# usage function
def usage(exitcode):
    '''Print a short usage message, then terminate with the given exit code.'''
    script = sys.argv[0]
    print('Usage : ' + script + ' [-h|--help] <filename>')
    sys.exit(exitcode)
def storage_layer_import(storagetype):
    '''A convenience function to import the storage layer module specified in the config and make it globally available'''
    global storage # pylint: disable=global-statement
    if storagetype in ['local', 'xroot', 'cs3']:
        # map the short config name onto the actual module name
        storagetype += 'iface'
    else:
        raise ImportError('Unsupported/Unknown storage type %s' % storagetype)
    try:
        storage = __import__(storagetype, globals(), locals())
    except ImportError:
        # Bug fix: the original passed the module name as a second argument
        # to print() instead of substituting it into the '{}' placeholder.
        print("Missing module when attempting to import {}. Please make sure dependencies are met.".format(storagetype))
        raise
def _getLockName(fname):
    '''Generates a hidden filename used to store the WOPI locks. Copied from wopiserver.py.'''
    directory = os.path.dirname(fname)
    basename = os.path.basename(fname)
    # hide the lock next to the target file: <dir>/.sys.wopilock.<name>.
    return directory + os.path.sep + '.sys.wopilock.' + basename + '.'
# first parse the options
try:
    options, args = getopt.getopt(sys.argv[1:], 'hv', ['help', 'verbose'])
except getopt.GetoptError as e:
    print(e)
    usage(1)
verbose = False
for f, v in options:
    if f == '-h' or f == '--help':
        usage(0)
    elif f == '-v' or f == '--verbose':
        verbose = True
    else:
        print("unknown option : " + f)
        usage(1)
# deal with arguments: exactly one filename is expected
if len(args) < 1:
    print('Not enough arguments')
    usage(1)
if len(args) > 1:
    print('Too many arguments')
    usage(1)
filename = args[0]
# initialization: errors-only console logging, then layered config
# (defaults file is mandatory, site file optional overrides)
console = logging.StreamHandler()
console.setLevel(logging.ERROR)
logging.getLogger('').addHandler(console)
config = configparser.ConfigParser()
config.read_file(open('/etc/wopi/wopiserver.defaults.conf')) # fails if the file does not exist
config.read('/etc/wopi/wopiserver.conf')
wopisecret = open(config.get('security', 'wopisecretfile')).read().strip('\n')
storage_layer_import(config.get('general', 'storagetype'))
storage.init(config, logging.getLogger(''))
# stat + getxattr the given file
try:
    instance = 'default'
    if filename.find('/eos/user/') == 0:
        # filename[10] is the first letter of the username after '/eos/user/'
        instance = 'eoshome-' + filename[10] + '.cern.ch'
    statInfo = storage.statx(instance, filename, '0:0')
    try:
        wopiTime = storage.getxattr(instance, filename, '0:0', 'oc.wopi.lastwritetime')
        try:
            # read and decode the companion lock file, if any
            lockcontent = b''
            for line in storage.readfile(instance, _getLockName(filename), '0:0'):
                if isinstance(line, IOError):
                    raise line # no pre-existing lock found, or error attempting to read it: assume it does not exist
                # the following check is necessary as it happens to get a str instead of bytes
                lockcontent += line if isinstance(line, type(lockcontent)) else line.encode()
            wopiLock = jwt.decode(lockcontent, wopisecret, algorithms=['HS256'])
            print('%s: inode = %s, mtime = %s, last WOPI write time = %s, locked: %s' % (filename, statInfo['inode'], statInfo['mtime'], wopiTime, wopiLock))
        except jwt.exceptions.DecodeError:
            print('%s: inode = %s, mtime = %s, last WOPI write time = %s, unreadable lock' % (filename, statInfo['inode'], statInfo['mtime'], wopiTime))
    except IOError:
        # missing oc.wopi.lastwritetime xattr: file not managed by WOPI
        print('%s: inode = %s, mtime = %s, not being written by the WOPI server' % (filename, statInfo['inode'], statInfo['mtime']))
except IOError as e:
    print('%s: %s' % (filename, e))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
7061,
6,
198,
9787,
262,
1813,
2393,
329,
370,
3185,
40,
7083,
12608,
198,
198,
13838,
25,
8118,
1904,
27768,
13,
27654,
47,
2118,
72,
31,
30903,
13,
354,
198,
34,
28778,
7283,
14,
22... | 2.684373 | 1,299 |
import os

# Flask settings
FLASK_SERVER_NAME = os.environ.get('SERVER_NAME', 'localhost:8888')
FLASK_DEBUG = True # Do not use debug mode in production

# SQLAlchemy settings
# NOTE(review): database credentials are hard-coded here (read-only ones,
# but still) -- consider sourcing the URI from the environment as well.
SQLALCHEMY_DATABASE_URI = 'postgresql://readonly:readonly@data-next.obudget.org:5432/budgetkey'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True # Do not use debug mode in production
| [
11748,
28686,
198,
198,
2,
46947,
6460,
198,
3697,
1921,
42,
62,
35009,
5959,
62,
20608,
796,
28686,
13,
268,
2268,
13,
1136,
10786,
35009,
5959,
62,
20608,
3256,
705,
36750,
25,
3459,
3459,
11537,
198,
3697,
1921,
42,
62,
30531,
796,... | 2.77037 | 135 |
import argparse
import sys
from os import path
from time import time
import cv2 as cv
from cvtlib.drawing import Drawer
from cvtlib.files import list_files
from cvtlib.image import resize
IMAGE_EXT = ('.jpeg', '.jpg', '.png')
if __name__ == '__main__':
    # resolve model paths relative to this script's location
    curr_dir = path.dirname(path.abspath(__file__))
    parent_dir, _ = path.split(curr_dir)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        type=str,
        required=False,
        default=path.join(curr_dir, 'data/images/pose/'),
        help='Path to input image file or directory containing image files.'
    )
    # NOTE(review): argparse type=bool is a footgun (bool("False") is True);
    # flagged only, since switching to store_true would change the CLI.
    parser.add_argument(
        '--show_scores',
        type=bool,
        required=False,
        default=False,
        help='Show detection and marking score of each face.'
    )
    parser.add_argument(
        '--weights_detector',
        type=str,
        required=False,
        default=path.join(parent_dir, 'models/weights_face_detector.pth'),
        help='Path to file containing the model weights of face detector.'
    )
    parser.add_argument(
        '--weights_marker',
        type=str,
        required=False,
        default=path.join(parent_dir, 'models/weights_face_marker.npy'),
        help='Path to file containing the model weights of face marker.'
    )
    # NOTE(review): "enocoder" in the help text below is a typo for "encoder".
    parser.add_argument(
        '--weights_encoder',
        type=str,
        required=False,
        default=path.join(parent_dir, 'models/weights_face_encoder.pth'),
        help='Path to file containing the model weights of face enocoder.'
    )
    args = parser.parse_args(sys.argv[1:])
    # NOTE(review): run() is not defined in this excerpt -- presumably the
    # module-level entry point defined above; confirm upstream.
    run(
        args.input,
        detector_weights_path=args.weights_detector,
        marker_weights_path=args.weights_marker,
        encoder_weights_path=args.weights_encoder,
        show_scores=args.show_scores
    )
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
6738,
28686,
1330,
3108,
198,
6738,
640,
1330,
640,
198,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
6738,
269,
36540,
8019,
13,
19334,
278,
1330,
15315,
263,
198,
6738,
269,
36540,
8019,
13... | 2.354839 | 775 |
import csv

# Append one (position, data) record to the key-log CSV.
with open('C:\\gta5_console\\keys_data\\keys_data.csv', 'a', newline='') as csvfile:
    fieldnames = ['position', 'data']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    # Bug fix: the original row used a 'name' key that is not in fieldnames,
    # which makes DictWriter.writerow raise ValueError; the declared first
    # column is 'position'.
    writer.writerow({'position': 'Baked', 'data': 'Beans'})
| [
11748,
269,
21370,
198,
198,
4480,
1280,
10786,
34,
25,
6852,
70,
8326,
20,
62,
41947,
6852,
13083,
62,
7890,
6852,
13083,
62,
7890,
13,
40664,
3256,
705,
64,
3256,
649,
1370,
28,
7061,
8,
355,
269,
21370,
7753,
25,
198,
220,
220,
... | 2.52 | 100 |
import discord
from discord.ext import commands
import asyncio
import os
import time
from utils.dataIO import dataIO, fileIO
class General:
    """General cogs for the bot."""
    # NOTE(review): this cog reads self.units, self.reminders and self.bot,
    # none of which are initialised in the code shown here -- presumably an
    # __init__ was lost from this excerpt; confirm against the original cog.
    @commands.command(pass_context=True)
    async def reminder(self, ctx, quantity: int, time_unit: str, *, text: str):
        """Gives you a reminder.

        Schedules a reminder *quantity* time units in the future and
        persists the pending list to data/general/reminders.json.
        """
        time_unit = time_unit.lower()
        author = ctx.message.author
        s = "" # plural suffix echoed back to the user
        if time_unit.endswith("s"):
            # accept plural unit names ("minutes" -> "minute")
            time_unit = time_unit[:-1]
            s = "s"
        if not time_unit in self.units:
            await self.bot.say(":x: Invalid time unit. You must choose: minute/hour/day/week/month")
            return
        if quantity < 1:
            await self.bot.say(":x: Quantity must not be 0 or negative.")
            return
        if len(text) > 1960:
            await self.bot.say(":x: Text is too long. Shorten your text.")
            return
        # absolute UNIX timestamp at which the reminder fires
        seconds = self.units[time_unit] * quantity
        future = int(time.time() + seconds)
        self.reminders.append({"ID": author.id, "FUTURE": future, "TEXT": text})
        await self.bot.say(":clock: I will remind you in {} {}.".format(str(quantity), time_unit + s))
        fileIO("data/general/reminders.json", "save", self.reminders)
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
11748,
30351,
952,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
3384,
4487,
13,
7890,
9399,
1330,
1366,
9399,
11,
2393,
9399,
628,
198,
4871,
3611,
25,
198,
220,
220,
220,
3... | 2.327839 | 546 |
# -*- coding: utf-8 -*-
"""
CRF DU task core. Supports classical CRF and Typed CRF
Copyright Xerox(C) 2016, 2017 JL. Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union�s Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import trace, traceln
from common.chrono import chronoOn, chronoOff
from tasks.DU_Task import DU_Task
import graph.GraphModel
# dynamically imported
Model_SSVM_AD3 = None
Model_SSVM_AD3_Multitype = None
class DU_CRF_Task(DU_Task):
"""
DU learner based on graph CRF
"""
VERSION = "CRF_v19"
version = None # dynamically computed
sMetadata_Creator = "NLE Document Understanding Typed CRF-based - v0.4"
# sXmlFilenamePattern = "*[0-9]"+MultiPageXml.sEXT #how to find the Xml files
#--- CONFIGURATION setters --------------------------------------------------------------------
def isTypedCRF(self):
"""
if this a classical CRF or a Typed CRF?
"""
return bool(self.iNbNodeType > 1)
#--- COMMAND LINE PARSZER --------------------------------------------------------------------
@classmethod
@classmethod
#--- UTILITIES ------------------------------------------------------------------------------------------
def getStandardLearnerConfig(self, options):
"""
Once the command line has been parsed, you can get the standard learner
configuration dictionary from here.
"""
o = options
return {
'njobs' : 16 if o.crf_njobs is None else o.crf_njobs
, 'max_iter' : 1000 if o.max_iter is None else o.max_iter
, 'C' : .1 if o.crf_C is None else o.crf_C
, 'inference_cache' : 50 if o.crf_inference_cache is None else o.crf_inference_cache
, 'tol' : .05 if o.crf_tol is None else o.crf_tol
, 'save_every' : 10
, 'balanced' : False # balanced instead of uniform class weights
}
# ------------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
version = "v.01"
usage, description, parser = DU_CRF_Task.getStandardOptionsParser(sys.argv[0], version)
parser.print_help()
traceln("\nThis module should not be run as command line. It does nothing. (And did nothing!)")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
8740,
37,
35480,
4876,
4755,
13,
45267,
15993,
8740,
37,
290,
17134,
276,
8740,
37,
198,
220,
220,
220,
220,
198,
220,
220,
220,
15069,... | 2.476675 | 1,179 |
import unittest
import warnings
import pygsti
from pygsti.modelpacks.legacy import std1Q_XYI as stdxyi
from pygsti.modelpacks.legacy import std1Q_XY as stdxy
from pygsti.objects import modelfunction as gsf
from pygsti.objects.mapforwardsim import MapForwardSimulator
from pygsti.objects import Label as L
import numpy as np
import sys, os
import pickle
from ..testutils import BaseTestCase, compare_files, temp_files
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
11748,
555,
715,
395,
198,
11748,
14601,
198,
11748,
12972,
70,
301,
72,
198,
6738,
12972,
70,
301,
72,
13,
19849,
32377,
13,
1455,
1590,
1330,
14367,
16,
48,
62,
34278,
40,
355,
14367,
5431,
72,
198,
6738,
12972,
70,
301,
72,
13,
... | 2.915663 | 166 |
# Generated by Django 3.1 on 2020-09-20 15:14
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
319,
12131,
12,
2931,
12,
1238,
1315,
25,
1415,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 2.904762 | 42 |
# -*- coding: utf-8 -*-
'''
@author: jeffzhengye
@contact: yezheng@scuec.edu.cn
@file: tensorboard_profile.py.py
@time: 2021/1/23 17:18
@desc:
'''
from datetime import datetime
from packaging import version
import os
import tensorflow as tf
print("TensorFlow version: ", tf.__version__)
device_name = tf.test.gpu_device_name()
if not device_name:
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
import tensorflow_datasets as tfds
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
ds_train = ds_train.map(normalize_img)
ds_train = ds_train.batch(128)
ds_test = ds_test.map(normalize_img)
ds_test = ds_test.batch(128)
#%%
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
tf.keras.layers.Dense(128,activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=tf.keras.optimizers.Adam(0.001),
metrics=['accuracy']
)
#%%
# Create a TensorBoard callback
logs = "logs/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tboard_callback = tf.keras.callbacks.TensorBoard(log_dir = logs,
histogram_freq = 1,
profile_batch = '500,520')
model.fit(ds_train,
epochs=2,
validation_data=ds_test,
callbacks = [tboard_callback]) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
7061,
6,
201,
198,
220,
2488,
9800,
25,
11223,
487,
89,
31753,
5948,
201,
198,
220,
2488,
32057,
25,
331,
8471,
31753,
31,
1416,
518,
66,
13,
15532,
13,
31522,
... | 2.115995 | 819 |
class StorageException(Exception):
"""Base class for all storage exceptions"""
class PathExistsException(StorageException):
"""The given path already exists"""
class StorageNotSupported(StorageException):
"""Storage type not supported"""
class InvalidStoragePath(StorageException):
"""Invalid storage path given"""
| [
198,
4871,
20514,
16922,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
477,
6143,
13269,
37811,
198,
198,
4871,
10644,
3109,
1023,
16922,
7,
31425,
16922,
2599,
198,
220,
220,
220,
37227,
464,
1813,
3108,
1541,
7160,
3781... | 4.085366 | 82 |