content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""sophon python module
"""
| [
37811,
82,
48982,
21015,
8265,
198,
37811,
198
] | 3.5 | 8 |
import utils
import read
from argparser import args, need_to_compare, project_dir
settings = read.read_settings_file(f'{project_dir}/{args.project}.parameters.xml',
mel=args.mel,
recon=args.recon)
for gloss in utils.all_glosses(read.read_attested_lexicons(settings)):
print(gloss)
| [
11748,
3384,
4487,
198,
11748,
1100,
198,
6738,
1822,
48610,
1330,
26498,
11,
761,
62,
1462,
62,
5589,
533,
11,
1628,
62,
15908,
198,
198,
33692,
796,
1100,
13,
961,
62,
33692,
62,
7753,
7,
69,
6,
90,
16302,
62,
15908,
92,
14,
90,... | 2.063584 | 173 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
_SNAKE_TO_CAMEL_CASE_TABLE = {
"availability_zones": "availabilityZones",
"backend_services": "backendServices",
"beanstalk_environment_name": "beanstalkEnvironmentName",
"block_devices_mode": "blockDevicesMode",
"capacity_unit": "capacityUnit",
"cluster_id": "clusterId",
"cluster_zone_name": "clusterZoneName",
"controller_id": "controllerId",
"cpu_credits": "cpuCredits",
"desired_capacity": "desiredCapacity",
"draining_timeout": "drainingTimeout",
"ebs_block_devices": "ebsBlockDevices",
"ebs_optimized": "ebsOptimized",
"elastic_ips": "elasticIps",
"elastic_load_balancers": "elasticLoadBalancers",
"enable_monitoring": "enableMonitoring",
"ephemeral_block_devices": "ephemeralBlockDevices",
"event_type": "eventType",
"fallback_to_ondemand": "fallbackToOndemand",
"health_check": "healthCheck",
"health_check_grace_period": "healthCheckGracePeriod",
"health_check_type": "healthCheckType",
"health_check_unhealthy_duration_before_replacement": "healthCheckUnhealthyDurationBeforeReplacement",
"iam_instance_profile": "iamInstanceProfile",
"image_id": "imageId",
"instance_types_customs": "instanceTypesCustoms",
"instance_types_ondemand": "instanceTypesOndemand",
"instance_types_preemptibles": "instanceTypesPreemptibles",
"instance_types_preferred_spots": "instanceTypesPreferredSpots",
"instance_types_spots": "instanceTypesSpots",
"instance_types_weights": "instanceTypesWeights",
"integration_codedeploy": "integrationCodedeploy",
"integration_docker_swarm": "integrationDockerSwarm",
"integration_ecs": "integrationEcs",
"integration_gitlab": "integrationGitlab",
"integration_kubernetes": "integrationKubernetes",
"integration_mesosphere": "integrationMesosphere",
"integration_multai_runtime": "integrationMultaiRuntime",
"integration_nomad": "integrationNomad",
"integration_rancher": "integrationRancher",
"integration_route53": "integrationRoute53",
"ip_forwarding": "ipForwarding",
"key_name": "keyName",
"lifetime_period": "lifetimePeriod",
"load_balancers": "loadBalancers",
"low_priority_sizes": "lowPrioritySizes",
"max_size": "maxSize",
"min_size": "minSize",
"multai_target_sets": "multaiTargetSets",
"network_interfaces": "networkInterfaces",
"node_image": "nodeImage",
"od_sizes": "odSizes",
"ondemand_count": "ondemandCount",
"persist_block_devices": "persistBlockDevices",
"persist_private_ip": "persistPrivateIp",
"persist_root_device": "persistRootDevice",
"placement_tenancy": "placementTenancy",
"preemptible_percentage": "preemptiblePercentage",
"preferred_availability_zones": "preferredAvailabilityZones",
"private_ips": "privateIps",
"resource_group_name": "resourceGroupName",
"resource_id": "resourceId",
"revert_to_spot": "revertToSpot",
"scaling_down_policies": "scalingDownPolicies",
"scaling_target_policies": "scalingTargetPolicies",
"scaling_up_policies": "scalingUpPolicies",
"scheduled_tasks": "scheduledTasks",
"security_groups": "securityGroups",
"service_account": "serviceAccount",
"shutdown_script": "shutdownScript",
"spot_percentage": "spotPercentage",
"startup_script": "startupScript",
"stateful_deallocation": "statefulDeallocation",
"subnet_ids": "subnetIds",
"target_group_arns": "targetGroupArns",
"update_policy": "updatePolicy",
"user_data": "userData",
"utilize_reserved_instances": "utilizeReservedInstances",
"wait_for_capacity": "waitForCapacity",
"wait_for_capacity_timeout": "waitForCapacityTimeout",
}
_CAMEL_TO_SNAKE_CASE_TABLE = {
"availabilityZones": "availability_zones",
"backendServices": "backend_services",
"beanstalkEnvironmentName": "beanstalk_environment_name",
"blockDevicesMode": "block_devices_mode",
"capacityUnit": "capacity_unit",
"clusterId": "cluster_id",
"clusterZoneName": "cluster_zone_name",
"controllerId": "controller_id",
"cpuCredits": "cpu_credits",
"desiredCapacity": "desired_capacity",
"drainingTimeout": "draining_timeout",
"ebsBlockDevices": "ebs_block_devices",
"ebsOptimized": "ebs_optimized",
"elasticIps": "elastic_ips",
"elasticLoadBalancers": "elastic_load_balancers",
"enableMonitoring": "enable_monitoring",
"ephemeralBlockDevices": "ephemeral_block_devices",
"eventType": "event_type",
"fallbackToOndemand": "fallback_to_ondemand",
"healthCheck": "health_check",
"healthCheckGracePeriod": "health_check_grace_period",
"healthCheckType": "health_check_type",
"healthCheckUnhealthyDurationBeforeReplacement": "health_check_unhealthy_duration_before_replacement",
"iamInstanceProfile": "iam_instance_profile",
"imageId": "image_id",
"instanceTypesCustoms": "instance_types_customs",
"instanceTypesOndemand": "instance_types_ondemand",
"instanceTypesPreemptibles": "instance_types_preemptibles",
"instanceTypesPreferredSpots": "instance_types_preferred_spots",
"instanceTypesSpots": "instance_types_spots",
"instanceTypesWeights": "instance_types_weights",
"integrationCodedeploy": "integration_codedeploy",
"integrationDockerSwarm": "integration_docker_swarm",
"integrationEcs": "integration_ecs",
"integrationGitlab": "integration_gitlab",
"integrationKubernetes": "integration_kubernetes",
"integrationMesosphere": "integration_mesosphere",
"integrationMultaiRuntime": "integration_multai_runtime",
"integrationNomad": "integration_nomad",
"integrationRancher": "integration_rancher",
"integrationRoute53": "integration_route53",
"ipForwarding": "ip_forwarding",
"keyName": "key_name",
"lifetimePeriod": "lifetime_period",
"loadBalancers": "load_balancers",
"lowPrioritySizes": "low_priority_sizes",
"maxSize": "max_size",
"minSize": "min_size",
"multaiTargetSets": "multai_target_sets",
"networkInterfaces": "network_interfaces",
"nodeImage": "node_image",
"odSizes": "od_sizes",
"ondemandCount": "ondemand_count",
"persistBlockDevices": "persist_block_devices",
"persistPrivateIp": "persist_private_ip",
"persistRootDevice": "persist_root_device",
"placementTenancy": "placement_tenancy",
"preemptiblePercentage": "preemptible_percentage",
"preferredAvailabilityZones": "preferred_availability_zones",
"privateIps": "private_ips",
"resourceGroupName": "resource_group_name",
"resourceId": "resource_id",
"revertToSpot": "revert_to_spot",
"scalingDownPolicies": "scaling_down_policies",
"scalingTargetPolicies": "scaling_target_policies",
"scalingUpPolicies": "scaling_up_policies",
"scheduledTasks": "scheduled_tasks",
"securityGroups": "security_groups",
"serviceAccount": "service_account",
"shutdownScript": "shutdown_script",
"spotPercentage": "spot_percentage",
"startupScript": "startup_script",
"statefulDeallocation": "stateful_deallocation",
"subnetIds": "subnet_ids",
"targetGroupArns": "target_group_arns",
"updatePolicy": "update_policy",
"userData": "user_data",
"utilizeReservedInstances": "utilize_reserved_instances",
"waitForCapacity": "wait_for_capacity",
"waitForCapacityTimeout": "wait_for_capacity_timeout",
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.64986 | 2,856 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designateclient.v2.base import V2Controller
| [
2,
15069,
1853,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
198,
2,
6434,
25,
5268,
260,
15415,
1559,
1279,
437,
260,
13,
74,
7063,
1559,
31,
24831,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,... | 3.634021 | 194 |
import cv2
import numpy as np
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade=cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)
while True:
ret,img = cap.read()
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces=face_cascade.detectMultiScale(gray,1.3,5)
for(x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,255),2)
roi_gray=gray[y:y+h,x:x+w]
roi_color=img[y:y+h,x:x+w]
eyes=eye_cascade.detectMultiScale(roi_gray)
for(ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh),(0,255,0),2)
cv2.imshow('img',img)
k=cv2.waitKey(30) & 0xff
if k==27:
break
cap.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
2550,
62,
66,
28966,
796,
269,
85,
17,
13,
34,
28966,
9487,
7483,
10786,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
13,
19875,
11537,
201,
198,... | 1.789709 | 447 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.criterion import XgboostCriterion
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.node import Node
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.decision_tree import DecisionTree
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import SplitInfo
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.splitter import Splitter
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import FeatureHistogram
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.feature_histogram import HistogramBag, FeatureHistogramWeights
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_host import HeteroDecisionTreeHost
from federatedml.ensemble.basic_algorithms.decision_tree.hetero.hetero_decision_tree_guest import HeteroDecisionTreeGuest
from federatedml.ensemble.boosting.hetero.hetero_secureboost_guest import HeteroSecureBoostingTreeGuest
from federatedml.ensemble.boosting.hetero.hetero_secureboost_host import HeteroSecureBoostingTreeHost
from federatedml.ensemble.boosting.hetero.hetero_fast_secureboost_guest import HeteroFastSecureBoostingTreeGuest
from federatedml.ensemble.boosting.hetero.hetero_fast_secureboost_host import HeteroFastSecureBoostingTreeHost
from federatedml.ensemble.boosting.homo.homo_secureboosting_aggregator import SecureBoostClientAggregator, \
SecureBoostArbiterAggregator,\
DecisionTreeClientAggregator, DecisionTreeArbiterAggregator
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_client import HomoDecisionTreeClient
from federatedml.ensemble.basic_algorithms.decision_tree.homo.homo_decision_tree_arbiter import HomoDecisionTreeArbiter
from federatedml.ensemble.boosting.homo.homo_secureboost_client import HomoSecureBoostingTreeClient
from federatedml.ensemble.boosting.homo.homo_secureboost_arbiter import HomoSecureBoostingTreeArbiter
__all__ = ["Node",
"HeteroDecisionTreeHost", "HeteroDecisionTreeGuest", "Splitter",
"FeatureHistogram", "XgboostCriterion", "DecisionTree", 'SplitInfo',
"HomoDecisionTreeClient", "HomoDecisionTreeArbiter", "SecureBoostArbiterAggregator", "SecureBoostClientAggregator"
, "DecisionTreeArbiterAggregator", 'DecisionTreeClientAggregator',
"HeteroSecureBoostingTreeGuest", "HeteroSecureBoostingTreeHost", "HomoSecureBoostingTreeArbiter",
"HomoSecureBoostingTreeClient", "HistogramBag", "FeatureHistogramWeights",
"HeteroFastSecureBoostingTreeGuest", "HeteroFastSecureBoostingTreeHost"]
| [
2,
198,
2,
220,
15069,
13130,
383,
376,
6158,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
... | 3.07907 | 1,075 |
from ophyd import EpicsSignal
from ..CommonFunctions.functions import run_report
from ..Base.mirrors import HexapodMirror, FMBHexapodMirror
run_report(__file__)
mir2_type = EpicsSignal(
"XF:07ID1-OP{Mono:PGM1-Ax:MirX}Mtr_TYPE_MON", name="SST 1 Mirror 2 Stripe"
)
mir4OLD = HexapodMirror(
"XF:07ID2-OP{Mir:M4CD-Ax:", name="SST 1 Mirror 4", kind="hinted"
)
mir3OLD = HexapodMirror(
"XF:07ID1-OP{Mir:M3ABC-Ax:", name="SST 1 Mirror 3", kind="hinted"
)
mir1OLD = HexapodMirror("XF:07IDA-OP{Mir:M1-Ax:", name="SST 1 Mirror 1", kind="hinted")
mir4 = FMBHexapodMirror(
"XF:07ID2-OP{Mir:M4CD", name="SST 1 Mirror 4 fmb", kind="hinted"
)
mir3 = FMBHexapodMirror(
"XF:07ID1-OP{Mir:M3ABC", name="SST 1 Mirror 3 fmb", kind="hinted"
)
mir1 = FMBHexapodMirror("XF:07IDA-OP{Mir:M1", name="SST 1 Mirror 1 fmb", kind="hinted")
| [
6738,
267,
746,
5173,
1330,
4551,
873,
11712,
282,
198,
198,
6738,
11485,
17227,
24629,
2733,
13,
12543,
2733,
1330,
1057,
62,
13116,
198,
6738,
11485,
14881,
13,
10793,
5965,
1330,
22212,
499,
375,
27453,
1472,
11,
376,
10744,
39,
1069... | 2.0875 | 400 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Telegram bot @RaspberyPi3Bot
"""
"""
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, Job
import telegram
import logging
import pickle
import os
from subprocess import call
import datetime
import sys
if not 'win' in sys.platform.lower():
import RPi.GPIO as GPIO
import Adafruit_DHT
HOME_PATH='/home/pi/myscripts/'
GPIO.setmode(GPIO.BCM)
#DHT
#Adafruit: instructions and wiring
#https://learn.adafruit.com/dht-humidity-sensing-on-raspberry-pi-with-gdocs-logging/overview
DHT_SENSOR_NAME=Adafruit_DHT.AM2302
DHT_GPIO_PIN='26'
GPIO_PIN_MQ2 = 23
GPIO_PIN_YL_69 = 25
GPIO_PIN_HC_SR501 = 14 #Associate pin 26 to pir
#LED
LED_ENABLE = 1
LED_DISABLE = 0
RGB_BLUE = 17
else:
HOME_PATH='.\\'
TOKEN='111111111:QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ'
LOG_NAME=HOME_PATH+'pibot.log'
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
if __name__ == '__main__':
rbot = RaspberrySensorsBot()
rbot.activate()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
201,
198,
2,
50203,
10214,
2488,
49,
5126,
13001,
38729,
18,
20630,
201,
198,
201,
198,
37811,
201,
198,... | 2.048137 | 644 |
import re
from src.util import merge_params, unparsed_to_map
import subprocess
from termcolor import colored
from src.config import get_config, persist_config
import click
import tempfile
from pathlib import Path
pathDataRe = re.compile(r"path\.data\s?=", re.IGNORECASE)
@click.command(context_settings=dict(
ignore_unknown_options=True,
))
@click.option('--data-dir', '-d', type=click.STRING, default="esdata", help="Path where this elastic search will store its data (path.data)")
@click.option('--no-persist', '-n', default=False, is_flag=True, help="If passed will use a disposable data dir. This option will overwrite other options related to data dir.")
@click.option('--save-config', default=False, is_flag=True, help="If passed it will write your kibbe configuration with all the current passed parameters. This will not modify your kibana repo clone.")
@click.option('-E', multiple=True, help="Additional options to pass to elastic search. `path.data` will be ignored")
@click.argument('unparsed_args', nargs=-1, type=click.UNPROCESSED)
def es(data_dir, no_persist, e, unparsed_args, save_config):
"""
Runs elastic search from the current kibana clone.
You can also pass the same parameters as you'd pass to `node scritps/es`
You can persist the -E parameters by using a configuration file `~/.kibbe`.
with the [elastic.params] section.
See more about the configuration file here:
https://github.com/academo/kibbe#configuration-file
"""
e_params = process_params(data_dir, no_persist)
# additional -E params
for item in e:
item = item.strip()
# ignore path.data
if pathDataRe.match(item):
continue
e_params.append(item)
params = []
config = get_config()
config_params = []
if 'elastic.params' in config:
config_params = config.items('elastic.params', raw=True)
params = merge_params(config_params, unparsed_args)
if save_config:
persist_config({
'elastic.eparams': e_params,
'elastic.params': unparsed_to_map(params)
})
exit()
command = get_command(e_params, extra_params=params)
click.echo("Will run elastic search as: " + colored(' '.join(command), 'yellow'))
subprocess.run(command)
| [
11748,
302,
198,
6738,
12351,
13,
22602,
1330,
20121,
62,
37266,
11,
8593,
945,
276,
62,
1462,
62,
8899,
198,
11748,
850,
14681,
198,
198,
6738,
3381,
8043,
1330,
16396,
198,
6738,
12351,
13,
11250,
1330,
651,
62,
11250,
11,
21160,
62... | 2.78726 | 832 |
# coding=utf-8
import socket
import select
s = socket.socket()
# host = socket.gethostname()
host = '127.0.0.1'
port = 1234
s.bind((host, port))
s.listen(5)
inputs = [s]
while True:
rs, ws, es = select.select(inputs, [], [], 5) # select函数
for r in rs:
if r is s:
c, address = s.accept()
print('Got connection from', address)
inputs.append(c)
else:
try:
data = r.recv(1024)
disconnected = not data
except socket.error:
disconnected = True
if disconnected:
print(r.getpeername(), 'disconnected')
inputs.remove(r)
else:
print(data)
# ### 脚本说明
# 实现功能:打印所有来自客户端的数据;
# - 模块select的select函数实现异步I/O;
# - 只处理当前正在通信的客户端,监听后将客户端加入队列;
# - 客户端程序可使用:Chapter17_Socket04_Client.py;
#
# ### select.select()
# - 第1个必选参数:序列,需要输入的连接;
# - 第2个必选参数:序列,需要输出的连接;
# - 第3个必选参数:序列,发生异常(错误)的连接;
# - 第4个可选参数:超时时间(单位为秒),如果不指定,select将阻断(等待)直到准备就绪,如果为零,select将不断轮询(不阻断);
# 返回一个元组(包含3个序列),每个序列都包含相应参数中处于活动状态的文件描述符;
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
17802,
198,
11748,
2922,
198,
198,
82,
796,
17802,
13,
44971,
3419,
198,
198,
2,
2583,
796,
17802,
13,
1136,
4774,
3672,
3419,
198,
4774,
796,
705,
16799,
13,
15,
13,
15,
13,
16,
6,
198,
... | 1.151261 | 952 |
import numpy as np
import random
from keras.models import Sequential, Model
from keras.layers import Dense, LSTM, Dropout, merge, Input
from keras.layers.embeddings import Embedding
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint
import cPickle
# Create data matrices and labels list from processed data tuples
# load the processed Quora dataset
with open("data/data_tuples_glovem.p", "rb") as f:
pre_data_tuples = cPickle.load(f)
print "Loaded the data tuples"
data_tuples = []
for tup in pre_data_tuples:
if len(tup[0].split())==0 or len(tup[1].split())==0:
continue
data_tuples.append(tup)
print "Removed pairs with empty sentences. Remaining num. of data tuples ", len(data_tuples)
# Load glove vector dict (only for the needed words)
with open("data/needed_glovem_dict.p", "rb") as f:
glove_dict = cPickle.load(f)
print "Loaded the Glove dictionary for necessary words"
glove_dim = glove_dict['the'].shape[0]
total_num_words = 80419 # Pass this from analyze_data, instead of hardcoding.
# Initialize embedding matrix with each entry sampled uniformly at random between -1.0 and 1.0
init_glove_matrix = np.random.uniform(-1.0, 1.0, size=(total_num_words+1, glove_dim))
print "Initialized glove matrix with uniform. Will overwrite known vectors in it now"
# First create a dictionary from word to idx (for all distinct words)
word_to_id = {}
max_sentence_len = 0
sentence_lengths = []
curr_id = 1 # Start with 1, since 0 is used for <none> token (i.e., padding sentences to get to max length)
words_in_order = []
for tup in data_tuples:
s1 = tup[0]
s2 = tup[1]
# Update max_sentence_len as necessary
if len(s1.split()) > max_sentence_len:
max_sentence_len = len(s1.split())
if len(s2.split()) > max_sentence_len:
max_sentence_len = len(s2.split())
sentence_lengths.append(len(s1.split()))
sentence_lengths.append(len(s2.split()))
for word in s1.split():
if not (word in word_to_id):
word_to_id[word] = curr_id
if word in glove_dict:
init_glove_matrix[curr_id] = glove_dict[word]
curr_id += 1
for word in s2.split():
if not (word in word_to_id):
word_to_id[word] = curr_id
if word in glove_dict:
init_glove_matrix[curr_id] = glove_dict[word]
curr_id += 1
print "Max sentence length in data ", max_sentence_len
sentence_lengths = np.array(sentence_lengths)
print "Num more than 50 ", np.sum(sentence_lengths>=50)
print "Num more than 60 ", np.sum(sentence_lengths>=60)
if max_sentence_len > 60:
max_sentence_len = 60 # Can change the choice of this. This is a free parameter too.
# Train, Test lists creation. Test here is technically more like Validation
X_train_1 = []
X_train_2 = []
y_train = []
X_test_1 = []
X_test_2 = []
y_test = []
train_pc = 0.8
num_train = int(np.ceil(train_pc*len(data_tuples)))
random.seed(186) # Fixing random seed for reproducibility
random.shuffle(data_tuples)
# TRAIN - TEST SPLIT OF THE TUPLES
train_data_tuples = data_tuples[0: num_train]
test_data_tuples = data_tuples[num_train:]
print "Num of training examples ", len(train_data_tuples)
X_train_1, X_train_2, y_train = create_data_matrices(train_data_tuples)
X_test_1, X_test_2, y_test = create_data_matrices(test_data_tuples)
print "Created Training and Test Matrices, and corresponding label vectors"
# create the model
embedding_vecor_length = 300
num_vocab = total_num_words + 1 # since the <none> token is extra
model = Sequential()
model.add(Embedding(input_dim=num_vocab, output_dim=embedding_vecor_length, weights=[init_glove_matrix]))
model.add(LSTM(100, dropout_W=0.5, dropout_U=0.5))
print "Done building core model"
# Inputs to Full Model
input_dim = max_sentence_len
input_1 = Input(shape=(input_dim,))
input_2 = Input(shape=(input_dim,))
# Send them through same model (weights will be thus shared)
processed_1 = model(input_1)
processed_2 = model(input_2)
print "Going to merge the two branches at model level"
merged = merge([processed_1, processed_2], mode='concat')
# Add an FC layer before the Clf layer (non-lin layer after the lstm 'thought vecs' concatenation)
merged_fc = Dense(100, activation='relu', W_regularizer=l2(0.0001), b_regularizer=l2(0.0001), name='merged_fc')(merged)
merged_fc_drop = Dropout(0.4)(merged_fc) # Prevent overfitting at the fc layer
main_output = Dense(1, activation='sigmoid', name='main_output')(merged_fc_drop)
full_model = Model( input=[input_1, input_2], output=main_output )
full_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(full_model.summary())
#saves the model weights after each epoch if the validation loss decreased
checkpointer = ModelCheckpoint(filepath="models/weights-{epoch:02d}-{val_loss:.2f}.hdf5",monitor='val_acc', verbose=1, save_best_only=False)
full_model.fit( [X_train_1, X_train_2], y_train, validation_data=([X_test_1, X_test_2], y_test), nb_epoch=12, batch_size=128, verbose=1, callbacks=[checkpointer])
# Final evaluation of the model
scores = full_model.evaluate( [X_test_1, X_test_2], y_test, verbose=1)
print("Accuracy: %.2f%%" % (scores[1]*100))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
11,
9104,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
406,
2257,
44,
11,
14258,
448,
11,
20121,
11,
23412,
220,
198,
... | 2.714133 | 1,868 |
"""Classifier for ARDA.
guarantee learned domain-invariant representations are discriminative enough
to accomplish the final classification task
"""
import torch.nn.functional as F
from torch import nn
class Classifier(nn.Module):
"""LeNet classifier model for ARDA."""
def __init__(self):
"""Init LeNet encoder."""
super(Classifier, self).__init__()
self.fc2 = nn.Linear(500, 10)
def forward(self, feat):
"""Forward the LeNet classifier."""
out = F.dropout(F.relu(feat), training=self.training)
out = self.fc2(out)
return out
| [
37811,
9487,
7483,
329,
5923,
5631,
13,
198,
198,
5162,
4741,
1453,
4499,
7386,
12,
16340,
2743,
415,
24612,
389,
6534,
259,
876,
1576,
198,
1462,
9989,
262,
2457,
17923,
4876,
198,
37811,
198,
198,
11748,
28034,
13,
20471,
13,
45124,
... | 2.647577 | 227 |
# encoding: utf-8
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import escape
import re
register = template.Library()
@register.filter
@register.filter
@register.filter
def wikilink(value):
"""
Produce wiki style links to other pages within the database, for use in
comments fields: {{ a_note|wikilink|truncatewords_html:5 }}
Note that it's better to use truncatewords_html with this filter, rather
than plain truncatewords
"""
WIKILINK_RE = re.compile(r"""
(?P<lead>\s|^) # possible leading whitespace
(?P<wikilink>/ # an initial /
(\w+/)+ # multiples of any number of identifier chars + /
)
""",
re.VERBOSE)
return mark_safe(WIKILINK_RE.sub(wikilink_sub_callback, value))
@register.filter
def asint(value):
"""Try to convert the value to an integer"""
try:
return int(value)
except TypeError:
return value
@register.filter
def starrating(key):
"""Convert the A|B|C|X|L reliability rating to an image"""
ratings = {"A": u"★★★",
"B": u"★★",
"C": u"★",
"X": "X",
"L": "L"}
return ratings[key]
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
26791,
13,
49585,
395,
1806,
1330,
1317,
62,
21230,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
6654,
198,
11748,
302,
198,
198... | 2.258348 | 569 |
from notebooks.computer_science.high_performance_python.profiling.utils_timing import timefn
@timefn
@timefn
for i in range(1, 10000):
pass
for i in xrange(1, 10000):
pass
| [
6738,
43935,
13,
33215,
62,
16801,
13,
8929,
62,
26585,
62,
29412,
13,
5577,
4386,
13,
26791,
62,
16514,
278,
1330,
640,
22184,
198,
198,
31,
2435,
22184,
198,
198,
31,
2435,
22184,
628,
198,
1640,
1312,
287,
2837,
7,
16,
11,
33028,... | 2.776119 | 67 |
# Create your views here.
from django.views.generic.list import ListView
from django_gotolong.amfi.models import Amfi
from django_gotolong.bhav.models import Bhav
from django_gotolong.corpact.models import Corpact
from django_gotolong.dematsum.models import DematSum
from django_gotolong.demattxn.models import DematTxn
from django_gotolong.ftwhl.models import Ftwhl
from django_gotolong.gfundareco.models import Gfundareco
from django_gotolong.gweight.models import Gweight
from django_gotolong.trendlyne.models import Trendlyne
from django.db.models import (OuterRef, Subquery, ExpressionWrapper, F, IntegerField, Count, Q)
from django_gotolong.jsched.tasks import jsched_task_bg, jsched_task_daily
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# from django_gotolong.ftwhl.views import ftwhl_fetch
# Insufficient data
| [
2,
13610,
534,
5009,
994,
13,
198,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
4868,
1330,
7343,
7680,
198,
198,
6738,
42625,
14208,
62,
23442,
349,
506,
13,
321,
12463,
13,
27530,
1330,
1703,
12463,
198,
198,
6738,
42625,
1420... | 2.891975 | 324 |
# Characters
# Write a Python program to create
# a dictionary from a string where
# the keys are the characters
# and the values are the frequency
# of the characters.
#
# Your program should ask the user
# to enter a string, convert it into
# the dictionary described above,
# and print the resulting dictionary.
#
# Example output:
# Enter a string: hello244oh
# {'h': 2, 'e': 1, 'l': 2, 'o': 2, '2': 1, '4': 2}
characters = {}
string = input("Enter a string: ")
for char in string:
if char in characters:
characters[char] += 1
else:
characters[char] = 1
print(characters)
| [
2,
26813,
198,
2,
19430,
257,
11361,
1430,
284,
2251,
198,
2,
257,
22155,
422,
257,
4731,
810,
198,
2,
262,
8251,
389,
262,
3435,
198,
2,
290,
262,
3815,
389,
262,
8373,
198,
2,
286,
262,
3435,
13,
198,
2,
198,
2,
3406,
1430,
... | 3.045455 | 198 |
"""
Create 3d elevation plots of model runs and then animate them
"""
from LSDPlottingTools import LSDMap_GDALIO as IO
import matplotlib.pyplot as plt
from mayavi import mlab
from mayavi.modules.grid_plane import GridPlane
from matplotlib import cm
import numpy as np
import os
from sys import platform
import sys
from glob import glob
def animate_plots(base_directory, fname_prefix):
"""
This function creates a movie of the plots using ffmpeg
Args:
base_directory (str): the directory with the plots.
fname_prefix (str): the filename for the model run
Returns:
none but creates mp4 from pngs in base directory
Author: FJC
"""
import subprocess
# animate the pngs using ffmpeg
system_call = "ffmpeg -framerate 5 -pattern_type glob -i '"+base_directory+"*.png' -vcodec libx264 -s 1000x1000 -pix_fmt yuv420p "+base_directory+fname_prefix+"_movie.mp4"
print(system_call)
subprocess.call(system_call, shell=True)
#=============================================================================
# This is just a welcome screen that is displayed if no arguments are provided.
#=============================================================================
#=============================================================================
# This is the main function that runs the whole thing
#=============================================================================
def main(argv):
"""
This is just a few lines for keeping track of how long the program is taking.
You can ignore it.
"""
import time
tic = time.clock()
# If there are no arguments, send to the welcome screen
if not len(sys.argv) > 1:
full_paramfile = print_welcome()
sys.exit()
# Get the arguments
import argparse
parser = argparse.ArgumentParser()
# The location of the data files
parser.add_argument("-dir", "--base_directory", type=str, help="The base directory with the hillshades. If this isn't defined I'll assume it's the same as the current directory.")
parser.add_argument("-fname", "--fname_prefix", type=str, help="The base file name of the hillshades.")
parser.add_argument("-animate", "--animate", type=bool, default=False, help="If this is true I'll create a movie of the model run.")
parser.add_argument("-zmax", "--maximum_elevation_for_plotting", type=float, default = 400, help="This is the maximum elevation in the colourbar of the landscape plot.")
args = parser.parse_args()
run_plots(args.base_directory,args.fname_prefix)
if (args.animate):
animate_plots(args.base_directory, args.fname_prefix)
toc = time.clock()
print("This took: "+str(toc - tic)+" units of time")
#=============================================================================
if __name__ == "__main__":
main(sys.argv[1:])
| [
37811,
198,
16447,
513,
67,
22910,
21528,
286,
2746,
4539,
290,
788,
43828,
606,
198,
37811,
628,
198,
6738,
27483,
43328,
889,
33637,
1330,
27483,
13912,
62,
45113,
1847,
9399,
355,
24418,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
... | 3.312212 | 868 |
# Apache License Version 2.0
#
# Copyright (c) 2021., Redis Labs Modules
# All rights reserved.
#
import csv
import json
import logging
import datetime
import redis
from redistimeseries.client import Client
from redisbench_admin.export.common.common import split_tags_string
from redisbench_admin.run.git import git_vars_crosscheck
from redisbench_admin.run.redistimeseries import timeseries_test_sucess_flow
from redisbench_admin.utils.benchmark_config import (
get_defaults,
parse_exporter_timemetric,
)
from redisbench_admin.utils.remote import get_ts_tags_and_name
| [
2,
220,
24843,
13789,
10628,
362,
13,
15,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
33448,
1539,
2297,
271,
23500,
3401,
5028,
198,
2,
220,
1439,
2489,
10395,
13,
198,
2,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
18931,
... | 3.111702 | 188 |
# Completion-policy flags selecting when a multi-task wait should return.
# NOTE(review): semantics inferred from the names (cf. concurrent.futures'
# ALL_COMPLETED / FIRST_COMPLETED) — confirm against the call sites.
ALL_COMPLETED = 1
ANY_COMPLETED = 2
ALWAYS = 3
| [
7036,
62,
41335,
36493,
796,
352,
198,
31827,
62,
41335,
36493,
796,
362,
198,
1847,
42451,
796,
513,
198
] | 2.473684 | 19 |
from typing import List
import numpy as np
def calculate_dice(
    true_positives: np.ndarray,
    false_positives: np.ndarray,
    false_negatives: np.ndarray,
) -> np.ndarray:
    """
    Calculate elementwise Dice coefficients.

    Args:
        true_positives: true positives numpy tensor
        false_positives: false positives numpy tensor
        false_negatives: false negatives numpy tensor

    Returns:
        np.ndarray: dice score per element, always within (0; 1]

    Raises:
        ValueError: if any score falls outside the (0; 1] interval
    """
    # Additive smoothing keeps the ratio defined (and strictly positive)
    # when all three counts are zero.
    epsilon = 1e-7
    dice = (2 * true_positives + epsilon) / (
        2 * true_positives + false_positives + false_negatives + epsilon
    )
    if not np.all(dice <= 1):
        raise ValueError("Dice index should be less or equal to 1")
    if not np.all(dice > 0):
        # Fixed message: this guard checks the LOWER bound; the original
        # message wrongly said "more than 1".
        raise ValueError("Dice index should be greater than 0")
    return dice
def get_default_topk_args(num_classes: int) -> List[int]:
    """Calculate list params for ``Accuracy@k`` and ``mAP@k``.

    Examples:
        >>> get_default_topk_args(num_classes=4)
        >>> [1, 3]
        >>> get_default_topk_args(num_classes=8)
        >>> [1, 3, 5]

    Args:
        num_classes (int): number of classes

    Returns:
        iterable: array of accuracy arguments
    """
    topk_args = [1]
    if num_classes is None:
        return topk_args
    # Widen the top-k list only when there are enough classes for the
    # extra cutoffs to be meaningful.
    for min_classes, k in ((3, 3), (5, 5)):
        if num_classes > min_classes:
            topk_args.append(k)
    return topk_args
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
15284,
62,
67,
501,
7,
198,
220,
220,
220,
2081,
62,
1930,
20288,
25,
45941,
13,
18747,
11,
198,
220,
220,
220,
3991,
62,
1930,
20288,
25,
45941,
13,
... | 2.353896 | 616 |
from eventstore_grpc.operations.merge_indexes import merge_indexes
from eventstore_grpc.operations.resign_node import resign_node
from eventstore_grpc.operations.restart_persistent_subscritions import (
restart_persistent_subscriptions,
)
from eventstore_grpc.operations.set_node_priority import set_node_priority
from eventstore_grpc.operations.shutdown import shutdown
from eventstore_grpc.operations.start_scavenge import start_scavenge
from eventstore_grpc.operations.stop_scavenge import stop_scavenge
| [
6738,
1785,
8095,
62,
2164,
14751,
13,
3575,
602,
13,
647,
469,
62,
9630,
274,
1330,
20121,
62,
9630,
274,
198,
6738,
1785,
8095,
62,
2164,
14751,
13,
3575,
602,
13,
411,
570,
62,
17440,
1330,
10931,
62,
17440,
198,
6738,
1785,
8095... | 3.213836 | 159 |
import pytest
from base.webdriverfactory import WebDriverFactory
from pages.home.login_page import LoginPage
@pytest.yield_fixture()
@pytest.yield_fixture(scope="class")
@pytest.fixture(scope="session")
@pytest.fixture(scope="session") | [
11748,
12972,
9288,
198,
6738,
2779,
13,
12384,
26230,
69,
9548,
1330,
5313,
32103,
22810,
198,
6738,
5468,
13,
11195,
13,
38235,
62,
7700,
1330,
23093,
9876,
198,
198,
31,
9078,
9288,
13,
88,
1164,
62,
69,
9602,
3419,
198,
198,
31,
... | 2.950617 | 81 |
import contextlib
import json
import os
import sys
import urllib
import urllib2
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
# ------------------------------------------------------------------------- #
API_KEY = "8e635b5992d237d6"
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
# ------------------------------------------------------------------------- #
# http://www.wunderground.com/weather/api/d/docs
# http://www.wunderground.com/weather/api/d/docs?d=data/hourly10day
# http://api.wunderground.com/api/8e635b5992d237d6/hourly10day/q/CA/San_Francisco.json
# e.g.
# http://api.wunderground.com/api/8e635b5992d237d6/conditions/q/CA/San_Francisco.json
def make_location(city=None, state=None, country=None,
                  zipcode=None,
                  lat=None, lng=None,
                  airport=None,
                  pws_id=None,
                  autoip=False,
                  ip_address=None):
    """
    Build a Weather Underground location query string.

    Accepted forms (first match wins, checked in this order):
        autoip                     -> "autoip"
        zipcode                    -> "60290"
        pws_id                     -> "pws:KCASANFR70"
        airport                    -> "KJFK"
        ip_address                 -> "autoip.json?geo_ip=38.102.136.138"
        lat + lng                  -> "37.8,-122.4"
        city (+country or +state)  -> "Australia/Sydney" / "CA/San_Francisco"
        nothing at all             -> "NY/New_York" (default)

    Raises ValueError when only ``state`` is given (no city to pair it with).
    """
    if autoip:
        return "autoip"
    # These identifiers are used verbatim as the query string.
    for verbatim in (zipcode, pws_id, airport):
        if verbatim:
            return verbatim
    if ip_address:
        return "autoip.json?geo_ip={0}".format(ip_address)
    if lat and lng:
        return "{0},{1}".format(lat, lng)
    if city:
        # A country qualifier takes precedence over a state qualifier.
        if country:
            return "/".join((country, city))
        if state:
            return "/".join((state, city))
        # Bare city: the API resolves ambiguous names via its "results"
        # array, whose "l" links can be used to construct wunderground URLs.
        return city
    if not state:
        # Nothing usable supplied: fall back to the default location.
        return "NY/New_York"
    raise ValueError("Not enough information to make a location request")
def request(key,
            features="hourly10day",
            location="NY/New_York",
            fmt="json",
            settings=None):
    """
    Build a Weather Underground API request URL.

    Args:
        key: API key issued by wunderground.com.
        features: one or more feature names joined by "/", e.g.
            "conditions", "forecast", "forecast10day", "hourly",
            "hourly10day", "astronomy", "almanac", "alerts", "tide",
            "rawtide", "history_YYYYMMDD", "planner_MMDDMMDD",
            "geolookup", "webcams", "yesterday", "currenthurricane".
        location: location query, e.g. "CA/San_Francisco" (state/city),
            "60290" (zipcode), "Australia/Sydney" (country/city),
            "37.8,-122.4" (lat,lng), "KJFK" (airport), "pws:KCASANFR70"
            (PWS id), "autoip", or "autoip.json?geo_ip=..." for a
            specific IP address.
        fmt: output format, "json" or "xml". For JSONP append
            ?callback=your_js_callback_function to the request URL.
        settings: optional dict of API settings rendered as "/key:value"
            path segments, e.g. {"lang": "FR", "pws": 0, "bestfct": 1}.

    Returns:
        The full request URL as a string.
    """
    if not settings:
        settings = ""
    else:
        # Render each setting as a "/key:value" path segment.
        # Fix: use .items() — .iteritems() is Python-2-only and crashes on
        # Python 3; .items() behaves identically on both.
        # (Loop variable renamed so it no longer shadows the `key` argument.)
        settings = "".join("/{key}:{value}".format(key=setting, value=value)
                           for (setting, value) in settings.items())
    return "http://api.wunderground.com/api/{key}/{features}{settings}/q/{location}.{fmt}".format(
        key=key,
        features=features,
        settings=settings,
        location=location,
        fmt=fmt)
# ------------------------------------------------------------------------- #
# ------------------------------------------------------------------------- #
# WSGI entry point: a single route mapping "/" to MainPage.
# NOTE(review): MainPage is not visible in this chunk — confirm it is defined
# earlier in the file. debug=True exposes tracebacks; disable in production.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ], debug=True)
# ------------------------------------------------------------------------- #
| [
11748,
4732,
8019,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2956,
297,
571,
198,
11748,
2956,
297,
571,
17,
198,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
2985,
198,
6738,
23645,
13,
1324,
18392,
13... | 2.811182 | 2,039 |
#!/user/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'miaoChenLiang'
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# import++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# ↓++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
from UI.GUIImport import *
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# import++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# ↓++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# def mousePressEvent(self, event):
# self.parent().btnClick(self.ID)
| [
2,
48443,
7220,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
705,
76,
13481,
34,
831,
43,
15483,
6,
198,
198,
2,
1343,
44627,
44627,
44627,
44627,
44627,
44627,
... | 5.75625 | 160 |
import array
from samples.abstract_song import Song
import time
| [
11748,
7177,
198,
6738,
8405,
13,
397,
8709,
62,
34050,
1330,
10940,
198,
11748,
640,
198
] | 4 | 16 |
# (c) 2020 Kazuki KOHZUKI
import cpu
from utils import *
# A 4x4x4x4 board: 4 layers, each a 4x4 grid whose cells are length-4 lists,
# all initialised to zero. (The original built the literal and then
# transposed each layer with zip(*W); for an all-zero board that is
# equivalent to constructing it directly.) Every innermost list is a
# distinct object, so cells can be mutated independently.
BOARD = [
    [[[0, 0, 0, 0] for _col in range(4)] for _row in range(4)]
    for _layer in range(4)
]

if __name__ == '__main__':
    main()
| [
198,
2,
357,
66,
8,
12131,
16385,
11308,
509,
12096,
57,
15039,
40,
198,
198,
11748,
42804,
198,
6738,
3384,
4487,
1330,
1635,
198,
198,
8202,
9795,
796,
16410,
198,
197,
197,
30109,
657,
11,
220,
657,
11,
220,
657,
11,
220,
657,
... | 1.504644 | 969 |
# Read two values from stdin and print them before and after swapping.
first = input()
second = input()

report = [
    "Before:",
    f"a = {first}",
    f"b = {second}",
    "After:",
    f"a = {second}",
    f"b = {first}",
]
print("\n".join(report))
| [
22510,
16,
796,
5128,
3419,
198,
22510,
17,
796,
5128,
3419,
198,
77,
5700,
796,
685,
22510,
16,
11,
997,
17,
60,
198,
198,
20274,
796,
10148,
198,
20274,
15853,
705,
8421,
7479,
77,
6,
198,
20274,
15853,
277,
6,
64,
796,
1391,
77... | 2.108108 | 111 |
import shutil
import numpy as np
from scp import SCP
import robots
import yaml
import argparse
import subprocess
import time
import random
import copy
import shutil
from collections import defaultdict
import tempfile
from pathlib import Path
import msgpack
import sys
import os
sys.path.append(os.getcwd())
import main_scp
import main_komo
import gen_motion_primitive
from motionplanningutils import RobotHelper
import checker
# ./dbastar -i ../benchmark/dubins/kink_0.yaml -m motions.yaml -o output.yaml --delta 0.3
# Script entry point.
# NOTE(review): main() is not visible in this chunk — presumably defined
# above in the full file; confirm before relying on this guard.
if __name__ == '__main__':
    main()
| [
11748,
4423,
346,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
79,
1330,
17527,
198,
11748,
14193,
198,
11748,
331,
43695,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
4866,
198,
... | 3.134831 | 178 |
# Smoke tests for rearrange_digits (defined elsewhere in this file).
# Each call prints the result; the comment below each call is the
# expected output.
print("Test-1------------------------")
print(rearrange_digits([4, 6, 2, 5, 9, 8]))
# expected: [964, 852]
print(rearrange_digits([1, 2, 3, 4, 5]))
# expected: [42, 531]
print("Test-2------------------------")
print(rearrange_digits([]))
# expected: At least 2 numbers required
print("Test-3------------------------")
print(rearrange_digits([1]))
# expected: At least 2 numbers required
| [
220,
220,
198,
198,
4798,
7203,
14402,
12,
16,
22369,
4943,
198,
4798,
7,
260,
3258,
858,
62,
12894,
896,
26933,
19,
11,
718,
11,
362,
11,
642,
11,
860,
11,
807,
60,
4008,
198,
2,
685,
24,
2414,
11,
807,
4309,
60,
198,
4798,
7... | 2.70229 | 131 |
#!/usr/bin/env python
"""
Analyze Merfish data
========================
This tutorial shows how to apply Squidpy for the analysis of Merfish data.
The data used here was obtained from :cite:`Moffitt2018-me`.
We provide a pre-processed subset of the data, in :class:`anndata.AnnData` format.
For details on how it was pre-processed, please refer to the original paper.
.. seealso::
See :ref:`sphx_glr_auto_tutorials_tutorial_slideseqv2.py` and
:ref:`sphx_glr_auto_tutorials_tutorial_seqfish.py` for additional analysis examples.
Import packages & data
----------------------
To run the notebook locally, create a conda environment as *conda env create -f environment.yml* using this
`environment.yml <https://github.com/theislab/squidpy_notebooks/blob/master/environment.yml>`_.
"""
import scanpy as sc
import squidpy as sq

sc.logging.print_header()
print(f"squidpy=={sq.__version__}")
# load the pre-processed dataset (subset of Moffitt et al. 2018 MERFISH data)
adata = sq.datasets.merfish()
adata
###############################################################################
# This datasets consists of consecutive slices from the mouse hypothalamic preoptic region.
# It represents an interesting example of how to work with 3D spatial data in Squidpy.
# Let's start with visualization: we can either visualize the 3D stack of slides
# using :func:`scanpy.pl.embedding`:
sc.pl.embedding(adata, basis="spatial3d", projection="3d", color="Cell_class")
###############################################################################
# Or visualize a single slide with :func:`scanpy.pl.spatial`. Here the slide identifier
# is stored in `adata.obs["Bregma"]`, see original paper for definition.
sc.pl.spatial(adata[adata.obs.Bregma == -9], color="Cell_class", spot_size=0.01)
###############################################################################
# Neighborhood enrichment analysis in 3D
# --------------------------------------
# It is important to consider whether the analysis should be performed on the 3D
# spatial coordinates or the 2D coordinates for a single slice. Functions that
# make use of the spatial graph can already support 3D coordinates, but it is important
# to consider that the z-stack coordinate is in the same unit metrics as the x, y coordinates.
# Let's start with the neighborhood enrichment score. You can read more on the function
# in the docs at :ref:`sphx_glr_auto_examples_graph_compute_spatial_neighbors.py`.
# First, we need to compute a neighbor graph with :func:`squidpy.gr.spatial_neighbors`.
# If we want to compute the neighbor graph on the 3D coordinate space,
# we need to specify ``spatial_key = "spatial3d"``.
# Then we can use :func:`squidpy.gr.nhood_enrichment` to compute the score, and visualize
# it with :func:`squidpy.pl.nhood_enrichment`.
sq.gr.spatial_neighbors(adata, coord_type="generic", spatial_key="spatial3d")
sq.gr.nhood_enrichment(adata, cluster_key="Cell_class")
sq.pl.nhood_enrichment(adata, cluster_key="Cell_class", method="single", cmap="inferno", vmin=-50, vmax=100)
###############################################################################
# We can visualize some of the co-enriched clusters with :func:`scanpy.pl.embedding`.
# We will set `na_color=(1, 1, 1, 0)` to make transparent the other observations,
# in order to better visualize the clusters of interests across z-stacks.
sc.pl.embedding(
    adata,
    basis="spatial3d",
    groups=["OD Mature 1", "OD Mature 2", "OD Mature 4"],
    na_color=(1, 1, 1, 0),
    projection="3d",
    color="Cell_class",
)
###############################################################################
# We can also visualize gene expression in 3D coordinates. Let's perform differential
# expression testing with :func:`scanpy.tl.rank_genes_groups` and visualize the results
sc.tl.rank_genes_groups(adata, groupby="Cell_class")
sc.pl.rank_genes_groups(adata, groupby="Cell_class")
###############################################################################
# and the expression in 3D.
sc.pl.embedding(adata, basis="spatial3d", projection="3d", color=["Gad1", "Mlc1"])
###############################################################################
# If the same analysis should be performed on a single slice, then it is advisable to
# copy the sample of interest in a new :class:`anndata.AnnData` and use it as
# a standard 2D spatial data object.
adata_slice = adata[adata.obs.Bregma == -9].copy()
sq.gr.spatial_neighbors(adata_slice, coord_type="generic")
# Fix: run the enrichment on the slice whose 2D graph was just built.
# The original passed the full 3D `adata` here, which ignores
# `adata_slice`'s freshly computed neighbors and silently reuses the
# 3D graph computed earlier in the tutorial.
sq.gr.nhood_enrichment(adata_slice, cluster_key="Cell_class")
sc.pl.spatial(
    adata_slice,
    color="Cell_class",
    groups=["Ependymal", "Pericytes", "Endothelial 2"],
    spot_size=0.01,
)
###############################################################################
# Spatially variable genes with spatial autocorrelation statistics
# ----------------------------------------------------------------
# With Squidpy we can investigate spatial variability of gene expression.
# This is an example of a function that only supports 2D data.
# :func:`squidpy.gr.spatial_autocorr` conveniently wraps two
# spatial autocorrelation statistics: *Moran's I* and *Geary's C*.
# They provide a score on the degree of spatial variability of gene expression.
# The statistic as well as the p-value are computed for each gene, and FDR correction
# is performed. For the purpose of this tutorial, let's compute the *Moran's I* score.
# The results are stored in `adata_slice.uns['moranI']` and we can visualize selected genes
# with :func:`scanpy.pl.spatial`.
sq.gr.spatial_autocorr(adata_slice, mode="moran")
adata_slice.uns["moranI"].head()
sc.pl.spatial(
    adata_slice,
    color=["Cd24a", "Necab1", "Mlc1"],
    spot_size=0.01,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
37702,
2736,
4638,
11084,
1366,
198,
4770,
2559,
198,
198,
1212,
11808,
2523,
703,
284,
4174,
48799,
9078,
329,
262,
3781,
286,
4638,
11084,
1366,
13,
198,
198,
464,
1366,
... | 3.442494 | 1,652 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Blueprint
from . import views
message_api = Blueprint('message', __name__)
# Message entry point
message_api.add_url_rule('/entry', view_func=views.EntryView.as_view('entry'))
# Query the status of a notification task
message_api.add_url_rule(
    '/notify_task/<task_id>',
    view_func=views.NotifyTaskView.as_view('notify_task'))
# Query the delivery result of a sent message
message_api.add_url_rule(
    '/notify_status/<task_id>',
    view_func=views.NotifyStatusView.as_view('notify_status'))
# Database record operations
records_view = views.RecordsView.as_view('records')
message_api.add_url_rule(
    '/records', view_func=records_view,
    methods=['GET', 'POST'])  # methods for flasgger
message_api.add_url_rule(
    '/records/<int:id>',
    view_func=records_view,
    methods=['GET', 'DELETE', 'PUT'])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
42903,
1330,
39932,
198,
198,
6738,
764,
1330,
5009,
198,
198,
20500,
62,
15042,
796,
39932,
10786,
20500,
3256,
... | 2.099462 | 372 |
import datetime
from ..api import project_data_from_master, project_data_from_master_month
from ..core.temporal import Month
| [
11748,
4818,
8079,
198,
198,
6738,
11485,
15042,
1330,
1628,
62,
7890,
62,
6738,
62,
9866,
11,
1628,
62,
7890,
62,
6738,
62,
9866,
62,
8424,
198,
6738,
11485,
7295,
13,
11498,
35738,
1330,
16061,
628,
198
] | 3.459459 | 37 |
import json
import xml.etree.ElementTree as et
# class Song:
# def __init__(self, song_id, title, artist):
# self.song_id = song_id
# self.title = title
# self.artist = artist
#
#
# class SongSerializer:
# def serialize(self, song, format):
# if format == "JSON":
# song_info = {"id": song.song_id, "title": song.title, "artist": song.artist}
# return json.dumps(song_info)
# elif format == "XML":
# song_info = et.Element("song", attrib={"id": song.song_id})
# title = et.SubElement(song_info, "title")
# title.text = song.title
# artist = et.SubElement(song_info, "artist")
# artist.text = song.artist
# return et.tostring(song_info, encoding="unicode")
# else:
# raise ValueError(format)
# Client
# Creator component
# the component chooses which implementation to use
#
# The creator returns the concrete implementation
# according to the value of the parameter to the client,
# and the client uses the provided object to complete its task.
# an Product implementation
# an Product implementation
# Demo of the serializer factory pattern.
# NOTE(review): Song and SongSerializer are commented out above, so running
# this module as-is raises NameError. Also, the commented implementation
# supports only "JSON" and "XML" — the "YAML" call below would raise
# ValueError. Confirm intent before re-enabling the classes.
if __name__ == "__main__":
    song = Song("1", "Shape Of My Heart", "Sting")
    serializer = SongSerializer()
    print(serializer.serialize(song, "JSON"))
    print(serializer.serialize(song, "XML"))
    print(serializer.serialize(song, "YAML"))
| [
11748,
33918,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
2123,
628,
198,
2,
1398,
10940,
25,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
11,
3496,
62,
312,
11,
3670,
11,
6802,
2599,
198,
2,
220,
220,
... | 2.433333 | 600 |
import os
from step_impl.utils.driver import Driver
from getgauge.python import step
@step("Give an option to Log In")
@step("Show the log in status for user <name>")
@step("Login as name <name> and <password>") | [
11748,
28686,
198,
6738,
2239,
62,
23928,
13,
26791,
13,
26230,
1330,
12434,
198,
6738,
651,
70,
559,
469,
13,
29412,
1330,
2239,
198,
198,
31,
9662,
7203,
23318,
281,
3038,
284,
5972,
554,
4943,
198,
198,
31,
9662,
7203,
15307,
262,
... | 3.19403 | 67 |
#IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
from pydevd_constants import * #@UnusedWildImport
import pydev_imports
from pydevd_comm import CMD_CHANGE_VARIABLE, \
CMD_EVALUATE_EXPRESSION, \
CMD_EVALUATE_CONSOLE_EXPRESSION, \
CMD_EXEC_EXPRESSION, \
CMD_GET_COMPLETIONS, \
CMD_GET_FRAME, \
CMD_SET_PY_EXCEPTION, \
CMD_GET_VARIABLE, \
CMD_LIST_THREADS, \
CMD_REMOVE_BREAK, \
CMD_RUN, \
CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT, \
CMD_STEP_INTO, \
CMD_STEP_OVER, \
CMD_STEP_RETURN, \
CMD_THREAD_CREATE, \
CMD_THREAD_KILL, \
CMD_THREAD_RUN, \
CMD_THREAD_SUSPEND, \
CMD_RUN_TO_LINE, \
CMD_RELOAD_CODE, \
CMD_VERSION, \
CMD_GET_FILE_CONTENTS, \
CMD_SET_PROPERTY_TRACE, \
GetGlobalDebugger, \
InternalChangeVariable, \
InternalGetCompletions, \
InternalEvaluateExpression, \
InternalGetFrame, \
InternalGetVariable, \
InternalEvaluateConsoleExpression, \
InternalConsoleGetCompletions, \
InternalTerminateThread, \
InternalRunThread, \
InternalStepThread, \
NetCommand, \
NetCommandFactory, \
PyDBDaemonThread, \
PydevQueue, \
ReaderThread, \
SetGlobalDebugger, \
WriterThread, \
PydevdFindThreadById, \
PydevdLog, \
StartClient, \
StartServer, \
InternalSetNextStatementThread
from pydevd_file_utils import NormFileToServer, GetFilenameAndBase
import pydevd_import_class
import pydevd_vars
import traceback
import pydevd_vm_type
import pydevd_tracing
import pydevd_io
from pydevd_additional_thread_info import PyDBAdditionalThreadInfo
import pydevd_traceproperty
import time
# Module-level aliases for the threading helpers used by the debugger loop
# (e.g. processInternalCommands below).
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
# File basenames the debugger must never trace, used as a set (values are
# always 1): tracing these would recurse into the debugger's own machinery.
DONT_TRACE = {
    #commonly used things from the stdlib that we don't want to trace
    'threading.py':1,
    'Queue.py':1,
    'socket.py':1,
    #things from pydev that we don't want to trace
    'pydevd_additional_thread_info.py':1,
    'pydevd_comm.py':1,
    'pydevd_constants.py':1,
    'pydevd_file_utils.py':1,
    'pydevd_frame.py':1,
    'pydevd_io.py':1 ,
    'pydevd_resolver.py':1 ,
    'pydevd_tracing.py':1 ,
    'pydevd_vars.py':1,
    'pydevd_vm_type.py':1,
    'pydevd.py':1 ,
    'pydevd_psyco_stub.py':1,
    'pydevd_traceproperty.py':1
}
if IS_PY3K:
    #if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
    DONT_TRACE['io.py'] = 1
    #Don't trace common encodings too
    DONT_TRACE['cp1252.py'] = 1
    DONT_TRACE['utf_8.py'] = 1
# True once the remote debugger connection is established.
connected = False
# When True, stdout/stderr writes are buffered and forwarded to the server.
bufferStdOutToServer = False
bufferStdErrToServer = False
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
_original_excepthook = None
#=======================================================================================================================
# excepthook
#=======================================================================================================================
#=======================================================================================================================
# set_pm_excepthook
#=======================================================================================================================
def set_pm_excepthook(handle_exceptions=None):
    """Deprecated shim — PyDev now configures exception handling via its UI.

    Kept only so legacy callers fail loudly instead of silently doing nothing.
    """
    message = ('This function is now replaced by GetGlobalDebugger().setExceptHook '
               'and is now controlled by the PyDev UI.')
    raise DeprecationWarning(message)
try:
import thread
except ImportError:
import _thread as thread #Py3K changed it.
_original_start_new_thread = thread.start_new_thread
#=======================================================================================================================
# NewThreadStartup
#=======================================================================================================================
#=======================================================================================================================
# ClassWithPydevStartNewThread
#=======================================================================================================================
#This is a hack for the situation where the thread.start_new_thread is declared inside a class, such as the one below
#class F(object):
# start_new_thread = thread.start_new_thread
#
# def start_it(self):
# self.start_new_thread(self.function, args, kwargs)
#So, if it's an already bound method, calling self.start_new_thread won't really receive a different 'self' -- it
#does work in the default case because in builtins self isn't passed either.
pydev_start_new_thread = ClassWithPydevStartNewThread().pydev_start_new_thread
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling processNetCommand.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def getInternalQueue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
try:
return self._cmd_queue[thread_id]
except KeyError:
return self._cmd_queue.setdefault(thread_id, PydevQueue.Queue()) #@UndefinedVariable
def postInternalCommand(self, int_cmd, thread_id):
""" if thread_id is *, post to all """
if thread_id == "*":
for k in self._cmd_queue.keys():
self._cmd_queue[k].put(int_cmd)
else:
queue = self.getInternalQueue(thread_id)
queue.put(int_cmd)
    def checkOutput(self, out, outCtx):
        '''Checks the output to see if we have to send some buffered output to the debug server
        @param out: sys.stdout or sys.stderr
        @param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it)
        '''
        try:
            v = out.getvalue()
            if v:
                # Forward the buffered text to the IDE as an IO message.
                self.cmdFactory.makeIoMessage(v, outCtx, self)
        except:
            # Deliberate best-effort: output forwarding must never kill the
            # debugger loop, so swallow (but log) every failure here.
            traceback.print_exc()
    def processInternalCommands(self):
        '''Process queued internal commands on the calling thread.

        Also keeps the thread bookkeeping up to date: newly seen program
        threads are registered (and announced to the IDE), dead threads are
        reported via processThreadNotAlive, and commands that may only run
        on a different thread are re-queued untouched. When no program
        threads remain alive, the debugging session is finished.
        '''
        curr_thread_id = GetThreadId(threadingCurrentThread())
        program_threads_alive = {}
        all_threads = threadingEnumerate()
        program_threads_dead = []
        self._main_lock.acquire()
        try:
            # Flush any buffered stdout/stderr to the IDE first.
            if bufferStdOutToServer:
                self.checkOutput(sys.stdoutBuf, 1) #@UndefinedVariable
            if bufferStdErrToServer:
                self.checkOutput(sys.stderrBuf, 2) #@UndefinedVariable
            self._lock_running_thread_ids.acquire()
            try:
                for t in all_threads:
                    thread_id = GetThreadId(t)
                    # Only non-daemon (program) threads that are still alive
                    # take part in command processing and bookkeeping.
                    if not isinstance(t, PyDBDaemonThread) and t.isAlive():
                        program_threads_alive[thread_id] = t
                        if not DictContains(self._running_thread_ids, thread_id):
                            if not hasattr(t, 'additionalInfo'):
                                #see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
                                #Let's create the additional info right away!
                                t.additionalInfo = PyDBAdditionalThreadInfo()
                            self._running_thread_ids[thread_id] = t
                            self.writer.addCommand(self.cmdFactory.makeThreadCreatedMessage(t))
                        queue = self.getInternalQueue(thread_id)
                        cmdsToReadd = [] #some commands must be processed by the thread itself... if that's the case,
                                         #we will re-add the commands to the queue after executing.
                        try:
                            # Drain the queue without blocking; Queue.Empty
                            # terminates the loop.
                            while True:
                                int_cmd = queue.get(False)
                                if int_cmd.canBeExecutedBy(curr_thread_id):
                                    PydevdLog(2, "processing internal command ", str(int_cmd))
                                    int_cmd.doIt(self)
                                else:
                                    PydevdLog(2, "NOT processing internal command ", str(int_cmd))
                                    cmdsToReadd.append(int_cmd)
                        except PydevQueue.Empty: #@UndefinedVariable
                            for int_cmd in cmdsToReadd:
                                queue.put(int_cmd)
                # this is how we exit
                thread_ids = list(self._running_thread_ids.keys())
                for tId in thread_ids:
                    if not DictContains(program_threads_alive, tId):
                        program_threads_dead.append(tId)
            finally:
                self._lock_running_thread_ids.release()
            for tId in program_threads_dead:
                try:
                    self.processThreadNotAlive(tId)
                except:
                    sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
                        program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
                    raise
            if len(program_threads_alive) == 0:
                # No program threads left: shut the session down and ask the
                # daemon helper threads to terminate themselves.
                self.FinishDebuggingSession()
                for t in all_threads:
                    if hasattr(t, 'doKillPydevThread'):
                        t.doKillPydevThread()
        finally:
            self._main_lock.release()
def processNetCommand(self, cmd_id, seq, text):
'''Processes a command received from the Java side
@param cmd_id: the id of the command
@param seq: the sequence of the command
@param text: the text received in the command
@note: this method is run as a big switch... after doing some tests, it's not clear whether changing it for
a dict id --> function call will have better performance result. A simple test with xrange(10000000) showed
that the gains from having a fast access to what should be executed are lost because of the function call in
a way that if we had 10 elements in the switch the if..elif are better -- but growing the number of choices
makes the solution with the dispatch look better -- so, if this gets more than 20-25 choices at some time,
it may be worth refactoring it (actually, reordering the ifs so that the ones used mostly come before
probably will give better performance).
'''
self._main_lock.acquire()
try:
try:
cmd = None
if cmd_id == CMD_RUN:
self.readyToRun = True
elif cmd_id == CMD_VERSION:
# response is version number
cmd = self.cmdFactory.makeVersionMessage(seq)
elif cmd_id == CMD_LIST_THREADS:
# response is a list of threads
cmd = self.cmdFactory.makeListThreadsMessage(seq)
elif cmd_id == CMD_THREAD_KILL:
int_cmd = InternalTerminateThread(text)
self.postInternalCommand(int_cmd, text)
elif cmd_id == CMD_THREAD_SUSPEND:
#Yes, thread suspend is still done at this point, not through an internal command!
t = PydevdFindThreadById(text)
if t:
additionalInfo = None
try:
additionalInfo = t.additionalInfo
except AttributeError:
pass #that's ok, no info currently set
if additionalInfo is not None:
for frame in additionalInfo.IterFrames():
self.SetTraceForFrameAndParents(frame)
del frame
self.setSuspend(t, CMD_THREAD_SUSPEND)
elif cmd_id == CMD_THREAD_RUN:
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalRunThread(thread_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN:
#we received some command to make a single step
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalStepThread(thread_id, cmd_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_RUN_TO_LINE or cmd_id == CMD_SET_NEXT_STATEMENT:
#we received some command to make a single step
thread_id, line, func_name = text.split('\t', 2)
t = PydevdFindThreadById(thread_id)
if t:
int_cmd = InternalSetNextStatementThread(thread_id, cmd_id, line, func_name)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_RELOAD_CODE:
#we received some command to make a reload of a module
module_name = text.strip()
from pydevd_reload import xreload
if not DictContains(sys.modules, module_name):
if '.' in module_name:
new_module_name = module_name.split('.')[-1]
if DictContains(sys.modules, new_module_name):
module_name = new_module_name
if not DictContains(sys.modules, module_name):
sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
else:
sys.stderr.write('pydev debugger: Reloading: ' + module_name + '\n')
xreload(sys.modules[module_name])
elif cmd_id == CMD_CHANGE_VARIABLE:
#the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
try:
thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
tab_index = attr_and_value.rindex('\t')
attr = attr_and_value[0:tab_index].replace('\t', '.')
value = attr_and_value[tab_index + 1:]
int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_VARIABLE:
#we received some command to get a variable
#the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
thread_id, frame_id, scopeattrs = text.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_COMPLETIONS:
#we received some command to get a variable
#the text is: thread_id\tframe_id\tactivation token
try:
thread_id, frame_id, scope, act_tok = text.split('\t', 3)
int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_FRAME:
thread_id, frame_id, scope = text.split('\t', 2)
int_cmd = InternalGetFrame(seq, thread_id, frame_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_BREAK:
#func name: 'None': match anything. Empty: match global, specified: only method context.
#command to add some breakpoint.
# text is file\tline. Add to breakpoints dictionary
file, line, condition = text.split('\t', 2)
if condition.startswith('**FUNC**'):
func_name, condition = condition.split('\t', 1)
#We must restore new lines and tabs as done in
#AbstractDebugTarget.breakpointAdded
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').\
replace("@_@TAB_CHAR@_@", '\t').strip()
func_name = func_name[8:]
else:
func_name = 'None' #Match anything if not specified.
file = NormFileToServer(file)
if not os.path.exists(file):
sys.stderr.write('pydev debugger: warning: trying to add breakpoint'\
' to file that does not exist: %s (will have no effect)\n' % (file,))
line = int(line)
if DEBUG_TRACE_BREAKPOINTS > 0:
sys.stderr.write('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name))
if DictContains(self.breakpoints, file):
breakDict = self.breakpoints[file]
else:
breakDict = {}
if len(condition) <= 0 or condition == None or condition == "None":
breakDict[line] = (True, None, func_name)
else:
breakDict[line] = (True, condition, func_name)
self.breakpoints[file] = breakDict
self.setTracingForUntracedContexts()
elif cmd_id == CMD_REMOVE_BREAK:
#command to remove some breakpoint
#text is file\tline. Remove from breakpoints dictionary
file, line = text.split('\t', 1)
file = NormFileToServer(file)
try:
line = int(line)
except ValueError:
pass
else:
try:
del self.breakpoints[file][line] #remove the breakpoint in that line
if DEBUG_TRACE_BREAKPOINTS > 0:
sys.stderr.write('Removed breakpoint:%s\n' % (file,))
except KeyError:
#ok, it's not there...
if DEBUG_TRACE_BREAKPOINTS > 0:
#Sometimes, when adding a breakpoint, it adds a remove command before (don't really know why)
sys.stderr.write("breakpoint not found: %s - %s\n" % (file, line))
elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
#command to evaluate the given expression
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression = text.split('\t', 3)
int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
cmd_id == CMD_EXEC_EXPRESSION)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_PY_EXCEPTION:
# Command which receives set of exceptions on which user wants to break the debugger
# text is: break_on_uncaught;break_on_caught;TypeError;ImportError;zipimport.ZipImportError;
splitted = text.split(';')
if len(splitted) >= 2:
if splitted[0] == 'true':
break_on_uncaught = True
else:
break_on_uncaught = False
if splitted[1] == 'true':
break_on_caught = True
else:
break_on_caught = False
handle_exceptions = []
for exception_type in splitted[2:]:
exception_type = exception_type.strip()
if not exception_type:
continue
try:
handle_exceptions.append(eval(exception_type))
except:
try:
handle_exceptions.append(pydevd_import_class.ImportName(exception_type))
except:
sys.stderr.write("Unable to Import: %s when determining exceptions to break.\n" % (exception_type,))
if DEBUG_TRACE_BREAKPOINTS > 0:
sys.stderr.write("Exceptions to hook : %s\n" % (handle_exceptions,))
self.setExceptHook(tuple(handle_exceptions), break_on_uncaught, break_on_caught)
self.setTracingForUntracedContexts()
else:
sys.stderr.write("Error when setting exception list. Received: %s\n" % (text,))
elif cmd_id == CMD_GET_FILE_CONTENTS:
if os.path.exists(text):
f = open(text, 'r')
try:
source = f.read()
finally:
f.close()
cmd = self.cmdFactory.makeGetFileContents(seq, source)
elif cmd_id == CMD_SET_PROPERTY_TRACE:
# Command which receives whether to trace property getter/setter/deleter
# text is feature_state(true/false);disable_getter/disable_setter/disable_deleter
if text != "":
splitted = text.split(';')
if len(splitted) >= 3:
if self.disable_property_trace is False and splitted[0] == 'true':
# Replacing property by custom property only when the debugger starts
pydevd_traceproperty.replace_builtin_property()
self.disable_property_trace = True
# Enable/Disable tracing of the property getter
if splitted[1] == 'true':
self.disable_property_getter_trace = True
else:
self.disable_property_getter_trace = False
# Enable/Disable tracing of the property setter
if splitted[2] == 'true':
self.disable_property_setter_trace = True
else:
self.disable_property_setter_trace = False
# Enable/Disable tracing of the property deleter
if splitted[3] == 'true':
self.disable_property_deleter_trace = True
else:
self.disable_property_deleter_trace = False
else:
# User hasn't configured any settings for property tracing
pass
elif cmd_id == CMD_EVALUATE_CONSOLE_EXPRESSION:
# Command which takes care for the debug console communication
if text != "":
thread_id, frame_id, console_command = text.split('\t', 2)
console_command, line = console_command.split('\t')
if console_command == 'EVALUATE':
int_cmd = InternalEvaluateConsoleExpression(seq, thread_id, frame_id, line)
elif console_command == 'GET_COMPLETIONS':
int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)
self.postInternalCommand(int_cmd, thread_id)
else:
#I have no idea what this is all about
cmd = self.cmdFactory.makeErrorMessage(seq, "unexpected command " + str(cmd_id))
if cmd is not None:
self.writer.addCommand(cmd)
del cmd
except Exception:
traceback.print_exc()
cmd = self.cmdFactory.makeErrorMessage(seq,
"Unexpected exception in processNetCommand.\nInitial params: %s" % ((cmd_id, seq, text),))
self.writer.addCommand(cmd)
finally:
self._main_lock.release()
def setExceptHook(self, handle_exceptions, break_on_uncaught, break_on_caught):
'''
Should be called to set the exceptions to be handled and whether it should break on uncaught and
caught exceptions.
Can receive a parameter to stop only on some exceptions.
E.g.:
set_pm_excepthook((IndexError, ValueError), True, True)
or
set_pm_excepthook(IndexError, True, False)
if passed without a parameter, will break on any exception
@param handle_exceptions: exception or tuple(exceptions)
The exceptions that should be handled.
@param break_on_uncaught bool
Whether it should break on uncaught exceptions.
@param break_on_caught: bool
Whether it should break on caught exceptions.
'''
global _original_excepthook
if sys.excepthook != excepthook:
#Only keep the original if it's not our own excepthook (if called many times).
_original_excepthook = sys.excepthook
self.handle_exceptions = handle_exceptions
#Note that we won't set to break if we don't have any exception to break on
self.break_on_uncaught = handle_exceptions and break_on_uncaught
self.break_on_caught = handle_exceptions and break_on_caught
sys.excepthook = excepthook
def processThreadNotAlive(self, threadId):
""" if thread is not alive, cancel trace_dispatch processing """
self._lock_running_thread_ids.acquire()
try:
thread = self._running_thread_ids.pop(threadId, None)
if thread is None:
return
wasNotified = thread.additionalInfo.pydev_notify_kill
if not wasNotified:
thread.additionalInfo.pydev_notify_kill = True
finally:
self._lock_running_thread_ids.release()
cmd = self.cmdFactory.makeThreadKilledMessage(threadId)
self.writer.addCommand(cmd)
def doWaitSuspend(self, thread, frame, event, arg): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
"""
self.processInternalCommands()
cmd = self.cmdFactory.makeThreadSuspendMessage(GetThreadId(thread), frame, thread.stop_reason)
self.writer.addCommand(cmd)
info = thread.additionalInfo
while info.pydev_state == STATE_SUSPEND and not self._finishDebuggingSession:
self.processInternalCommands()
time.sleep(0.01)
#process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO:
info.pydev_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
self.SetTraceForFrameAndParents(frame)
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT :
self.SetTraceForFrameAndParents(frame)
if event == 'line' or event == 'exception':
#If we're already in the correct context, we have to stop it now, because we can act only on
#line events -- if a return was the next statement it wouldn't work (so, we have this code
#repeated at pydevd_frame).
stop = False
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
line = info.pydev_next_line
if frame.f_lineno == line:
stop = True
else:
if frame.f_trace is None:
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
frame.f_trace = None
stop = True
if stop:
info.pydev_state = STATE_SUSPEND
self.doWaitSuspend(thread, frame, event, arg)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
#steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.SetTraceForFrameAndParents(frame)
else:
#No back frame?!? -- this happens in jython when we have some frame created from an awt event
#(the previous frame would be the awt event, but this doesn't make part of 'jython', only 'java')
#so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = None
info.pydev_state = STATE_RUN
del frame
cmd = self.cmdFactory.makeThreadRunMessage(GetThreadId(thread), info.pydev_step_cmd)
self.writer.addCommand(cmd)
def trace_dispatch(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
'''
try:
if self._finishDebuggingSession:
#that was not working very well because jython gave some socket errors
threads = threadingEnumerate()
for t in threads:
if hasattr(t, 'doKillPydevThread'):
t.doKillPydevThread()
return None
filename, base = GetFilenameAndBase(frame)
is_file_to_ignore = DictContains(DONT_TRACE, base) #we don't want to debug threading or anything related to pydevd
if not self.force_post_mortem_stop: #If we're in post mortem mode, we might not have another chance to show that info!
if is_file_to_ignore:
return None
#print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name)
try:
#this shouldn't give an exception, but it could happen... (python bug)
#see http://mail.python.org/pipermail/python-bugs-list/2007-June/038796.html
#and related bug: http://bugs.python.org/issue1733757
t = threadingCurrentThread()
except:
frame.f_trace = self.trace_dispatch
return self.trace_dispatch
try:
additionalInfo = t.additionalInfo
except:
additionalInfo = t.additionalInfo = PyDBAdditionalThreadInfo()
if self.force_post_mortem_stop: #If we're in post mortem mode, we might not have another chance to show that info!
if additionalInfo.pydev_force_stop_at_exception:
self.force_post_mortem_stop -= 1
frame, frames_byid = additionalInfo.pydev_force_stop_at_exception
thread_id = GetThreadId(t)
used_id = pydevd_vars.addAdditionalFrameById(thread_id, frames_byid)
try:
self.setSuspend(t, CMD_STEP_INTO)
self.doWaitSuspend(t, frame, 'exception', None)
finally:
additionalInfo.pydev_force_stop_at_exception = None
pydevd_vars.removeAdditionalFrameById(thread_id)
# if thread is not alive, cancel trace_dispatch processing
if not t.isAlive():
self.processThreadNotAlive(GetThreadId(t))
return None # suspend tracing
if is_file_to_ignore:
return None
#each new frame...
return additionalInfo.CreateDbFrame((self, filename, additionalInfo, t, frame)).trace_dispatch(frame, event, arg)
except SystemExit:
return None
except Exception:
#Log it
if traceback is not None:
#This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
return None
if USE_PSYCO_OPTIMIZATION:
try:
import psyco
trace_dispatch = psyco.proxy(trace_dispatch)
processNetCommand = psyco.proxy(processNetCommand)
processInternalCommands = psyco.proxy(processInternalCommands)
doWaitSuspend = psyco.proxy(doWaitSuspend)
getInternalQueue = psyco.proxy(getInternalQueue)
except ImportError:
if hasattr(sys, 'exc_clear'): #jython does not have it
sys.exc_clear() #don't keep the traceback (let's keep it clear for when we go to the point of executing client code)
if not IS_PY3K and not IS_PY27 and not IS_64_BITS and not sys.platform.startswith("java") and not sys.platform.startswith("cli"):
sys.stderr.write("pydev debugger: warning: psyco not available for speedups (the debugger will still work correctly, but a bit slower)\n")
def processCommandLine(argv):
""" parses the arguments.
removes our arguments from the command line """
retVal = {}
retVal['client'] = ''
retVal['server'] = False
retVal['port'] = 0
retVal['file'] = ''
i = 0
del argv[0]
while (i < len(argv)):
if (argv[i] == '--port'):
del argv[i]
retVal['port'] = int(argv[i])
del argv[i]
elif (argv[i] == '--vm_type'):
del argv[i]
retVal['vm_type'] = argv[i]
del argv[i]
elif (argv[i] == '--client'):
del argv[i]
retVal['client'] = argv[i]
del argv[i]
elif (argv[i] == '--server'):
del argv[i]
retVal['server'] = True
elif (argv[i] == '--file'):
del argv[i]
retVal['file'] = argv[i];
i = len(argv) # pop out, file is our last argument
elif (argv[i] == '--DEBUG_RECORD_SOCKET_READS'):
del argv[i]
retVal['DEBUG_RECORD_SOCKET_READS'] = True
else:
raise ValueError("unexpected option " + argv[i])
return retVal
#=======================================================================================================================
# patch_django_autoreload
#=======================================================================================================================
def patch_django_autoreload(patch_remote_debugger=True, patch_show_console=True):
'''
Patch Django to work with remote debugger without adding an explicit
pydevd.settrace to set a breakpoint (i.e.: it'll setup the remote debugger machinery
and don't suspend now -- this will load the breakpoints and will listen to
changes in them so that we do stop on the breakpoints set in the editor).
Checked with with Django 1.2.5.
Checked with with Django 1.3.
Checked with with Django 1.4.
@param patch_remote_debugger: if True, the debug tracing mechanism will be put into place.
@param patch_show_console: if True, each new process created in Django will allocate a new console
outside of Eclipse (so, it can be killed with a Ctrl+C in that console).
Note: when on Linux, even Ctrl+C will do a reload, so, the parent process
(inside Eclipse) must be killed before issuing the Ctrl+C (see TODO in code).
'''
if 'runserver' in sys.argv or 'testserver' in sys.argv:
from django.utils import autoreload
if patch_remote_debugger:
original_main = autoreload.main
autoreload.main = main
if patch_show_console:
autoreload.restart_with_reloader = restart_with_reloader
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(host=None, stdoutToServer=False, stderrToServer=False, port=5678, suspend=True, trace_only_current_thread=True):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all future threads will also have the tracing enabled.
'''
_set_trace_lock.acquire()
try:
_locked_settrace(host, stdoutToServer, stderrToServer, port, suspend, trace_only_current_thread)
finally:
_set_trace_lock.release()
_set_trace_lock = threading.Lock()
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
sys.stderr.write("pydev debugger: starting\n")
# parse the command line. --file is our last argument that is required
try:
setup = processCommandLine(sys.argv)
except ValueError:
traceback.print_exc()
usage(1)
#as to get here all our imports are already resolved, the psyco module can be
#changed and we'll still get the speedups in the debugger, as those functions
#are already compiled at this time.
try:
import psyco
except ImportError:
if hasattr(sys, 'exc_clear'): #jython does not have it
sys.exc_clear() #don't keep the traceback -- clients don't want to see it
pass #that's ok, no need to mock psyco if it's not available anyways
else:
#if it's available, let's change it for a stub (pydev already made use of it)
import pydevd_psyco_stub
sys.modules['psyco'] = pydevd_psyco_stub
PydevdLog(2, "Executing file ", setup['file'])
PydevdLog(2, "arguments:", str(sys.argv))
pydevd_vm_type.SetupType(setup.get('vm_type', None))
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', False)
debugger = PyDB()
try:
debugger.connect(setup['client'], setup['port'])
except:
sys.stderr.write("Could not connect to %s: %s\n" % (setup['client'], setup['port']))
traceback.print_exc()
sys.exit(1)
connected = True #Mark that we're connected when started from inside eclipse.
debugger.run(setup['file'], None, None)
| [
2,
3955,
15490,
8643,
25,
279,
5173,
1990,
67,
62,
9979,
1187,
1276,
307,
262,
352,
301,
1517,
5447,
780,
340,
1183,
1394,
257,
4941,
284,
262,
2656,
25064,
13557,
1136,
14535,
198,
6738,
279,
5173,
1990,
67,
62,
9979,
1187,
1330,
1... | 2.044336 | 21,247 |
#!/usr/bin/env python
'''Simple functions and variables for easily accessing common files and choices
of parameters.
'''
import os
import linefinder.config as l_config
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
########################################################################
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
26437,
5499,
290,
9633,
329,
3538,
22534,
2219,
3696,
290,
7747,
198,
1659,
10007,
13,
198,
7061,
6,
198,
198,
11748,
28686,
198,
198,
11748,
1627,
22805,
13,
11250,
355,
300,... | 7.738372 | 172 |
"""
Support module for recruiter, will generate a notification
email to recruiter when admin gives suggestions
"""
import json
from django.shortcuts import render
from django.http.response import HttpResponse
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.template import loader
from peeldb.models import (
Ticket,
STATUS,
TICKET_TYPES,
PRIORITY_TYPES,
Attachment,
Comment,
)
from dashboard.tasks import send_email
from .forms import TicketForm, CommentForm
@login_required
def index(request):
    """
    List tickets (GET) or create a new ticket (POST).

    GET:
        * Recruiter / agency recruiter: render the user's own tickets,
          newest first, together with the priority and ticket type choices.
        * Staff: render the dashboard ticket page with the same choices.
        * Anyone else: render the recruiter 404 page.

    POST:
        Validate the submitted ticket plus any uploaded attachments.  On
        success, create the ticket in the "Open" state, attach every
        uploaded file, e-mail the creator, and return a JSON success
        payload; on failure, return the validation errors as JSON.
    """
    if request.method == "GET":
        if request.user.is_agency_recruiter or request.user.is_recruiter:
            tickets = Ticket.objects.filter(user=request.user).order_by("-created_on")
            template_name = "recruiter/tickets/ticket.html"
            data = {
                "tickets": tickets,
                "priorities": PRIORITY_TYPES,
                "ticket_types": TICKET_TYPES,
            }
        elif request.user.is_staff:
            template_name = "dashboard/tickets/ticket.html"
            data = {"priorities": PRIORITY_TYPES, "ticket_types": TICKET_TYPES}
        else:
            template_name = "recruiter/recruiter_404.html"
            data = {
                "message": "Sorry, No Ticket Found",
                "reason": """The URL may be misspelled or the ticket
                you're looking for is no longer available.""",
            }
        return render(request, template_name, data)
    validate_ticket = TicketForm(request.POST, request.FILES)
    if validate_ticket.is_valid():
        ticket = validate_ticket.save(commit=False)
        ticket.user = request.user
        ticket.status = "Open"
        ticket.save()
        # Attach every uploaded file to the freshly created ticket.
        for value in request.FILES.values():
            attachment = Attachment.objects.create(
                attached_file=value, uploaded_by=request.user
            )
            ticket.attachments.add(attachment)
        # Notify the creator by e-mail (delivered asynchronously by the task).
        temp = loader.get_template("email/new_ticket.html")
        subject = "Service Request | Peeljobs"
        rendered = temp.render({"ticket": ticket})
        mto = [ticket.user.email]
        send_email.delay(mto, subject, rendered)
        data = {"error": False, "response": "New Ticket Created Successfully"}
    else:
        errors = validate_ticket.errors
        # Surface per-attachment errors for the form's dynamic file inputs.
        for key in request.POST.keys():
            if "attachment_" in key:
                errors[key] = "This field is required"
        data = {"error": True, "response": errors}
    return HttpResponse(json.dumps(data))
@login_required
def new_ticket(request):
    """
    Render the ticket-creation page (GET) or create a ticket (POST).

    GET:
        Render the new-ticket form with the priority and ticket type
        choices.

    POST:
        Validate the submitted ticket plus any uploaded attachments.  On
        success, create the ticket in the "Open" state, attach every
        uploaded file, e-mail the creator, and return a JSON success
        payload; on failure, return the validation errors as JSON.
    """
    if request.method == "GET":
        return render(
            request,
            "recruiter/tickets/new_ticket.html",
            {"priorities": PRIORITY_TYPES, "ticket_types": TICKET_TYPES},
        )
    validate_ticket = TicketForm(request.POST, request.FILES)
    if validate_ticket.is_valid():
        ticket = validate_ticket.save(commit=False)
        ticket.user = request.user
        ticket.status = "Open"
        ticket.save()
        # Attach every uploaded file to the freshly created ticket.
        for value in request.FILES.values():
            attachment = Attachment.objects.create(
                attached_file=value, uploaded_by=request.user
            )
            ticket.attachments.add(attachment)
        temp = loader.get_template("email/new_ticket.html")
        subject = "Service Request | Peeljobs"
        rendered = temp.render({"ticket": ticket})
        # Recipients are passed as a list for consistency with ``index``
        # (a bare string would be iterated per-character by mail backends).
        mto = [ticket.user.email]
        send_email.delay(mto, subject, rendered)
        data = {"error": False, "response": "New Ticket Created Successfully"}
    else:
        errors = validate_ticket.errors
        data = {"error": True, "response": errors}
    return HttpResponse(json.dumps(data))
@login_required
def edit_ticket(request, ticket_id):
    """
    Render the edit form for a ticket (GET) or update it (POST).

    GET:
        If the logged-in user owns a ticket with ``ticket_id``, render the
        edit page (recruiter or dashboard variant) with the priority and
        ticket type choices; otherwise render the recruiter 404 page.

    POST:
        Validate the submitted data (plus attachments) against the existing
        ticket, reset it to the "Open" state on success, and report the
        outcome as JSON.
    """
    ticket = Ticket.objects.filter(id=ticket_id, user=request.user).first()
    if request.method == "GET":
        if ticket:
            template_name = (
                "recruiter/tickets/edit_ticket.html"
                if request.user.is_agency_recruiter or request.user.is_recruiter
                else "dashboard/tickets/edit_ticket.html"
            )
            data = {
                "priorities": PRIORITY_TYPES,
                "ticket_types": TICKET_TYPES,
                "ticket": ticket,
            }
        else:
            reason = """The URL may be misspelled or the ticket
            you're looking for is no longer available."""
            template_name = "recruiter/recruiter_404.html"
            data = {
                "message_type": "404",
                "message": "Sorry, No Ticket Found",
                "reason": reason,
            }
        return render(request, template_name, data, status=200 if ticket else 404)
    if ticket is None:
        # Guard the POST path: without this, ``instance=None`` would make
        # the form silently *create* a new ticket for an unknown/unowned id.
        data = {"error": True, "response": "Sorry, No Ticket Found"}
        return HttpResponse(json.dumps(data))
    validate_ticket = TicketForm(request.POST, request.FILES, instance=ticket)
    if validate_ticket.is_valid():
        ticket = validate_ticket.save(commit=False)
        ticket.user = request.user
        ticket.status = "Open"
        ticket.save()
        # Attach every newly uploaded file to the ticket.
        for value in request.FILES.values():
            attachment = Attachment.objects.create(
                attached_file=value, uploaded_by=request.user
            )
            ticket.attachments.add(attachment)
        data = {"error": False, "response": "Ticket Updated Successfully"}
    else:
        errors = validate_ticket.errors
        # Surface per-attachment errors for the form's dynamic file inputs.
        for key in request.POST.keys():
            if "attachment_" in key:
                errors[key] = "This field is required"
        data = {"error": True, "response": errors}
    return HttpResponse(json.dumps(data))
@login_required
def delete_ticket(request, ticket_id):
    """
    Delete a ticket by primary key.

    Only staff members or the ticket's creator may delete it; every
    outcome is reported back to the browser as a JSON payload.
    """
    ticket = Ticket.objects.filter(id=ticket_id).first()
    may_delete = ticket is not None and (
        request.user.is_staff or request.user == ticket.user
    )
    if may_delete:
        ticket.delete()
        payload = {"error": False, "response": "Ticket Deleted Successfully"}
    else:
        payload = {"error": True, "response": "This Ticket cant be deleted"}
    return HttpResponse(json.dumps(payload))
@login_required
def delete_attachment(request, attachment_id):
    """
    Delete a ticket attachment by primary key.

    Only staff members or the user who uploaded the attachment may
    delete it; every outcome is reported as a JSON payload.
    """
    attachment = Attachment.objects.filter(id=attachment_id).first()
    may_delete = attachment is not None and (
        request.user.is_staff or request.user == attachment.uploaded_by
    )
    if may_delete:
        attachment.delete()
        payload = {"error": False, "response": "Attachment Deleted Successfully"}
    else:
        payload = {"error": True, "response": "This Attachment cant be deleted"}
    return HttpResponse(json.dumps(payload))
@login_required
def delete_comment(request, comment_id):
    """
    Delete a ticket comment by primary key.

    Only staff members or the user who wrote the comment may delete it;
    every outcome is reported as a JSON payload.
    """
    comment = Comment.objects.filter(id=comment_id).first()
    may_delete = comment is not None and (
        request.user.is_staff or request.user == comment.commented_by
    )
    if may_delete:
        comment.delete()
        payload = {"error": False, "response": "Comment Deleted Successfully"}
    else:
        payload = {"error": True, "response": "This Comment cant be deleted"}
    return HttpResponse(json.dumps(payload))
TICKET_STATUS = (
("Open", "Open"),
("Closed", "Closed"),
)
@login_required
def view_ticket(request, ticket_id):
"""
Method: GET
1. Check for a ticket existed or not with the id mentioned in the url
2. check the loogedin is ticket_created user or admin, If not returns a 404 page
"""
if not request.user.user_type == "JS":
tickets = Ticket.objects.filter(id=ticket_id, user=request.user)
if request.method == "GET":
if tickets:
ticket = tickets[0]
if request.user.is_staff or request.user == ticket.user:
template_name = "recruiter/tickets/view_ticket.html"
return render(
request,
template_name,
{
"priorities": PRIORITY_TYPES,
"ticket_types": TICKET_TYPES,
"ticket": tickets[0],
"status": STATUS,
},
)
message = "Sorry, No Ticket Found"
reason = "The URL may be misspelled or the ticket you're looking for is no longer available."
return render(
request,
"recruiter/recruiter_404.html",
{"message_type": "404", "message": message, "reason": reason},
status=404,
)
@login_required
def ticket_status(request, ticket_id):
"""
1. Check for a ticket existed or not with the id mentioned in the url
2. check the loogedin is ticket_created user or admin, If not returns a 404 page
3. If successfull, then changing the ticket status
"""
tickets = Ticket.objects.filter(id=ticket_id)
if tickets:
ticket = tickets[0]
if request.user.is_staff or request.user == ticket.user:
if request.POST.get("ticket_status"):
ticket.status = request.POST.get("ticket_status")
ticket.save()
temp = loader.get_template("email/new_ticket.html")
subject = "Your Ticket Status | Peeljobs"
rendered = temp.render({"ticket": ticket, "status": True})
mto = ticket.user.email
user_active = True if ticket.user.is_active else False
send_email.delay(mto, subject, rendered)
data = {
"error": False,
"response": "Ticket status changed Successfully",
}
else:
data = {"error": True, "response": "Please select status"}
return HttpResponse(json.dumps(data))
message = "Sorry, No Ticket Found"
reason = "The URL may be misspelled or the ticket you're looking for is no longer available."
return render(
request,
"recruiter/recruiter_404.html",
{"message_type": "404", "message": message, "reason": reason},
status=404,
)
@login_required
def ticket_comment(request, ticket_id):
"""
1. Check for a ticket existed or not with the id mentioned in the url
2. check the loogedin is ticket_created user or admin, If not returns a 404 page
3. Then checking for form validations along with comment attachments
4. If successfull, then comment will be created for a ticket
5. A Notification email has been sent to the ticket_created user with the comment message
"""
ticket = Ticket.objects.filter(id=ticket_id).first()
if ticket:
if request.user.is_staff or request.user == ticket.user:
validate_comment = CommentForm(request.POST, request.FILES)
if validate_comment.is_valid():
comment = Comment.objects.create(
comment=request.POST.get("comment"),
ticket=ticket,
commented_by=request.user,
)
if request.FILES:
for key, value in request.FILES.items():
attachment = Attachment.objects.create(
attached_file=value, uploaded_by=request.user
)
comment.attachments.add(attachment)
if request.user.is_superuser:
temp = loader.get_template("email/new_ticket.html")
subject = "Acknowledgement For Your Request | Peeljobs"
rendered = temp.render({"ticket": ticket, "comment": comment})
mto = ticket.user.email
user_active = True if ticket.user.is_active else False
send_email.delay(mto, subject, rendered)
return HttpResponse(
json.dumps(
{"error": False, "response": "Comment added Successfully"}
)
)
else:
return HttpResponse(
json.dumps({"error": True, "response": validate_comment.errors})
)
reason = "The URL may be misspelled or the ticket you're looking for is no longer available."
return render(
request,
"recruiter/recruiter_404.html",
{"message_type": "404", "message": "Sorry, No Ticket Found", "reason": reason},
status=404,
)
@login_required
def edit_comment(request):
"""
1. Check for a ticket existed or not with the id mentioned in the url
2. check the loogedin user is comment_created user or admin, If not returns a 404 page
3. Then checking for form validations along with comment attachments
4. If successfull, then comment details will be updated for a ticket
"""
comments = Comment.objects.filter(
id=request.POST.get("comment_id"), commented_by=request.user
)
if comments:
validate_comment = CommentForm(
request.POST, request.FILES, instance=comments[0]
)
if validate_comment.is_valid():
comment = validate_comment.save(commit=False)
comment.commented_by = request.user
comment.save()
for key, value in request.FILES.items():
attachment = Attachment.objects.create(
attached_file=value, uploaded_by=request.user
)
comment.attachments.add(attachment)
data = {"error": False, "response": "Comment Updated Successfully"}
else:
errors = validate_comment.errors
for key in request.POST.keys():
if "attachment_" in key:
errors[key] = "This field is required"
data = {"error": True, "response": errors}
else:
data = {
"error": True,
"response_message": "This comment can't edit by the User",
}
return HttpResponse(json.dumps(data))
@login_required
def admin_tickets_list(request):
"""
Method: GET
1. check the loogedin user is admin or not, If not returns a 404 page
2. If user is amdin, then display a recent tickets to admin
3. If successfull, then comment details will be updated for a ticket
"""
if request.user.is_staff:
tickets = Ticket.objects.filter().order_by("-created_on")
return render(
request, "dashboard/tickets/admin_ticket_list.html", {"tickets": tickets}
)
message = "Sorry, No Ticket Found"
reason = "The URL may be misspelled or the ticket you're looking for is no longer available."
return render(
request,
"404.html",
{"message_type": "404", "message": message, "reason": reason},
status=404,
)
TICKET_STATUS = (
("Open", "Open"),
("Closed", "Closed"),
("Ongoing", "Ongoing"),
)
@login_required
def admin_ticket_view(request, ticket_id):
"""
Method: GET
1. check the loogedin user is admin or not, If not returns a 404 page
2. check ticket is existing or not with the id given in url
3. If successfull, then display the ticket details to admin user
"""
if request.user.is_staff:
tickets = Ticket.objects.filter(id=ticket_id)
if tickets:
return render(
request,
"dashboard/tickets/ticket_view.html",
{
"ticket": tickets[0],
"priorities": PRIORITY_TYPES,
"ticket_types": TICKET_TYPES,
"status": TICKET_STATUS,
},
)
message = "Sorry, No Ticket Found"
reason = "The URL may be misspelled or the ticket you're looking for is no longer available."
return render(
request,
"404.html",
{"message_type": "404", "message": message, "reason": reason},
status=404,
)
| [
37811,
198,
220,
220,
220,
7929,
8265,
329,
8921,
2676,
11,
481,
7716,
257,
14483,
198,
220,
220,
220,
3053,
284,
8921,
2676,
618,
13169,
3607,
11776,
198,
37811,
198,
198,
11748,
33918,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
... | 2.336973 | 7,968 |
import idautils
import idaapi
import idc
import subprocess
import pycparser
import ida_typeinf
import ida_nalt
sdk_funcs_file = open(idc.ARGV[1], "w")
sdk_funcs_header = open(idc.ARGV[2], "w")
# Uses demumble as a drop in replacment
cppfilt_path = idc.ARGV[3]
print "Wait for auto analysis"
idc.auto_wait()
# Disable terminal opening up
# https://stackoverflow.com/a/23924771/9329945
if hasattr(subprocess, 'STARTUPINFO'):
# Windows
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# The following is the initialized default, but
# setting it explicitly is self-documenting.
si.wShowWindow = subprocess.SW_HIDE
else:
# POSIX
si = None
print "Starting analysis"
current_index = 0
for ea in idautils.Functions():
flags = idc.GetFunctionFlags(ea)
func_name = idc.get_func_name(ea)
if (current_index % 1000 == 0):
print "Processing function %d" % current_index
if flags & FUNC_THUNK and not func_name.startswith("sub_") and not func_name.startswith("j__ZdlPv") and not "null" in func_name:
# Revert weird designations
# could also use ida_funcs.set_func_name_if_jumpfunc(ea, None)
func_name = func_name.replace("j_", "")
funcdata = ida_typeinf.func_type_data_t()
tinfo = ida_typeinf.tinfo_t();
ida_nalt.get_tinfo(tinfo, ea);
tinfo.get_func_details(funcdata)
if (flags & FUNC_NORET):
retcode = ''
else:
retcode = 'N'
mcsema_def = ("%s %d C %s" % (func_name, funcdata.size(), retcode)).strip()
sdk_funcs_file.write(mcsema_def + '\n')
if func_name.endswith("_1"):
func_name = func_name.replace("_1", "")
if func_name.endswith("_0"):
func_name = func_name.replace("_0", "")
demangled_str = subprocess.check_output([cppfilt_path, func_name], shell=True, startupinfo=si).strip()
# Function types that may be printed
# https://www.hex-rays.com/products/ida/support/idadoc/1361.shtml
if funcdata.rettype.empty():
sdk_funcs_header.write("void " + demangled_str + '\n')
else:
# Check for QWord, can be represented as a 64 bit integer
reported_type = str(funcdata.rettype)
if reported_type == "_QWORD":
sdk_funcs_header.write("uint64_t " + demangled_str + '\n')
else:
sdk_funcs_header.write(reported_type + " " + demangled_str + '\n')
current_index += 1
sdk_funcs_file.close()
sdk_funcs_header.close()
print "Done analysis!"
idc.Exit(0) | [
11748,
4686,
2306,
4487,
198,
11748,
220,
3755,
15042,
198,
11748,
4686,
66,
198,
11748,
850,
14681,
198,
11748,
12972,
13155,
28198,
198,
11748,
220,
3755,
62,
4906,
10745,
198,
11748,
220,
3755,
62,
77,
2501,
198,
198,
21282,
74,
62,
... | 2.449896 | 958 |
import os
import math
from concurrent.futures import ProcessPoolExecutor
import multiprocessing as mp
import logging
import pandas as pd
def df_map(func, df: pd.DataFrame, no_of_cores: int = None, no_of_cores_perc: float = None) -> pd.DataFrame:
"""
Maps function func to DataFrame df using multithreading
:param func: Function to be mapped to a single row. Has to take one parameter of type pandas.DataFrame
:param df: Dataframe of type pandas.DataFrame
:param no_of_cores: Number of cores to use. If it is None maximal number cores minus one will be used
:param no_of_cores_perc: Number of cores to use in relation to all available cores. no_of_cores must be None.
:type func: function, no lambda expression
:type df: pandas.DataFrame
:type no_of_cores: int
:type no_of_cores_perc: float
:return: transformed Dataframe of type pandas.DataFrame
"""
logger = logging.getLogger('pd_multithreading.map')
if no_of_cores is None:
try:
no_of_cores = len(os.sched_getaffinity(0))
except AttributeError:
no_of_cores = mp.cpu_count()
if no_of_cores_perc is not None:
no_of_cores = math.ceil(no_of_cores*no_of_cores_perc)
elif no_of_cores > 1:
no_of_cores -= 1
len_df = len(df)
if no_of_cores > len_df:
no_of_cores = len_df
logger.debug('Number of cores: %i', no_of_cores)
list_df = []
for i in range(no_of_cores):
start = math.ceil(len_df*i/no_of_cores)
end = math.ceil(len_df*(i+1)/no_of_cores) - 1
df_block = df.iloc[start:end+1, :]
list_df.append(df_block)
with ProcessPoolExecutor() as ex:
df_res_list = list(ex.map(func, list_df))
res = None
for df_res_element in df_res_list:
if res is None:
res = df_res_element
else:
res = res.append(df_res_element)
return res
| [
11748,
28686,
198,
11748,
10688,
198,
6738,
24580,
13,
69,
315,
942,
1330,
10854,
27201,
23002,
38409,
198,
11748,
18540,
305,
919,
278,
355,
29034,
198,
11748,
18931,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
4299,
47764,
62,
889... | 2.272727 | 847 |
#[Super SloMo]
##High Quality Estimation of Multiple Intermediate Frames for Video Interpolation
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from model import superslomo_half as superslomo
from model.extraction import center, ends, MPRNet
from utils import meanshift, load_image
from copy import deepcopy
import os, glob
import numpy as np
# For parsing commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, default='demo_input', help='path to dataset folder containing train-test-validation folders')
parser.add_argument("--output", type=str, default='demo_output', help='path to folder for saving checkpoints')
parser.add_argument("--checkpoint", type=str, help='path of checkpoint for pretrained model')
parser.add_argument("--add_blur", action='store_true', help='Add blurry image')
parser.add_argument("--seq_len", type=int, default=7, help='number of frames that composes a sequence.')
args = parser.parse_args()
### For visualizing loss and interpolated frames
###Initialize flow computation and arbitrary-time flow interpolation CNNs.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
flowComp = superslomo.UNet(6, 4)
flowComp.to(device)
if args.add_blur:
ArbTimeFlowIntrp = superslomo.UNet(23, 5)
else:
ArbTimeFlowIntrp = superslomo.UNet(20, 5)
ArbTimeFlowIntrp.to(device)
### Load Pretrained Extraction Models
# center_estimation = center.Center()
center_estimation = MPRNet.MPRNet()
border_estimation = ends.Ends()
center_estimation = center_estimation.to(device)
border_estimation = border_estimation.to(device)
### Load Datasets
# Channel wise mean calculated on adobe240-fps training dataset
mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
seq_len = args.seq_len
ctr_idx = seq_len // 2
### Initialization
dict1 = torch.load(args.checkpoint)
ArbTimeFlowIntrp.load_state_dict(dict1['state_dictAT'])
flowComp.load_state_dict(dict1['state_dictFC'])
center_estimation.load_state_dict(dict1['state_dictCT'])
border_estimation.load_state_dict(dict1['state_dictBD'])
print('Load model from: ', args.checkpoint)
inputfolder = args.input
img_list = sorted(glob.glob(os.path.join(inputfolder, '*.png')))
args.output = os.path.join(args.output, 'Len{:02d}'.format(args.seq_len))
if not os.path.exists(args.output):
os.makedirs(args.output)
for inputFile in img_list:
print(inputFile)
inputs = load_image(inputFile)
fname = os.path.splitext(os.path.basename(inputFile))[0]
width, height= inputs.size
inputs = inputs.crop((0,0, width-width%64, height-height%64))
normalize = transforms.Normalize(mean=mean, std=std)
input_transform = transforms.Compose([transforms.ToTensor(), normalize])
inputs = input_transform(inputs)
inputs = inputs.unsqueeze(0)
test(inputs, fname)
| [
198,
2,
58,
12442,
3454,
78,
16632,
60,
198,
2235,
11922,
14156,
10062,
18991,
286,
20401,
42540,
36291,
329,
7623,
4225,
16104,
341,
198,
198,
11748,
1822,
29572,
198,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178... | 2.961934 | 972 |
from math import inf
from .player import AbstractPlayer, player_type
from .board import Outcome
@player_type
| [
6738,
10688,
1330,
1167,
198,
198,
6738,
764,
7829,
1330,
27741,
14140,
11,
2137,
62,
4906,
198,
6738,
764,
3526,
1330,
3806,
2958,
628,
198,
198,
31,
7829,
62,
4906,
198
] | 3.645161 | 31 |
import numpy as np
import string as s
with open("input.txt") as f:
lines = f.readlines()
chars = [list(x.strip()) for x in lines]
oxy_arr = np.array(chars, dtype=int)
co2_arr = np.copy(oxy_arr)
num_cols = len(chars[0])
for i in range(num_cols):
keep_val = int(np.median(oxy_arr[:,i]) + 0.5) # +0.5 to round. round() is to even
keep_rows = oxy_arr[:,i] == keep_val
oxy_arr = oxy_arr[keep_rows,:]
if (oxy_arr.shape[0] < 2):
break
for i in range(num_cols):
keep_val = (int(np.median(co2_arr[:,i])+0.5)+1)%2 # +1 to invert, +.5 to round
keep_rows = co2_arr[:,i] == keep_val
co2_arr = co2_arr[keep_rows,:]
if (co2_arr.shape[0] < 2):
break
oxy = int(np.array2string(oxy_arr)[2:-2].replace(" ",""), base=2)
co2 = int(np.array2string(co2_arr)[2:-2].replace(" ",""), base=2)
print(oxy*co2)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
4731,
355,
264,
201,
198,
201,
198,
4480,
1280,
7203,
15414,
13,
14116,
4943,
355,
277,
25,
201,
198,
220,
3951,
796,
277,
13,
961,
6615,
3419,
201,
198,
201,
198,
354,
945,
796,
685,
... | 2.081281 | 406 |
from cleo import Command
from pyfiglet import Figlet
| [
6738,
1190,
78,
1330,
9455,
198,
6738,
12972,
5647,
1616,
1330,
12138,
1616,
198
] | 3.785714 | 14 |
from ase.data import covalent_radii as ase_covalent_radii
from ase.data import chemical_symbols
default_element_radii = {}
default_hard_radii = {}
for z in range(len(ase_covalent_radii)):
default_element_radii[chemical_symbols[z]] = ase_covalent_radii[z]
default_hard_radii[chemical_symbols[z]] = 0.9*ase_covalent_radii[z]
if __name__ == "__main__":
elements = [ 'O', 'Si', 'Na', 'Ca']
n_structures = 5
for structure_number in range(n_structures):
print('\n',structure_number)
atoms = reasonable_random_structure_maker(elements,
fill_factor_max = 0.40, # 0.65 is about the max
fill_factor_min = 0.2, #0.2 is default
composition_generator = generate_random_silicate)
#print(atoms)
if False:
from ase import io
from os.path import isfile
try_mkdir(structure_direct)
io.write(structure_direct+'POSCAR', atoms, format = 'vasp')
if True:
from ase import io
io.write('%i.POSCAR'%structure_number, atoms, format = 'vasp')
| [
628,
198,
6738,
257,
325,
13,
7890,
1330,
269,
8325,
298,
62,
6335,
4178,
355,
257,
325,
62,
66,
8325,
298,
62,
6335,
4178,
198,
6738,
257,
325,
13,
7890,
1330,
5931,
62,
1837,
2022,
10220,
628,
198,
12286,
62,
30854,
62,
6335,
41... | 2.084381 | 557 |
import pytest
import numpy as np
from discolight.annotations import (annotations_to_numpy_array)
from discolight.augmentations.motionblur import MotionBlur, Direction
@pytest.mark.usefixtures("sample_image")
@pytest.mark.parametrize("direction", [
Direction.UP, Direction.DOWN, Direction.RIGHT, Direction.LEFT,
Direction.TOPRIGHT, Direction.TOPLEFT, Direction.BOTTOMRIGHT,
Direction.BOTTOMLEFT
])
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1221,
349,
432,
13,
34574,
602,
1330,
357,
34574,
602,
62,
1462,
62,
77,
32152,
62,
18747,
8,
198,
6738,
1221,
349,
432,
13,
559,
5154,
602,
13,
38714,
2436,
33... | 2.942857 | 140 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Proof-of-concept for B |eacute| zier clipping.
.. _algorithm: https://dx.doi.org/10.1016/0010-4485(90)90039-F
.. _quadratic convergence: https://doi.org/10.1016/j.cagd.2007.12.006
The B |eacute| zier clipping `algorithm`_ is used to intersect
two planar B |eacute| zier curves. It proceeds by using "fat lines"
to recursively prune the region of accepted parameter ranges until
the ranges converge to points. (A "fat line" is a rectangular region of a
bounded distance from the line connecting the start and end points of a
B |eacute| zier curve.)
It has `quadratic convergence`_. It can be used to find tangent intersections,
which is the primary usage within ``bezier``.
.. |eacute| unicode:: U+000E9 .. LATIN SMALL LETTER E WITH ACUTE
:trim:
"""
import numpy as np
from bezier import _helpers
from bezier.hazmat import geometric_intersection
NO_PARALLEL = "Parallel lines not supported during clipping."
DEFAULT_S_MIN = 1.0
DEFAULT_S_MAX = 0.0
def compute_implicit_line(nodes):
"""Compute the implicit form of the line connecting curve endpoints.
.. note::
This assumes, but does not check, that the first and last node
in ``nodes`` are different.
Computes :math:`a, b` and :math:`c` in the normalized implicit equation
for the line
.. math::
ax + by + c = 0
where :math:`a^2 + b^2 = 1` (only unique up to sign).
.. image:: ../../images/compute_implicit_line.png
:align: center
.. testsetup:: compute-implicit-line
import numpy as np
import bezier
from bezier.hazmat.clipping import compute_implicit_line
.. doctest:: compute-implicit-line
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0, 3.0, 4.0],
... [0.0, 2.5, 0.5, 3.0],
... ])
>>> compute_implicit_line(nodes)
(-0.6, 0.8, 0.0)
.. testcleanup:: compute-implicit-line
import make_images
make_images.compute_implicit_line(nodes)
Args:
nodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.
The line will be (directed) from the first to last
node in ``nodes``.
Returns:
Tuple[float, float, float]: The triple of
* The :math:`x` coefficient :math:`a`
* The :math:`y` coefficient :math:`b`
* The constant :math:`c`
"""
delta = nodes[:, -1] - nodes[:, 0]
length = np.linalg.norm(delta, ord=2)
# Normalize and rotate 90 degrees to the "left".
coeff_a = -delta[1] / length
coeff_b = delta[0] / length
# c = - ax - by = (delta[1] x - delta[0] y) / L
# NOTE: We divide by ``length`` at the end to "put off" rounding.
coeff_c = (delta[1] * nodes[0, 0] - delta[0] * nodes[1, 0]) / length
return coeff_a, coeff_b, coeff_c
def compute_fat_line(nodes):
"""Compute the "fat line" around a B |eacute| zier curve.
Both computes the implicit (normalized) form
.. math::
ax + by + c = 0
for the line connecting the first and last node in ``nodes``.
Also computes the maximum and minimum distances to that line
from each control point where distance :math:`d` is computed as
:math:`d_i = a x_i + b y_i + c`. (This is made possible by the fact that
:math:`a^2 + b^2 = 1`.)
.. image:: ../../images/compute_fat_line.png
:align: center
.. testsetup:: compute-fat-line
import numpy as np
import bezier
from bezier.hazmat.clipping import compute_fat_line
.. doctest:: compute-fat-line
:options: +NORMALIZE_WHITESPACE
>>> nodes = np.asfortranarray([
... [0.0, 1.0, 3.0, 4.0],
... [2.0, 4.5, 2.5, 5.0],
... ])
>>> info = compute_fat_line(nodes)
>>> info
(-0.6, 0.8, -1.6, -1.4, 1.4)
.. testcleanup:: compute-fat-line
import make_images
make_images.compute_fat_line(nodes, info)
Args:
nodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.
Returns:
Tuple[float, float, float, float, float]: The 5-tuple of
* The :math:`x` coefficient :math:`a`
* The :math:`y` coefficient :math:`b`
* The constant :math:`c`
* The "minimum" distance to the fat line among the control points.
* The "maximum" distance to the fat line among the control points.
"""
coeff_a, coeff_b, coeff_c = compute_implicit_line(nodes)
# NOTE: This assumes, but does not check, that there are two rows.
_, num_nodes = nodes.shape
d_min = 0.0
d_max = 0.0
for index in range(1, num_nodes - 1): # Only interior nodes.
curr_dist = (
coeff_a * nodes[0, index] + coeff_b * nodes[1, index] + coeff_c
)
if curr_dist < d_min:
d_min = curr_dist
elif curr_dist > d_max:
d_max = curr_dist
return coeff_a, coeff_b, coeff_c, d_min, d_max
def _update_parameters(s_min, s_max, start0, end0, start1, end1):
"""Update clipped parameter range.
.. note::
This is a helper for :func:`clip_range`.
Does so by intersecting one of the two fat lines with an edge
of the convex hull of the distance polynomial of the curve being
clipped.
If both of ``s_min`` and ``s_max`` are "unset", then any :math:`s`
value that is valid for ``s_min`` would also be valid for ``s_max``.
Rather than adding a special case to handle this scenario, **only**
``s_min`` will be updated.
In cases where a given parameter :math:`s` would be a valid update
for both ``s_min`` and ``s_max``
This function **only** updates ``s_min``
Args:
s_min (float): Current start of clipped interval. If "unset", this
value will be ``DEFAULT_S_MIN``.
s_max (float): Current end of clipped interval. If "unset", this
value will be ``DEFAULT_S_MAX``.
start0 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector of one of the two fat lines.
end0 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector of one of the two fat lines.
start1 (numpy.ndarray): A 1D NumPy ``2``-array that is the start
vector of an edge of the convex hull of the distance
polynomial :math:`d(t)` as an explicit B |eacute| zier curve.
end1 (numpy.ndarray): A 1D NumPy ``2``-array that is the end
vector of an edge of the convex hull of the distance
polynomial :math:`d(t)` as an explicit B |eacute| zier curve.
Returns:
Tuple[float, float]: The (possibly updated) start and end
of the clipped parameter range.
Raises:
NotImplementedError: If the two line segments are parallel. (This
case will be supported at some point, just not now.)
"""
s, t, success = geometric_intersection.segment_intersection(
start0, end0, start1, end1
)
if not success:
raise NotImplementedError(NO_PARALLEL)
if _helpers.in_interval(t, 0.0, 1.0):
if _helpers.in_interval(s, 0.0, s_min):
return s, s_max
elif _helpers.in_interval(s, s_max, 1.0):
return s_min, s
return s_min, s_max
def _check_parameter_range(s_min, s_max):
r"""Performs a final check on a clipped parameter range.
.. note::
This is a helper for :func:`clip_range`.
If both values are unchanged from the "unset" default, this returns
the whole interval :math:`\left[0.0, 1.0\right]`.
If only one of the values is set to some parameter :math:`s`, this
returns the "degenerate" interval :math:`\left[s, s\right]`. (We rely
on the fact that ``s_min`` must be the only set value, based on how
:func:`_update_parameters` works.)
Otherwise, this simply returns ``[s_min, s_max]``.
Args:
s_min (float): Current start of clipped interval. If "unset", this
value will be ``DEFAULT_S_MIN``.
s_max (float): Current end of clipped interval. If "unset", this
value will be ``DEFAULT_S_MAX``.
Returns:
Tuple[float, float]: The (possibly updated) start and end
of the clipped parameter range.
"""
if s_min == DEFAULT_S_MIN:
# Based on the way ``_update_parameters`` works, we know
# both parameters must be unset if ``s_min``.
return 0.0, 1.0
if s_max == DEFAULT_S_MAX:
return s_min, s_min
return s_min, s_max
def _clip_range_polynomial(nodes, coeff_a, coeff_b, coeff_c):
"""Compute control points for a polynomial used to clip range.
Args:
nodes (numpy.ndarray): ``2 x N`` array of nodes in a curve.
The line will be (directed) from the first to last
node in ``nodes``.
coeff_a (float): The :math:`a` coefficient in a line
:math:`ax + by + c = 0`.
coeff_b (float): The :math:`b` coefficient in a line
:math:`ax + by + c = 0`.
coeff_c (float): The :math:`c` coefficient in a line
:math:`ax + by + c = 0`.
Returns:
numpy.ndarray: ``2 x N`` array of polynomial curve with distances
:math:`d_i = a x_i + b y_i + c` as the control points (and the
``x``-coordinates evenly spaced).
"""
_, num_nodes = nodes.shape
polynomial = np.empty((2, num_nodes), order="F")
denominator = float(num_nodes - 1)
for index in range(num_nodes):
polynomial[0, index] = index / denominator
polynomial[1, index] = (
coeff_a * nodes[0, index] + coeff_b * nodes[1, index] + coeff_c
)
return polynomial
def clip_range(nodes1, nodes2):
r"""Reduce the parameter range where two curves can intersect.
.. note::
This assumes, but does not check that the curves being considered
will only have one intersection in the parameter ranges
:math:`s \in \left[0, 1\right]`, :math:`t \in \left[0, 1\right]`.
This assumption is based on the fact that B |eacute| zier clipping
is meant to be used to find tangent intersections for already
subdivided (i.e. sufficiently zoomed in) curve segments.
Two B |eacute| zier curves :math:`B_1(s)` and :math:`B_2(t)` are defined by
``nodes1`` and ``nodes2``. The "fat line" (see :func:`compute_fat_line`)
for :math:`B_1(s)` is used to narrow the range of possible :math:`t`-values
in an intersection by considering the distance polynomial for
:math:`B_2(t)`:
.. math::
d(t) = \sum_{j = 0}^m \binom{n}{j} t^j (1 - t)^{n - j} \cdot d_j
Here :math:`d_j = a x_j + b y_j + c` are the distances of each control
point :math:`(x_j, y_j)` of :math:`B_2(t)` to the implicit line for
:math:`B_1(s)`.
Consider the following pair of B |eacute| zier curves and the distances
from **all** of the control points to the implicit line for :math:`B_1(s)`:
.. testsetup:: clip-range-start, clip-range
import numpy as np
import bezier
from bezier.hazmat.clipping import clip_range
nodes1 = np.asfortranarray([
[2.0, 4.5, 2.5, 5.0],
[0.0, 1.0, 3.0, 4.0],
])
nodes2 = np.asfortranarray([
[-0.25 , 3.75 , 7.0 ],
[ 3.125, 0.875, 3.125],
])
.. doctest:: clip-range-start
:options: +NORMALIZE_WHITESPACE
>>> nodes1
array([[2. , 4.5, 2.5, 5. ],
[0. , 1. , 3. , 4. ]])
>>> nodes2
array([[-0.25 , 3.75 , 7. ],
[ 3.125, 0.875, 3.125]])
.. image:: ../../images/clip_range.png
:align: center
The distances from the control points of :math:`B_2(t)` define the
distance polynomial :math:`d(t)`. By writing this polynomial as a
B |eacute| zier curve, a convex hull can be formed. The intersection of
this convex hull with the "fat line" of :math:`B_1(s)` determines the
extreme :math:`t` values possible and allows clipping the range of
:math:`B_2(t)`:
.. image:: ../../images/clip_range_distances.png
:align: center
.. doctest:: clip-range
:options: +NORMALIZE_WHITESPACE
>>> s_min, s_max = clip_range(nodes1, nodes2)
>>> s_min
0.25
>>> np.allclose(s_max, 0.875, rtol=0.5 ** 52, atol=0.0)
True
.. testcleanup:: clip-range
import make_images
make_images.clip_range(nodes1, nodes2)
make_images.clip_range_distances(nodes1, nodes2)
Args:
nodes1 (numpy.ndarray): ``2 x N1`` array of nodes in a curve which
will define the clipping region.
nodes2 (numpy.ndarray): ``2 x N2`` array of nodes in a curve which
will be clipped.
Returns:
Tuple[float, float]: The pair of
* The start parameter of the clipped range.
* The end parameter of the clipped range.
"""
# NOTE: There is no corresponding "enable", but the disable only applies
# in this lexical scope.
# pylint: disable=too-many-locals
coeff_a, coeff_b, coeff_c, d_min, d_max = compute_fat_line(nodes1)
polynomial = _clip_range_polynomial(nodes2, coeff_a, coeff_b, coeff_c)
# Define segments for the top and the bottom of the region
# bounded by the fat line.
start_bottom = np.asfortranarray([0.0, d_min])
end_bottom = np.asfortranarray([1.0, d_min])
start_top = np.asfortranarray([0.0, d_max])
end_top = np.asfortranarray([1.0, d_max])
s_min = DEFAULT_S_MIN
s_max = DEFAULT_S_MAX
# NOTE: We avoid computing the convex hull and just compute where
# all segments connecting two control points intersect the
# fat lines.
_, num_nodes2 = nodes2.shape
for start_index in range(num_nodes2 - 1):
for end_index in range(start_index + 1, num_nodes2):
s_min, s_max = _update_parameters(
s_min,
s_max,
start_bottom,
end_bottom,
polynomial[:, start_index],
polynomial[:, end_index],
)
s_min, s_max = _update_parameters(
s_min,
s_max,
start_top,
end_top,
polynomial[:, start_index],
polynomial[:, end_index],
)
return _check_parameter_range(s_min, s_max)
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
7330,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.328167 | 6,387 |
"""
Establish a websocket connection and replace local terminal with a pty
that sends all output to the server.
"""
import asyncio
import base64
import datetime
import json
import os
import pty
import signal
import ssl
import sys
import webbrowser
from math import floor
from typing import Callable, List, Optional
from urllib.parse import urlencode, urljoin
import websockets # type: ignore
from . import encryption, utils
from .constants import TermPairError, subprotocol_version
from .Terminal import TerminalId
max_read_bytes = 1024 * 2
ws_queue: asyncio.Queue = asyncio.Queue()
JS_MAX_SAFE_INTEGER = 2**53 - 1
async def broadcast_terminal(
    cmd: List[str], url: str, allow_browser_control: bool, open_browser: bool
):
    """Fork this process and connect it to websocket to broadcast it.

    The child process runs ``cmd`` attached to a pty; the parent relays the
    pty's I/O to the TermPair server over a websocket until the command exits.

    Args:
        cmd: argv of the program to run in the shared terminal (cmd[0] is the
            executable, resolved via PATH by execvpe).
        url: base http(s) URL of the TermPair server.
        allow_browser_control: whether connected browsers may send input.
        open_browser: whether to open a local browser tab for the session.

    Raises:
        TermPairError: if the server refuses the websocket connection.
    """
    # create child process attached to a pty we can read from and write to
    (child_pid, pty_fd) = pty.fork()
    if child_pid == 0:
        # This is the forked process. Replace it with the shell command
        # the user wants to run.
        env = os.environ.copy()
        # Advertise broadcast state to the spawned program via env vars.
        env["TERMPAIR_BROADCASTING"] = "1"
        env["TERMPAIR_BROWSERS_CAN_CONTROL"] = "1" if allow_browser_control else "0"
        os.execvpe(cmd[0], cmd, env)
        # Unreachable when execvpe succeeds: the process image is replaced.
        return
    # Parent process: capture fds for the relay loop.
    stdin_fd = sys.stdin.fileno()
    stdout_fd = sys.stdout.fileno()
    # NOTE(review): ssl.PROTOCOL_TLS is deprecated in newer Pythons in favor
    # of PROTOCOL_TLS_CLIENT — consider migrating.
    ssl_context: Optional[ssl.SSLContext] = (
        ssl.SSLContext(ssl.PROTOCOL_TLS) if url.startswith("https") else None
    )
    # http -> ws, https -> wss (same prefix replacement covers both).
    ws_url = url.replace("http", "ws")
    ws_endpoint = urljoin(ws_url, "connect_to_terminal")
    try:
        async with websockets.connect(ws_endpoint, ssl=ssl_context) as ws:
            # SharingSession is defined elsewhere in this module; it owns the
            # relay between the pty and the websocket.
            sharing_session = SharingSession(
                url,
                cmd,
                pty_fd,
                stdin_fd,
                stdout_fd,
                ws,
                open_browser,
                allow_browser_control,
            )
            await sharing_session.run()
            print(
                f"You are no longer broadcasting terminal id {sharing_session.terminal_id}"
            )
    except ConnectionRefusedError as e:
        raise TermPairError(
            "Connection was refused. Is the TermPair server running on the host and port specified? "
            + str(e),
        )
| [
37811,
198,
22362,
17148,
257,
2639,
5459,
4637,
290,
6330,
1957,
12094,
351,
257,
279,
774,
198,
5562,
12800,
477,
5072,
284,
262,
4382,
13,
198,
37811,
198,
198,
11748,
30351,
952,
198,
11748,
2779,
2414,
198,
11748,
4818,
8079,
198,
... | 2.356405 | 968 |
#!/usr/bin/python
class Error(Exception):
    """Root of this application's exception hierarchy."""
class BlockError(Error):
    """Raised when processing a block fails; no rollback is required.

    Attributes:
        reason -- what caused the block to fail (e.g. an invalid instruction)
        id -- id of the block that failed
        previous -- id of the previous block this failed block points to
    """
class RedisError(Error):
    """Raised when a redis 'get' cannot find the requested value.

    Attributes:
        reason -- what redis could not find
    """
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
4871,
13047,
7,
16922,
2599,
198,
220,
220,
220,
37227,
14881,
1398,
329,
13269,
37227,
198,
220,
220,
220,
1208,
628,
198,
4871,
9726,
12331,
7,
12331,
2599,
198,
220,
220,
220,
37227,
... | 3.17033 | 182 |
import logging
from flask import jsonify
from kgeditor import db, models
from kgeditor.utils.common import login_required
from kgeditor.utils.response_code import RET
from kgeditor.models import Graph
from . import api
@api.route("/demo")
@api.route('/public_graph', methods=['GET'])
| [
11748,
18931,
198,
6738,
42903,
1330,
33918,
1958,
198,
6738,
479,
2004,
2072,
1330,
20613,
11,
4981,
198,
6738,
479,
2004,
2072,
13,
26791,
13,
11321,
1330,
17594,
62,
35827,
198,
6738,
479,
2004,
2072,
13,
26791,
13,
26209,
62,
8189,
... | 3.287356 | 87 |
# Minimal Dash app that renders a single <div class="circle"> element;
# the circle itself is expected to be styled by an external CSS file.
import dash
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.Div([html.Div(className="circle")]) # set the className attribute (picked up by CSS)
if __name__ == "__main__":
    app.run_server(debug=True)
| [
11748,
14470,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
198,
1324,
796,
14470,
13,
43041,
7,
834,
3672,
834,
8,
198,
198,
1324,
13,
39786,
796,
27711,
13,
24095,
26933,
6494,
13,
24095,
7,
4871,
5376,
2625,
45597,
... | 2.406977 | 86 |
# encoding: utf-8
"""Settings class and default values for an API client.
"""
from os import environ
from .version import __version__
# Default API endpoint
API_ENDPOINT = "https://api.github.com/"
# Default WEB endpoint
WEB_ENDPOINT = "https://github.com"
# Default User Agent header string
USER_AGENT = "octokit.py/%s" % __version__
# Default media type
MEDIA_TYPE = "application/vnd.github.beta+json"
# Default page size
PAGE_SIZE = 50
# Do not auto paginate by default
AUTO_PAGINATE = False
# Can we trust env or not
TRUST_ENV = True
# Verify SSL certificate
VERIFY_SSL = True
class Settings(object):
    """Holds Octokit client configuration, notably API credentials."""

    @property
    def is_credentials_passed(self):
        """Return True when at least one complete credential set is present.

        Accepted sets: login+password, an access token, or an OAuth
        client id + secret pair.
        """
        has_basic_auth = bool(self.login and self.password)
        has_oauth_app = bool(self.client_id and self.client_secret)
        return bool(has_basic_auth or self.access_token or has_oauth_app)

    def set_from_env(self):
        """Populate credentials from OCTOKIT_* environment variables.

        Missing variables leave the corresponding attribute set to None.
        """
        lookup = environ.get
        self.login = lookup('OCTOKIT_LOGIN')
        self.password = lookup('OCTOKIT_PASSWORD')
        self.access_token = lookup('OCTOKIT_ACCESS_TOKEN')
        self.client_id = lookup('OCTOKIT_CLIENT_ID')
        self.client_secret = lookup('OCTOKIT_SECRET')
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
26232,
1398,
290,
4277,
3815,
329,
281,
7824,
5456,
13,
198,
37811,
198,
198,
6738,
28686,
1330,
551,
2268,
198,
6738,
764,
9641,
1330,
11593,
9641,
834,
628,
198,
2,
15161,
7824,
361... | 2.520599 | 534 |
from __future__ import print_function
import numpy as np
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
299,
32152,
355,
45941,
628,
198,
220,
220,
220,
220,
198
] | 3.047619 | 21 |
import numpy as np
import json
from pathlib import Path
from tensorflow import keras
| [
11748,
299,
32152,
355,
45941,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
11192,
273,
11125,
1330,
41927,
292,
628,
628,
628,
628
] | 3.538462 | 26 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'apiwindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
15042,
17497,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
13,
... | 2.879518 | 83 |
# Copyright (C) 2012 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from assert_parser_result import assert_parser_result
from assert_parser_result import assert_parser_result_from_code
| [
2,
15069,
357,
34,
8,
2321,
12382,
347,
6081,
321,
290,
383,
24346,
34152,
198,
2,
28728,
739,
262,
17168,
13789,
13,
220,
4091,
262,
2393,
27975,
45761,
13,
14116,
329,
3307,
13,
198,
198,
6738,
6818,
62,
48610,
62,
20274,
1330,
68... | 3.820896 | 67 |
# Generated by Django 3.1.6 on 2021-02-13 15:35
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
21,
319,
33448,
12,
2999,
12,
1485,
1315,
25,
2327,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
__author__ = 'zachary'
"""
File Name: case.py
Created Time: 2020-04-04 19:32:10
Last Modified:
"""
import requests
import os
import re
from parsel import Selector
if __name__ == "__main__":
    # Decode obfuscated flight prices from a saved HTML page. The site
    # renders a base sequence of digits in the first <b> element and then
    # overlays replacement digits with absolutely-positioned <b> elements.
    # url = ''
    # resp = requests.get(url)
    with open('./html/flight.html') as html_file:  # close the handle (was leaked)
        resp = html_file.read()
    sel = Selector(resp)
    em = sel.css('em.rel').extract()
    for element in em:
        element = Selector(element)
        element_b = element.css('b').extract()
        # First <b> holds the visible base digits; its pixel width bounds
        # how many digits are actually shown (16px per digit).
        bl = Selector(element_b.pop(0))
        bl_style = bl.css('b::attr("style")').get()
        bl_width = ''.join(re.findall('width:(.*?)px', bl_style))
        number = int(int(bl_width) / 16)
        # BUG FIX: the original extracted base_price twice; keep the single
        # sliced extraction.
        base_price = bl.css('i::text').extract()[:number]
        alternate_price = []
        for eb in element_b:
            eb = Selector(eb)
            style = eb.css('b::attr("style")').get()
            position = ''.join(re.findall('left:(.*?)px', style))
            value = eb.css('b::text').get()
            alternate_price.append({'position': position, 'value': value})
        for al in alternate_price:
            position = int(al.get('position'))
            value = al.get('value')
            # Overlay digit replaces the base digit at this pixel index.
            index = int(position / 16)
            base_price[index] = value
        print(base_price)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
48504,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
705,
89,
620,
560,
6,
198,
37811,
198,
8979,
6530,
25,
1339,
13,
9078,
198,
41972,
3862,
25,
... | 2.171733 | 658 |
import unittest
import mock
from Pubsub import Publisher, Broker, Channel, Subscriber
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
15290,
198,
6738,
8525,
7266,
1330,
28045,
11,
2806,
6122,
11,
11102,
11,
3834,
1416,
24735,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,... | 2.93617 | 47 |
# -*- coding: utf-8 -*-
from anmodel import models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
281,
19849,
1330,
4981,
628,
220,
220,
220,
220
] | 2.28 | 25 |
#! /usr/bin/env python
'''
Adam Saland
CSCI 330
Command line interface for cpu scheduling simulations
'''
import sys
from optparse import OptionParser
import random
# Command-line options for the CPU-scheduling simulator.
parser = OptionParser()
parser.add_option("-s", "--seed", default=0, help="random", action="store", type="int", dest="seed")
parser.add_option("-p", "--procs", default=3, help="number of processes to generate", action="store", type="int", dest="procs")
parser.add_option("-l", "--plist", default="", help="input a comma-separated list of processes", action="store", type="string", dest="plist")
parser.add_option("-m", "--maxlen", default=10, help="max process time", action="store", type="int", dest="maxlen")
parser.add_option("-a", "--algorithm", default="RR", help="Choose Algorithm from: SJF, FIFO, RR", action="store", type="string", dest="algorithm")
parser.add_option("-q", "--quantum", default=1, help="Round Robin time quantum", action="store", type="int", dest="quantum")
parser.add_option("-c", "--compute", default=False, help="compute", action="store_true", dest="solve")
(options, args) = parser.parse_args()
random.seed(options.seed)
# echo the options chosen so runs are reproducible from the transcript
print('ARG algorithm', options.algorithm)
if options.plist == '':
    print('ARG procs', options.procs)
    print('ARG maxlen', options.maxlen)
    print('ARG seed', options.seed)
else:
    print('ARG plist', options.plist)
print('')
# NOTE(review): mid-file import; conventionally this belongs at the top of the file.
import operator
# process list: each entry is [pid, burst_time]
proclist = []
# load processes: either generate them randomly or take the explicit -l list
if options.plist == '':
    for pid in range(0,options.procs):
        # burst times are uniform in [1, maxlen]
        proc_runtime = int(options.maxlen * random.random()) + 1
        proclist.append([pid, proc_runtime])
        print('pid: {}, burst_time: {}'.format(pid, proc_runtime))
else:
    pid = 0
    for proc_runtime in options.plist.split(','):
        proclist.append([pid, float(proc_runtime)])
        pid += 1
    for proc in proclist:
        print('pid : {}, burst_time: {}'.format(proc[0], proc[1]))
print('\n')
# Solve with the chosen algorithm (only when -c/--compute was passed).
if options.solve:
    # Shortest Job First is FIFO over a burst-time-sorted process list.
    if options.algorithm == 'SJF':
        proclist = sorted(proclist, key=operator.itemgetter(1))
        options.algorithm = 'FIFO'
    # First In First Out: run each process to completion in list order.
    if options.algorithm == 'FIFO':
        curr_time = 0
        print('Execution trace:')
        for proc in proclist:
            print('[ time: {} ] run process {} for {} secs, burst_completion: {}'.format(curr_time, proc[0], proc[1], curr_time + proc[1]))
            curr_time += proc[1]
        # Per-process statistics: for FIFO, response == wait == start time.
        print('\nSimulation Statistics:')
        curr_time = 0.0
        count = 0
        turnaround_time_sum = 0.0
        wait_time_sum = 0.0
        response_time_sum = 0.0
        for tmp in proclist:
            pid = tmp[0]
            proc_runtime = tmp[1]
            response_time = curr_time
            turnaround_time = curr_time + proc_runtime
            wait_time = curr_time
            # fixed "resonse" typo in the report strings below
            print('pid: {} | response: {} | turnaround: {} | wait: {}'.format(pid, response_time, turnaround_time, wait_time))
            response_time_sum += response_time
            turnaround_time_sum += turnaround_time
            wait_time_sum += wait_time
            curr_time += proc_runtime
            count += 1
        print('Average: response: {} | turnaround: {} | wait: {}'.format(response_time_sum/count, turnaround_time_sum/count, wait_time_sum/count))
    ############# Round Robin ###############
    if options.algorithm == 'RR':
        print('Execution trace:')
        # Per-pid bookkeeping; response_time of -1 means "not started yet".
        turnaround_time = {}
        response_time = {}
        lastran_time = {}
        wait_time = {}
        quantum = float(options.quantum)
        proccount = len(proclist)
        for i in range(0,proccount):
            lastran_time[i] = 0.0
            wait_time[i] = 0.0
            turnaround_time[i] = 0.0
            response_time[i] = -1
        # Work on a copy so proclist keeps the original burst times.
        runlist = list(proclist)
        curr_time = 0.0
        # Rotate through the run queue, charging one quantum per turn,
        # until every process has finished its remaining burst.
        while proccount > 0:
            print('%d procs remaining' % proccount)
            proc = runlist.pop(0)
            pid = proc[0]
            proc_runtime = float(proc[1])
            if response_time[pid] == -1:
                response_time[pid] = curr_time
            # Time spent waiting since this pid last ran.
            curr_wait = curr_time - lastran_time[pid]
            wait_time[pid] += curr_wait
            if proc_runtime > quantum:
                # Not finished: charge a full quantum and requeue the rest.
                proc_runtime -= quantum
                burst_run_time = quantum
                print('[ time {} ] burst_run_time pid: {} | burst: {} secs'.format(curr_time, pid, burst_run_time))
                runlist.append([pid, proc_runtime])
            else:
                # Final slice: the process completes within this turn.
                burst_run_time = proc_runtime
                print('[ time {} ] burst_run_time pid: {} | burst: {} secs | burst_completion: {}'.format(curr_time, pid, burst_run_time, curr_time + burst_run_time))
                turnaround_time[pid] = curr_time + burst_run_time
                proccount -= 1
            curr_time += burst_run_time
            lastran_time[pid] = curr_time
        ##### Print Round Robin Simulation Statistics #####
        print('\nSimulation Statistics:')
        turnaround_time_sum = 0.0
        wait_time_sum = 0.0
        response_time_sum = 0.0
        for i in range(0,len(proclist)):
            turnaround_time_sum += turnaround_time[i]
            response_time_sum += response_time[i]
            wait_time_sum += wait_time[i]
            print('pid: {} | response: {} | turnaround: {} | wait: {}'.format(i, response_time[i], turnaround_time[i], wait_time[i]))
        count = len(proclist)
        print('Average: response: {}, turnaround: {}, wait: {}\n'.format(response_time_sum/count, turnaround_time_sum/count, wait_time_sum/count))
    if options.algorithm != 'FIFO' and options.algorithm != 'SJF' and options.algorithm != 'RR':
        print('Error: Policy', options.algorithm, 'is not available.')
        sys.exit(0)
else:
    print('Give it a shot and try to compute it by hand, if you cant figure it out, or want to see the use the -c flag to run computations')
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
23159,
4849,
392,
198,
34,
6173,
40,
25508,
198,
21575,
1627,
7071,
329,
42804,
26925,
27785,
198,
7061,
6,
198,
11748,
25064,
198,
6738,
2172,
29572,
1330,
16018,
4667... | 2.308279 | 2,754 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test Nature logging methods
"""
from typing import List, Dict, Set
import sys
import unittest
import logging
import tempfile
import contextlib
import io
import os
from test import QiskitNatureTestCase
from qiskit_nature import logging as nature_logging
class TestHandler(logging.StreamHandler):
    """Logging handler that captures emitted records for later inspection."""

    def __init__(self):
        """Create a stdout-backed handler with an empty capture list."""
        super().__init__(sys.stdout)
        self.records: List[logging.LogRecord] = []

    def emit(self, record) -> None:
        """Store the record instead of formatting/writing it."""
        self.records.append(record)
class TestLogging(QiskitNatureTestCase):
    """Verify qiskit_nature logging reaches handlers, default handlers, and files."""
    # NOTE(review): _set_logging, _validate_records, _test_handler and
    # _logging_dict are not defined in this class — presumably provided by
    # QiskitNatureTestCase or a setUp outside this view; confirm.
    def test_logging_to_handler(self):
        """Records emitted during the workload must reach the custom handler."""
        self._set_logging(False)
        # ignore Qiskit TextProgressBar that prints to stderr
        with contextlib.redirect_stderr(io.StringIO()):
            TestLogging._run_test()
        # check that logging was handled
        self._validate_records(self._test_handler.records)
    def test_logging_to_default_handler(self):
        """Records must also flow through the default qiskit/qiskit_nature loggers."""
        self._set_logging(True)
        # ignore Qiskit TextProgressBar that prints to stderr
        with contextlib.redirect_stderr(io.StringIO()):
            with self.assertLogs("qiskit", level="DEBUG") as qiskit_cm:
                with self.assertLogs("qiskit_nature", level="DEBUG") as nature_cm:
                    TestLogging._run_test()
        # check that logging was handled
        records = qiskit_cm.records.copy()
        records.extend(nature_cm.records)
        self._validate_records(records)
    def test_logging_to_file(self):
        """log_to_file must write every configured logger's records to disk."""
        self._set_logging(False)
        # Create a unique temp path, then remove the file so log_to_file
        # creates it fresh ("w" mode).
        # pylint: disable=consider-using-with
        tmp_file = tempfile.NamedTemporaryFile(delete=False)
        tmp_file.close()
        os.unlink(tmp_file.name)
        file_handler = nature_logging.log_to_file(
            self._logging_dict.keys(), path=tmp_file.name, mode="w"
        )
        try:
            # ignore Qiskit TextProgressBar that prints to stderr
            with contextlib.redirect_stderr(io.StringIO()):
                TestLogging._run_test()
        finally:
            # Always read, close and delete the temp log file, even on failure.
            with open(tmp_file.name, encoding="utf8") as file:
                lines = file.read()
            file_handler.close()
            os.unlink(tmp_file.name)
        for name in self._logging_dict:
            self.assertTrue(f"{name}." in lines, msg=f"name {name} not found in log file.")
    @staticmethod
    def _run_test():
        """Run external test and ignore any failures. Intention is just check logging."""
        # Imported lazily to avoid a heavy import at module load time.
        # pylint: disable=import-outside-toplevel
        from test.algorithms.excited_state_solvers.test_bosonic_esc_calculation import (
            TestBosonicESCCalculation,
        )
        unittest.TextTestRunner().run(TestBosonicESCCalculation("test_numpy_mes"))
if __name__ == "__main__":
unittest.main()
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
33160,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330,
257,
486... | 2.434936 | 1,414 |
from configparser import ConfigParser
| [
6738,
4566,
48610,
1330,
17056,
46677,
628,
198
] | 5 | 8 |
import unittest
import os
import json
from uitester import config
| [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
334,
2737,
353,
1330,
4566,
628
] | 3.722222 | 18 |
from meross_iot.controller.device import BaseDevice
from meross_iot.controller.mixins.toggle import ToggleXMixin
class MSS420F(ToggleXMixin, BaseDevice):
    """
    MSS420F power strip; on/off control is provided by ToggleXMixin.
    """
class MSS425E(ToggleXMixin, BaseDevice):
    """
    MSS425E power strip; on/off control is provided by ToggleXMixin.
    """
class MSS425F(ToggleXMixin, BaseDevice):
    """
    MSS425F power strip; on/off control is provided by ToggleXMixin.
    """
class MSS530(ToggleXMixin, BaseDevice):
    """
    MSS530 multi-gang light control switch; toggling via ToggleXMixin.
    """
| [
6738,
4017,
793,
62,
5151,
13,
36500,
13,
25202,
1330,
7308,
24728,
198,
6738,
4017,
793,
62,
5151,
13,
36500,
13,
19816,
1040,
13,
44256,
1330,
34098,
55,
35608,
259,
628,
198,
4871,
337,
5432,
27211,
37,
7,
51,
20258,
55,
35608,
2... | 2.586592 | 179 |
import os
from math import exp
from parsable import parsable
import pomagma.util
import util
SPECS = {}
SPECS['sk'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
# 'S B': 3.5036,
# 'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
},
}
SPECS['skj'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'W': 4.36313,
# 'W B': 4.3719,
# 'W I': 6.21147,
'U': 6.3754,
},
}
SPECS['skja'] = {
'binary_probs': {
'APP': 0.374992,
'COMP': 0.198589,
},
'symmetric_probs': {
'JOIN': 0.0569286,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
'SEMI': 4.18665,
'W': 4.36313,
'UNIT': 4.3634,
# 'W B': 4.3719,
'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
'BOOOL': 7.0,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
SPECS['skrj'] = {
'binary_probs': {
'APP': 0.34,
'COMP': 0.18,
},
'symmetric_probs': {
'JOIN': 0.05,
'RAND': 0.05,
},
'nullary_weights': {
'B': 1.0,
'C': 1.30428,
'CB': 1.35451,
'CI': 1.74145,
'I': 2.21841,
'Y': 2.2918,
'K': 2.6654,
'S': 2.69459,
'J': 2.81965,
'R': 2.81965,
'V': 2.87327,
'BOT': 3.0,
'TOP': 3.0,
# 'DIV': 3.06752,
# 'S B': 3.5036,
'P': 3.69204,
'F': 3.72682,
# 'S I': 4.12483,
# 'SEMI': 4.18665,
'W': 4.36313,
# 'UNIT': 4.3634,
# 'W B': 4.3719,
# 'A': 5.0,
# 'SECTION': 5.0,
# 'RETRACT': 5.0,
# 'BOOL': 5.21614,
# 'W I': 6.21147,
'U': 6.3754,
# 'PROD': 12.0,
# 'SUM': 12.0,
# 'MAYBE': 12.0,
# 'SSET': 12.0,
},
}
@parsable
def make(theory):
    """Bootstrap a language from Johann.

    Converts the SPECS entry for *theory* (negative-log nullary weights plus
    compound-operator probabilities) into a normalized probability table and
    writes it to '<theory>.json' next to this file.

    Inputs: theory in ['sk', 'skj', 'skja', 'skrj']
    """
    spec = SPECS[theory]
    nullary_weights = spec.get('nullary_weights', {})
    injective_probs = spec.get('injective_probs', {})
    binary_probs = spec.get('binary_probs', {})
    symmetric_probs = spec.get('symmetric_probs', {})
    # Mass already allotted to compound (non-nullary) operators.
    compound_prob = (
        sum(injective_probs.values()) +
        sum(binary_probs.values()) +
        sum(symmetric_probs.values()))
    assert compound_prob < 1
    nullary_prob = 1.0 - compound_prob
    # Weights are negative log-probabilities; exponentiate then rescale so
    # the nullary block sums to the remaining mass.
    # BUG FIX: dict.iteritems() is Python 2 only; .items() works on both.
    nullary_probs = {
        key: exp(-val)
        for key, val in nullary_weights.items()
    }
    scale = nullary_prob / sum(nullary_probs.values())
    for key in nullary_probs.keys():
        nullary_probs[key] *= scale
    probs = {
        'NULLARY': nullary_probs,
        'INJECTIVE': injective_probs,
        'BINARY': binary_probs,
        'SYMMETRIC': symmetric_probs,
    }
    # BUG FIX: the original deleted keys from `probs` while iterating
    # probs.items(), which raises RuntimeError on Python 3. Rebuild instead.
    probs = {arity: group for arity, group in probs.items() if group}
    with pomagma.util.chdir(os.path.dirname(os.path.abspath(__file__))):
        util.json_dump(probs, '{}.json'.format(theory))
    # util.compile('{}.json'.format(theory), '{}.language'.format(theory))
if __name__ == '__main__':
parsable()
| [
11748,
28686,
198,
6738,
10688,
1330,
1033,
198,
198,
6738,
13544,
540,
1330,
13544,
540,
198,
198,
11748,
279,
296,
363,
2611,
13,
22602,
198,
11748,
7736,
198,
198,
48451,
50,
796,
23884,
198,
198,
48451,
50,
17816,
8135,
20520,
796,
... | 1.620089 | 2,698 |
import configargparse
from dataset import dataloader as loader
from fastspeech import FeedForwardTransformer
import sys
import torch
from dataset.texts import valid_symbols
import os
from utils.hparams import HParam, load_hparam_str
import numpy as np
def get_parser():
    """Get parser of training arguments.

    Returns:
        configargparse.ArgumentParser accepting -c/--config (required YAML
        config), -p/--checkpoint_path (checkpoint .pt to evaluate) and
        --outdir (required output directory). Defaults are shown in --help.
    """
    parser = configargparse.ArgumentParser(
        description="Train a new text-to-speech (TTS) model on one CPU, one or multiple GPUs",
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-c", "--config", type=str, required=True, help="yaml file for configuration"
    )
    parser.add_argument(
        "-p",
        "--checkpoint_path",
        type=str,
        default=None,
        help="path of checkpoint pt to evaluate",
    )
    parser.add_argument("--outdir", type=str, required=True, help="Output directory")
    return parser
def main(cmd_args):
    """Load a checkpoint and run evaluation on the validation dataset.

    Args:
        cmd_args: command-line argument list (e.g. sys.argv[1:]).

    Returns:
        None. Returns early (None) when the checkpoint path does not exist.
    """
    parser = get_parser()
    # BUG FIX: dropped the redundant parse_known_args() call whose result
    # was immediately overwritten by parse_args().
    args = parser.parse_args(cmd_args)
    if os.path.exists(args.checkpoint_path):
        checkpoint = torch.load(args.checkpoint_path)
    else:
        print("Checkpoint does not exist")
        return None
    # Hyper-parameters: prefer the explicit config file, else the copy
    # embedded in the checkpoint.
    if args.config is not None:
        hp = HParam(args.config)
    else:
        hp = load_hparam_str(checkpoint["hp_str"])
    validloader = loader.get_tts_dataset(hp.data.data_dir, 1, hp, True)
    print("Checkpoint : ", args.checkpoint_path)
    idim = len(valid_symbols)
    odim = hp.audio.num_mels
    model = FeedForwardTransformer(
        idim, odim, hp
    )
    # BUG FIX: reuse the checkpoint loaded above instead of calling
    # torch.load() a second time on the same file.
    model.load_state_dict(checkpoint["model"])
    # NOTE(review): `evaluate` is not imported in this view — presumably
    # defined elsewhere in this module; confirm.
    evaluate(hp, validloader, model)
if __name__ == "__main__":
main(sys.argv[1:])
| [
11748,
4566,
853,
29572,
198,
6738,
27039,
1330,
4818,
282,
1170,
263,
355,
40213,
198,
6738,
3049,
45862,
1330,
18272,
39746,
8291,
16354,
198,
11748,
25064,
198,
11748,
28034,
198,
6738,
27039,
13,
5239,
82,
1330,
4938,
62,
1837,
2022,
... | 2.512987 | 770 |
""" A test script to repair cassandra. """
import logging
from cassandra_interface import NODE_TOOL
from subprocess import call
def run():
    """ Runs a full repair on the local Cassandra node via nodetool. """
    # (docstring corrected: this repairs Cassandra, it does not start it)
    logging.warning("Repairing Cassandra")
    call([NODE_TOOL, 'repair'])
    logging.warning("Done!")
if __name__ == '__main__':
run()
| [
37811,
317,
1332,
4226,
284,
9185,
30606,
15918,
13,
37227,
198,
11748,
18931,
198,
198,
6738,
30606,
15918,
62,
39994,
1330,
399,
16820,
62,
10468,
3535,
198,
6738,
850,
14681,
1330,
869,
198,
198,
4299,
1057,
33529,
220,
198,
220,
372... | 3.009615 | 104 |
# This file is part of the NESi software.
#
# Copyright (c) 2020
# Original Software Design by Ilya Etingof <https://github.com/etingof>.
#
# Software adapted by inexio <https://github.com/inexio>.
# - Janis Groß <https://github.com/unkn0wn-user>
# - Philip Konrath <https://github.com/Connyko65>
# - Alexander Dincher <https://github.com/Dinker1996>
#
# License: https://github.com/inexio/NESi/LICENSE.rst
import os
import threading
from time import sleep
from nesi import exceptions
from .baseCommandProcessor import BaseCommandProcessor
from.baseMixIn import BaseMixIn
| [
2,
770,
2393,
318,
636,
286,
262,
31925,
72,
3788,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
198,
2,
13745,
10442,
8495,
416,
49804,
64,
412,
889,
1659,
1279,
5450,
1378,
12567,
13,
785,
14,
13629,
1659,
28401,
198,
2,
198,
... | 3.074866 | 187 |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
#
import cityflow
import pandas as pd
import os
import numpy as np
import json
import math
from gym.spaces import Discrete, Box
# metadata = {'render.modes': ['human']}
# def render(self, mode='human', close=False):
# ...
| [
11748,
11550,
201,
198,
6738,
11550,
1330,
4049,
11,
9029,
11,
3384,
4487,
201,
198,
6738,
11550,
13,
26791,
1330,
384,
8228,
201,
198,
2,
201,
198,
11748,
1748,
11125,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
28686... | 2.693548 | 124 |
from couchbase_core.subdocument import Spec
from .options import Seconds, FiniteDuration, forward_args
from couchbase_core.transcodable import Transcodable
from couchbase_core._libcouchbase import Result as SDK2Result
from couchbase_core.result import MultiResult, SubdocResult
from typing import *
from boltons.funcutils import wraps
from couchbase_core import abstractmethod
from couchbase_core.result import AsyncResult
# Fall back to a plain object when asyncio futures are unavailable.
# BUG FIX: catch only ImportError — the original bare `except:` would also
# swallow unrelated errors (including KeyboardInterrupt).
try:
    from asyncio.futures import Future
except ImportError:
    Future = object

# Generic type variable used by proxy wrappers below.
Proxy_T = TypeVar('Proxy_T')

# Pairs a raw SDK2 result with the options mapping that produced it.
ResultPrecursor = NamedTuple('ResultPrecursor', [('orig_result', SDK2Result), ('orig_options', Mapping[str, Any])])
| [
6738,
18507,
8692,
62,
7295,
13,
7266,
22897,
1330,
18291,
198,
6738,
764,
25811,
1330,
40876,
11,
4463,
578,
26054,
11,
2651,
62,
22046,
198,
6738,
18507,
8692,
62,
7295,
13,
7645,
19815,
540,
1330,
3602,
19815,
540,
198,
6738,
18507,
... | 3.407216 | 194 |
#!/usr/bin/env python3
from abc import ABC
import typing
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
450,
66,
1330,
9738,
198,
11748,
19720,
628
] | 3.052632 | 19 |
import os
import subprocess
from datetime import datetime
from time import time
from os.path import isdir, exists, join, abspath
main()
# os.chdir(r'C:\Users\SANA-Sirius\Desktop\Software\AutoHotkey\repo')
# main()
if __name__ == '__main__':
input()
| [
11748,
28686,
201,
198,
11748,
850,
14681,
201,
198,
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
6738,
640,
1330,
640,
201,
198,
6738,
28686,
13,
6978,
1330,
318,
15908,
11,
7160,
11,
4654,
11,
2352,
6978,
201,
198,
201,
198,
201,
... | 2.556604 | 106 |
# Copyright (c) 2019, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Giselle Serate <gserate@paloaltonetworks.com>
'''
Palo Alto Networks test_parser.py
Tests the parse code by verifying selected domains get written to the index
and the version is swapped to be parsed.
Run this file from the pandorica root.
This software is provided without support, warranty, or guarantee.
Use at your own risk.
'''
import logging
from logging.config import dictConfig
import os
from time import sleep
from dotenv import load_dotenv
from elasticsearch import Elasticsearch
from elasticsearch_dsl import connections, Search
import requests
from domain_processor import process_domains
from notes_parser import try_parse
from lib.setuputils import connect_to_elastic
from scraper import DocStatus, VersionDocument
from testcases import ParseTest
def setup_mappings(mappings_path, ip):
    '''
    Install the domain and tag detail mappings into Elasticsearch.

    ELK must be up first. Failures are logged, not raised.

    Args:
        mappings_path: directory containing sfn-*-details.json mapping files.
        ip: host of the Elasticsearch node (port 9200 assumed).
    '''
    headers = {'Content-Type': 'application/json'}
    # Both mappings follow the same install pattern; only the name differs.
    for label in ('domain', 'tag'):
        logging.info("Installing %s details mapping.", label)
        mapping_file = os.path.join(mappings_path, 'sfn-%s-details.json' % label)
        # BUG FIX: the original used open(...).read() and leaked the handle.
        with open(mapping_file) as handle:
            contents = handle.read()
        r = requests.put(f'http://{ip}:9200/sfn-{label}-details/', data=contents, headers=headers)
        # Unless accepted (200) or already mapped (400), report the failure.
        if r.status_code != 200 and r.status_code != 400:
            logging.warning("Unsuccessful write of %s details mapping.", label)
            logging.warning(r.text)
if __name__ == '__main__':
test_all()
| [
2,
15069,
357,
66,
8,
13130,
11,
44878,
34317,
27862,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,
2,
4007,
351,
393,
1231,
6838,
318,
29376,
7520,
11,
2810,
326,
... | 3.255639 | 798 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse("user:create")
TOKEN_URL = reverse("user:token")
ME_URL = reverse("user:me")
# defining create here to access it from all classes bellow
def create_user(**params):
    """Create and return a user via the active user model.

    Args:
        **params: fields forwarded to objects.create_user (email, password,
            name, ...).
    """
    return get_user_model().objects.create_user(**params)
class PublicUserApiTests(TestCase):
    """Tests for the public (unauthenticated) user API endpoints."""

    def setUp(self):
        """Create a fresh API client, shared by every test method."""
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """A valid payload creates the user and never echoes the password."""
        payload = {
            "email": "test@api.com",
            "password": "tesapipass",
            "name": "Test Api User"
        }
        res = self.client.post(CREATE_USER_URL, payload)
        # Look the created user up with the fields echoed in the response.
        user = get_user_model().objects.get(**res.data)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        self.assertTrue(user.check_password(payload["password"]))
        # get response should not send user's password data
        self.assertNotIn("password", res.data)

    def test_user_duplicate(self):
        """Creating a user that already exists fails with 400."""
        payload = {
            "email": "test@api.com",
            "password": "testapipasswore",
            "name": "Api User"
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """A too-short password is rejected and no user is created."""
        payload = {
            "email": "test@api.com",
            "password": "pas",
            "name": "Name"
        }
        # BUG FIX: the payload must be the request body. The original used
        # `**payload`, spreading the fields into keyword args of post() so
        # no data was sent and the 400 passed for the wrong reason.
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # The user doesn't exist in our database as post wasn't successful
        user_exist = get_user_model().objects.filter(
            email=payload["email"]
        ).exists()
        self.assertFalse(user_exist)

    def test_create_token_for_user(self):
        """A token is issued for valid credentials."""
        payload = {
            "email": "test@email.com",
            "password": "testPass123"
        }
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)
        self.assertIn("token", res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """No token is issued when the password is wrong."""
        create_user(email="test@email.com", password="testpassword")
        payload = {
            "email": "test@emal.com",
            "password": "wrongpass"
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn("token", res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """No token is issued when the user does not exist."""
        payload = {
            "email": "test@email.com",
            "password": "password"
        }
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn("token", res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Both email and password are required to obtain a token."""
        res = self.client.post(TOKEN_URL, {
            "email": "test@email.com",
            "password": ""
        })
        self.assertNotIn("token", res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """The me endpoint requires authentication."""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
"""Test Api requests that require authentications"""
def test_retrieve_profile_success(self):
"""Test retriving profile for logged in user success"""
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
"name": self.user.name,
"email": self.user.email
})
def test_post_me_not_allowed(self):
"""Test that POST is not allowed on the me URL"""
"""Post is used for creating objects"""
"""because this option is olnly for update or PUT"""
res = self.client.post(ME_URL, {}) # we are posting emptly input
self.assertEqual(
res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_success(self):
"""Test Updating the user profile for authenticated user"""
payload = {
"name": "New Name",
"password": "updatedPassword"
}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db() # as we just updated the user
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(self.user.name, payload["name"])
self.assertTrue(self.user.check_password(payload["password"]))
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
1334,
62,
30604,
13,
9288,
1330,
34... | 2.359687 | 2,302 |
import os
from datetime import datetime
import gym
import gym_kuka_mujoco
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv, VecNormalize
from stable_baselines.common.policies import MlpPolicy
if __name__=='__main__':
# Visualize the solution
environment_name = 'PegInsertionNoHole-v0'
environment_name = 'RemoteCenterControlledKukaMujoco-v0'
environment_name = 'KukaMujoco-v0'
running_average_path = os.path.join(os.environ['OPENAI_LOGDIR'],
'stable',
'2019-01-16',
# '15:59:46.148298/alg=PPO2,env=KukaMujoco-v0,total_timesteps=20000000,gamma=1.0,n_steps=2048,ent_coef=0.0,verbose=0,cliprange=0.2,learning_rate=0.001')
'17:26:40.194093/alg=PPO2,env=KukaMujoco-v0,total_timesteps=10000000,n_steps=2048,ent_coef=0.01,cliprange=0.2,gamma=1.0,learning_rate=0.001,verbose=0')
model_path = os.path.join(running_average_path,
'model.pkl')
orig_env = gym.make(environment_name)
env = DummyVecEnv([lambda: orig_env])
# env = VecNormalize(env, training=False, norm_reward=False, clip_obs=np.inf, clip_reward=np.inf)
# env.load_running_average(running_average_path)
model = PPO2.load(model_path, env=env)
replay_model(orig_env, model) | [
11748,
28686,
220,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
11550,
198,
11748,
11550,
62,
74,
14852,
62,
76,
23577,
25634,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
8245,
62,
12093,
20655,
1330,
350,
16402,
17,
198... | 2.114504 | 655 |
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.Utils import formatdate
import smtplib
date_fmt = '%m/%d/%y %H:%M'
| [
6738,
3053,
13,
76,
524,
13,
5239,
1330,
337,
3955,
2767,
2302,
198,
6738,
3053,
13,
76,
524,
13,
16680,
541,
433,
1330,
337,
3955,
3620,
586,
541,
433,
198,
6738,
3053,
13,
18274,
4487,
1330,
5794,
4475,
198,
198,
11748,
895,
83,
... | 2.411765 | 68 |
from hubcheck.pageobjects.widgets.form_base import FormBase
from hubcheck.pageobjects.basepageelement import Text
class LoginRemindForm_Locators_Base(object):
"""locators for LoginRemindForm object"""
locators = {
'base' : "css=#hubForm",
'email' : "css=#email",
'submit' : "css=#hubForm .validate",
}
| [
6738,
12575,
9122,
13,
7700,
48205,
13,
28029,
11407,
13,
687,
62,
8692,
1330,
5178,
14881,
198,
6738,
12575,
9122,
13,
7700,
48205,
13,
12093,
538,
496,
30854,
1330,
8255,
628,
198,
4871,
23093,
8413,
521,
8479,
62,
33711,
2024,
62,
... | 2.510638 | 141 |
import asyncio
import aiohttp
import time
from aiohttp_requests import requests
try:
from aiohttp import ClientError
except:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError
from proxypool.db import RedisClient
from proxypool.setting import *
if __name__ == '__main__':
Tester().run()
| [
11748,
30351,
952,
198,
11748,
257,
952,
4023,
198,
11748,
640,
198,
6738,
257,
952,
4023,
62,
8897,
3558,
1330,
7007,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
257,
952,
4023,
1330,
20985,
12331,
198,
16341,
25,
198,
220,
220,
... | 3.262626 | 99 |
import threading
import signal
import os
import time
P1 = threading.Thread(target = func)
P1.start()
P1.join()
print "fuck!!!!!"
while 1:
pass
| [
11748,
4704,
278,
198,
11748,
6737,
198,
11748,
28686,
198,
11748,
640,
198,
220,
220,
220,
220,
220,
220,
220,
220,
628,
628,
198,
198,
47,
16,
796,
4704,
278,
13,
16818,
7,
16793,
796,
25439,
8,
198,
198,
47,
16,
13,
9688,
3419,... | 2.469697 | 66 |
# Generated by Django 1.11.29 on 2020-06-18 18:26
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1959,
319,
12131,
12,
3312,
12,
1507,
1248,
25,
2075,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.967742 | 31 |
import pytest
import peewee
| [
11748,
12972,
9288,
198,
11748,
613,
413,
1453,
628,
198
] | 3 | 10 |
import requests as req
import datetime
try:
import numpy as np
import pandas as pd
except:
pass
from bs4 import BeautifulSoup
from io import StringIO, BytesIO
from nsepy.archives import date_to_str, __raw_zip_data_to_str
PRICE_LIST_URL = 'http://www1.nseindia.com/content/historical/DERIVATIVES/%s/%s/fo%sbhav.csv.zip'
DERIVATIVE_ARCHIVES = 'http://www1.nseindia.com/products/dynaContent/common/productsSymbolMapping.jsp?instrumentType=OPTIDX&symbol=NIFTY&expiryDate=27-07-2006&optionType=CE&strikePrice=&dateRange=week&fromDate=&toDate=&segmentLink=9&symbolCount='
| [
11748,
7007,
355,
43089,
198,
11748,
4818,
8079,
198,
28311,
25,
198,
220,
220,
220,
1330,
299,
32152,
355,
45941,
198,
220,
220,
220,
1330,
19798,
292,
355,
279,
67,
198,
16341,
25,
198,
220,
220,
220,
1208,
198,
198,
6738,
275,
82... | 2.536797 | 231 |
from django.shortcuts import render,redirect
from django.contrib import messages, auth
from django.contrib.auth.models import User
from contacts.models import Contact
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
445,
1060,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
11,
6284,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
13961,
13,
27530,
1330,... | 3.88 | 50 |
"""Utility module for sentiment analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
START_CHAR = 1
END_CHAR = 2
OOV_CHAR = 3
def pad_sentence(sentence, sentence_length):
"""Pad the given sentense at the end.
If the input is longer than sentence_length,
the remaining portion is dropped.
END_CHAR is used for the padding.
Args:
sentence: A numpy array of integers.
sentence_length: The length of the input after the padding.
Returns:
A numpy array of integers of the given length.
"""
sentence = sentence[:sentence_length]
if len(sentence) < sentence_length:
sentence = np.pad(sentence, (0, sentence_length - len(sentence)),
"constant", constant_values=(START_CHAR, END_CHAR))
return sentence
| [
37811,
18274,
879,
8265,
329,
15598,
3781,
526,
15931,
201,
198,
201,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
201,
198,
6738,
11593,
37443,
834,
1330,
7297,
201,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
201,
198,... | 2.853896 | 308 |
from pseudoairspeed.analysis import pseudo, load
import argparse
import os
import pandas
# source: https://www.youtube.com/watch?v=_2XDyqGhHI0
csv_fn = os.path.join('..', 'data', 'flight_pawel_wing_2017.09.csv')
if __name__ == '__main__':
main()
| [
6738,
24543,
3468,
39492,
13,
20930,
1330,
24543,
11,
3440,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
19798,
292,
198,
198,
2,
2723,
25,
3740,
1378,
2503,
13,
11604,
13,
785,
14,
8340,
30,
85,
28,
62,
17,
55,
35,
88,
... | 2.534653 | 101 |
# Copyright (C) 2013 by Brian Neal.
# This file is part of m209, the M-209 simulation.
# m209 is released under the MIT License (see LICENSE.txt).
__version__ = '0.1.0'
class M209Error(Exception):
"""Base Exception class for all M209 errors"""
pass
| [
2,
15069,
357,
34,
8,
2211,
416,
8403,
29189,
13,
198,
2,
770,
2393,
318,
636,
286,
285,
22567,
11,
262,
337,
12,
22567,
18640,
13,
198,
2,
285,
22567,
318,
2716,
739,
262,
17168,
13789,
357,
3826,
38559,
24290,
13,
14116,
737,
19... | 3.047059 | 85 |
from django.shortcuts import render, get_object_or_404
from .models import Person, Document
# Create your views here.
from django.http import HttpResponse
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
764,
27530,
1330,
7755,
11,
16854,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
... | 3.5 | 46 |
"""
Description taken from: https://archive.ics.uci.edu/ml/datasets/wine
1. Title of Database: Wine recognition data
Updated Sept 21, 1998 by C.Blake : Added attribute information
2. Sources:
(a) Forina, M. et al, PARVUS - An Extendible Package for Data
Exploration, Classification and Correlation. Institute of Pharmaceutical
and Food Analysis and Technologies, Via Brigata Salerno,
16147 Genoa, Italy.
(b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au
(c) July 1991
3. Past Usage:
(1)
S. Aeberhard, D. Coomans and O. de Vel,
Comparison of Classifiers in High Dimensional Settings,
Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Technometrics).
The data was used with many others for comparing various
classifiers. The classes are separable, though only RDA
has achieved 100% correct classification.
(RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))
(All results using the leave-one-out technique)
In a classification context, this is a well posed problem
with "well behaved" class structures. A good data set
for first testing of a new classifier, but not very
challenging.
(2)
S. Aeberhard, D. Coomans and O. de Vel,
"THE CLASSIFICATION PERFORMANCE OF RDA"
Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of
Mathematics and Statistics, James Cook University of North Queensland.
(Also submitted to Journal of Chemometrics).
Here, the data was used to illustrate the superior performance of
the use of a new appreciation function with RDA.
4. Relevant Information:
-- These data are the results of a chemical analysis of
wines grown in the same region in Italy but derived from three
different cultivars.
The analysis determined the quantities of 13 constituents
found in each of the three types of wines.
-- I think that the initial data set had around 30 variables, but
for some reason I only have the 13 dimensional version.
I had a list of what the 30 or so variables were, but a.)
I lost it, and b.), I would not know which 13 variables
are included in the set.
-- The attributes are (dontated by Riccardo Leardi,
riclea@anchem.unige.it )
1) Alcohol
2) Malic acid
3) Ash
4) Alcalinity of ash
5) Magnesium
6) Total phenols
7) Flavanoids
8) Nonflavanoid phenols
9) Proanthocyanins
10)Color intensity
11)Hue
12)OD280/OD315 of diluted wines
13)Proline
5. Number of Instances
class 1 59
class 2 71
class 3 48
6. Number of Attributes
13
7. For Each Attribute:
All attributes are continuous
No statistics available, but suggest to standardise
variables for certain uses (e.g. for us with classifiers
which are NOT scale invariant)
NOTE: 1st attribute is class identifier (1-3)
8. Missing Attribute Values:
None
9. Class Distribution: number of instances per class
class 1 59
class 2 71
class 3 48
"""
from aorist import (
RowStruct,
MinioLocation,
WebLocation,
StaticTabularLayout,
ORCEncoding,
CSVEncoding,
SingleFileLayout,
RemoteStorage,
HiveTableStorage,
RemoteStorageSetup,
StaticDataTable,
DataSet,
default_tabular_schema,
attr_list,
)
# hacky import since submodule imports don't work well
from aorist import attributes as attr
"""
Defining dataset
"""
# Attributes in the dataset
attributes = attr_list([
attr.Categorical("wine_class_identifier"),
attr.PositiveFloat("alcohol"),
attr.PositiveFloat("malic_acid"),
attr.PositiveFloat("ash"),
attr.PositiveFloat("alcalinity_of_ash"),
attr.PositiveFloat("magnesium"),
attr.PositiveFloat("total_phenols"),
attr.PositiveFloat("flavanoids"),
attr.PositiveFloat("non_flavanoid_phenols"),
attr.PositiveFloat("proanthocyanins"),
attr.PositiveFloat("color_intensity"),
attr.PositiveFloat("hue"),
attr.PositiveFloat("od_280__od_315_diluted_wines"),
attr.PositiveFloat("proline"),
])
# A row is equivalent to a struct
wine_datum = RowStruct(
name="wine_datum",
attributes=attributes,
)
# Data can be found remotely, on the web
remote = RemoteStorage(
location=WebLocation(
address=("https://archive.ics.uci.edu/ml/"
"machine-learning-databases/wine/wine.data"),
),
layout=SingleFileLayout(),
encoding=CSVEncoding(),
)
# This data is to be replicated locally
local = HiveTableStorage(
location=MinioLocation(name="wine"),
layout=StaticTabularLayout(),
encoding=ORCEncoding(),
)
# We will create a table that will always have the same content
# (we do not expect it to change over time)
wine_table = StaticDataTable(
name="wine_table",
schema=default_tabular_schema(wine_datum),
setup=RemoteStorageSetup(
remote=remote,
),
tag="wine",
)
# our dataset contains only this table and only this datum
# definition. Note that multiple assets can reference the
# same template!
wine_dataset = DataSet(
name="wine",
description="A [chemical analysis](https://archive.ics.uci.edu/ml/datasets/wine) of wines grown in the same region in Italy but derived from three different cultivars.",
sourcePath=__file__,
datumTemplates=[wine_datum],
assets={"wine_table": wine_table},
)
| [
37811,
198,
11828,
2077,
422,
25,
3740,
1378,
17474,
13,
873,
13,
42008,
13,
15532,
14,
4029,
14,
19608,
292,
1039,
14,
39002,
198,
198,
16,
13,
11851,
286,
24047,
25,
20447,
9465,
1366,
198,
220,
220,
220,
19433,
2362,
2310,
11,
77... | 2.934295 | 1,872 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
AddScriptFromTemplateAction.py
---------------------
Date : August 2012
Copyright : (C) 2018 by Matteo Ghetta
Email : matteo dot ghetta at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matteo Ghetta'
__date__ = 'March 2018'
__copyright__ = '(C) 2018, Matteo Ghetta'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
import codecs
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.gui.ToolboxAction import ToolboxAction
from processing.script.ScriptEditorDialog import ScriptEditorDialog
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
17174,
17174,
4557,
8162,
198,
220,
220,
220,
3060,
7391,
4863,
30800,
12502,
13,
9078,
198,
220,
220,
220,
41436,
12,
198,
220,
220,
220,
7536,
220,
... | 2.533698 | 549 |
from .privx_api import PrivXAPI, PrivXAPIResponse, InternalAPIException
| [
6738,
764,
13776,
87,
62,
15042,
1330,
9243,
55,
17614,
11,
9243,
55,
2969,
4663,
9774,
2591,
11,
18628,
17614,
16922,
198
] | 3.272727 | 22 |
"""
Mask Resolver
-------------
This module provides functionality to resolve and write script masks for ``HTCondor`` jobs
submission.
"""
import logging
import re
from pathlib import Path
import pandas as pd
from pylhc_submitter.htc.utils import COLUMN_JOB_DIRECTORY, COLUMN_JOB_FILE
LOG = logging.getLogger(__name__)
def create_jobs_from_mask(
job_df: pd.DataFrame, maskfile: Path, replace_keys: dict, file_ext: str
) -> pd.DataFrame:
"""
Takes path to mask file, list of parameter to be replaced and pandas dataframe containg per job
the job directory where processed mask is to be put, and columns containing the parameter values
with column named like replace parameters. Job directories have to be created beforehand.
Processed (madx) mask has the same filename as mask but with the given file extension.
Input Dataframe is returned with additional column containing path to the processed script
files.
Args:
job_df (pd.DataFrame): Job parameters as defined in description.
maskfile: `Path` object to the mask file.
replace_keys: keys to be replaced (must correspond to columns in ``job_df``).
file_ext: file extention to use (defaults to **madx**).
Returns:
The provided ``job_df`` but with added path to the scripts.
"""
with maskfile.open("r") as mfile:
template = mfile.read()
jobname = maskfile.with_suffix("").name
jobs = [None] * len(job_df)
for idx, (jobid, values) in enumerate(job_df.iterrows()):
jobfile_fullpath = (Path(values[COLUMN_JOB_DIRECTORY]) / jobname).with_suffix(file_ext)
with jobfile_fullpath.open("w") as madxjob:
madxjob.write(template % dict(zip(replace_keys, values[list(replace_keys)])))
jobs[idx] = jobfile_fullpath.name
job_df[COLUMN_JOB_FILE] = jobs
return job_df
def check_percentage_signs_in_mask(mask: str):
""" Checks for '%' in the mask, that are not replacement variables. """
cleaned_mask = re.sub(r"%\((\w+)\)", "", mask)
n_signs = cleaned_mask.count("%")
if n_signs == 0:
return
# Help the user find the %
for idx, line in enumerate(cleaned_mask.split("\n")):
if "%" in line:
positions = [str(i) for i, char in enumerate(line) if char == "%"]
LOG.error(f"Problematic '%' sign(s) in line {idx}, pos {' ,'.join(positions)}.")
raise KeyError(f"{n_signs} problematic '%' signs found in template. Please remove.")
def generate_jobdf_index(old_df, jobid_mask, keys, values):
""" Generates index for jobdf from mask for job_id naming. """
if not jobid_mask:
nold = len(old_df.index) if old_df is not None else 0
start = nold-1 if nold > 0 else 0
return range(start, start + values.shape[0])
return [jobid_mask % dict(zip(keys, v)) for v in values]
if __name__ == "__main__":
raise EnvironmentError(f"{__file__} is not supposed to run as main.")
| [
37811,
201,
198,
45195,
1874,
14375,
201,
198,
32501,
201,
198,
201,
198,
1212,
8265,
3769,
11244,
284,
10568,
290,
3551,
4226,
20680,
329,
7559,
39,
4825,
623,
273,
15506,
3946,
201,
198,
7266,
3411,
13,
201,
198,
37811,
201,
198,
11... | 2.567932 | 1,185 |
from gui import Gui
myGui: Gui = Gui()
myGui.run()
| [
198,
6738,
11774,
1330,
1962,
72,
198,
198,
1820,
8205,
72,
25,
1962,
72,
796,
1962,
72,
3419,
198,
1820,
8205,
72,
13,
5143,
3419,
198
] | 2.038462 | 26 |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198
] | 3.47619 | 21 |
from collections import deque
from .arrange_base import ArrangeBase
class ArrangeRotate(ArrangeBase):
""" Class to rotate through windows """
def do(self, dummy):
""" Main function that performs the arrangement """
windows = self._screen.get_all_windows()
amount_of_windows = len(windows)
if amount_of_windows > self.arrangement_size:
windows = windows[:self.arrangement_size]
geos = []
for window in windows:
window_geo = window.get_geometry()
window_geo = window_geo[:4]
geos.append(window_geo)
# do the actual rotations, lets use deque as it's dramatically more
# efficient than a trivial shift implementation
windows_deq = deque(windows)
windows_deq.rotate(1)
rotation_len = len(windows_deq)
i = 0
while i < rotation_len:
geometry_list_args = [0, 255]
index = rotation_len - (i + 1) #again, start by the tail
geometry_list_args.extend([int(x) for x in geos[index]])
windows_deq[index].unmaximize()
windows_deq[index].set_geometry(*geometry_list_args)
i += 1
#(windows_deq[0]).activate(int(time.time()))
#not sure why it doesn't work. if uncommented causes other windows
# beyond the rotated ones to hide behind current ones even after
# pressing ctrl+tab
| [
6738,
17268,
1330,
390,
4188,
198,
198,
6738,
764,
3258,
858,
62,
8692,
1330,
943,
9521,
14881,
628,
198,
198,
4871,
943,
9521,
24864,
378,
7,
3163,
9521,
14881,
2599,
198,
220,
220,
220,
37227,
5016,
284,
23064,
832,
9168,
37227,
628... | 2.350245 | 611 |
from .downloads import *
| [
6738,
764,
15002,
82,
1330,
1635,
198
] | 3.571429 | 7 |
from setuptools import setup, find_packages
setup(
name='yyam',
version='0.1.4',
packages=find_packages(),
description="include-yy's account manager",
author='include-yy',
author_email='969041171@qq.com',
url='https://github.com/include-yy/account-manager',
license='MIT',
install_requires=[
'toml',
'tomlkit'
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
],
python_requires='>=3',
entry_points={
'console_scripts': [
'yyam = yyam:main'
]
}
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
22556,
321,
3256,
198,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
19,
3256,
198,
220,
220,
220,
10392,
28,
19796,
62,
... | 2.211268 | 284 |
#!/usr/bin/env python
"""Tests for `latex_admin` package."""
import unittest
from click.testing import CliRunner
# from latex_admin import latex_admin
from latex_admin import cli
class TestLatexAdmin(unittest.TestCase):
"""Tests for `latex_admin` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
def test_command_line_interface(self):
"""Test the CLI."""
runner = CliRunner()
result = runner.invoke(cli.main)
self.assertEqual(result.exit_code, 0)
self.assertIn('latex_admin.cli.main', result.output)
help_result = runner.invoke(cli.main, ['--help'])
self.assertEqual(help_result.exit_code, 0)
self.assertIn('--help Show this message and exit.', help_result.output)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
51,
3558,
329,
4600,
17660,
87,
62,
28482,
63,
5301,
526,
15931,
628,
198,
11748,
555,
715,
395,
198,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
2,
422,
4703... | 2.530556 | 360 |
from typing import Dict
from pytorch_lightning import LightningDataModule
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader
from lit_saint.dataset import SaintDataset
class SaintDatamodule(LightningDataModule):
"""It preprocess the data, doing LabelEncoding for the categorical values and fitting a StandardScaler
for the numerical columns on the training set. And it splits the data and defines the dataloaders
"""
NAN_LABEL = "SAINT_NAN"
def __init__(self, df: pd.DataFrame, target: str, split_column: str, scaler: TransformerMixin = None):
"""
:param df: contains the data that will be used by the dataLoaders
:param target: name of the target column
:param split_column: name of the column used to split the data
:param scaler: a scikit learn transformer in order to rescale the continuos variables, if not specified
it will use the StandardScaler
"""
super().__init__()
self.target: str = target
self.pretraining = False
self.data_loader_params = {"batch_size": 256}
self.categorical_columns = []
self.categorical_dims = []
self.numerical_columns = []
self.dim_target = 1
self.target_nan_index = None
self.dict_label_encoder = {}
self.predict_set = None
self.scaler = scaler if scaler else StandardScaler()
self.split_column = split_column
self.prep(df)
def prep(self, df: pd.DataFrame) -> None:
"""It find the indexes for each categorical and continuous columns, and for each categorical it
applies Label Encoding in order to convert them in integers and save the number of classes for each
categorical column
:param df: contains the data that need to be processed
"""
df = df.copy()
col_not_to_use = []
for col in df.columns:
if df[col].dtypes.name in ["object", "category"]:
df = self.prep_categorical_columns(col=col, df=df)
elif df[col].dtypes.name in ["int64", "float64", "int32", "float32"]:
df = self.prep_continuous_columns(col=col, df=df)
else:
col_not_to_use.append(col)
if len(self.categorical_columns) == 0:
self.categorical_dims.append(1)
print("The following cols will not be used because they have a not supported data type: ", col_not_to_use)
self._split_data(df=df)
self.scaler_continuous_columns(df=df)
def scaler_continuous_columns(self, df: pd.DataFrame) -> None:
"""Fit a StandardScaler for each continuos columns on the training set
:param df: contains the data that need to be processed
:param split_column: name of column used to split the data
"""
df_train = df.loc[df[self.split_column] == "train"].loc[:, self.numerical_columns].values
if len(self.numerical_columns) > 0:
self.scaler.fit(df_train)
def _split_data(self, df: pd.DataFrame) -> None:
"""Split the Dataframe in train, validation and test, and drop the split column
:param df: contains the data that need to be processed
"""
self.train = df.loc[df[self.split_column] == "train"].reset_index(drop=True)
self.validation = df.loc[df[self.split_column] == "validation"].reset_index(drop=True)
self.test = df.loc[df[self.split_column] == "test"].reset_index(drop=True)
self.train.drop(self.split_column, axis=1, inplace=True)
self.validation.drop(self.split_column, axis=1, inplace=True)
self.test.drop(self.split_column, axis=1, inplace=True)
def set_predict_set(self, df) -> None:
"""Tranform the categorical columns using the OrdinalEncoders fitted before the training and
save the dataframe in order to make the predictions
:param df: The data that will be used to make some predictions
"""
df = df.copy()
for col, label_enc in self.dict_label_encoder.items():
if col != self.target or (col == self.target and col in df.columns):
if df[col].isna().any(): # the columns contains nan
df[col] = df[col].fillna(self.NAN_LABEL)
df[col] = label_enc.transform(df[col].values.reshape(-1, 1)).astype(int)
df = df.fillna(0)
self.predict_set = df
def _remove_rows_without_labels(self, df) -> pd.DataFrame:
"""Remove rows from a dataframe where the label was NaN
:param df: the dataframe from which remove rows
"""
df = df.loc[df[self.target] != self.target_nan_index]
df[self.target] = df[self.target].apply(lambda x: x if x < self.target_nan_index else x - 1)
return df
def _create_dataloader(self, df, is_predict=False) -> DataLoader:
""" Given a dataframe it return a dataloader and eventually without rows
that have nan labels if not pretraining
:param df: the dataframe that will be used inside the DataLoader
:param is_predict: flag that is true if we are executing a prediction
"""
if not self.pretraining and self.target_nan_index is not None and not is_predict:
df = self._remove_rows_without_labels(df)
dataset = SaintDataset(
data=df,
target=self.target,
cat_cols=self.categorical_columns,
con_cols=self.numerical_columns,
scaler=self.scaler,
target_categorical=self.dim_target > 1
)
return DataLoader(
dataset,
**self.data_loader_params,
)
def train_dataloader(self) -> DataLoader:
""" Function that loads the train set. """
return self._create_dataloader(self.train)
def val_dataloader(self) -> DataLoader:
""" Function that loads the validation set. """
return self._create_dataloader(self.validation)
def test_dataloader(self) -> DataLoader:
""" Function that loads the validation set. """
return self._create_dataloader(self.test)
def predict_dataloader(self) -> DataLoader:
""" Function that loads the dataset for the prediction. """
return self._create_dataloader(self.predict_set, True)
def set_pretraining(self, pretraining: bool) -> None:
"""Function used to set the pretraining flag"""
self.pretraining = pretraining
def set_data_loader_params(self, data_loader_params: Dict) -> None:
"""Function used to set the parameters used by the DataLoader"""
self.data_loader_params = data_loader_params
| [
6738,
19720,
1330,
360,
713,
198,
198,
6738,
12972,
13165,
354,
62,
2971,
768,
1330,
12469,
6601,
26796,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
8692,
1330,
3602,
16354,
35608,
259,
198,
6738,
1341,
35720,
13,
... | 2.469499 | 2,754 |
import unittest
from vat_validator.validators import vat_is_valid_pt
| [
11748,
555,
715,
395,
198,
198,
6738,
410,
265,
62,
12102,
1352,
13,
12102,
2024,
1330,
410,
265,
62,
271,
62,
12102,
62,
457,
628
] | 2.84 | 25 |
#!/usr/bin/env python
import os
from setuptools import setup
# Allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='lbcapi',
version='1.0.2',
packages=['lbcapi'],
include_package_data=True,
license='MIT License',
description='Make API calls to LocalBitcoins API.',
author='LocalBitcoins Oy',
url='https://github.com/LocalBitcoins/lbcapi',
install_requires=[
'requests',
],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
22507,
9058,
13,
9078,
284,
307,
1057,
422,
597,
3108,
198,
418,
13,
354,
15908,
7,
418,
13,
6978,
13,
27237,
697... | 2.502488 | 201 |