content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/bin/python3
import sys
n = int(input().strip())
arr = [int(arr_temp) for arr_temp in input().strip().split(" ")]
arr.reverse()
arrstring = " ".join(str(e) for e in arr)
print(arrstring)
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
628,
198,
77,
796,
493,
7,
15414,
22446,
36311,
28955,
198,
3258,
796,
685,
600,
7,
3258,
62,
29510,
8,
329,
5240,
62,
29510,
287,
5128,
22446,
36311,
22446,
35312,
7203,
366,
1... | 2.6 | 75 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python distort_image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(huangyp): also measure the differences between AdjustHsvInYiq and
# AdjustHsv in core.
if __name__ == '__main__':
googletest.main()
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
705,
34156,
24036,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 3.749441 | 447 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import digitalocean
import os
import random
import sys
import time
import requests
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4875,
78,
5829,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
70... | 2.647059 | 68 |
import torch
from torch.autograd import Variable
| [
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
628,
628
] | 4 | 13 |
from abc import ABC, abstractmethod | [
6738,
450,
66,
1330,
9738,
11,
12531,
24396
] | 4.375 | 8 |
from flask import jsonify | [
6738,
42903,
1330,
33918,
1958
] | 5 | 5 |
from datetime import date
from batch.models import DatasetOnDate
from batch.s3_dataset_scanner.extract_datasets_from_s3_metadata import (
extract_datasets_from_s3_metadata,
)
from tests.util import csv_file_to_parquet_source
| [
6738,
4818,
8079,
1330,
3128,
198,
198,
6738,
15458,
13,
27530,
1330,
16092,
292,
316,
2202,
10430,
198,
6738,
15458,
13,
82,
18,
62,
19608,
292,
316,
62,
35836,
1008,
13,
2302,
974,
62,
19608,
292,
1039,
62,
6738,
62,
82,
18,
62,
... | 2.783133 | 83 |
# -*- coding: utf-8 -*-
# Author: jlopes@usp.br
import os
import pickle
import numpy as np
import re
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
474,
75,
13920,
31,
17723,
13,
1671,
198,
11748,
28686,
220,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
302,
220,
220,
628,
22... | 2.12963 | 54 |
from stickerfinder.i18n import i18n
from stickerfinder.logic.tag import handle_next
from stickerfinder.enum import TagMode
from stickerfinder.helper.display import (
get_settings_text,
get_help_text_and_keyboard,
)
from stickerfinder.telegram.keyboard import (
get_main_keyboard,
get_donation_keyboard,
get_settings_keyboard,
get_admin_settings_keyboard,
)
from stickerfinder.config import config
def open_settings(session, context):
"""Open the user settings menu."""
settings_keyboard = get_settings_keyboard(context.user)
context.query.message.edit_text(
get_settings_text(context.user),
reply_markup=settings_keyboard,
parse_mode="Markdown",
)
def open_admin_settings(session, context):
"""Open the user settings menu."""
admin_keyboard = get_admin_settings_keyboard(context.user)
context.query.message.edit_text(
get_settings_text(context.user),
reply_markup=admin_keyboard,
parse_mode="Markdown",
)
def switch_help(session, context):
"""Show the help keyboard."""
text, keyboard = get_help_text_and_keyboard(context.action)
context.query.message.edit_text(
text,
parse_mode="Markdown",
reply_markup=keyboard,
disable_web_page_preview=True,
)
def tag_random(session, context):
"""Initialize tagging of a whole set."""
chat = context.chat
chat.cancel(context.bot)
chat.tag_mode = TagMode.random.value
handle_next(session, context.bot, chat, context.query.message.chat, context.user)
return
def open_donations(session, context):
"""Send the donation text."""
donation_keyboard = get_donation_keyboard()
context.query.message.edit_text(
i18n.t("text.misc.donations"),
reply_markup=donation_keyboard,
parse_mode="Markdown",
disable_web_page_preview=True,
)
def main_menu(session, context):
"""Show the main menu."""
context.query.message.edit_text(
i18n.t("text.misc.start", username=config["telegram"]["bot_name"]),
reply_markup=get_main_keyboard(context.user),
parse_mode="Markdown",
disable_web_page_preview=True,
)
| [
6738,
27735,
22805,
13,
72,
1507,
77,
1330,
1312,
1507,
77,
198,
6738,
27735,
22805,
13,
6404,
291,
13,
12985,
1330,
5412,
62,
19545,
198,
6738,
27735,
22805,
13,
44709,
1330,
17467,
19076,
198,
6738,
27735,
22805,
13,
2978,
525,
13,
... | 2.567599 | 858 |
import numpy as np
import matplotlib.pyplot as plt
data = make_data(angle=-np.pi/4)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
7890,
796,
787,
62,
7890,
7,
9248,
10779,
37659,
13,
14415,
14,
19,
8,
198
] | 2.575758 | 33 |
from setuptools import setup
with open("README.md", "r") as readme:
long_description = readme.read()
setup(
version='0.5.0',
author='Sam Beck',
author_email='notsambeck@gmail.com',
name='pandabase',
packages=['pandabase'],
description="pandabase links pandas DataFrames to SQL databases. Upsert, append, read, drop, describe...",
long_description=long_description,
long_description_content_type = 'text/markdown',
url="https://github.com/notsambeck/pandabase",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'pandas>=0.24.0',
'sqlalchemy>=1.3.0',
],
python_requires='>=3.6',
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
1100,
1326,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
1100,
1326,
13,
961,
3419,
198,
198,
40406,
7,
198,
220,
220... | 2.476636 | 321 |
import json
import logging
import random
import re
import requests
import sys
import time
import traceback
from websocket import WebSocketConnectionClosedException
from markdownify import MarkdownConverter
from will import settings
from .base import IOBackend
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import SleepMixin, StorageMixin
from multiprocessing import Process
from will.abstractions import Event, Message, Person, Channel, Attachment
from slackclient import SlackClient
from slackclient.server import SlackConnectionError
SLACK_SEND_URL = "https://slack.com/api/chat.postMessage"
SLACK_SET_TOPIC_URL = "https://slack.com/api/channels.setTopic"
SLACK_PRIVATE_SET_TOPIC_URL = "https://slack.com/api/groups.setTopic"
class SlackAttachmentConverter:
""" This takes an Attachment object or list of Attachment objects and renders
a Slack ready Message Attachment JSON payload using the render() method. """
def render(self):
""" Builds a json payload for Slack Rich Format Message Attachments.
It takes either a single Attachment object or a list or Attachment objects."""
attachments = ""
try:
for a in self.attachments:
attachments += str(
[
{
"fallback": a.fallback,
"color": a.color,
"text": a.text,
"actions": a.actions,
"footer": a.footer,
"footer_icon": a.footer_icon,
}
]
)
except AttributeError:
attachments += str(
[
{
"fallback": self.attachments.fallback,
"color": self.attachments.color,
"text": self.attachments.text,
"actions": self.attachments.actions,
"footer": self.attachments.footer,
"footer_icon": self.attachments.footer_icon,
}
]
)
finally:
return attachments
| [
11748,
33918,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
7007,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
12854,
1891,
198,
6738,
2639,
5459,
1330,
5313,
39105,
32048,
2601,
1335,
16922,
198,
198,
6738,
1317,
2... | 2.087199 | 1,078 |
consumir = int ( input ( "¿Consumir licor? Digite 1 para si y 2 para No: " ))
agr = 0
Ron = 0
cuello uterino = 0
teq = 0
whi = 0
otro = 0
licor = 0
listaco = []
listaco . agregar ( consumir )
listalicor = []
listaa ños = [ ]
listacv = []
listawhi = []
listam = []
listaf = []
listap = []
mientras que es cierto :
si (( consumir == 2 )):
print ( f"Aguardiente { agr } \n Ron { ron } \n Cerveza { cerv } \n Tequila { teq } \n whisky { whi } \n Otro { otro } " )
imprimir ( len ( listaco ))
imprimir ( len ( listaf ))
imprimir ( suma ( listam ))
imprimir ( suma ( listap ) / suma ( listacv ))
imprimir ( len ( listawhi ) / len ( listaco ))
descanso
elif ( consumir == 1 ):
a ñ os = int ( input ( "Ingrese su edad: " ))
listaaños . _ _ anexar ( años ) _ _
sexo = str ( input ( "Ingrese su sexo. 1 para femenino y 2 para masculino: " ))
licor = int ( input ( "1.Aguardiente, 2.Ron, 3.Cerveza, 4.Tequila, 5.Whisky, 6.Otro: " ))
listalicor _ añadir ( licor )
imprimir ( "Encuesta nueva" )
consumir = int ( input ( " Continuar la encuesta, 1 para sí, 2 para no: " ))
si ( licor == 1 ):
agr = agr + 1
elif ( licor == 2 ):
Ron = Ron + 1
si ( licor == 3 ):
cuello = cuello + 1
listacv . agregar ( cerv )
si ( licor == 3 y a ños > 0 ) :
p = a ñ os
listap _ agregar ( pag )
Seguir
si ( licor == 4 ):
teq = teq + 1
si ( licor == 5 ):
whi = whi + 1
listawhi _ agregar ( whi )
si ( licor == 6 ):
otro = otro + 1
elif ( a ñ os >= 0 ):
listaaños . _ _ anexar ( años ) _ _
Seguir
elif ( a ñ os <= 18 ) y ( sexo == 1 ):
f = años + sexo _ _
listaf . agregar ( f )
Seguir
elif ( 20 >= a ños < = 25 ) and ( sexo == 2 ) and ( licor != 5 ):
m = años + sexo _ _
si ( licor != 5 ):
listam . agregar ( m )
Seguir | [
5936,
388,
343,
796,
493,
357,
5128,
357,
366,
126,
123,
9444,
388,
343,
3476,
273,
30,
7367,
578,
352,
31215,
33721,
331,
362,
31215,
1400,
25,
366,
15306,
201,
198,
363,
81,
796,
657,
201,
198,
23672,
796,
657,
201,
198,
15509,
... | 1.816913 | 1,218 |
import os, sys, socket, random
from urllib import quote_plus
from panda3d.core import HTTPClient, HTTPCookie, URLSpec, Ramfile, Ostream, HTTPDate, DocumentSpec
from direct.task.Task import Task
from direct.directnotify.DirectNotifyGlobal import directNotify
notify = directNotify.newCategory('UserFunnel')
| [
11748,
28686,
11,
25064,
11,
17802,
11,
4738,
198,
6738,
2956,
297,
571,
1330,
9577,
62,
9541,
198,
6738,
279,
5282,
18,
67,
13,
7295,
1330,
14626,
11792,
11,
7154,
51,
5662,
18055,
11,
10289,
22882,
11,
7431,
7753,
11,
440,
5532,
1... | 3.252525 | 99 |
from django.contrib import admin
from .models import Budget, Category, Transaction
admin.site.register(Budget, BudgetAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Transaction, TransactionAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
15401,
11,
21743,
11,
45389,
628,
628,
198,
28482,
13,
15654,
13,
30238,
7,
33,
29427,
11,
15401,
46787,
8,
198,
28482,
13,
15654,
13,
30238,
7,
27313,
... | 3.813559 | 59 |
#Problem link : https://leetcode.com/problems/reverse-integer/
num = int(input())
print(reverse(num))
#Example test case
# Input : 123
# Output : 321
# Input : - 123
# Output : -321
| [
198,
2,
40781,
2792,
1058,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
50188,
12,
41433,
14,
628,
628,
198,
22510,
796,
493,
7,
15414,
28955,
198,
4798,
7,
50188,
7,
22510,
4008,
198,
198,
2,
16281,
1332,
1339,
198,
... | 2.863636 | 66 |
# import packages
import os
import pandas as pd
from calendar import monthrange
from datetime import datetime, timedelta
from azureml.core import Dataset, Datastore, Workspace
from azureml.opendatasets import NoaaIsdWeather
# get workspace and datastore
ws = Workspace.from_config()
dstore = ws.get_default_datastore()
# adjust parameters as needed
target_years = list(range(2010, 2020))
start_month = 1
# get data
for year in target_years:
for month in range(start_month, 12 + 1):
path = 'weather-data/{}/{:02d}/'.format(year, month)
try:
start = datetime(year, month, 1)
end = datetime(year, month, monthrange(year, month)[1]) + timedelta(days=1)
isd = NoaaIsdWeather(start, end).to_pandas_dataframe()
isd = isd[isd['stationName'].str.contains('FLORIDA', regex=True, na=False)]
os.makedirs(path, exist_ok=True)
isd.to_parquet(path + 'data.parquet')
except Exception as e:
print('Month {} in year {} likely has no data.\n'.format(month, year))
print('Exception: {}'.format(e))
| [
2,
1330,
10392,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
11845,
1330,
1227,
9521,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
35560,
495,
4029,
13,
7295,
1330,
16092,
292,
316,
11,
... | 2.433921 | 454 |
#encoding=utf-8
'''
Created on 2017年6月1日
@author: Administrator
'''
import xml.dom.minidom
from _codecs import encode
import random
import nltk
# parse trainingset from xml to text
# if t<2:
# print(relevant)
# print(abstract)
# break
# def nltk_token():
# infile='H:/PHDwork/BioCreative VI/track4_PM/data/training/train_and_dev/PMtask_Triage_TrainingSet_ir.txt'
# outfile='H:/PHDwork/BioCreative VI/track4_PM/data/training/token/PMtask_Triage_TrainingSet_ir.token'
# fin=open(infile,'r',encoding='utf-8')
# fout=open(outfile,'w',encoding='utf-8')
# for line in fin:
# line=line.strip()
# segs=line.split('\t')
# sentence=''
# if len(segs)==4:
# sentence=segs[2]+' '+segs[3]
# else:
# sentence=segs[2]
# tokens = nltk.word_tokenize(sentence)
# sentence_token=" ".join(tokens)
# fout.write(segs[0]+'\t'+segs[1]+'\t'+sentence_token+'\n')
# fin.close()
# fout.close()
#产生 HierarchicalRNN的数据格式,分词分句,第一行是标签,文档之间用空行间隔。
#去掉停用词
#conll形式去掉停用词
#直接从最后的输入文件中切'-'
#产生PMID列表,进行pubtator的NER
#从POS_conll文件直接产生最后的输入文件
if __name__ == "__main__":
parse_xml_trainingset()
# trainset_div_testset()
# nltk_token()
# get_text()
# produce_fastText_format()
# produce_PPIACCNN_format()
# svm_answer()
# drop_stopword()
# drop_stopword_conll()
# fasttext_emb_addfirstline()
# produce_hierarchicalRNN_3Dformat()
# split_connector()
# PMID_list()
# token_conll_inputfile()
# test_train_over()
| [
2,
12685,
7656,
28,
40477,
12,
23,
198,
7061,
6,
198,
41972,
319,
2177,
33176,
112,
21,
17312,
230,
16,
33768,
98,
198,
198,
31,
9800,
25,
22998,
198,
7061,
6,
198,
11748,
35555,
13,
3438,
13,
1084,
312,
296,
198,
6738,
4808,
1981... | 1.697895 | 950 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/9/009 23:01
# @Author : Woe
# @Site :
# @File : Producer.py
# @Software: PyCharm
import asyncio
from MpsiSpider.Request import Request
import threading
import time
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2864,
14,
24,
14,
24,
14,
28694,
2242,
25,
486,
198,
2,
2488,
13838,
220,
1058,... | 2.41 | 100 |
from datetime import datetime
from django.db import models
from JWTAuth.models import Employee
# Create your models here.
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
449,
54,
5603,
1071,
13,
27530,
1330,
36824,
198,
198,
2,
13610,
534,
4981,
994,
13,
628
] | 3.757576 | 33 |
from copy import deepcopy
from tqdm.auto import tqdm
from causallearn.graph.Edges import Edges
from causallearn.graph.GeneralGraph import GeneralGraph
from causallearn.utils.ChoiceGenerator import ChoiceGenerator
from causallearn.utils.cit import *
from causallearn.utils.PCUtils.BackgroundKnowledge import BackgroundKnowledge
citest_cache = dict()
def fas(data, nodes, independence_test_method=fisherz, alpha=0.05, knowledge=None, depth=-1,
verbose=False, stable=True, show_progress=True, cache_variables_map=None):
'''
Implements the "fast adjacency search" used in several causal algorithm in this file. In the fast adjacency
search, at a given stage of the search, an edge X*-*Y is removed from the graph if X _||_ Y | S, where S is a subset
of size d either of adj(X) or of adj(Y), where d is the depth of the search. The fast adjacency search performs this
procedure for each pair of adjacent edges in the graph and for each depth d = 0, 1, 2, ..., d1, where d1 is either
the maximum depth or else the first such depth at which no edges can be removed. The interpretation of this adjacency
search is different for different algorithm, depending on the assumptions of the algorithm. A mapping from {x, y} to
S({x, y}) is returned for edges x *-* y that have been removed.
Parameters
----------
data: data set (numpy ndarray), shape (n_samples, n_features). The input data, where n_samples is the number of
samples and n_features is the number of features.
nodes: The search nodes.
independence_test_method: the function of the independence test being used
[fisherz, chisq, gsq, kci]
- fisherz: Fisher's Z conditional independence test
- chisq: Chi-squared conditional independence test
- gsq: G-squared conditional independence test
- kci: Kernel-based conditional independence test
alpha: float, desired significance level of independence tests (p_value) in (0,1)
knowledge: background background_knowledge
depth: the depth for the fast adjacency search, or -1 if unlimited
verbose: True is verbose output should be printed or logged
stable: run stabilized skeleton discovery if True (default = True)
show_progress: whether to use tqdm to show progress bar
cache_variables_map: This variable a map which contains the variables relate with cache. If it is not None,
it should contain 'data_hash_key' 、'ci_test_hash_key' and 'cardinalities'.
Returns
-------
graph: Causal graph skeleton, where graph.graph[i,j] = graph.graph[j,i] = -1 indicates i --- j.
sep_sets: separated sets of graph
'''
# --------check parameter -----------
if (depth is not None) and type(depth) != int:
raise TypeError("'depth' must be 'int' type!")
if (knowledge is not None) and type(knowledge) != BackgroundKnowledge:
raise TypeError("'background_knowledge' must be 'BackgroundKnowledge' type!")
# --------end check parameter -----------
# ------- initial variable -----------
sep_sets = {}
adjacencies = {node: set() for node in nodes}
if depth is None or depth < 0:
depth = 1000
if independence_test_method == chisq or independence_test_method == gsq:
data = np.apply_along_axis(_unique, 0, data).astype(np.int64)
if cache_variables_map is None:
if independence_test_method == chisq or independence_test_method == gsq:
cardinalities = np.max(data, axis=0) + 1
else:
cardinalities = None
cache_variables_map = {"data_hash_key": hash(str(data)),
"ci_test_hash_key": hash(independence_test_method),
"cardinalities": cardinalities}
# ------- end initial variable ---------
print('Starting Fast Adjacency Search.')
# use tqdm to show progress bar
pbar = tqdm(total=len(nodes)) if show_progress else None
for d in range(depth):
more = False
if d == 0:
more = searchAtDepth0(data, nodes, adjacencies, sep_sets, independence_test_method, alpha, verbose,
knowledge, pbar=pbar, cache_variables_map=cache_variables_map)
else:
if stable:
more = searchAtDepth(data, d, nodes, adjacencies, sep_sets, independence_test_method, alpha, verbose,
knowledge, pbar=pbar, cache_variables_map=cache_variables_map)
else:
more = searchAtDepth_not_stable(data, d, nodes, adjacencies, sep_sets, independence_test_method, alpha,
verbose, knowledge, pbar=pbar, cache_variables_map=cache_variables_map)
if not more:
break
if show_progress: pbar.close()
graph = GeneralGraph(nodes)
for i in range(len(nodes)):
for j in range(i + 1, len(nodes)):
node_x = nodes[i]
node_y = nodes[j]
if adjacencies[node_x].__contains__(node_y):
graph.add_edge(Edges().undirected_edge(node_x, node_y))
print("Finishing Fast Adjacency Search.")
return graph, sep_sets
| [
6738,
4866,
1330,
2769,
30073,
198,
198,
6738,
256,
80,
36020,
13,
23736,
1330,
256,
80,
36020,
198,
198,
6738,
26846,
6765,
1501,
13,
34960,
13,
7407,
3212,
1330,
1717,
3212,
198,
6738,
26846,
6765,
1501,
13,
34960,
13,
12218,
37065,
... | 2.634809 | 1,988 |
import re
import sys
import argparse
import subprocess as sp
parser = argparse.ArgumentParser(description='Process captions file.')
parser.add_argument('--vid','-v',required=True,help='.mp4 video file')
parser.add_argument('--rt','-t',required=True,help='Transcription file')
parser.add_argument('--poi',required=True,help='POI ID')
parser.add_argument('--counter',default=0,help='Utterance counter (for more videos)')
args = parser.parse_args()
# frame counter
i=int(args.counter)
#
activation = False
spk = 'id' + (args.poi).zfill(2)
sp.Popen(["mkdir",spk]).wait()
with open(args.rt,'r') as rt, open('mp4.scp','a') as scp, open('text','a') as txt , open('utt2spk','a') as utt2spk:
for line in rt:
# read times from line
times = re.findall('\d\d:[0-5]\d:[0-5]\d.\d\d\d',line)
# if this is a times line
if len(times) > 0:
# if this is POI speech
if line.split()[-1] == '1':
beg_t = times[0]
end_t = times[1]
utt = spk + '-' + str(i).zfill(5)
out_vid_file = spk + '/' + utt + '.mp4'
utt2spk.write(utt + ' ' + spk + '\n')
scp.write(utt + ' ' + out_vid_file + '\n')
sp.Popen(['ffmpeg', '-y', '-i', args.vid,'-ss',beg_t,'-to',end_t,out_vid_file]).wait()
activation = True
i+=1
# transcription line
elif len(line.split()) > 0:
if activation:
txt.write(utt + ' ' + line)
activation = False
| [
11748,
302,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
355,
599,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
18709,
3144,
507,
2393,
2637,
8,
198,
48610,
13,
2860,
62,
49140,
1078... | 2.296796 | 593 |
from functools import wraps
from flask_login import current_user
from flask import redirect, url_for
from weddingwebsite import login_manager
from .exceptions import NoRolesProvided
def requires_roles(*roles):
"""
Implements role biased authentication over Flask-login
:param roles: Required Roles for authentication
"""
return wrapper
def roles_cannot_access(*roles):
"""
Implements role biased authentication over Flask-login
Users with the provided roles CANNOT access the route.
:param roles: Required Roles for authentication
"""
return wrapper
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42903,
62,
38235,
1330,
1459,
62,
7220,
198,
6738,
42903,
1330,
18941,
11,
19016,
62,
1640,
198,
198,
6738,
10614,
732,
12485,
1330,
17594,
62,
37153,
198,
6738,
764,
1069,
11755,
133... | 3.517442 | 172 |
password="pbkdf2(1000,20,sha512)$aa925206b00a7043$28115a6960f48c6f11bd00bc838e9f2d622c0262"
| [
28712,
2625,
40842,
74,
7568,
17,
7,
12825,
11,
1238,
11,
26270,
25836,
8,
3,
7252,
46351,
22136,
65,
405,
64,
2154,
3559,
3,
2078,
15363,
64,
3388,
1899,
69,
2780,
66,
21,
69,
1157,
17457,
405,
15630,
23,
2548,
68,
24,
69,
17,
... | 1.769231 | 52 |
from dwio.nyu.algos import eGCD
| [
198,
6738,
43756,
952,
13,
3281,
84,
13,
14016,
418,
1330,
304,
38,
8610,
628
] | 2.266667 | 15 |
from setuptools import find_packages, setup
requirements = []
with open("requirements.txt") as f:
for line in f:
line = line.strip()
requirements.append(line)
setup(
name="csv_to_table",
version="0.0.3",
description="Generates a CREATE TABLE statement from a CSV file by guessing at column types",
author="Jack Maney",
author_email="jackmaney@gmail.com",
url="https://github.com/jackmaney/csv_to_table.py.git",
license="MIT",
scripts=["csv_to_table.py"],
long_description=open("README.md").read(),
entry_points={"console_scripts": ["csv-to-table=csv_to_table:main"]},
classifiers=["Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2 :: Only",
"Topic :: Utilities"],
packages=find_packages(),
install_requires=requirements
)
| [
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
11,
9058,
198,
198,
8897,
18883,
796,
17635,
198,
198,
4480,
1280,
7203,
8897,
18883,
13,
14116,
4943,
355,
277,
25,
198,
220,
220,
220,
329,
1627,
287,
277,
25,
198,
220,
220,
220,
22... | 2.545932 | 381 |
"""Base class and task for commands."""
import traceback
from .tasks import Task
class CommandFailedException(BaseException):
"""Class to represent a failure in a command. Used to avoid printing multiple tracebacks on nested commands."""
pass
class _CommandMeta(type):
"""
Metaclass (object that represents a class) for Commands.
Does command registration.
"""
all_commands = {}
@property
def bare_word(cls):
"""All commands are bare_word equivalent to the command."""
return cls.command
def __getitem__(cls, class_name):
"""Returns a command by name. Use like `Command["rails"]`."""
return cls.all_commands[class_name]
def __prepare__(name, bases):
"""Add a default `return_type` to all commands."""
return {'return_type': None}
def __init__(self, name, bases, attrs):
"""Register the command."""
super().__init__(name, bases, attrs)
# Register this command
if hasattr(self, 'command'):
self.all_commands[self.command] = self
class Command(Task, metaclass=_CommandMeta):
"""
A task that runs a command.
To implement a command, inherit from this class and:
- Include a class attribute `command = "cmd"` to specify the name of the command.
- Implement a coroutine _run() which runs the command.
"""
def __init__(self, tosh, arguments):
"""Initialize the command, given its arguments (list of tasks or bare words)."""
super().__init__(tosh)
self._arguments = arguments
self._output = []
self._status_line_tokens = [self._token(self.title)]
async def run(self):
"""Run the command, updating the status. Delegates actual work to `_run`."""
self._status = Task.Status.Running
self._tosh.refresh()
try:
result = await self._run()
self._status = Task.Status.Success
return result
except CommandFailedException:
self._status = Task.Status.Error
raise CommandFailedException()
except BaseException as e:
self._status = Task.Status.Error
self._set_output_text(traceback.format_exc())
raise CommandFailedException()
finally:
self._tosh.refresh()
@staticmethod
# Loads all commands
from . import commands
| [
37811,
14881,
1398,
290,
4876,
329,
9729,
526,
15931,
198,
11748,
12854,
1891,
198,
198,
6738,
764,
83,
6791,
1330,
15941,
628,
198,
4871,
9455,
37,
6255,
16922,
7,
14881,
16922,
2599,
198,
220,
220,
220,
37227,
9487,
284,
2380,
257,
... | 2.603917 | 919 |
import tensorflow as tf
import keras | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
41927,
292
] | 3.6 | 10 |
from .libindy import do_call, create_cb
from ctypes import *
import logging
| [
6738,
764,
8019,
521,
88,
1330,
466,
62,
13345,
11,
2251,
62,
21101,
198,
198,
6738,
269,
19199,
1330,
1635,
198,
198,
11748,
18931,
628
] | 3.16 | 25 |
"""
Created on Aug 28, 2018
@author: ionut
"""
import datetime
import logging
from tornado.web import RequestHandler
from tornado.websocket import WebSocketHandler
import camera
class HomeHandler(RequestHandler):
"""
Handler for / - render index.html
"""
class ImageHandler(WebSocketHandler):
"""
WebSocket Handler for /image - send image data to connected clients
"""
clients = set()
last_packet = datetime.datetime.now()
@classmethod
def broadcast(cls, data):
"""
Send image data to all connected web sockets and remove closed sockets
:param cls: class method
:param data: image data from camera brodcast thread
"""
removable = set()
for client in ImageHandler.clients:
try:
client.write_message(data, binary=True)
ImageHandler.last_packet = datetime.datetime.now()
except Exception as exc:
logging.warning("loop closed for %s: %s", client, exc)
removable.add(client)
for client in removable:
ImageHandler.clients.remove(client)
class VideoHandler(WebSocketHandler):
"""
WebSocket Handler for /video - send video frames to connected clients
"""
clients = set()
last_packet = datetime.datetime.now()
@classmethod
def broadcast(cls, data):
"""
Send video data to all connected web sockets and remove closed sockets
:param cls: class method
:param data: video data from camera brodcast thread
"""
removable = set()
for client in VideoHandler.clients:
try:
client.write_message(data, binary=True)
VideoHandler.last_packet = datetime.datetime.now()
except Exception as exc:
logging.warning("loop closed for %s: %s", client, exc)
removable.add(client)
for client in removable:
VideoHandler.clients.remove(client)
| [
37811,
198,
41972,
319,
2447,
2579,
11,
2864,
198,
198,
31,
9800,
25,
22088,
315,
198,
37811,
198,
198,
11748,
4818,
8079,
198,
11748,
18931,
198,
6738,
33718,
13,
12384,
1330,
19390,
25060,
198,
6738,
33718,
13,
732,
1443,
5459,
1330,
... | 2.5 | 808 |
#!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2020 All Rights Reserved.
------------------------------------------------------
File Name : base
Author : Qizhi Zhang
Email: qizhi.zqz@antgroup.com
Create Time : 2020-05-14 19:58
Description : description what the main function of this file
"""
import tensorflow as tf
import tensorflow.python as python
from stensorflow.global_var import StfConfig
from stensorflow.exception.exception import StfEqualWarning, StfCondException, StfEqualException, StfTypeException, \
StfDTypeException, StfValueException, StfNoneException, StfException
import numpy as np
from stensorflow.random.random import gen_rint64, gen_rint64_from_seed
from typing import Union
import random
import os
import string
random.seed(0)
def cycle_lshift_tensor(x: tf.Tensor, index: tf.Tensor):
"""
Cycle left shift x by index.
:param x: tf.Tensor
:param index: tf.Tensor where x.shape[:-1]==index.shape
:return: y: tf.Tensor, where the vector y[i1,..., in-1,:] is the
cycle left shift of x[i1,..., in-1,:] by index[i1, ...in-1] for any i1, ..., in-1
"""
if x.shape[:-1] != index.shape:
raise StfEqualException(var1_name="x.shape[:-1]", var2_name="index.shape",
var1=x.shape[:-1], var2=index.shape)
module = x.shape[-1]
index = index % module
one_to_n = tf.constant(list(range(module)), dtype='int64')
index_ext = (tf.expand_dims(index, axis=-1) + tf.reshape(one_to_n, shape=[1] * len(index.shape) + [module]))
index_ext = index_ext % one_to_n.shape[0]
b = tf.gather(params=x, indices=index_ext, axis=len(index.shape),
batch_dims=len(index.shape))
return b
def cycle_rshift_tensor(x: tf.Tensor, index: tf.Tensor):
"""
Cycle right shift x by index
:param x: tf.Tensor
:param index: tf.Tensor where x.shape[:-1]==index.shape
:return: y: tf.Tensor, where the vector y[i1,..., in-1,:] is the
cycle right shift of x[i1,..., in-1,:] by index[i1, ...in-1] for any i1, ..., in-1
"""
return cycle_lshift_tensor(x, -index)
class SharedTensorBase:
"""
inner_value: Tensorflow Tensor or Variable of dtype int64 orint32
module: int
shape: list
must have inner_value is not None or shape is not None
"""
@property
def random_uniform_adjoint(self, seed=None):
"""
:param seed: seed for generate random number
:return: generate pseudo-random SharedTensorBase with same shape and
module with self
"""
if seed is not None:
x = gen_rint64_from_seed(shape=self.shape, seed=seed)
else:
x = gen_rint64(self.shape)
# x = tf.random.uniform(shape=self.shape, minval=-(1 << 63), maxval=(1 << 63) - 1, dtype='int64')
adjoint = SharedTensorBase(inner_value=x, module=self.module)
return adjoint
def random_stateless_uniform_adjoint(self):
"""
:param seed: seed for generate random number
:return: generate pseudo-random SharedTensorBase with same shape and
module with self
"""
if self.module is not None:
x = tf.random.stateless_uniform(shape=self.shape, seed=[0, 0], minval=0, maxval=self.module, dtype='int64')
else:
x = tf.random.stateless_uniform(shape=self.shape, seed=[0, 0], minval=None, maxval=None, dtype='int64')
adjoint = SharedTensorBase(inner_value=x, module=self.module)
return adjoint
def split(self, size_splits, axis=0, num=None):
"""
Splits self into a list of sub tensors.
:param size_splits: Either an integer indicating the number of splits along
`axis` or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along `axis`. If a scalar, then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split axis
must match that of the `self`.
:param axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
:param num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
:return:
"""
self.check_inner_value_is_not_None()
inner_values = tf.split(self.inner_value, size_splits, axis, num)
vs = tuple(map(lambda inner_value: SharedTensorBase(inner_value=inner_value, module=self.module), inner_values))
return vs
def slice(self, begin, size):
"""
The usage is refer to tf.slice
:param begin: An `int32` or `int64` `Tensor`.
:param size: An `int32` or `int64` `Tensor`.
:return: A `SharedTensorBase` of the same shape and module as self
"""
self.check_inner_value_is_not_None()
inner_value = tf.slice(self.inner_value, begin=begin, size=size)
return SharedTensorBase(inner_value=inner_value, module=self.module)
    def to_compress_tensor(self, dtype: tf.dtypes.DType = tf.int64) -> tf.Tensor:
        """
        Compress self to a tf.Tensor of dtype.

        When self.module is None the int64 inner value is reinterpreted
        bitwise.  Otherwise the flattened residues are packed base-`module`
        into `dtype` words: `capacity` residues fit into each word, so the
        output has ceil(size / capacity) elements.

        :param dtype: a tf.dtypes.DType object
        :return: a tf.Tensor of dtype
        """
        self.check_inner_value_is_not_None()
        if self.module is None:
            return tf.bitcast(self.inner_value, dtype)
        else:
            # x = tf.reshape(self.inner_value % self.module, [-1])
            x = tf.reshape(self.inner_value, [-1])
            size = x.shape.as_list()[0]
            # One bit of the word is reserved for the sign, hence 8*size-1.
            capacity = int((8 * dtype.size - 1) // np.log2(self.module))  # how much log(module) in dtype
            new_size = int(np.ceil(1.0 * size / capacity))
            # Zero-pad so the flat vector reshapes evenly to [new_size, capacity].
            x = tf.pad(x, paddings=[[0, capacity * new_size - size]])
            x = tf.reshape(x, [new_size, capacity])
            # Weight digit j by module**j, then sum each row into one packed word.
            y = tf.cast(x, dtype) * tf.constant(np.power(self.module, np.arange(capacity)), dtype=dtype,
                                                shape=[1, capacity])
            return tf.reduce_sum(y, axis=[1])
    def decompress_from(self, y: tf.Tensor, shape=None):
        """
        Decompress from a tf.Tensor produced by `to_compress_tensor`,
        writing the result into self.inner_value (in place).

        :param y: tf.Tensor holding the packed words
        :param shape: target shape; must equal self.shape when given
        :raises Exception: if shape is given and differs from self.shape
        """
        if shape is not None:
            if shape != self.shape:
                raise Exception(
                    "must have shape is None or shape==self.shape, but shape={}, self.shape={}".format(shape,
                                                                                                       self.shape))
        else:
            shape = self.shape
        if self.module is None:
            self.inner_value = tf.bitcast(y, 'int64')
        else:
            # Inverse of to_compress_tensor: each word of y holds `capacity`
            # base-`module` digits; extract digit j as (word // module**j) % module.
            capacity = int((8 * y.dtype.size - 1) // np.log2(self.module))  # how much log(module) in dtype
            div_num = tf.constant(np.power(self.module, np.arange(capacity)), dtype=y.dtype, shape=[1, capacity])
            x = (tf.reshape(y, [-1, 1]) // div_num) % self.module
            x = tf.cast(x, 'int64')
            x = tf.reshape(x, [-1])
            size = int(np.prod(shape))
            # Drop the zero padding that was added during compression.
            x = tf.slice(x, [0], [size])
            self.inner_value = tf.reshape(x, shape)
def decompress_from_to_new(self, y: tf.Tensor, shape=None):
"""
Decompress from tf.Tensor
:param y: tf.Tensor
:param shape: self.shape
"""
if shape is not None:
if shape != self.shape:
raise Exception(
"must have shape is None or shape==self.shape, but shape={}, self.shape={}".format(shape,
self.shape))
else:
shape = self.shape
if self.module is None:
inner_value = tf.bitcast(y, 'int64')
else:
capacity = int((8 * y.dtype.size - 1) // np.log2(self.module)) # how much log(module) in dtype
div_num = tf.constant(np.power(self.module, np.arange(capacity)), dtype=y.dtype, shape=[1, capacity])
x = (tf.reshape(y, [-1, 1]) // div_num) % self.module
x = tf.cast(x, 'int64')
x = tf.reshape(x, [-1])
size = int(np.prod(shape))
x = tf.slice(x, [0], [size])
inner_value = tf.reshape(x, shape)
return SharedTensorBase(inner_value=inner_value, module=self.module, shape=self.shape)
def identity(self):
"""
The usage is same as tf.identity
:return: A copy of self
"""
self.check_inner_value_is_not_None()
inner_value = tf.identity(self.inner_value)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def squeeze(self, axis=None):
"""
The usage is same as tf.squeeze
:param axis: An optional list of `ints`. Defaults to `[]`. If specified, only
squeezes the dimensions listed. The dimension index starts at 0. It is an
error to squeeze a dimension that is not 1. Must be in the range
`[-rank(input), rank(input))`. Must be specified if `input` is a
`RaggedTensor`.
:return: A `SharedTensorBase`. Has the same type as `self`.
Contains the same data as `self`, but has one or more dimensions of
size 1 removed.
"""
self.check_inner_value_is_not_None()
inner_value = tf.squeeze(self.inner_value, axis=axis)
return SharedTensorBase(inner_value=inner_value, module=self.module)
    def rshift(self, index: tf.Tensor):
        """
        Cycle right shift self by index along the last axis.

        :param index: tf.Tensor where self.shape[:-1]==index.shape
        :return: y: SharedTensorBase of same module as self, where the vector y[i1,..., in-1,:] is the
            cycle right shift of x[i1,..., in-1,:] by index[i1, ...in-1] for any i1, ..., in-1
        :raises StfEqualException: if index.shape != self.shape[0:-1]
        """
        self.check_inner_value_is_not_None()
        if index.shape != self.shape[0:-1]:
            raise StfEqualException("index.shape", "self.shape[0:-1]", index.shape, self.shape[0:-1])
        index = index % self.shape[-1]
        # Flatten all batch dimensions so cycle_rshift_tensor operates on a
        # 2-D tensor, shift each row, then restore the original shape.
        shifted_reshape = cycle_rshift_tensor(tf.reshape(self.inner_value, shape=[-1, self.shape[-1]]),
                                              tf.reshape(index, shape=[-1]))
        return SharedTensorBase(inner_value=tf.reshape(shifted_reshape, shape=self.shape), module=self.module)
    def lshift(self, index: tf.Tensor):
        """
        Cycle left shift self by index along the last axis.

        :param index: tf.Tensor where x.shape[:-1]==index.shape
        :return: y: SharedTensorBase with same module as self, where the vector y[i1,..., in-1,:] is the
            cycle left shift of x[i1,..., in-1,:] by index[i1, ...in-1] for any i1, ..., in-1
        """
        self.check_inner_value_is_not_None()
        # A left shift by k is a right shift by -k.
        return self.rshift(-index)
def reshape(self, shape):
"""
:param shape: list, new shape
:return: reshaped SharedTensorBase
"""
self.check_inner_value_is_not_None()
inner_value = tf.reshape(self.inner_value, shape)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def transpose(self):
"""
:return: The transposed SharedTensorBase
"""
self.check_inner_value_is_not_None()
inner_value = tf.transpose(self.inner_value)
return SharedTensorBase(inner_value=inner_value, module=self.module)
    def reduce_mean(self, axis, keepdims=False):
        """
        Reduce self by taking the mean along `axis`.

        NOTE(review): the inner value is int64, so tf.reduce_mean performs
        an integer mean (truncated toward zero) before the cast — confirm
        this truncation is intended by callers.

        :param axis: the dimensions to reduce. If `None` (the default), reduces all
            dimensions. Must be in the range `[-len(self.shape),
            len(self.shape))`.
        :param keepdims: if true, retains reduced dimensions with length 1.
        :return: the reduced SharedTensorBase with the same module.
        """
        self.check_inner_value_is_not_None()
        inner_value = tf.reduce_mean(self.inner_value, axis=axis, keepdims=keepdims)
        inner_value = tf.cast(inner_value, 'int64')
        if self.module is not None:
            inner_value %= self.module
        return SharedTensorBase(inner_value=inner_value, module=self.module)
def concat(self, other, axis):
"""
:param other: Another SharedTensorBase
:param axis: Same as 'axis' in tf.concat
:return: A `Tensor` resulting from concatenation of the self and other
"""
self.check_inner_value_is_not_None()
inner_value = tf.concat(values=[self.inner_value, other.inner_value], axis=axis)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def stack(self, other, axis):
"""
:param other: Another SharedTensorBase
:param axis: Same as 'axis' in tf.stack
:return: A stacked SharedTensorBase with the same module of self
"""
self.check_inner_value_is_not_None()
inner_value = tf.stack(values=[self.inner_value, other.inner_value], axis=axis)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def reduce_sum(self, axis, keepdims=False):
"""
:param axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-len(self.shape),
len(self.shape))`.
:param keepdims: If true, retains reduced dimensions with length 1.
:return: The reduced tensor.
"""
self.check_inner_value_is_not_None()
inner_value = tf.reduce_sum(self.inner_value, axis=axis, keepdims=keepdims)
# inner_value = tf.cast(inner_value, 'int64')
if self.module is not None:
inner_value = inner_value % self.module
return SharedTensorBase(inner_value=inner_value, module=self.module)
def gather(self, indices, axis, batch_dims):
"""
:param indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
:param axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
:param batch_dims: An `integer`. The number of batch dimensions. Must be less
than or equal to `rank(indices)`.
:return: A `SharedTensorBase` with same module as self.
"""
self.check_inner_value_is_not_None()
inner_value = tf.gather(params=self.inner_value, indices=indices, axis=axis, batch_dims=batch_dims)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def expand_dims(self, axis):
"""
expand dimension of self, like tf.expand_dims.
:param axis: Integer specifying the dimension index at which to expand the
shape of `input`. Given an input of D dimensions, `axis` must be in range
`[-(D+1), D]` (inclusive).
:return: A `SharedTensorBase` with same module as self.
"""
self.check_inner_value_is_not_None()
inner_value = tf.expand_dims(self.inner_value, axis=axis)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def cumulative_sum(self, axis=-1):
"""
Compute the cumulative_sum of self along the axis
:param axis: Integer specifying the dimension index at which to expand the
shape of `input`. Given an input of D dimensions, `axis` must be in range
`[-(D+1), D]` (inclusive).
:return: SharedTensorBase of same shape and module as self
"""
self.check_inner_value_is_not_None()
perm = list(range(len(self.shape)))
perm[0] = perm[axis]
perm[axis] = 0
x = tf.transpose(self.inner_value, perm=perm)
y = tf.scan(lambda a, b: a + b, x, back_prop=False) % self.module
y = tf.transpose(y, perm=perm)
return SharedTensorBase(inner_value=y, module=self.module)
def ones_like(self):
"""
This operation returns a SharedTensorBase of the
same module and shape as self with all elements set to 1.
:return: A SharedTensorBase with all elements set to one.
"""
self.check_inner_value_is_not_None()
inner_value = tf.ones_like(self.inner_value)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def complement(self):
"""
For a SharedTensorBas with module 2, return the 1-self.
:return: A SharedTensorBas with same shape as self and module 2.
"""
self.check_inner_value_is_not_None()
if self.module != 2:
raise StfValueException("self.module", 2, self.module)
inner_value = tf.math.logical_not(tf.cast(self.inner_value, 'bool'))
inner_value = tf.cast(inner_value, 'int64')
return SharedTensorBase(inner_value=inner_value, module=self.module)
def zeros_like(self):
"""
This operation returns a SharedTensorBase of the
same module and shape as self with all elements set to 0.
:return: A SharedTensorBase with all elements set to 0.
"""
self.check_inner_value_is_not_None()
inner_value = tf.zeros_like(self.inner_value)
return SharedTensorBase(inner_value=inner_value, module=self.module)
def pad(self, paddings, mode="CONSTANT", constant_values=0):
"""
:param paddings: A `Tensor` of type `int32`.
:param mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
:param constant_values: In "CONSTANT" mode, the scalar pad value to use. Must has type 'int64'.
:return: A SharedTensorBase with same module as self.
"""
self.check_inner_value_is_not_None()
inner_value = tf.pad(self.inner_value, paddings=paddings, mode=mode, constant_values=constant_values)
return SharedTensorBase(inner_value=inner_value, module=self.module)
__floordiv__ = __truediv__
class PrivateTensorBase:
"""
The Class of PrivateTensorBase
inner_value: A Tensorflow Tensor or Variable of int64
module: int,
fixedpoint: int
owner: String, "L" or "R"
The represented number = inner_value * 2^{-fixedpoint}
"""
@property
def load_from_tf_tensor(self, x: Union[tf.Tensor, np.ndarray]):
"""
Load from tensorflow tensor
:param x: tf.Tensor
:return:
"""
with tf.device(self.owner):
self.inner_value = tf.cast(tf.multiply(x, (1 << self.fixedpoint)), 'int64')
if self.module is not None:
self.inner_value %= self.module
if self.shape is not None:
if self.shape != self.inner_value.shape.as_list():
raise StfEqualException("self.shape", "self.inner_value.shape",
self.shape, self.inner_value.shape)
def identity(self):
"""
See tf.identity.
:return:
"""
self.check_inner_value_is_not_None()
with tf.device(self.owner):
inner_value = tf.identity(self.inner_value)
return PrivateTensorBase(self.owner, fixedpoint=self.fixedpoint, inner_value=inner_value, module=self.module)
    def load_from_file(self, path: str, record_defaults, batch_size, field_delim=",", skip_row_num=1, skip_col_num=0,
                       repeat=1, clip_value=None, scale=1.0, map_fn=None, output_col_num=None, buffer_size=0):
        """
        Load data from a CSV-like text file into self as fixed-point values.

        :param path: absolute path of file in the disk of self.owner
        :param record_defaults: for example [['a'], ['a'], [1.0], [1.0], [1.0]]
        :param batch_size: number of rows per batch
        :param field_delim: field delim between columns
        :param skip_row_num: skip row number in head of the file
        :param skip_col_num: skip column number in the file
        :param repeat: repeat how many times of the file
        :param clip_value: the features are clip by this value such that |x|<=clip_value
        :param scale: multiply scale for the columns
        :param map_fn: A map function for the columns, for example: lambda x: x[3]*x[4]
        :param output_col_num: output column number
        :param buffer_size: buffer size
        :return: None; the parsed batch is stored via load_from_tf_tensor

        NOTE(review): `clip_value` and `scale` are accepted but never
        forwarded — `clip(data)` is called without them; presumably `clip`
        is a module-level helper, verify it honors these parameters.
        NOTE(review): `data.map(map_func=map_fn)` is applied to a tf.Tensor
        (not a Dataset) at this point — confirm this is not meant to be
        `map_fn(data)`.
        """
        if output_col_num is None:
            output_col_num = len(record_defaults) - skip_col_num
        with tf.device(self.owner):
            data = tf.compat.v1.data.TextLineDataset(path, buffer_size=buffer_size).skip(skip_row_num)
            data_iter = data.repeat(repeat).batch(batch_size).make_one_shot_iterator()
            data = data_iter.get_next()
            data = tf.reshape(data, [batch_size])
            # Split each line into fields; missing fields become "0.0".
            data = tf.strings.split(data, sep=field_delim).to_tensor(default_value="0.0")
            data = data[:, skip_col_num:]
            data = tf.reshape(data, [batch_size, output_col_num])
            data = tf.strings.to_number(data, out_type='float64')
            data = clip(data)
            if map_fn is not None:
                data = data.map(map_func=map_fn)
            self.load_from_tf_tensor(data)
    def load_from_file_withid(self, path: str, record_defaults, batch_size, field_delim=",",
                              skip_row_num=1, id_col_num=0, repeat=1, clip_value=None, use_auto_id_col=False):
        """
        Load data from file, and return the id columns.

        :param path: absolute path of file in the disk of self.owner
        :param record_defaults: for example [['a'], ['a'], [1.0], [1.0], [1.0]]
        :param batch_size: number of rows per batch
        :param field_delim: field delim between columns
        :param skip_row_num: skip row number in head of the file
        :param repeat: repeat how many times of the file
        :param clip_value: the features are clip by this value such that |x|<=clip_value
        :param id_col_num: the number of front columns that are id (not feature or label)
        :param use_auto_id_col: if true, add a auto increase id column [0, 1, 2, ......]
        :return: tf.Tensor of id columns

        NOTE(review): `clip_value` is accepted but `clip(data)` is called
        without it — verify the module-level `clip` helper honors it.
        """
        with tf.device(self.owner):
            output_col_num = len(record_defaults) - id_col_num
            data = tf.compat.v1.data.TextLineDataset(path).skip(skip_row_num)
            data_iter = data.repeat(repeat).batch(batch_size).make_one_shot_iterator()
            data = data_iter.get_next()
            data = tf.reshape(data, [batch_size])
            data = tf.strings.split(data, sep=field_delim).to_tensor(default_value="0.0")
            if use_auto_id_col:
                # Generate ids 0,1,2,... instead of reading them from the file.
                max_value = StfConfig.upper_bd_int64
                idx = tf.compat.v1.data.Dataset.range(max_value).batch(batch_size)
                idx_iter = idx.make_one_shot_iterator()
                idx_batch = tf.reshape(idx_iter.get_next(), [batch_size, id_col_num])
            else:
                idx_batch = data[:, 0:id_col_num]
            data = data[:, id_col_num:]
            data = tf.reshape(data, [batch_size, output_col_num])
            data = tf.strings.to_number(data, out_type='float64')
            data = clip(data)
            self.load_from_tf_tensor(data)
            return idx_batch
def load_first_line_from_file(self, path: str, col_num, sep=",", dtype='float32'):
"""
Load the first line of the file
:param path: absolute path of file in the disk of self.owner
:param col_num: columns number of the file
:param sep: separation character between columns
:param dtype: tf.dtype
:return:
"""
with tf.device(self.owner):
aline = tf.io.read_file(filename=path)
aline = tf.strings.split(aline, sep="\n")
aline = aline[0]
aline = tf.strings.split(aline, sep=sep)
aline = tf.strings.to_number(aline, dtype)
aline = tf.reshape(aline, [col_num])
self.load_from_tf_tensor(aline)
def to_tf_tensor(self, owner=None, dtype='float64') -> tf.Tensor:
"""
Transform self to a tf.Tensor
:param owner: which machine the tf.Tensor lie
:param dtype: tf.Dtype 'float64' or 'int64'
:return: tf.Tensor
"""
self.check_inner_value_is_not_None()
if owner is None:
owner = self.owner
else:
owner = get_device(owner)
if owner != self.owner:
# raise StfEqualException("owner", "self.owner", owner, self.owner)
StfEqualWarning("owner", "self.owner", owner, self.owner)
with tf.device(owner):
if dtype == 'float64':
return tf.cast(tf.cast(self.inner_value, 'float64') / (2 ** self.fixedpoint), 'float64')
elif dtype == 'int64':
return self.inner_value // (2 ** self.fixedpoint)
else:
raise StfValueException("dtype", "float64 or int64", dtype)
def to_tf_str(self, owner=None, precision=StfConfig.to_str_precision, id_col=None):
"""
Transform self to a tf.Tensor of dtype string.
:param owner: which machine the tf.Tensor lie
:param precision: An optional `int`. Defaults to `StfConfig.to_str_precision`.
The post-decimal precision to use for floating point numbers.
Only used if precision > -1.
:param id_col: if not None, add a id column to the data.
:return: tr.Tensor of dtype string
"""
self.check_inner_value_is_not_None()
if owner is None:
owner = self.owner
x = self.to_tf_tensor(owner=owner)
with tf.device(get_device(owner)):
y = tf.strings.as_string(x, precision=precision)
if id_col is not None:
y = tf.concat([id_col, y], axis=1)
y = tf.compat.v1.reduce_join(y, separator=",", axis=-1)
return y
    def to_file(self, path: str, separator=",", precision=StfConfig.to_str_precision, dim: int = 1, owner=None,
                dtype='float64', id_col=None):
        """
        Build a tf op that, when run, writes self to a file on `owner`'s machine.

        :param path: the file to be written in the machine of self.owner.
        :param separator: separation character between columns
        :param precision: post-decimal precision for floating point numbers;
            only used if precision > -1.  Defaults to StfConfig.to_str_precision.
        :param dim: the dimension of self (1 or 2)
        :param owner: 'L' or 'R'; defaults to self.owner
        :param dtype: tf.dtype 'int64' or 'float64'
        :param id_col: if not None, add a id column to the data.
        :return: the tf.io.write_file op
        :raises StfValueException: if dim is outside {1, 2} or dtype is unsupported
        """
        self.check_inner_value_is_not_None()
        if dim > 2 or dim < 1:
            raise StfValueException("dim", "1 or 2", dim)
        if owner is None:
            owner = self.owner
        else:
            owner = get_device(owner)
        with tf.device(owner):
            tf_tensor = self.to_tf_tensor(dtype=dtype)
            if dtype == 'float64':
                tf_tensor = tf.strings.as_string(tf_tensor, precision=precision)
            elif dtype == 'int64':
                tf_tensor = tf.strings.as_string(tf_tensor)
            else:
                raise StfValueException("dtype", 'float64 or int64', dtype)
            # Ids are only meaningful row-wise, i.e. for 2-D output.
            if dim == 2 and id_col is not None:
                tf_tensor = tf.concat(values=[id_col, tf_tensor], axis=1)
            # Join columns with `separator`; for 2-D also join rows with newlines.
            weights = tf.compat.v1.reduce_join(tf_tensor, separator=separator, axis=-1)
            if dim == 2:
                weights = tf.compat.v1.reduce_join(weights, separator="\n", axis=-1)
            write_op = tf.io.write_file(filename=path, contents=weights)
            return write_op
    def to_SharedPairBase(self, other_owner):
        """
        Lift this private tensor to a SharedPairBase shared between
        self.owner and other_owner.

        The shares are (inner_value + 1, -1) rather than (inner_value, 0):
        they still sum to inner_value, but the remote share is never the
        all-zero tensor (which matters for division-style protocols).

        :param other_owner: the party holding the second share
        :return: a SharedPairBase with the same fixedpoint as self
        """
        xL = SharedTensorBase(inner_value=self.inner_value, module=self.module)
        xR = -xL.ones_like()  # use one but not zero, for div
        xL += xL.ones_like()
        x = SharedPairBase(ownerL=self.owner, ownerR=other_owner, xL=xL, xR=xR, fixedpoint=self.fixedpoint)
        return x
def dup_with_precision(self, new_fixedpoint: int):
"""
Duplicate self to a new fixedpoint.
:param new_fixedpoint:
:return: PrivateTensorBase
"""
self.check_inner_value_is_not_None()
if new_fixedpoint == self.fixedpoint:
return self
with tf.device(self.owner):
if new_fixedpoint > self.fixedpoint:
inner_value = self.inner_value * (1 << (new_fixedpoint - self.fixedpoint)) # left shift
else:
inner_value = self.inner_value // (1 << (self.fixedpoint - new_fixedpoint))
return PrivateTensorBase(self.owner, fixedpoint=new_fixedpoint, inner_value=inner_value,
module=self.module)
def gather(self, indices, axis, batch_dims):
""" See tf.gather."""
self.check_inner_value_is_not_None()
with tf.device(self.owner):
inner_value = tf.gather(params=self.inner_value, indices=indices, axis=axis, batch_dims=batch_dims)
return PrivateTensorBase(self.owner, fixedpoint=self.fixedpoint, inner_value=inner_value,
module=self.module)
def stack(self, other, axis):
"""See tf.stack."""
self.check_inner_value_is_not_None()
self.check_module_equal(other)
self.check_owner_equal(other)
fixed_point = min(self.fixedpoint, other.fixedpoint)
x = self.dup_with_precision(fixed_point)
y = other.dup_with_precision(fixed_point)
with tf.device(self.owner):
inner_value = tf.stack([x.inner_value, y.inner_value], axis=axis)
return PrivateTensorBase(self.owner, fixedpoint=fixed_point, inner_value=inner_value, module=self.module)
def concat(self, other, axis):
"""See tf.concat"""
self.check_inner_value_is_not_None()
self.check_module_equal(other)
self.check_owner_equal(other)
fixed_point = min(self.fixedpoint, other.fixedpoint)
x = self.dup_with_precision(fixed_point)
y = other.dup_with_precision(fixed_point)
with tf.device(self.owner):
inner_value = tf.concat([x.inner_value, y.inner_value], axis=axis)
return PrivateTensorBase(self.owner, fixedpoint=fixed_point, inner_value=inner_value, module=self.module)
def reshape(self, shape):
"""See tf.reshape."""
self.check_inner_value_is_not_None()
with tf.device(self.owner):
inner_value = tf.reshape(self.inner_value, shape)
return PrivateTensorBase(owner=self.owner, fixedpoint=self.fixedpoint, inner_value=inner_value,
module=self.module)
def random_uniform_adjoint(self, seed=None):
"""
generate random uniform adjoint of self.
:param seed: A seed get from random.get_seed()
:return: A uniform random PrivateTensor with same owner, module and fixedpoint as self.
the value is differential in every time sess.run it
"""
with tf.device(self.owner):
x = self.to_SharedTensor_like()
y = x.random_uniform_adjoint(seed=seed)
adjoint = PrivateTensorBase(owner=self.owner, fixedpoint=self.fixedpoint, inner_value=y.inner_value,
module=self.module)
return adjoint
def zeros_like(self):
"""See tf.zeros_like."""
with tf.device(self.owner):
inner_value = tf.zeros_like(self.inner_value)
return PrivateTensorBase(owner=self.owner, fixedpoint=self.fixedpoint, inner_value=inner_value,
module=self.module)
def ones_like(self, fixedpoint=None):
"""See tf.ones_like."""
if fixedpoint is None:
fixedpoint = self.fixedpoint
with tf.device(self.owner):
inner_value = (1 << fixedpoint) * tf.ones_like(self.inner_value)
return PrivateTensorBase(owner=self.owner, fixedpoint=fixedpoint, inner_value=inner_value,
module=self.module)
    def to_SharedTensor(self):
        """
        View self as a single share.

        :return: a SharedTensorBase with the same inner_value and module as
            self (the fixedpoint and owner are dropped).
        """
        return SharedTensorBase(inner_value=self.inner_value, module=self.module)
    def to_SharedTensor_like(self):
        """
        Build a value-less template share.

        :return: a SharedTensorBase with inner_value=None and the same
            module and shape as self.
        """
        return SharedTensorBase(module=self.module, shape=self.shape)
class SharedPairBase:
"""
A SharedPairBase x is represented by x= (xL+xR mod n)*2^{-fixedpoint},
where xL is in ownerL and xR is in ownerR, n=xL.module=xR.module.
:param ownerL: str (for example, "L") or object of python.framework.device_spec.DeviceSpecV2
:param ownerR: str (for example, "R") or object of python.framework.device_spec.DeviceSpecV2
:param xL: a SharedTensorBase.
:param xR: a SharedTensorBase.
:param fixedpoint: int. default is StfConfig.default_fixed_point
:param shape: list of int
Shape or (xL,xR) must not be None.
"""
def mirror(self):
"""
Get the mirrored SharedPairBase of self.
:return:
"""
ownerR = self.ownerL
ownerL = self.ownerR
xR = self.xL
xL = self.xR
return SharedPairBase(ownerL, ownerR, xL, xR, self.fixedpoint)
def split(self, size_splits, axis: int = 0, num=None):
"""
See tf.split.
:param size_splits:
:param axis:
:param num:
:return:
"""
with tf.device(self.ownerL):
xLs = self.xL.split(size_splits=size_splits, axis=axis, num=num)
with tf.device(self.ownerR):
xRs = self.xR.split(size_splits=size_splits, axis=axis, num=num)
sps = []
for xL_xR in zip(xLs, xRs):
sps += [SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR,
xL=xL_xR[0], xR=xL_xR[1], fixedpoint=self.fixedpoint)]
return tuple(sps)
    def to_SharedTensor_like(self):
        """
        Build a value-less template share.

        :return: A SharedTensorBase with inner_value=None and the same
            module and shape as self (module taken from the right share).
        """
        return SharedTensorBase(module=self.xR.module, shape=self.shape)
def identity(self, ownerL=None, ownerR=None):
"""
See tf.identity.
:param ownerL:
:param ownerR:
:return:
"""
if ownerL is None:
ownerL = self.ownerL
if ownerR is None:
ownerR = self.ownerR
with tf.device(ownerL):
xL = self.xL.identity()
with tf.device(ownerR):
xR = self.xR.identity()
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=self.fixedpoint)
    def to_private(self, owner) -> PrivateTensorBase:
        """
        Reveal self to `owner` as a PrivateTensorBase by adding the two
        shares on that party's device.

        NOTE(review): the sum is not reduced modulo xL.module here —
        presumably int64 wrap-around (module None) or a later reduction is
        relied upon; confirm for small modules.

        :param owner: the party that receives the combined shares
        :return: the combined PrivateTensorBase
        """
        with tf.device(get_device(owner)):
            x = self.xL.inner_value + self.xR.inner_value
            return PrivateTensorBase(owner=owner, fixedpoint=self.fixedpoint, inner_value=x, module=self.xL.module)
def to_tf_tensor(self, owner) -> tf.Tensor:
"""
transform self to a tensorflow Tensor.
:param owner:
:return:
"""
with tf.device(get_device(owner)):
x = self.to_private(owner)
return x.to_tf_tensor()
    def ones_like(self):
        """
        Return a SharedPairBase of ones (see tf.ones_like).

        The left share is all zeros and the right share is all ones, with
        fixedpoint 0, so the pair decodes to exactly 1.

        :return: a SharedPairBase of ones with fixedpoint 0
        """
        with tf.device(self.ownerL):
            xL = self.xL.zeros_like()
        with tf.device(self.ownerR):
            xR = self.xR.ones_like()  # << self.fixedpoint
        return SharedPairBase(self.ownerL, self.ownerR, xL, xR, fixedpoint=0)
    def complement(self):
        """
        Compute 1 - self for module-2 (boolean) shares.

        Only the left share is complemented: modulo 2, (1 - xL) + xR is
        congruent to 1 - (xL + xR), so the pair decodes to the logical NOT
        of self without touching the right share.

        :return: the complemented SharedPairBase
        """
        with tf.device(self.ownerL):
            xL = self.xL.complement()
        return SharedPairBase(self.ownerL, self.ownerR, xL, self.xR, fixedpoint=self.fixedpoint)
def zeros_like(self):
"""
See tf.zeros_like.
:return:
"""
with tf.device(self.ownerL):
xL = self.xL.zeros_like()
with tf.device(self.ownerR):
xR = self.xR.zeros_like()
return SharedPairBase(self.ownerL, self.ownerR, xL, xR, fixedpoint=self.fixedpoint)
def cumulative_sum(self, axis=-1):
"""
Compute the cumulative_sum of self alone the given axis.
:param axis:
:return:
"""
with tf.device(self.ownerL):
xL = self.xL.cumulative_sum(axis)
with tf.device(self.ownerR):
xR = self.xR.cumulative_sum(axis)
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=self.fixedpoint)
    def load_from_tf_tensor(self, x):
        """
        Load a tensorflow Tensor into self as a fresh sharing.

        The plaintext is fixed-point encoded on ownerL as u, then split as
        xL = -1 (constant) and xR = u + 1, so xL + xR == u while neither
        share is the all-zero tensor.

        :param x: tf.Tensor holding the plaintext values
        """
        with tf.device(self.ownerL):
            px = PrivateTensorBase(owner=self.ownerL, fixedpoint=self.fixedpoint)
            px.load_from_tf_tensor(x)
            u = px.to_SharedTensor()
            self.xL = -u.ones_like()
        with tf.device(self.ownerR):
            self.xR = u + u.ones_like()
        self.fixedpoint = px.fixedpoint
def _align_owner(self, other):
"""
Align the owner of other with self.
:param other: A SharedPairBase.
:return: A SharedPairBase with same value as `other` satisfying ownerL=self.ownerL
and ownerR=self.ownerR.
"""
if self.ownerL == other.ownerL and self.ownerR == other.ownerR:
return other
elif self.ownerL == other.ownerR and self.ownerR == other.ownerL:
return other.mirror()
else:
raise Exception("ownerL must be same, ownerR must be same.")
def concat(self, other, axis):
"""
See tf.concat.
:param other:
:param axis:
:return:
"""
other = self._align_owner(other)
fixed_point = min(self.fixedpoint, other.fixedpoint)
alter_self = self.dup_with_precision(fixed_point)
other = other.dup_with_precision(fixed_point)
with tf.device(self.ownerL):
xL = alter_self.xL.concat(other.xL, axis=axis)
with tf.device(self.ownerR):
xR = alter_self.xR.concat(other.xR, axis=axis)
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=fixed_point)
def stack(self, other, axis):
"""
See tf.stack.
:param other:
:param axis:
:return:
"""
other = self._align_owner(other)
fixed_point = min(self.fixedpoint, other.fixedpoint)
alter_self = self.dup_with_precision(fixed_point)
other = other.dup_with_precision(fixed_point)
with tf.device(self.ownerL):
xL = alter_self.xL.stack(other.xL, axis=axis)
with tf.device(self.ownerR):
xR = alter_self.xR.stack(other.xR, axis=axis)
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=fixed_point)
    def dup_with_precision(self, new_fixedpoint: int):
        """
        Duplicate self to a new SharedPairBase with new fixedpoint.

        When lowering the precision, the right share is rounded via
        xR = -((-xR) >> k): the two shares are truncated in opposite
        directions, which keeps the reconstruction error of the pair
        bounded instead of letting both truncations accumulate.

        :param new_fixedpoint: the target fixedpoint
        :return: the rescaled SharedPairBase (self when unchanged)
        """
        if new_fixedpoint > self.fixedpoint:
            with tf.device(self.ownerL):
                xL = self.xL << (new_fixedpoint - self.fixedpoint)  # left shift
            with tf.device(self.ownerR):
                xR = self.xR << (new_fixedpoint - self.fixedpoint)  # left shift
            return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=new_fixedpoint)
        elif new_fixedpoint < self.fixedpoint:
            with tf.device(self.ownerL):
                xL = self.xL >> (self.fixedpoint - new_fixedpoint)  # right shift
            with tf.device(self.ownerR):
                xR = -((-self.xR) >> (self.fixedpoint - new_fixedpoint))  # right shift
            return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=new_fixedpoint)
        else:
            return self
def reshape(self, shape):
"""
See tf.reshape.
:param shape:
:return:
"""
with tf.device(self.ownerL):
xL = self.xL.reshape(shape)
with tf.device(self.ownerR):
xR = self.xR.reshape(shape)
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=self.fixedpoint)
def squeeze(self, axis=None):
"""
See tf.squeeze.
:param axis:
:return:
"""
with tf.device(self.ownerL):
xL = self.xL.squeeze(axis=axis)
with tf.device(self.ownerR):
xR = self.xR.squeeze(axis=axis)
return SharedPairBase(ownerL=self.ownerL, ownerR=self.ownerR, xL=xL, xR=xR, fixedpoint=self.fixedpoint)
def to_tf_str(self, owner, precision=StfConfig.to_str_precision, id_col=None):
"""
Transform self to tf.string.
:param owner: The owner of tf
:param precision:
:param id_col:
:return:
"""
return self.to_private(owner=owner).to_tf_str(precision=precision, id_col=id_col)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
37811,
198,
220,
220,
3738,
4912,
198,
220,
220,
15069,
357,
66,
8,
5472,
12,
42334,
1439,
6923,
33876,
13,
198,
220,
220,
20368,
19351,
438,
198,
... | 2.165874 | 19,352 |
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from apps.archivo.serializers import FotoEmpleadoSerializer
from apps.comun.serializers import DireccionSerializer
from apps.empleado.models import Empleado, DatosUsuarioEmpleado
from apps.punto_control.serializers import PuntoControlSerializer
from apps.usuario.serializers import HuellaDigitalSerializer, PasswordClienteSerializer
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
11,
4912,
201,
198,
201,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
201,
198,
201,
198,
6738,
6725,
13,
998,
23593,
13,
46911,
11341,
1330,
376,
2069,
10161,
1... | 3.240876 | 137 |
import _kratos
from .util import const
from typing import Union, List
from _kratos import get_fn_ln
| [
11748,
4808,
74,
10366,
418,
198,
6738,
764,
22602,
1330,
1500,
198,
6738,
19720,
1330,
4479,
11,
7343,
198,
6738,
4808,
74,
10366,
418,
1330,
651,
62,
22184,
62,
18755,
628,
628,
628
] | 3.181818 | 33 |
from tensorflow_core.python.keras.engine.sequential import Sequential
from tensorflow_core.python.keras.layers import ConvLSTM2D, Flatten, Dense, Dropout, concatenate
from tensorflow_core.python.keras.layers.normalization import BatchNormalization
from tensorflow_core.python.keras.models import Sequential, Model
from tensorflow_core.python.keras.layers import (ConvLSTM2D, BatchNormalization, Convolution3D, Convolution2D,
TimeDistributed, MaxPooling2D, UpSampling2D, Input, merge)
from ilab.utils import (categorical_crossentropy_3d_w, softmax_3d, softmax_2d)
| [
6738,
11192,
273,
11125,
62,
7295,
13,
29412,
13,
6122,
292,
13,
18392,
13,
3107,
1843,
1330,
24604,
1843,
198,
6738,
11192,
273,
11125,
62,
7295,
13,
29412,
13,
6122,
292,
13,
75,
6962,
1330,
34872,
43,
2257,
44,
17,
35,
11,
1610,
... | 2.698198 | 222 |
import os
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision.datasets import cifar
from networks import cifar_archs, ilsvrc12_archs
from networks import weights_init
from netslim import prune, load_pruned_model, update_bn, update_bn_by_names, \
network_slimming, global_optimal_thresholding, get_norm_layer_names
num_classes = {
"cifar10": 10,
"cifar100": 100,
"ilsvrc12": None
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check pruned model')
parser.add_argument('resume', default='',
help='path to a trained model weight')
parser.add_argument('--pr', type=float, default=-1, metavar='PR',
help='ratio of pruned channels to total channels, -1: do not prune, 1: optimal prune')
args = parser.parse_args()
_, dataset, arch = args.resume.split(os.sep)[-3].split('-')
input_shape = (3, 32, 32) if dataset != "ilsvrc12" else (3, 224, 224)
if "vgg" not in arch:
from thop_res import profile
else:
from thop import profile
archs = cifar_archs if num_classes[dataset] else ilsvrc12_archs
model = archs[arch](num_classes=num_classes[dataset]) if num_classes[dataset] else archs[arch]()
# prune related settings
if args.resume:
try:
model.load_state_dict(torch.load(args.resume, map_location="cpu"))
except:
print("Cannot load state_dict directly, trying to load pruned weight ...")
model = load_pruned_model(model, torch.load(args.resume, map_location="cpu"))
channel_select = True
if args.pr > 0.999:
model = prune(model, input_shape, prune_ratio=args.pr, channel_select=channel_select)
elif args.pr > 0:
model = prune(model, input_shape, prune_method=network_slimming, prune_ratio=args.pr, channel_select=channel_select)
#print(model)
#model.eval()
flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32),), verbose=False)
#print("FLOPS: {:,}\nParams: {:,}".format(int(flops), int(params)))
from torchsummary import summary
summary(model, (3, 32, 32), device="cpu")
print("FLOPS: {:.2f}M\nParams: {:.2f}M".format(flops/1024/1024, params/1024/1024)) | [
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
10178,
1330,
31408,
198,
6738,
28034,
10178,
13,
19608,
292,
1039,
1330,
269,... | 2.422521 | 968 |
import pygame as pg
import math
run : bool = True
pg.init()
pg.font.init()
WINDOW_W : int = 1600
WINDOW_H : int = 1000
fps : int = 60
clock = pg.time.Clock()
drag_allowed : bool = False
zoom : float = 1
GREEN : tuple = (220, 230, 190)
window = pg.display.set_mode((WINDOW_W, WINDOW_H))
pg.display.set_caption("Live Bus Map")
map_path : str = r"C:\Users\Felix\Documents\Python\Mobiliteit\map\images\map.png"
map_img = pg.image.load(map_path)
map_rect = map_img.get_rect()
map_image = pg.transform.scale(map_img, (int(map_img.get_width() * zoom), int(map_img.get_width() * zoom)))
map_rectangle = map_image.get_rect()
map_rectangle.center = (int(WINDOW_W/2), int(WINDOW_H/2))
stations : dict = {"Capellen Police" : (133, 500),
"Capellen Klouschter" : (100, 550),
"Windhof" : (1050, 720),
"Windhof Ecoparc" : (1030, 703)
}
stations_points : list = list(stations.values())
bus_colors : list = [pg.Color("green"), pg.Color("yellow"), pg.Color("orange"), pg.Color("red")]
font =pg.font.SysFont(None, 24)
center_map_text = font.render('Center Map', True, pg.Color("red"))
center_map_rect = center_map_text.get_rect(x=20, y=20)
center_map_box = pg.draw.rect(window, pg.Color("red"), (center_map_rect.x - 5, center_map_rect.y-5, 100, 25), 3)
bus_one = Bus(2, "Capellen Police", "Capellen Klouschter", window)
done = True
while run == True:
clock.tick(fps)
window.fill(GREEN)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
run = False
if event.type == pg.MOUSEBUTTONDOWN:
mousex, mousey = pg.mouse.get_pos()
mouserect = pg.Rect(mousex, mousey, 1, 1)
print(mousex, mousey)
drag_allowed = not drag_allowed
if center_map_box.colliderect(mouserect):
map_rectangle.center = map_rect.center = (int(WINDOW_W/2), int(WINDOW_H/2))
done = True
if event.button == 4:
mouse = pg.mouse.get_pos()
if (zoom + 0.1) <= 2.5:
zoom += 0.1
print(f"IMAGE ORIGINAL SIZE: {map_img.get_width(), map_img.get_height()}")
img = pg.transform.scale(map_img, (int(map_img.get_width() * zoom), int(map_img.get_height() * zoom)))
rect = img.get_rect()
print(f"IMAGE SIZE AFTER: {img.get_width(), img.get_height()}")
rect.centerx = int((map_rectangle.centerx / map_img.get_width()) * img.get_width())
rect.centery = int((map_rectangle.centery / map_img.get_height()) * img.get_height())
print(f"RECT CENTER IN: {rect.centerx, rect.centery}")
map_image, map_rectangle = img, rect
print(zoom)
done = True
if event.button == 5:
## If Mousewheel turned backwards try to zoom out except if reached maximum zoom out
if (zoom - 0.1) >= 0.1:
zoom -= 0.1
## New image with the new width and height adapted to the zoom
img = pg.transform.scale(map_img, (int(map_img.get_width() * zoom), int(map_img.get_height() * zoom)))
rect = img.get_rect()
## The new center x coordinate of the map = (old center x coordinate of the map / old width of the map) * new width of the map
## Example: old center x = 500, old width = 1000, new width = 1300
## new center x = (500 / 1000) * 1300 = 0.5 * 1300 = 650
## So the old center was at x = 500 and the new at x = 650
rect.centerx = int((map_rectangle.centerx / map_img.get_width()) * img.get_width())
rect.centery = int((map_rectangle.centery / map_img.get_height()) * img.get_height())
print(f"RECT CENTER OUT: {rect.centerx, rect.centery}")
map_image, map_rectangle = img, rect
print(zoom)
done = True
if event.type == pg.MOUSEBUTTONUP:
drag_allowed = False
if event.type == pg.MOUSEMOTION and drag_allowed:
## Follow the mouse cursor with the map
map_rect.move_ip(event.rel)
map_rectangle.move_ip(event.rel)
index : int = 0
## Update all the positions of the stations
for x, y in stations_points:
x += event.rel[0]
y += event.rel[1]
stations_points[int(index)] = (x, y)
index += 1
done = True
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
pg.quit()
run = False
if event.key == pg.K_c:
map_rectangle.center = map_rect.center = (int(WINDOW_W/2), int(WINDOW_H/2))
done = True
if done:
window.blit(map_image, map_rectangle)
draw_stations(window)
bus_one.draw()
pg.draw.rect(window, pg.Color("grey"), (center_map_rect.x - 5, center_map_rect.y-5, 100, 25), 0)
pg.draw.rect(window, pg.Color("black"), (center_map_rect.x - 5, center_map_rect.y-5, 100, 25), 2)
window.blit(center_map_text, center_map_rect)
pg.display.update()
done = False
pg.quit() | [
11748,
12972,
6057,
355,
23241,
201,
198,
11748,
10688,
201,
198,
201,
198,
5143,
1058,
20512,
796,
6407,
201,
198,
201,
198,
6024,
13,
15003,
3419,
201,
198,
6024,
13,
10331,
13,
15003,
3419,
201,
198,
201,
198,
201,
198,
28929,
3913... | 1.900332 | 3,010 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: Gevent.py
Author: Scott Yang(Scott)
Email: yangyingfa@skybility.com
Copyright: Copyright (c) 2021, Skybility Software Co.,Ltd. All rights reserved.
Description:
"""
import gevent
import time
from gevent import monkey
monkey.patch_all()
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
8979,
25,
2269,
1151,
13,
9078,
198,
13838,
25,
4746,
10998,
7,
19040,
8,
198,
15333,
25,
331,
648,
... | 2.785124 | 121 |
#!/usr/bin/python
import spear.LPR.plateLocate, sys
import spear.LPR.imageProcessing
input_image_path = str(sys.argv[1])
img = spear.SimpleCV.Image(input_image_path)
cropped_img = spear.LPR.imageProcessing.cropToROI(img)
eq_img = spear.LPR.imageProcessing.equalize(cropped_img)
rect_obj_list = spear.LPR.plateLocate.findPlate(eq_img, perform_unit_test=True, right_click_save=True)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
17514,
13,
43,
4805,
13,
6816,
43,
13369,
11,
25064,
198,
11748,
17514,
13,
43,
4805,
13,
9060,
18709,
278,
198,
198,
15414,
62,
9060,
62,
6978,
796,
965,
7,
17597,
13,
853,
8... | 2.562914 | 151 |
from direct.distributed.DistributedSmoothNodeBase import DistributedSmoothNodeBase
from direct.distributed.GridParent import GridParent
class GridChild:
"""
Any object that expects to be parented to a grid should inherit from this.
It works with GridParent to manage its grid cell hierarchy in the scenegraph.
"""
@report(types = ['args'], dConfigParam = 'smoothnode')
class SmoothGridChild(GridChild):
"""
SmoothNodes have a special requirement in that they need to send
their current cell along with their telemetry data stream. This
allows the distributed receiving objects to update their grid parent
according to this value, rather than the setLocation() data.
Use this instead of GridNode when you expect this object to send its
telemetry data out.
"""
@report(types = ['args'], dConfigParam = 'smoothnode')
@report(types = ['args'], dConfigParam = 'smoothnode')
| [
6738,
1277,
13,
17080,
6169,
13,
20344,
6169,
7556,
5226,
19667,
14881,
1330,
4307,
6169,
7556,
5226,
19667,
14881,
198,
6738,
1277,
13,
17080,
6169,
13,
41339,
24546,
1330,
24846,
24546,
198,
198,
4871,
24846,
16424,
25,
198,
220,
220,
... | 3.498141 | 269 |
"Transform functions for raw midi files"
from enum import Enum
import music21
PIANO_TYPES = list(range(24)) + list(range(80, 96)) # Piano, Synths
PLUCK_TYPES = list(range(24, 40)) + list(range(104, 112)) # Guitar, Bass, Ethnic
BRIGHT_TYPES = list(range(40, 56)) + list(range(56, 80))
PIANO_RANGE = (21, 109)
type2inst = {
# use print_music21_instruments() to see supported types
Track.PIANO: 0, # Piano
Track.PLUCK: 24, # Guitar
Track.BRIGHT: 40, # Violin
Track.PERC: 114, # Steel Drum
}
# INFO_TYPES = set(['TIME_SIGNATURE', 'KEY_SIGNATURE'])
INFO_TYPES = set(['TIME_SIGNATURE', 'KEY_SIGNATURE', 'SET_TEMPO']) | [
1,
41762,
5499,
329,
8246,
3095,
72,
3696,
1,
198,
6738,
33829,
1330,
2039,
388,
198,
11748,
2647,
2481,
198,
198,
11901,
1565,
46,
62,
9936,
47,
1546,
796,
1351,
7,
9521,
7,
1731,
4008,
1343,
1351,
7,
9521,
7,
1795,
11,
9907,
400... | 2.46332 | 259 |
# _*_ coding:utf-8 _*_
__author__ = 'WANGY'
__date__ = '2018/8/9 12:42'
import xadmin
from .models import Singer, Album, Audio, Song, AlbumDetail, AudioDetail
from .forms import SingerAdminForm, AlbumAdminForm, SongAdminForm
from common.base import CommonAdmin
xadmin.site.register(Singer, SingerAdmin)
xadmin.site.register(Album, AlbumAdmin)
xadmin.site.register(Audio, AudioAdmin)
xadmin.site.register(Song, SongAdmin)
xadmin.site.register(AlbumDetail, AlbumDetailAdmin)
xadmin.site.register(AudioDetail, AudioDetailAdmin)
| [
2,
4808,
9,
62,
19617,
25,
40477,
12,
23,
4808,
9,
62,
198,
834,
9800,
834,
796,
705,
54,
15567,
56,
6,
198,
834,
4475,
834,
796,
705,
7908,
14,
23,
14,
24,
1105,
25,
3682,
6,
198,
198,
11748,
2124,
28482,
198,
198,
6738,
764,... | 2.950549 | 182 |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# elements.py
#
# This source contains the class with knowledge about elements that
# can be placed on a page.
#
import sys # Import access to some deep Python functions
import drawBot
if __name__ == "__main__":
sys.path.insert(0, "..") # So we can import pagebotnano002 without installing.
from pagebotnano_005.toolbox.color import asColor
class Element:
"""Base class of all elements that can be placed on a page.
Class names start with a capital. See a class as a factory of element objects
(name spelled with an initial lower case.)
>>> from pagebotnano_005.document import Document
>>> doc = Document()
>>> page = doc.newPage()
>>> page
<Page pn=1 w=595 h=842 elements=0>
"""
def build(self, x, y, doc, page, parent=None):
"""Build the content of the element, including background color,
stroked frame and content of the inheriting classes.
>>> from pagebotnano_005.document import Document
>>> doc = Document()
>>> page = doc.newPage()
>>> e = Element(10, 10, 100, 100, fill=(1, 0, 0))
>>> page.addElement(e)
>>> doc.build() # Recursively draws all pages and their elements.
>>> doc.export('_export/Element-build.pdf') # Export an build again.
"""
# Calculate the new origing relative to self, for all drawing,
# including the child elements
ox = x + self.x
oy = y + self.y
# Do building of the element background here.
#Let inheriting subclasses handle what must appear on the background.
self.drawBackground(ox, oy, doc, page, parent)
# Then let inheriting subclasses draw any content (if they have it)
self.drawContent(ox, oy, doc, page, parent)
# Then recursively pass the build instruction on to all child elements.
# Use the position of self as origin for the relative position of the children.
for element in self.elements:
element.build(ox, oy, doc, page, parent=self)
# Do building of the element foreground here.
#Let inheriting subclasses handle what must appear on the background.
self.drawForeground(ox, oy, doc, page, parent)
def drawContent(self, ox, oy, doc, page, parent):
"""Default behavior is to do nothing, as the Element (and e.h. Rect)
don’t have content to draw, besides the background and frame.
"""
pass
def drawBackground(self, ox, oy, doc, page, parent):
"""Draw the background of the element. Default is to just draw the
rectangle with the fill color, if it is defined. This method should be
redefined by inheriting subclasses that need different foreground drawing.
"""
if self.fill is not None:
drawBot.stroke(None) # Any stroke drawing is done in foreground
r, g, b, a = asColor(self.fill)
if r is None:
drawBot.fill(None)
else:
drawBot.fill(r, g, b, a)
if self.w is not None and self.h is not None:
drawBot.rect(ox, oy, self.w, self.h)
def drawForeground(self, ox, oy, doc, page, parent):
"""Draw the foreground of the element. Default is to just draw the
rectangle with the fill color, if it is defined. This method should be
redefined by inheriting subclasses that need different foreground drawing.
"""
if self.stroke is not None and self.strokeWidth: # Only if defined.
drawBot.fill(None) # Fill is done in background drawing.
r, g, b, a = asColor(self.stroke)
if r is None:
drawBot.stroke(None)
else:
drawBot.strokeWidth(self.strokeWidth)
drawBot.stroke(r, g, b, a)
if self.w is not None and self.h is not None:
drawBot.rect(ox, oy, self.w, self.h)
class Rect(Element):
"""This element draws a simple rectangle. This is identical to the default
behavior of the base Element class, so nothing needs to be defined here.
>>> from pagebotnano_005.document import Document
>>> doc = Document()
>>> page = doc.newPage()
>>> padding = 40
>>> e = Rect(padding, padding, page.w-2*padding, page.h-2*padding, fill=(1, 0.2, 1))
>>> page.addElement(e)
>>> doc.export('_export/Rect.pdf') # Build and export.
"""
class Text(Element):
"""This element draws a FormattedString on a defined place. Not text wrapping
is done.
>>> from pagebotnano_005.document import Document
>>> fs = Text.FormattedString('Hello world', font='Georgia', fontSize=100)
>>> doc = Document()
>>> page = doc.newPage()
>>> padding = 40
>>> e = Text(fs, padding, page.h/2, fill=(1, 0, 0))
>>> page.addElement(e)
>>> doc.export('_export/Text.pdf') # Build and export.
"""
# Add abbreviation for easier usage.
FormattedString = FS = drawBot.FormattedString
def drawContent(self, ox, oy, dox, page, parent):
"""We just need to define drawing of the content. The rest of behavior
for the Text element (including drawing on the background and the frame)
is handled by the base Element class.
"""
drawBot.text(self.fs, (ox, oy))
class TextBox(Text):
"""This elements draws a FormattedString as wrapped text on a defined place
with a defined width. It handles overflow and hyphenation for the given language
code.
>>> from pagebotnano_005.document import Document
>>> from pagebotnano_005.toolbox.loremipsum import loremipsum
>>> headLine = 'Example of TextBox overflow\\n'
>>> txt = loremipsum()
>>> fontSize = 18
>>> headSize = fontSize*1.5
>>> fs = Text.FS(headLine, font='Georgia-Bold', lineHeight=headSize*1.4, fontSize=headSize)
>>> fs.append(Text.FS(txt, font='Georgia', lineHeight=fontSize*1.4, fontSize=fontSize))
>>> doc = Document()
>>> padding = 80
>>> previousPage = None
>>> while True:
... page = doc.newPage()
... # Add text element with page number
... pn = TextBox.FS(str(page.pn), align='center', font='Georgia', fontSize=16)
... page.addElement(Text(pn, page.w/2, padding/2))
... e = TextBox(fs, x=padding, y=padding, w=page.w-2*padding, h=page.h-2*padding, fill=1)
... page.addElement(e)
... fs = e.getOverflow(fs)
... if not fs:
... break
>>> doc.export('_export/TextBox-Overflow.pdf') # Build and export.
"""
def __init__(self, fs, x, y, w, h=None, fill=None, stroke=None,
strokeWidth=None):
"""Call the super class element with all standard attributes.
Different from the Text class, now the width `w` is a required attribute.
"""
Text.__init__(self, fs, x=x, y=y, w=w, h=h, fill=fill, stroke=stroke,
strokeWidth=strokeWidth)
def getOverflow(self, fs=None, w=None, h=None, doc=None):
"""Flow the text into self and put any overflow in self.next.
If there is no self.next defined, then store the remaining overflow
text in self.overflow.
"""
# If another FormattedString is defined, then use that.
# Otherwise use the existing self.fs
if fs is None:
fs = self.fs
# Since we cannot test the overflow without drawing in DrawBot, we'll
# create a text column far outside the page boundaries.
# Unfortunately this increases the PDF export size.
h = w or self.h
w = h or self.w
if h is None and w is not None:
# Height of the box is undefined, measure it from the defined column width.
_, h = drawBot.textSize(fs, width=w)
elif w is None and h is not None:
# Width of the box is undefined, measure it from the defined column height.
w, _ = drawBot.textSize(fs, height=h)
# Height of the box is undefined, measure it from the defined column width.
return drawBot.textBox(fs, (10000, 0, w, h))
def drawContent(self, ox, oy, doc, page, parent):
"""We just need to define drawing of the foreground. The rest of behavior
for the Text element (including drawing on the background) is handled
by the base Element class.
"""
# Store any overflow to be processed by the caller.
# Note that this should never happen, as the context of the text box
# better can be processed by self.flowText before any drawing is done.
# It is assumed that self.h is set, otherwise take the height of the
# text column that fits all text.
h = self.h
w = self.w
if h is None and w is not None:
# Height of the box is undefined, measure it from the defined column width.
_, h = drawBot.textSize(self.fs, width=w)
elif w is None and h is not None:
# Width of the box is undefined, measure it from the defined column height.
w, _ = drawBot.textSize(self.fs, height=h)
# Else if width and height are both defined or undefined, we can used them as is.
# In case width and height are both defined, it may result in a new overflow
# FormattedString. Store that in self.overflow.
self.overflow = drawBot.textBox(self.fs, (ox, oy, self.w, self.h or page.h))
if __name__ == "__main__":
# Running this document will execute all >>> comments as test of this source.
import doctest
doctest.testmod()[0] | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
16529,
32501,
198,
2,
198,
2,
220,
220,
350,
317,
402,
412,
347,
440,
309,
220,
399,
317,
399,
440,
198,
2,
1... | 2.644486 | 3,772 |
# Aim: to rotate a point P by θ°:, centre O.
# MIT License
# Copyright (c) 2020 syed343 (GitHub username)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pygame
from math import sin, cos, radians
# P is referred to as p in the code
p = [float(i) for i in input('Enter point P as a, b: ').split(',')]
# t = theta = angle (in degrees) by which P is rotated, centre origin
t = float(input('Enter starting value for angle θ°: '))
WIDTH = 500
OFFSET = int(500/2)
R = 3 # radius of plotted points
FONT_SIZE = 18
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
FPS = int(input('Enter FPS: '))
DISPLAY = pygame.display.set_mode((WIDTH, WIDTH))
fps_clock = pygame.time.Clock()
pygame.init()
FONT = pygame.font.Font(None, FONT_SIZE) # None -> default font of OS
while True:
t = (t + 1) % 360
rt = radians(t)
# m is a 2x2 matrix
m = [[cos(rt), -1 * sin(rt)], [sin(rt), cos(rt)]]
# P` is referred to as q in the code
q = image(m, p)
# background and axes
DISPLAY.fill(WHITE)
pygame.draw.line(DISPLAY, BLACK, (OFFSET, 0), (OFFSET, WIDTH))
pygame.draw.line(DISPLAY, BLACK, (0, OFFSET), (WIDTH, OFFSET))
# text in the top right of the window
text = [FONT.render(f'{t}°', True, BLACK),
FONT.render(point_to_text(p, 'P'), True, BLACK),
FONT.render(point_to_text(q, 'P`'), True, BLACK),
FONT.render(matrix_to_text(m, 'M'), True, BLACK)]
for i, tx in enumerate(text):
DISPLAY.blit(tx, (WIDTH - tx.get_width(), (i + 1) * tx.get_height()))
# plot O, P, P` and the line OP`
_p = convert_point(p, WIDTH, OFFSET)
_q = convert_point(q, WIDTH, OFFSET)
_o = convert_point((0, 0), WIDTH, OFFSET)
pygame.draw.line(DISPLAY, RED, _o, _q)
pygame.draw.circle(DISPLAY, RED, _p, R)
pygame.draw.circle(DISPLAY, RED, _o, R)
pygame.draw.circle(DISPLAY, RED, _q, R)
# update screen and check if the user wants to quit
pygame.display.update()
fps_clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
| [
2,
36223,
25,
284,
23064,
257,
966,
350,
416,
7377,
116,
7200,
45299,
7372,
440,
13,
198,
198,
2,
17168,
13789,
198,
198,
2,
15069,
357,
66,
8,
12131,
827,
276,
32118,
357,
38,
270,
16066,
20579,
8,
198,
198,
2,
2448,
3411,
318,
... | 2.655527 | 1,167 |
#!/usr/bin/python
import sys
import math, numpy as np
import roslib; roslib.load_manifest('hrl_fabric_based_tactile_sensor')
import rospy
from hrl_msgs.msg import FloatArray
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_fabric_based_tactile_sensor.adc_publisher_node as apn
from m3skin_ros.msg import RawTaxelArray
from geometry_msgs.msg import Transform
from m3skin_ros.srv import None_TransformArray, None_TransformArrayResponse
from m3skin_ros.srv import None_String
from pr2_tactile_sleeve_driver_node import Tactile_Sleeve
if __name__ == '__main__':
import optparse
p = optparse.OptionParser()
p.add_option('--arm_to_use', action='store',
dest='arm', type='string',
help='l or r')
opt, args = p.parse_args()
if opt.arm != 'r' and opt.arm != 'l':
rospy.logerr('Invalid arm_to_use')
raise RuntimeError('Invalid arm_to_use')
ts = Tactile_Sleeve(opt.arm)
# raw_data_gripper_pub = rospy.Publisher('pr2_fabric_gripper_sensor/taxels/raw_data',
# RawTaxelArray)
# rospy.Service('pr2_fabric_gripper_sensor/taxels/srv/local_coord_frames',
# None_TransformArray, ts.local_coord_frames_gripper_cb)
# rospy.Service('pr2_fabric_gripper_sensor/taxels/srv/link_name', None_String,
# ts.link_name_gripper_cb)
raw_data_gripper_right_link_pub = rospy.Publisher('pr2_fabric_gripper_right_link_sensor/taxels/raw_data',
RawTaxelArray)
rospy.Service('pr2_fabric_gripper_right_link_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_gripper_right_link_cb)
rospy.Service('pr2_fabric_gripper_right_link_sensor/taxels/srv/link_name', None_String,
ts.link_name_gripper_right_link_cb)
raw_data_gripper_left_link_pub = rospy.Publisher('pr2_fabric_gripper_left_link_sensor/taxels/raw_data',
RawTaxelArray)
rospy.Service('pr2_fabric_gripper_left_link_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_gripper_left_link_cb)
rospy.Service('pr2_fabric_gripper_left_link_sensor/taxels/srv/link_name', None_String,
ts.link_name_gripper_left_link_cb)
raw_data_gripper_palm_pub = rospy.Publisher('pr2_fabric_gripper_palm_sensor/taxels/raw_data',
RawTaxelArray)
rospy.Service('pr2_fabric_gripper_palm_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_gripper_palm_cb)
rospy.Service('pr2_fabric_gripper_palm_sensor/taxels/srv/link_name', None_String,
ts.link_name_gripper_palm_cb)
raw_data_forearm_pub = rospy.Publisher('pr2_fabric_forearm_sensor/taxels/raw_data',
RawTaxelArray)
rospy.Service('pr2_fabric_forearm_sensor/taxels/srv/local_coord_frames',
None_TransformArray, ts.local_coord_frames_forearm_cb)
rospy.Service('pr2_fabric_forearm_sensor/taxels/srv/link_name', None_String,
ts.link_name_forearm_cb)
rospy.init_node('fabric_tactile_sleeve_driver_node')
baudrate = 115200
dev2_nm = '/dev/robot/arduino3'
dev2 = apn.setup_serial(dev2_nm, baudrate)
for i in range(10):
dev2.readline()
rospy.loginfo('Started publishing gripper and forearm taxels')
rta1 = RawTaxelArray()
rta2 = RawTaxelArray()
rta3 = RawTaxelArray()
rta4 = RawTaxelArray()
while not rospy.is_shutdown():
adc_data = apn.get_adc_data(dev2, 32)
#for t in range(32):
# if adc_data[t] < 1000:
# rospy.loginfo(t)
forearm_vals = adc_data[0:5] + adc_data[6:16] + adc_data[17:24]
rta1.val_z = forearm_vals
raw_data_forearm_pub.publish(rta1)
rta2.val_z = adc_data[26:27] + adc_data[29:30] + adc_data[31:32] + adc_data[24:25]
raw_data_gripper_left_link_pub.publish(rta2)
rta3.val_z = adc_data[28:29] + adc_data[30:31] + adc_data[27:28] + adc_data[25:26]
raw_data_gripper_right_link_pub.publish(rta3)
rta4.val_z = adc_data[5:6] + adc_data[16:17];
raw_data_gripper_palm_pub.publish(rta4)
dev2.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
25064,
198,
11748,
10688,
11,
299,
32152,
355,
45941,
198,
198,
11748,
686,
6649,
571,
26,
686,
6649,
571,
13,
2220,
62,
805,
8409,
10786,
11840,
75,
62,
36434,
1173,
62,
3106,
... | 1.966087 | 2,241 |
import demoji
import parser
demoji.download_codes()
| [
11748,
13605,
7285,
198,
11748,
30751,
198,
198,
9536,
31370,
13,
15002,
62,
40148,
3419,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628
] | 2.733333 | 30 |
#!/usr/bin/env python
# Yackback 2018
# Red cog for scraping BOM
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
575,
441,
1891,
2864,
201,
198,
2,
2297,
43072,
329,
46743,
347,
2662,
201,
198
] | 2.615385 | 26 |
# -*- coding: utf-8 -*-
"""
Generator.py
create (generate) annotated data
H. Déjean
copyright Xerox 2017
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from dataGenerator.generator import Generator
try:basestring
except NameError:basestring = str
class listGenerator(Generator):
"""
a generator for list
"""
if __name__ == "__main__":
from dataGenerator.numericalGenerator import integerGenerator
integerGenerator(10,0)
lG = listGenerator((5,4),integerGenerator, integerGenerator(10,0))
lG.instantiate()
lG.generate()
print(lG._generation) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
628,
198,
220,
220,
220,
35986,
13,
9078,
628,
220,
220,
220,
2251,
357,
8612,
378,
8,
24708,
515,
1366,
220,
198,
220,
220,
220,
220,
367,
13,
360,
2634,
73,... | 2.750742 | 337 |
from channels.db import database_sync_to_async
from wagsley.schema.base import query
#from .models import Post
from puput.models import EntryPage as Post
from .schema import PostConnection, PostEdge, PostNode
@query.field("allPosts")
@database_sync_to_async
@query.field("post")
@database_sync_to_async
| [
6738,
9619,
13,
9945,
1330,
6831,
62,
27261,
62,
1462,
62,
292,
13361,
198,
198,
6738,
266,
3775,
1636,
13,
15952,
2611,
13,
8692,
1330,
12405,
198,
198,
2,
6738,
764,
27530,
1330,
2947,
198,
6738,
15552,
315,
13,
27530,
1330,
21617,
... | 3.111111 | 99 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/9/5 22:35
# @Author : Tom.lee
# @Site :
# @File : _test.py
# @Software: PyCharm
# import numpy as np
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
220,
220,
220,
1058,
2177,
14,
24,
14,
20,
2534,
25,
2327,
198,
2,
2488,
13838,
220,
1058,
4186,
13,... | 2.058824 | 85 |
class StructuralInstanceUsage(Enum,IComparable,IFormattable,IConvertible):
"""
Represents the structural usage of a family instance.
enum StructuralInstanceUsage,values: Automatic (10),Brace (7),Column (2),Girder (3),HorizontalBracing (8),Joist (4),KickerBracing (9),Other (6),Purlin (5),TrussChord (11),TrussWeb (12),Undefined (0),Wall (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Automatic=None
Brace=None
Column=None
Girder=None
HorizontalBracing=None
Joist=None
KickerBracing=None
Other=None
Purlin=None
TrussChord=None
TrussWeb=None
Undefined=None
value__=None
Wall=None
| [
4871,
32112,
1523,
33384,
28350,
7,
4834,
388,
11,
2149,
296,
37064,
11,
5064,
579,
1078,
540,
11,
2149,
261,
1851,
856,
2599,
201,
198,
37227,
201,
198,
1432,
6629,
262,
13204,
8748,
286,
257,
1641,
4554,
13,
201,
198,
201,
198,
22... | 2.572127 | 409 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import nlpregex.abs_graph.double_link
import nlpregex.abs_graph.node
import nlpregex.abs_graph.edge
import nlpregex.abs_graph.graph
# @brief
# @param token_pairs: list of tuple of (first token, second token)
# @brief main public function to find and reduce common subtrees
#
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
75,
3866,
25636,
13,
8937,
62,
34960,
13,
23352,
62,
8726,
198,
11748,
299,
75,
3866,
25636,
13,... | 2.59854 | 137 |
from ixnetwork_restpy.assistants.statistics.statviewassistant import StatViewAssistant
from ixnetwork_restpy.assistants.ports.portmapassistant import PortMapAssistant
| [
6738,
220,
844,
27349,
62,
2118,
9078,
13,
562,
396,
1187,
13,
14269,
3969,
13,
14269,
1177,
562,
10167,
1330,
5133,
7680,
48902,
198,
6738,
220,
844,
27349,
62,
2118,
9078,
13,
562,
396,
1187,
13,
3742,
13,
634,
8899,
562,
10167,
1... | 3.553191 | 47 |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198
] | 1.956522 | 23 |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.optim.lr_scheduler import StepLR, MultiStepLR
class AverageMeter(object):
"""Computes and stores the average and current value"""
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
... | 3.283871 | 155 |
# Generated by Django 2.1.8 on 2019-07-02 17:18
import cms.models.fields
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import djangocms_attributes_field.fields
import filer.fields.file
import filer.fields.image
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
23,
319,
13130,
12,
2998,
12,
2999,
1596,
25,
1507,
198,
198,
11748,
269,
907,
13,
27530,
13,
25747,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
42625,
14208,
13,
3642,
822,... | 3.089109 | 101 |
from colorama import Fore, Style
from player import *
class Enemy(Mario_reddy):
"""docstring for Enemy."""
| [
6738,
3124,
1689,
1330,
4558,
11,
17738,
198,
6738,
2137,
1330,
1635,
628,
198,
4871,
21785,
7,
42315,
62,
445,
9892,
2599,
198,
220,
220,
220,
37227,
15390,
8841,
329,
21785,
526,
15931,
198
] | 3.323529 | 34 |
"""
Potential field imaging through the Generalized Inverse method
---------------------------------------------------------------
Module :mod:`fatiando.gravmag.imaging` has functions for imaging methods in
potential fields. These methods produce an image of the subsurface without
doing an inversion. However, there is a tradeoff with the quality of the result
being generally inferior to an inversion result.
Here we'll show how the Generalized Inverse imaging method can be used on some
synthetic data. We'll plot the final result as slices across the x, y, z axis.
"""
from __future__ import division
from fatiando import gridder, mesher
from fatiando.gravmag import prism, imaging
from fatiando.vis.mpl import square
import matplotlib.pyplot as plt
import numpy as np
# Make some synthetic gravity data from a simple prism model
model = [mesher.Prism(-1000, 1000, -3000, 3000, 0, 2000, {'density': 800})]
shape = (25, 25)
xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), shape, z=-10)
data = prism.gz(xp, yp, zp, model)
# Run the Generalized Inverse
mesh = imaging.geninv(xp, yp, zp, data, shape, zmin=0, zmax=5000, nlayers=25)
# Plot the results
fig = plt.figure()
X, Y = xp.reshape(shape)/1000, yp.reshape(shape)/1000
image = mesh.props['density'].reshape(mesh.shape)
# First plot the original gravity data
ax = plt.subplot(2, 2, 1)
ax.set_title('Gravity data (mGal)')
ax.set_aspect('equal')
scale = np.abs([data.min(), data.max()]).max()
tmp = ax.contourf(Y, X, data.reshape(shape), 30, cmap="RdBu_r", vmin=-scale,
vmax=scale)
plt.colorbar(tmp, ax=ax, pad=0)
ax.set_xlim(Y.min(), Y.max())
ax.set_ylim(X.min(), X.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
# Then plot model slices in the x, y, z directions through the middle of the
# model. Also show the outline of the true model for comparison.
scale = 0.1*np.abs([image.min(), image.max()]).max()
x = mesh.get_xs()/1000
y = mesh.get_ys()/1000
z = mesh.get_zs()/1000
x1, x2, y1, y2, z1, z2 = np.array(model[0].get_bounds())/1000
ax = plt.subplot(2, 2, 2)
ax.set_title('Model slice at z={} km'.format(z[len(z)//2]))
ax.set_aspect('equal')
ax.pcolormesh(y, x, image[mesh.shape[0]//2, :, :], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([y1, y2, x1, x2])
ax.set_ylim(x.min(), x.max())
ax.set_xlim(y.min(), y.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax = plt.subplot(2, 2, 3)
ax.set_title('Model slice at y={} km'.format(y[len(y)//2]))
ax.set_aspect('equal')
ax.pcolormesh(x, z, image[:, :, mesh.shape[1]//2], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([x1, x2, z1, z2])
ax.set_ylim(z.max(), z.min())
ax.set_xlim(x.min(), x.max())
ax.set_xlabel('x (km)')
ax.set_ylabel('z (km)')
ax = plt.subplot(2, 2, 4)
ax.set_title('Model slice at x={} km'.format(x[len(x)//2]))
ax.set_aspect('equal')
ax.pcolormesh(y, z, image[:, mesh.shape[2]//2, :], cmap="cubehelix",
vmin=-scale, vmax=scale)
square([y1, y2, z1, z2])
ax.set_ylim(z.max(), z.min())
ax.set_xlim(y.min(), y.max())
ax.set_xlabel('y (km)')
ax.set_ylabel('z (km)')
plt.tight_layout()
plt.show()
| [
37811,
198,
25396,
1843,
2214,
19560,
832,
262,
3611,
1143,
554,
4399,
2446,
198,
47232,
24305,
198,
198,
26796,
1058,
4666,
25,
63,
69,
7246,
25440,
13,
70,
4108,
19726,
13,
320,
3039,
63,
468,
5499,
329,
19560,
5050,
287,
198,
13059... | 2.431021 | 1,283 |
# write_Crosswalk_Blackhurst.py (scripts)
# !/usr/bin/env python3
# coding=utf-8
"""
Create a crosswalk linking BEA to NAICS
"""
import pandas as pd
from flowsa.common import datapath, load_bea_crosswalk
from scripts.common_scripts import unique_activity_names, order_crosswalk
if __name__ == '__main__':
# select years to pull unique activity names
years = ['2002']
# flowclass
flowclass = ['Water']
# datasource
datasource = 'Blackhurst_IO'
# df of unique ers activity names
df = unique_activity_names(flowclass, years, datasource)
# add manual naics 2012 assignments
df = assign_naics(df)
# drop any rows where naics12 is 'nan' (because level of detail not needed or to prevent double counting)
df.dropna(subset=["Sector"], inplace=True)
# assign sector type
df['SectorType'] = None
# sort df
df = order_crosswalk(df)
# save as csv
df.to_csv(datapath + "activitytosectormapping/" + "Crosswalk_" + datasource + "_toNAICS.csv", index=False)
| [
2,
3551,
62,
21544,
11152,
62,
9915,
33500,
13,
9078,
357,
46521,
8,
198,
2,
5145,
14,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
37811,
198,
16447,
257,
3272,
11152,
17795,
9348,
32,
284,
1... | 2.822715 | 361 |
import arcpy
if __name__ == '__main__':
process(
feature_class=arcpy.GetParameterAsText(0),
field=arcpy.GetParameterAsText(1)
) | [
11748,
10389,
9078,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1429,
7,
198,
220,
220,
220,
220,
220,
220,
220,
3895,
62,
4871,
28,
5605,
9078,
13,
3855,
36301,
1722,
8206,
7,
15,
... | 2.231884 | 69 |
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
import database_connection # noqa: F401
from matrix_connection import get_download_url
from schema import Message
def run_downloads(messages, download_dir, prefer_thumbnails):
"""Run downloads
:param messages: List of messages
:param download_dir: Location where the images shall be stored
:param prefer_thumbnails: Whether to prefer thumbnails than full images.
"""
s = requests.Session()
for msg in messages:
image_url = (msg.thumbnail_url if prefer_thumbnails else None) or msg.image_url
try:
download_url = get_download_url(image_url)
try:
res = s.head(download_url)
res.raise_for_status()
mtype, subtype = res.headers['content-type'].split('/', 2)
if mtype != 'image':
print(f"Skipping {image_url}: {res.headers['content-type']}")
continue
except requests.exceptions.RequestException as e:
print("{} Skipping...".format(e))
continue
try:
res = s.get(download_url)
res.raise_for_status()
filename = (download_dir / download_stem(msg, prefer_thumbnails)
).with_suffix('.' + subtype)
print('Downloading', image_url, '->', filename)
with open(filename, 'wb') as fp:
fp.write(res.content)
except requests.exceptions.RequestException as e:
print("{} Skipping...".format(e))
except AssertionError:
print('Assertion Error in get_download_url("{}"). Skipping...'.format(image_url))
@click.command()
@click.option('--thumbnails/--no-thumbnails', default=True)
@click.argument('output', required=False)
def download_images(thumbnails, output):
"""Download thumbnails."""
noun = 'thumbnails' if thumbnails else 'images'
download_dir = Path(output or noun)
messages = [msg for msg in Message.objects
if msg.content.get('msgtype') == 'm.image']
download_dir.mkdir(exist_ok=True)
current_stems = {p.stem for p in download_dir.glob('*')}
new_messages = [msg for msg in messages
if download_stem(msg, thumbnails)
not in current_stems]
skip_count = len(messages) - len(new_messages)
if skip_count:
print(f"Skipping {skip_count} already-downloaded {noun}")
if new_messages:
print(f"Downloading {len(new_messages)} new {noun}...")
else:
print("Nothing to do")
run_downloads(new_messages, download_dir, prefer_thumbnails=thumbnails)
if __name__ == '__main__':
download_images()
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
198,
198,
11748,
3904,
198,
11748,
7007,
198,
198,
11748,
6831,
62,
38659,
220,
1303,
645,
20402,
25,
376,
21844,
198,
6738,
17593,
62,
38659,
1330... | 2.313223 | 1,210 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AppScale modifications
Distributed method:
All calls (queries, gets, puts, deletes, index operations, and transaction
operations) are forwarded to a remote AppScale datastore server.
"""
import collections
import datetime
import logging
import os
import sys
import threading
import warnings
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import users
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_index
from google.appengine.runtime import apiproxy_errors
from google.net.proto import ProtocolBuffer
from google.appengine.datastore import entity_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.datastore import old_datastore_stub_util
# Where the SSL certificate is placed for encrypted communication
CERT_LOCATION = "/etc/appscale/certs/mycert.pem"
# Where the SSL private key is placed for encrypted communication
KEY_LOCATION = "/etc/appscale/certs/mykey.pem"
# The default SSL port to connect to
SSL_DEFAULT_PORT = 8443
# The amount of time before we consider a query cursor to be no longer valid.
CURSOR_TIMEOUT = 120
# Shorthand aliases for the protocol-buffer sort-order enum values.
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING
# Filter-operator groupings used when validating/normalizing queries.
EQUALITY_OPERATORS = set((datastore_pb.Query_Filter.EQUAL,
                         ))
INEQUALITY_OPERATORS = set((datastore_pb.Query_Filter.LESS_THAN,
                            datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
                            datastore_pb.Query_Filter.GREATER_THAN,
                            datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
                           ))
# taskqueue_service_pb moved between SDK versions; try the new dotted path
# first and fall back to the old package layout.
try:
  __import__('google.appengine.api.taskqueue.taskqueue_service_pb')
  taskqueue_service_pb = sys.modules.get(
      'google.appengine.api.taskqueue.taskqueue_service_pb')
except ImportError:
  from google.appengine.api.taskqueue import taskqueue_service_pb
warnings.filterwarnings('ignore', 'tempnam is a potential security risk')
# Make these protocol buffers hashable (by their encoded bytes) so they can
# be used as dictionary keys, e.g. in query/transaction bookkeeping.
entity_pb.Reference.__hash__ = lambda self: hash(self.Encode())
datastore_pb.Query.__hash__ = lambda self: hash(self.Encode())
datastore_pb.Transaction.__hash__ = lambda self: hash(self.Encode())
# Maximum number of filter + order components allowed in a single query.
_MAX_QUERY_COMPONENTS = 100
# Default number of results returned per batch when the query sets no
# count/limit.
_BATCH_SIZE = 20
# Maximum number of transactional tasks that may be attached to one txn.
_MAX_ACTIONS_PER_TXN = 5
class DatastoreDistributed(apiproxy_stub.APIProxyStub):
""" A central server hooks up to a db and communicates via protocol
buffers.
"""
_PROPERTY_TYPE_TAGS = {
datastore_types.Blob: entity_pb.PropertyValue.kstringValue,
bool: entity_pb.PropertyValue.kbooleanValue,
datastore_types.Category: entity_pb.PropertyValue.kstringValue,
datetime.datetime: entity_pb.PropertyValue.kint64Value,
datastore_types.Email: entity_pb.PropertyValue.kstringValue,
float: entity_pb.PropertyValue.kdoubleValue,
datastore_types.GeoPt: entity_pb.PropertyValue.kPointValueGroup,
datastore_types.IM: entity_pb.PropertyValue.kstringValue,
int: entity_pb.PropertyValue.kint64Value,
datastore_types.Key: entity_pb.PropertyValue.kReferenceValueGroup,
datastore_types.Link: entity_pb.PropertyValue.kstringValue,
long: entity_pb.PropertyValue.kint64Value,
datastore_types.PhoneNumber: entity_pb.PropertyValue.kstringValue,
datastore_types.PostalAddress: entity_pb.PropertyValue.kstringValue,
datastore_types.Rating: entity_pb.PropertyValue.kint64Value,
str: entity_pb.PropertyValue.kstringValue,
datastore_types.Text: entity_pb.PropertyValue.kstringValue,
type(None): 0,
unicode: entity_pb.PropertyValue.kstringValue,
users.User: entity_pb.PropertyValue.kUserValueGroup,
}
def __init__(self,
app_id,
datastore_location,
history_file=None,
require_indexes=False,
service_name='datastore_v3',
trusted=False,
root_path='/var/apps/'):
"""Constructor.
Args:
app_id: string
datastore_location: location of datastore server
history_file: DEPRECATED. No-op.
require_indexes: bool, default False. If True, composite indexes must
exist in index.yaml for queries that need them.
service_name: Service name expected for all calls.
trusted: bool, default False. If True, this stub allows an app to
access the data of another app.
root_path: A str, the path where index.yaml can be found.
"""
super(DatastoreDistributed, self).__init__(service_name)
# TODO lock any use of these global variables
assert isinstance(app_id, basestring) and app_id != ''
self.__app_id = app_id
self.__datastore_location = datastore_location
self.__index_cache = {}
self.__is_encrypted = True
res = self.__datastore_location.split(':')
if len(res) == 2:
if int(res[1]) != SSL_DEFAULT_PORT:
self.__is_encrypted = False
self.SetTrusted(trusted)
self.__entities = {}
self.__schema_cache = {}
self.__tx_actions_dict = {}
self.__tx_actions = set()
self.__queries = {}
self.__require_indexes = require_indexes
self.__root_path = root_path + self.__app_id + "/app"
self.__cached_yaml = (None, None, None)
if require_indexes:
self._SetupIndexes()
def Clear(self):
""" Clears the datastore by deleting all currently stored entities and
queries. """
self.__entities = {}
self.__queries = {}
self.__schema_cache = {}
def SetTrusted(self, trusted):
"""Set/clear the trusted bit in the stub.
This bit indicates that the app calling the stub is trusted. A
trusted app can write to datastores of other apps.
Args:
trusted: boolean.
"""
self.__trusted = trusted
def __ValidateAppId(self, app_id):
"""Verify that this is the stub for app_id.
Args:
app_id: An application ID.
Raises:
datastore_errors.BadRequestError: if this is not the stub for app_id.
"""
assert app_id
if not self.__trusted and app_id != self.__app_id:
raise datastore_errors.BadRequestError(
'app %s cannot access app %s\'s data' % (self.__app_id, app_id))
def __ValidateKey(self, key):
"""Validate this key.
Args:
key: entity_pb.Reference
Raises:
datastore_errors.BadRequestError: if the key is invalid
"""
assert isinstance(key, entity_pb.Reference)
self.__ValidateAppId(key.app())
for elem in key.path().element_list():
if elem.has_id() == elem.has_name():
raise datastore_errors.BadRequestError(
'each key path element should have id or name but not both: %r' % key)
def _AppIdNamespaceKindForKey(self, key):
""" Get (app, kind) tuple from given key.
The (app, kind) tuple is used as an index into several internal
dictionaries, e.g. __entities.
Args:
key: entity_pb.Reference
Returns:
Tuple (app, kind), both are unicode strings.
"""
last_path = key.path().element_list()[-1]
return (datastore_types.EncodeAppIdNamespace(key.app(), key.name_space()),
last_path.type())
READ_PB_EXCEPTIONS = (ProtocolBuffer.ProtocolBufferDecodeError, LookupError,
TypeError, ValueError)
READ_ERROR_MSG = ('Data in %s is corrupt or a different version. '
'Try running with the --clear_datastore flag.\n%r')
READ_PY250_MSG = ('Are you using FloatProperty and/or GeoPtProperty? '
'Unfortunately loading float values from the datastore '
'file does not work with Python 2.5.0. '
'Please upgrade to a newer Python 2.5 release or use '
'the --clear_datastore flag.\n')
def Read(self):
""" Does Nothing """
return
def Write(self):
""" Does Nothing """
return
def MakeSyncCall(self, service, call, request, response, request_id=None):
""" The main RPC entry point. service must be 'datastore_v3'.
"""
self.assertPbIsInitialized(request)
super(DatastoreDistributed, self).MakeSyncCall(service,
call,
request,
response,
request_id)
self.assertPbIsInitialized(response)
def assertPbIsInitialized(self, pb):
"""Raises an exception if the given PB is not initialized and valid."""
explanation = []
assert pb.IsInitialized(explanation), explanation
pb.Encode()
def QueryHistory(self):
"""Returns a dict that maps Query PBs to times they've been run."""
return []
def _maybeSetDefaultAuthDomain(self):
""" Sets default auth domain if not set. """
auth_domain = os.environ.get("AUTH_DOMAIN")
if not auth_domain:
os.environ['AUTH_DOMAIN'] = "appscale.com"
def _RemoteSend(self, request, response, method):
"""Sends a request remotely to the datstore server. """
tag = self.__app_id
self._maybeSetDefaultAuthDomain()
user = users.GetCurrentUser()
if user != None:
tag += ":" + user.email()
tag += ":" + user.nickname()
tag += ":" + user.auth_domain()
api_request = remote_api_pb.Request()
api_request.set_method(method)
api_request.set_service_name("datastore_v3")
api_request.set_request(request.Encode())
api_response = remote_api_pb.Response()
api_response = api_request.sendCommand(self.__datastore_location,
tag,
api_response,
1,
self.__is_encrypted,
KEY_LOCATION,
CERT_LOCATION)
if not api_response or not api_response.has_response():
raise datastore_errors.InternalError(
'No response from db server on %s requests.' % method)
if api_response.has_application_error():
error_pb = api_response.application_error()
logging.error(error_pb.detail())
raise apiproxy_errors.ApplicationError(error_pb.code(),
error_pb.detail())
if api_response.has_exception():
raise api_response.exception()
response.ParseFromString(api_response.response())
def _Dynamic_Put(self, put_request, put_response):
"""Send a put request to the datastore server. """
put_request.set_trusted(self.__trusted)
ent_kinds = []
for ent in put_request.entity_list():
last_path = ent.key().path().element_list()[-1]
if last_path.type() not in ent_kinds:
ent_kinds.append(last_path.type())
for kind in ent_kinds:
indexes = self.__index_cache.get(kind)
if indexes:
for index in indexes:
new_composite = put_request.add_composite_index()
new_composite.CopyFrom(index)
self._RemoteSend(put_request, put_response, "Put")
return put_response
def _Dynamic_Get(self, get_request, get_response):
"""Send a get request to the datastore server. """
self._RemoteSend(get_request, get_response, "Get")
return get_response
def _Dynamic_Delete(self, delete_request, delete_response):
"""Send a delete request to the datastore server.
Args:
delete_request: datastore_pb.DeleteRequest.
delete_response: datastore_pb.DeleteResponse.
Returns:
A datastore_pb.DeleteResponse from the AppScale datastore server.
"""
# Determine if there are composite indexes that need to be deleted.
# The datastore service will look up meta data to figure out which
# composite indexes apply.
ent_kinds = []
for key in delete_request.key_list():
last_path = key.path().element_list()[-1]
if last_path.type() not in ent_kinds:
ent_kinds.append(last_path.type())
has_composites = False
for kind in ent_kinds:
indexes = self.__index_cache.get(kind)
if indexes:
has_composites = True
break
if has_composites:
delete_request.set_mark_changes(True)
delete_request.set_trusted(self.__trusted)
self._RemoteSend(delete_request, delete_response, "Delete")
return delete_response
def __cleanup_old_cursors(self):
""" Remove any cursors which are no longer being used. """
for key in self.__queries.keys():
_, time_stamp = self.__queries[key]
# This calculates the time in the future when this cursor is no longer
# valid.
timeout_time = time_stamp + datetime.timedelta(seconds=CURSOR_TIMEOUT)
if datetime.datetime.now() > timeout_time:
del self.__queries[key]
  def _Dynamic_RunQuery(self, query, query_result):
    """Send a query request to the datastore server. """
    # A transaction is scoped to one entity group, so transactional queries
    # must be ancestor queries.
    if query.has_transaction():
      if not query.has_ancestor():
        raise apiproxy_errors.ApplicationError(
          datastore_pb.Error.BAD_REQUEST,
          'Only ancestor queries are allowed inside transactions.')
    (filters, orders) = datastore_index.Normalize(query.filter_list(),
      query.order_list(), [])
    old_datastore_stub_util.FillUsersInQuery(filters)
    query_response = datastore_pb.QueryResult()
    if not query.has_app():
      query.set_app(self.__app_id)
    self.__ValidateAppId(query.app())
    # Set the composite index if it applies.
    indexes = []
    if query.has_kind():
      kind_indexes = self.__index_cache.get(query.kind())
      if kind_indexes:
        indexes.extend(kind_indexes)
    # _FindIndexToUse is defined elsewhere in this module; it picks the
    # cached composite index (if any) that satisfies this query.
    index_to_use = _FindIndexToUse(query, indexes)
    if index_to_use != None:
      new_index = query.add_composite_index()
      new_index.MergeFrom(index_to_use)
    self._RemoteSend(query, query_response, "RunQuery")
    skipped_results = 0
    if query_response.has_skipped_results():
      skipped_results = query_response.skipped_results()
    # The three closures below implement datastore ordering over the
    # returned results; they close over `orders` and each other.
    def order_compare_entities(a, b):
      """ Return a negative, zero or positive number depending on whether
      entity a is considered smaller than, equal to, or larger than b,
      according to the query's orderings. """
      cmped = 0
      for o in orders:
        prop = o.property().decode('utf-8')
        reverse = (o.direction() is datastore_pb.Query_Order.DESCENDING)
        a_val = datastore._GetPropertyValue(a, prop)
        # For multi-valued properties the smallest (or largest, when
        # descending) value represents the entity in the comparison.
        if isinstance(a_val, list):
          a_val = sorted(a_val, order_compare_properties, reverse=reverse)[0]
        b_val = datastore._GetPropertyValue(b, prop)
        if isinstance(b_val, list):
          b_val = sorted(b_val, order_compare_properties, reverse=reverse)[0]
        cmped = order_compare_properties(a_val, b_val)
        if o.direction() is datastore_pb.Query_Order.DESCENDING:
          cmped = -cmped
        if cmped != 0:
          return cmped
      # Ties are broken by key so the ordering is total and deterministic.
      if cmped == 0:
        return cmp(a.key(), b.key())
    def order_compare_entities_pb(a, b):
      """ Return a negative, zero or positive number depending on whether
      entity a is considered smaller than, equal to, or larger than b,
      according to the query's orderings. a and b are protobuf-encoded
      entities."""
      return order_compare_entities(datastore.Entity.FromPb(a),
                                    datastore.Entity.FromPb(b))
    def order_compare_properties(x, y):
      """Return a negative, zero or positive number depending on whether
      property value x is considered smaller than, equal to, or larger than
      property value y. If x and y are different types, they're compared based
      on the type ordering used in the real datastore, which is based on the
      tag numbers in the PropertyValue PB.
      """
      if isinstance(x, datetime.datetime):
        x = datastore_types.DatetimeToTimestamp(x)
      if isinstance(y, datetime.datetime):
        y = datastore_types.DatetimeToTimestamp(y)
      x_type = self._PROPERTY_TYPE_TAGS.get(x.__class__)
      y_type = self._PROPERTY_TYPE_TAGS.get(y.__class__)
      if x_type == y_type:
        try:
          return cmp(x, y)
        except TypeError:
          return 0
      else:
        return cmp(x_type, y_type)
    results = query_response.result_list()
    for result in results:
      old_datastore_stub_util.PrepareSpecialPropertiesForLoad(result)
    old_datastore_stub_util.ValidateQuery(query, filters, orders,
      _MAX_QUERY_COMPONENTS)
    # Wrap the result list in a cursor and register it so _Dynamic_Next can
    # page through it; expired cursors are evicted first.
    cursor = old_datastore_stub_util.ListCursor(query, results,
      order_compare_entities_pb)
    self.__cleanup_old_cursors()
    self.__queries[cursor.cursor] = cursor, datetime.datetime.now()
    if query.has_count():
      count = query.count()
    elif query.has_limit():
      count = query.limit()
    else:
      count = _BATCH_SIZE
    cursor.PopulateQueryResult(query_result, count,
                               query.offset(), compile=query.compile())
    query_result.set_skipped_results(skipped_results)
    if query.compile():
      compiled_query = query_result.mutable_compiled_query()
      compiled_query.set_keys_only(query.keys_only())
      compiled_query.mutable_primaryscan().set_index_name(query.Encode())
def _Dynamic_Next(self, next_request, query_result):
"""Get the next set of entities from a previously run query. """
self.__ValidateAppId(next_request.cursor().app())
cursor_handle = next_request.cursor().cursor()
if cursor_handle not in self.__queries:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Cursor %d not found' % cursor_handle)
cursor, _ = self.__queries[cursor_handle]
if cursor.cursor != cursor_handle:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Cursor %d not found' % cursor_handle)
assert cursor.app == next_request.cursor().app()
count = _BATCH_SIZE
if next_request.has_count():
count = next_request.count()
cursor.PopulateQueryResult(query_result, count,
next_request.offset(),
next_request.compile())
def _Dynamic_Count(self, query, integer64proto):
"""Get the number of entities for a query. """
query_result = datastore_pb.QueryResult()
self._Dynamic_RunQuery(query, query_result)
count = query_result.result_size()
integer64proto.set_value(count)
def _Dynamic_BeginTransaction(self, request, transaction):
"""Send a begin transaction request from the datastore server. """
request.set_app(self.__app_id)
self._RemoteSend(request, transaction, "BeginTransaction")
self.__tx_actions = []
return transaction
def _Dynamic_AddActions(self, request, _):
"""Associates the creation of one or more tasks with a transaction.
Args:
request: A taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks that should be created when the transaction is comitted.
"""
if ((len(self.__tx_actions) + request.add_request_size()) >
_MAX_ACTIONS_PER_TXN):
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Too many messages, maximum allowed %s' % _MAX_ACTIONS_PER_TXN)
new_actions = []
for add_request in request.add_request_list():
clone = taskqueue_service_pb.TaskQueueAddRequest()
clone.CopyFrom(add_request)
clone.clear_transaction()
new_actions.append(clone)
self.__tx_actions.extend(new_actions)
  def _Dynamic_Commit(self, transaction, transaction_response):
    """ Send a transaction request to commit a transaction to the
        datastore server. """
    transaction.set_app(self.__app_id)
    self._RemoteSend(transaction, transaction_response, "Commit")
    # Only after the server confirms the commit are the queued transactional
    # tasks handed to the task queue service.
    response = taskqueue_service_pb.TaskQueueAddResponse()
    try:
      for action in self.__tx_actions:
        try:
          apiproxy_stub_map.MakeSyncCall(
              'taskqueue', 'Add', action, response)
        # A task that fails to enqueue is logged and dropped rather than
        # failing the already-committed transaction.
        except apiproxy_errors.ApplicationError, e:
          logging.warning('Transactional task %s has been dropped, %s',
                          action, e)
    # Queued actions never survive the commit attempt, even on error.
    finally:
      self.__tx_actions = []
def _Dynamic_Rollback(self, transaction, transaction_response):
""" Send a rollback request to the datastore server. """
transaction.set_app(self.__app_id)
self.__tx_actions = []
self._RemoteSend(transaction, transaction_response, "Rollback")
return transaction_response
  def _Dynamic_GetSchema(self, req, schema):
    """ Get the schema of a particular kind of entity. """
    app_str = req.app()
    self.__ValidateAppId(app_str)
    namespace_str = req.name_space()
    app_namespace_str = datastore_types.EncodeAppIdNamespace(app_str,
      namespace_str)
    kinds = []
    # Walk every (app_namespace, kind) bucket of locally known entities and
    # build (or reuse from cache) one schema EntityProto per kind.
    for app_namespace, kind in self.__entities:
      # Skip kinds outside the requested namespace or kind range.
      if (app_namespace != app_namespace_str or
          (req.has_start_kind() and kind < req.start_kind()) or
          (req.has_end_kind() and kind > req.end_kind())):
        continue
      app_kind = (app_namespace_str, kind)
      if app_kind in self.__schema_cache:
        kinds.append(self.__schema_cache[app_kind])
        continue
      # The schema for a kind is itself represented as an EntityProto whose
      # key path holds only the kind name.
      kind_pb = entity_pb.EntityProto()
      kind_pb.mutable_key().set_app('')
      kind_pb.mutable_key().mutable_path().add_element().set_type(kind)
      kind_pb.mutable_entity_group()
      # Union of every property seen on any entity of this kind.
      props = {}
      for entity in self.__entities[app_kind].values():
        for prop in entity.protobuf.property_list():
          if prop.name() not in props:
            props[prop.name()] = entity_pb.PropertyValue()
          props[prop.name()].MergeFrom(prop.value())
      # Scrub concrete values down to type-only placeholders: the schema
      # conveys which value fields a property uses, not real data.
      for value_pb in props.values():
        if value_pb.has_int64value():
          value_pb.set_int64value(0)
        if value_pb.has_booleanvalue():
          value_pb.set_booleanvalue(False)
        if value_pb.has_stringvalue():
          value_pb.set_stringvalue('none')
        if value_pb.has_doublevalue():
          value_pb.set_doublevalue(0.0)
        if value_pb.has_pointvalue():
          value_pb.mutable_pointvalue().set_x(0.0)
          value_pb.mutable_pointvalue().set_y(0.0)
        if value_pb.has_uservalue():
          value_pb.mutable_uservalue().set_gaiaid(0)
          value_pb.mutable_uservalue().set_email('none')
          value_pb.mutable_uservalue().set_auth_domain('none')
          value_pb.mutable_uservalue().clear_nickname()
          value_pb.mutable_uservalue().clear_obfuscated_gaiaid()
        if value_pb.has_referencevalue():
          value_pb.clear_referencevalue()
          value_pb.mutable_referencevalue().set_app('none')
          pathelem = value_pb.mutable_referencevalue().add_pathelement()
          pathelem.set_type('none')
          pathelem.set_name('none')
      for name, value_pb in props.items():
        prop_pb = kind_pb.add_property()
        prop_pb.set_name(name)
        prop_pb.set_multiple(False)
        prop_pb.mutable_value().CopyFrom(value_pb)
      kinds.append(kind_pb)
      # Memoize so repeated GetSchema calls skip the per-entity scan.
      self.__schema_cache[app_kind] = kind_pb
    for kind_pb in kinds:
      kind = schema.add_kind()
      kind.CopyFrom(kind_pb)
      if not req.properties():
        kind.clear_property()
    schema.set_more_results(False)
def _Dynamic_AllocateIds(self, allocate_ids_request, allocate_ids_response):
"""Send a request for allocation of IDs to the datastore server. """
self._RemoteSend(allocate_ids_request, allocate_ids_response, "AllocateIds")
return allocate_ids_response
def _Dynamic_CreateIndex(self, index, id_response):
""" Create a new index. Currently stubbed out."""
if index.id() != 0:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'New index id must be 0.')
self._RemoteSend(index, id_response, "CreateIndex")
return id_response
def _Dynamic_GetIndices(self, app_str, composite_indices):
""" Gets the indices of the current app.
Args:
app_str: A api_base_pb.StringProto, the application identifier.
composite_indices: datastore_pb.CompositeIndices protocol buffer.
Returns:
A datastore_pb.CompositesIndices containing the current indexes
used by this application.
"""
self._RemoteSend(app_str, composite_indices, "GetIndices")
return composite_indices
def _Dynamic_UpdateIndex(self, index, void):
""" Updates the indices of the current app. Tells the AppScale datastore
server to build out the new index with existing data.
Args:
index: A datastore_pb.CompositeIndex, the composite index to update.
void: A entity_pb.VoidProto.
"""
self._RemoteSend(index, void, "UpdateIndex")
return
def _Dynamic_DeleteIndex(self, index, void):
""" Deletes an index of the current app.
Args:
index: A datastore_pb.CompositeIndex, the composite index to delete.
void: A entity_pb.VoidProto.
Returns:
A entity_pb.VoidProto.
"""
self._RemoteSend(index, void, "DeleteIndex")
return void
def _SetupIndexes(self, _open=open):
  """Ensure that the set of existing composite indexes matches index.yaml.

  Create any new indexes, and delete indexes which are no longer required.

  Args:
    _open: Function used to open a file (injectable for testing).
  """
  # NOTE(review): indentation in this block was reconstructed from a
  # whitespace-mangled source — verify against the original file.
  # Nothing to sync when the application root (and thus index.yaml) is
  # unknown.
  if not self.__root_path:
    logging.warning("No index.yaml was loaded.")
    return

  index_yaml_file = os.path.join(self.__root_path, 'index.yaml')

  # Reuse the previously parsed definitions when index.yaml is unchanged;
  # cache entry layout is (path, mtime, parsed index protos).
  if (self.__cached_yaml[0] == index_yaml_file and
      os.path.exists(index_yaml_file) and
      os.path.getmtime(index_yaml_file) == self.__cached_yaml[1]):
    requested_indexes = self.__cached_yaml[2]
  else:
    try:
      index_yaml_mtime = os.path.getmtime(index_yaml_file)
      fh = _open(index_yaml_file, 'r')
    except (OSError, IOError):
      logging.info("Error reading file")
      index_yaml_data = None
    else:
      try:
        index_yaml_data = fh.read()
      finally:
        fh.close()

    requested_indexes = []
    if index_yaml_data is not None:
      index_defs = datastore_index.ParseIndexDefinitions(index_yaml_data)
      if index_defs is not None and index_defs.indexes is not None:
        # Convert the parsed YAML definitions to index protocol buffers.
        requested_indexes = datastore_index.IndexDefinitionsToProtos(
            self.__app_id,
            index_defs.indexes)
      # Refresh the cache entry for the next call.
      self.__cached_yaml = (index_yaml_file, index_yaml_mtime,
                            requested_indexes)

  # Ask the datastore server for the indexes it currently knows about.
  existing_indexes = datastore_pb.CompositeIndices()
  app_str = api_base_pb.StringProto()
  app_str.set_value(self.__app_id)
  self._Dynamic_GetIndices(app_str, existing_indexes)

  # Key both sides by the encoded index definition so they can be compared.
  requested = dict((x.definition().Encode(), x) for x in requested_indexes)
  existing = dict((x.definition().Encode(), x) for x in
                  existing_indexes.index_list())

  # Delete any indexes that are no longer requested.
  deleted = 0
  for key, index in existing.iteritems():
    if key not in requested:
      self._Dynamic_DeleteIndex(index, api_base_pb.VoidProto())
      deleted += 1

  # Add existing indexes in the index cache, grouped by entity kind.
  for key, index in existing.iteritems():
    new_index = entity_pb.CompositeIndex()
    new_index.CopyFrom(index)
    ent_kind = new_index.definition().entity_type()
    if ent_kind in self.__index_cache:
      new_indexes = self.__index_cache[ent_kind]
      new_indexes.append(new_index)
      self.__index_cache[ent_kind] = new_indexes
    else:
      self.__index_cache[ent_kind] = [new_index]

  # Compared the existing indexes to the requested ones and create any
  # new indexes requested.
  created = 0
  for key, index in requested.iteritems():
    if key not in existing:
      new_index = entity_pb.CompositeIndex()
      new_index.CopyFrom(index)
      # The server assigns the id; record it and mark the index writable.
      new_index.set_id(self._Dynamic_CreateIndex(new_index,
                                                 api_base_pb.Integer64Proto()).value())
      new_index.set_state(entity_pb.CompositeIndex.READ_WRITE)
      self._Dynamic_UpdateIndex(new_index, api_base_pb.VoidProto())
      created += 1

      # Also record the newly created index in the per-kind cache.
      ent_kind = new_index.definition().entity_type()
      if ent_kind in self.__index_cache:
        new_indexes = self.__index_cache[ent_kind]
        new_indexes.append(new_index)
        self.__index_cache[ent_kind] = new_indexes
      else:
        self.__index_cache[ent_kind] = [new_index]

  if created or deleted:
    logging.info('Created %d and deleted %d index(es); total %d',
                 created, deleted, len(requested))
def _FindIndexToUse(query, indexes):
  """Match the query with one of the given composite indexes.

  Args:
    query: A datastore_pb.Query.
    indexes: A list of entity_pb.CompositeIndex.

  Returns:
    The index from `indexes` whose definition matches the one the query
    requires, or None when the query needs no composite index.

  Raises:
    apiproxy_errors.ApplicationError: NEED_INDEX when the query requires
      an index but none of the given ones matches.
  """
  if not query.has_kind():
    return None

  needed = __IndexListForQuery(query)
  if not needed:
    return None

  wanted_definition = needed[0]
  for candidate in indexes:
    if wanted_definition.Equals(candidate.definition()):
      return candidate

  raise apiproxy_errors.ApplicationError(
      datastore_pb.Error.NEED_INDEX,
      'Query requires an index')
def __IndexListForQuery(query):
  """Get the composite index definition used by the query, as a list.

  Args:
    query: the datastore_pb.Query to compute the index list for.

  Returns:
    A singleton list holding the entity_pb.Index the query requires, or
    an empty list when no composite index is needed.
  """
  required, kind, ancestor, props = (
      datastore_index.CompositeIndexForQuery(query))
  if not required:
    return []

  needed = entity_pb.Index()
  needed.set_entity_type(kind)
  needed.set_ancestor(bool(ancestor))

  recommended = datastore_index.GetRecommendedIndexProperties(props)
  for prop_name, direction in recommended:
    index_prop = entity_pb.Index_Property()
    index_prop.set_name(prop_name)
    index_prop.set_direction(direction)
    needed.property_list().append(index_prop)

  return [needed]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
4343,
3012,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.463965 | 12,460 |
# coding: utf-8
__all__ = ['set_app_key', 'get_app_key',
'set_timeout', 'get_timeout',
'set_port_number', 'get_port_number',
'set_log_level', 'set_log_path',
#'set_app_id', 'get_app_id',
'set_on_state_callback', 'set_on_event_callback',
'Profile', 'get_desktop_session']
from appdirs import *
import socket
import deprecation
import platform
import logging
from datetime import datetime
from logging.handlers import RotatingFileHandler
from requests import Session
from requests.exceptions import ConnectTimeout
from .tools import is_string_type
from .eikonError import EikonError
from eikon import __version__
from .streaming_session import DesktopSession
def set_app_key(app_key):
    """
    Set the app key.

    Parameters
    ----------
    app_key : string
        the app key

    Notes
    -----
    The app key identifies your application on Refinitiv Platform.
    You can get an app key using the App Key Generator (this App is available in Eikon Desktop).
    """
    profile = get_profile()
    profile.set_app_key(app_key)
def get_app_key():
    """
    Returns the app key previously set

    Notes
    -----
    The app key identifies your application on Refinitiv Platform.
    You can get an application ID using the App Key Generator (this App is available in Eikon Desktop).
    """
    profile = get_profile()
    return profile.get_app_key()
def get_desktop_session():
    """
    Returns the desktop session for streaming access

    Notes
    -----
    The desktop session is used to initialize streaming subscriptions.
    """
    profile = get_profile()
    return profile._get_desktop_session()
def set_on_state_callback(on_state):
    """
    Set the callback for desktop session state notification.

    Parameters
    ----------
    on_state : function that accepts session, state_ and state_msg as parameters.
    """
    profile = get_profile()
    profile.set_on_state_callback(on_state)
def set_on_event_callback(on_event):
    """
    Set the callback for desktop session event notification.

    Parameters
    ----------
    on_event : function that accepts session, event_code and event_msg as parameters.
    """
    profile = get_profile()
    profile.set_on_event_callback(on_event)
def set_timeout(timeout):
    """
    Set the timeout for each request.

    Parameters
    ----------
    timeout : int
        the request timeout in sec
        Default value: 30 sec
    """
    profile = get_profile()
    profile.set_timeout(timeout)
def get_timeout():
    """
    Returns the request timeout in sec
    """
    profile = get_profile()
    return profile.get_timeout()
def set_port_number(port_number):
    """
    Set the port number to communicate with the Eikon Data API proxy.
    This port number is detected automatically but you can call this function to force it manually for troubleshooting issues.

    Parameters
    ----------
    port_number : int
        the port number
    """
    profile = get_profile()
    profile.set_port_number(port_number)
def get_port_number():
    """
    Returns the port number used to communicate with the Eikon Data API Proxy
    """
    profile = get_profile()
    return profile.get_port_number()
def get_profile():
    """
    Returns the Profile singleton
    """
    singleton = Profile.get_profile()
    return singleton
def set_log_level(level):
    """
    Set the log level.
    When logs are activated (log_level != logging.NOTSET), log files are created in the current directory.
    To change directory for log files, set log path with set_log_path() function.

    Parameters
    ----------
    level : int
        Possible values from logging module : [CRITICAL, FATAL, ERROR, WARNING, WARN, INFO, DEBUG, NOTSET]

    Example
    -------
    ek.set_log_level(logging.DEBUG)
    """
    profile = get_profile()
    profile.set_log_level(level)
def set_log_path(path):
    """
    Set the filepath of the log file.

    Parameters
    ----------
    path : string
        File path location for log files

    Example
    -------
    ek.set_log_path("c:\\my_directory")
    """
    profile = get_profile()
    profile.set_log_path(path)
def identify_scripting_proxy_port(http_session, application_key):
    """
    Returns the port used by the Scripting Proxy stored in a configuration file.

    Parameters
    ----------
    http_session : requests.Session
        Session used to probe candidate ports via check_port().
    application_key : string
        App key used to authenticate the probe requests.

    Returns
    -------
    The detected port (string), or None when no responding port was found.
    """
    # Fix: `os` is used below (os.path.isdir/join/exists) but does not appear
    # in this module's import block, which would raise NameError at runtime.
    # A function-local import keeps the fix self-contained.
    import os

    port = None
    logger = get_profile().logger

    app_names = ['Eikon API proxy', 'Eikon Scripting Proxy']
    app_author = 'Thomson Reuters'

    # The proxy stores its configuration under a per-platform user directory.
    if platform.system() == 'Linux':
        path = [user_config_dir(app_name, app_author, roaming=True)
                for app_name in app_names
                if os.path.isdir(user_config_dir(app_name, app_author, roaming=True))]
    else:
        path = [user_data_dir(app_name, app_author, roaming=True)
                for app_name in app_names
                if os.path.isdir(user_data_dir(app_name, app_author, roaming=True))]

    if len(path):
        port_in_use_file = os.path.join(path[0], '.portInUse')

        # Test if '.portInUse' file exists
        if os.path.exists(port_in_use_file):
            # First test to read .portInUse file
            firstline = read_firstline_in_file(port_in_use_file)
            if firstline != '':
                saved_port = firstline.strip()
                # Only trust the saved port if the proxy actually answers on it.
                if check_port(http_session, application_key, saved_port):
                    port = saved_port
                    logger.info('Port {} was retrieved from .portInUse file'.format(port))

    if port is None:
        logger.info('Warning: file .portInUse was not found. Try to fallback to default port number.')
        port_list = ['9000', '36036']
        for port_number in port_list:
            logger.info('Try defaulting to port {}...'.format(port_number))
            if check_port(http_session, application_key, port_number):
                # NOTE(review): this early return skips the handshake() call
                # below (and handshake() is otherwise reached with port=None
                # when nothing responded) — confirm this asymmetry is intended.
                return port_number

    handshake(http_session, application_key, port)

    return port
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
834,
439,
834,
796,
37250,
2617,
62,
1324,
62,
2539,
3256,
705,
1136,
62,
1324,
62,
2539,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
2617,
62,
48678,
3256,
705,
11... | 2.688324 | 2,124 |
# -*- coding: utf-8 -*-
"""
Configuration manager for workflow definitions.
This works over Kaptan:https://github.com/emre/kaptan
The global configuration registry is declared in the bottom of this file.
"""
import os.path as op
from nipype.pipeline.engine import Node, MapNode, JoinNode
from nipype.interfaces.base import isdefined
def update_config(value):
    """ Value can be a configuration file path or a dictionary with
    configuration settings.

    Raises
    ------
    NotImplementedError
        If `value` is neither a string nor a dict.
    """
    global PYPES_CFG
    if isinstance(value, dict):
        PYPES_CFG.update(value)
    elif isinstance(value, str):
        PYPES_CFG.update_from_file(value)
    else:
        raise NotImplementedError('Cannot update the configuration with {}.'.format(value))
def check_mandatory_inputs(node_names):
    """ Raise an exception if any of the items in the List[str] `node_names`
    is not present in the global configuration settings."""
    for node_name in node_names:
        if node_name in PYPES_CFG:
            continue
        raise AttributeError('Could not find a configuration parameter for {}. '
                             'Please set it in the an input configuration file.'.format(node_name))
def get_config_setting(param_name, default=''):
    """ Return the value for the entry with name `param_name` in the global configuration."""
    value = PYPES_CFG.get(param_name, default=default)
    return value
def setup_node(interface, name, settings=None, overwrite=True, **kwargs):
    """ Create a pe.Node from `interface` with a given name.
    Check in the global configuration if there is any value for the node name
    and will set it.

    Parameters
    ----------
    interface: nipype.interface

    name: str

    settings: dict
        Dictionary with values for the pe.Node inputs.
        These will have higher priority than the ones in the global
        Configuration.

    overwrite: bool
        If True will overwrite the settings of the node if they are already
        defined.
        Default: True

    kwargs: keyword arguments
        type: str or None.
        choices: 'map', 'join', or None.
        If 'map' will return a MapNode.
        If 'join' will return a JoinNode.
        If None will return a Node.
        Extra arguments to pass to nipype.Node __init__ function.

    Returns
    -------
    node: nipype.Node
    """
    # Dispatch on the optional 'type' argument; anything unrecognised
    # (including None) yields a plain Node, as before.
    node_classes = {'map': MapNode, 'join': JoinNode}
    node_class = node_classes.get(kwargs.pop('type', None), Node)
    node = node_class(interface=interface, name=name, **kwargs)

    # Global configuration first, then caller-supplied settings on top.
    params = _get_params_for(name)
    if settings is not None:
        params.update(settings)

    _set_node_inputs(node, params, overwrite=overwrite)
    return node
# ---------------------------------------------------------------------------
# Helper functions for specific parameters of config
# ---------------------------------------------------------------------------
def check_atlas_file():
    """ Return True and the path to the atlas_file if the configuration
    setting `normalize_atlas` is True and `atlas_file` points to an existing
    file. If `normalize_atlas` is False will return False and an empty string.
    Otherwise will raise a FileNotFoundError.

    Returns
    -------
    do_atlas: bool

    atlas_file: str
        Existing file path to the atlas file

    Raises
    ------
    FileNotFoundError
        If the `normalize_atlas` option is True but the
        `atlas_file` is not an existing file.
    """
    if not get_config_setting('normalize_atlas', default=False):
        return False, ''

    atlas_file = get_config_setting('atlas_file', default='')
    if op.isfile(atlas_file):
        return True, atlas_file

    raise FileNotFoundError('Could not find atlas file in {}. '
                            'Please set `normalize_atlas` to False '
                            'or give an existing atlas image.'.format(atlas_file))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
38149,
4706,
329,
30798,
17336,
13,
198,
1212,
2499,
625,
509,
2373,
272,
25,
5450,
1378,
12567,
13,
785,
14,
368,
260,
14,
74,
2373,
272,
198,
198,
464,
... | 2.782913 | 1,428 |
from toolbag import format_as_si
| [
6738,
2891,
21454,
1330,
5794,
62,
292,
62,
13396,
628
] | 3.4 | 10 |
import os
# NOTE(review): `os` is not used in the visible portion of this module —
# confirm before removing.

# Directory containing the training images for the Siamese network.
TRAIN_IMG_PATH = r"C:\Users\ASUS\Documents\Projects\Siamese_network\package\output"
# Expected input image shape: 28x28 pixels, one channel.
IMG_SHAPE = (28, 28, 1)
# Number of samples per training batch.
BATCH_SIZE = 64
# Number of training epochs.
EPOCHS = 100
| [
11748,
28686,
198,
198,
51,
3861,
1268,
62,
3955,
38,
62,
34219,
796,
374,
1,
34,
7479,
14490,
59,
1921,
2937,
59,
38354,
59,
16775,
82,
59,
42801,
1047,
68,
62,
27349,
59,
26495,
59,
22915,
1,
198,
198,
3955,
38,
62,
9693,
45721,... | 2.223881 | 67 |
import requests
import json
import sys
from time import sleep
if __name__ == '__main__':
	# Number of games to run; may be overridden by the first CLI argument.
	iterations = 50

	if len(sys.argv) > 1:
		iterations = int(sys.argv[1])

	# Create, start, and wait for each game in sequence.
	# NOTE(review): createGame/startGame/waitForGameEnd are not defined in
	# the visible portion of this file — confirm they exist above.
	# `xrange` implies this script targets Python 2.
	for x in xrange(iterations):
		game_id = createGame()
		startGame(game_id)
		waitForGameEnd(game_id)
		print("Game Ended: %d - %s" % (x + 1, game_id))
11748,
7007,
198,
11748,
33918,
198,
11748,
25064,
198,
6738,
640,
1330,
3993,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
2676,
602,
796,
2026,
628,
197,
361,
18896,
7,
17597,
13,
853,
85,
8,
1875,
... | 2.46875 | 128 |
"""
Copyright @ 2017, 2020, 2021 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved.
This software may be used, reproduced, and provided to others only as permitted under the terms of the agreement under which it was acquired from the U.S. Government. Neither title to, nor ownership of, the software is hereby transferred. This notice shall remain on all copies of the software.
This file is available under the terms of the NASA Open Source Agreement (NOSA), and further subject to the additional disclaimer below:
Disclaimer:
THE SOFTWARE AND/OR TECHNICAL DATA ARE PROVIDED "AS IS" WITHOUT ANY WARRANTY OF ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY THAT THE SOFTWARE AND/OR TECHNICAL DATA WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR FREEDOM FROM INFRINGEMENT, ANY WARRANTY THAT THE SOFTWARE AND/OR TECHNICAL DATA WILL BE ERROR FREE, OR ANY WARRANTY THAT TECHNICAL DATA, IF PROVIDED, WILL CONFORM TO THE SOFTWARE. IN NO EVENT SHALL THE UNITED STATES GOVERNMENT, OR ITS CONTRACTORS OR SUBCONTRACTORS, BE LIABLE FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO, DIRECT, INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES, ARISING OUT OF, RESULTING FROM, OR IN ANY WAY CONNECTED WITH THIS SOFTWARE AND/OR TECHNICAL DATA, WHETHER OR NOT BASED UPON WARRANTY, CONTRACT, TORT, OR OTHERWISE, WHETHER OR NOT INJURY WAS SUSTAINED BY PERSONS OR PROPERTY OR OTHERWISE, AND WHETHER OR NOT LOSS WAS SUSTAINED FROM, OR AROSE OUT OF THE RESULTS OF, OR USE OF, THE SOFTWARE AND/OR TECHNICAL DATA.
THE UNITED STATES GOVERNMENT DISCLAIMS ALL WARRANTIES AND LIABILITIES REGARDING THIRD PARTY COMPUTER SOFTWARE, DATA, OR DOCUMENTATION, IF SAID THIRD PARTY COMPUTER SOFTWARE, DATA, OR DOCUMENTATION IS PRESENT IN THE NASA SOFTWARE AND/OR TECHNICAL DATA, AND DISTRIBUTES IT "AS IS."
RECIPIENT AGREES TO WAIVE ANY AND ALL CLAIMS AGAINST THE UNITED STATES GOVERNMENT AND ITS CONTRACTORS AND SUBCONTRACTORS, AND SHALL INDEMNIFY AND HOLD HARMLESS THE UNITED STATES GOVERNMENT AND ITS CONTRACTORS AND SUBCONTRACTORS FOR ANY LIABILITIES, DEMANDS, DAMAGES, EXPENSES OR LOSSES THAT MAY ARISE FROM RECIPIENT'S USE OF THE SOFTWARE AND/OR TECHNICAL DATA, INCLUDING ANY DAMAGES FROM PRODUCTS BASED ON, OR RESULTING FROM, THE USE THEREOF.
IF RECIPIENT FURTHER RELEASES OR DISTRIBUTES THE NASA SOFTWARE AND/OR TECHNICAL DATA, RECIPIENT AGREES TO OBTAIN THIS IDENTICAL WAIVER OF CLAIMS, INDEMNIFICATION AND HOLD HARMLESS, AGREEMENT WITH ANY ENTITIES THAT ARE PROVIDED WITH THE SOFTWARE AND/OR TECHNICAL DATA.
"""
""" pumapy
Root directory for the pumapy package.
"""
# Note: only this version has to be bumped for the whole puma project
# create tag using: git tag -a v$(python setup.py --version) -m 'DESCRIBE VERSION'
__version__ = "3.1.3"
# utilities
from pumapy.utilities.workspace import Workspace
from pumapy.utilities.logger import Logger, print_warning
from pumapy.utilities.timer import Timer
from pumapy.utilities.isosurface import generate_isosurface
from pumapy.utilities.property_maps import IsotropicConductivityMap, AnisotropicConductivityMap, ElasticityMap
from pumapy.utilities.boundary_conditions import ConductivityBC, ElasticityBC
from pumapy.utilities.example_files import path_to_example_file, list_example_files
# input/output
from pumapy.io.input import import_3Dtiff, import_bin, import_weave_vtu, import_vti
from pumapy.io.output import export_vti, export_3Dtiff, export_bin, export_sparta_implicit_surfaces, export_stl
try:
from pumapy.io.export_texgen_weave import export_weave_vtu
except:
print_warning("WARNING: 'import TexGen.Core' failed: cannot use TexGen functions and pumapy.export_weave_vtu.")
# material properties
from pumapy.materialproperties.surfacearea import compute_surface_area
from pumapy.materialproperties.volumefraction import compute_volume_fraction
from pumapy.materialproperties.mean_intercept_length import compute_mean_intercept_length
from pumapy.materialproperties.orientation import compute_orientation_st, compute_angular_differences
from pumapy.materialproperties.conductivity import compute_thermal_conductivity, compute_electrical_conductivity
from pumapy.materialproperties.tortuosity import compute_continuum_tortuosity
from pumapy.materialproperties.elasticity import compute_elasticity, compute_stress_analysis
from pumapy.materialproperties.radiation import compute_radiation, compute_extinction_coefficients
from pumapy.materialproperties.permeability import compute_permeability
# filtering
from pumapy.filters.filters import (filter_median, filter_gaussian, filter_edt, filter_mean,
filter_erode, filter_dilate, filter_opening, filter_closing)
# generation
from pumapy.generation.tpms import generate_tpms
from pumapy.generation.sphere import get_sphere
from pumapy.generation.random_spheres import generate_random_spheres
from pumapy.generation.generate_sphere import generate_sphere
from pumapy.generation.generate_2d_square_array import generate_2d_square_array
from pumapy.generation.random_fibers import generate_random_fibers
try:
from pumapy.generation.weave_3mdcp.weave_3mdcp import generate_3mdcp
except ImportError: # import it only if installed
pass
# visualization
from pumapy.visualization.render import render_volume, render_contour, render_orientation, render_contour_multiphase
from pumapy.visualization.slicer import plot_slices, compare_slices
# segmentation
from pumapy.segmentation.porespace import identify_porespace, fill_closed_pores
| [
37811,
198,
15269,
2488,
2177,
11,
12131,
11,
33448,
1578,
1829,
5070,
355,
7997,
416,
262,
22998,
286,
262,
2351,
15781,
261,
2306,
873,
290,
4687,
8694,
13,
1439,
6923,
33876,
13,
198,
1212,
3788,
743,
307,
973,
11,
31759,
11,
290,
... | 3.365098 | 1,679 |
###############################################################################
###############################################################################
# Name: log.py
# Coder: Janson Fang
# Description:
# This module contains a method used create a logger
###############################################################################
###############################################################################
# Libraries and Modules
###############################################################################
import logging
import sys
###############################################################################
# Method Definitions
###############################################################################
def createLogger(logLevel = 'DEBUG'):
'''Returns a configured logger named 'root'
Log messages are in the format:
'%(asctime)s [%(levelname)s] (%(module)s:%(lineno)d) %(message)s'
Args:
logLevel (str): A string indicating returned log level
Returns
A logger with where stream is set to sys.stdout and log level is user
defined
'''
logger = logging.getLogger('root')
logFormat = \
'%(asctime)s [%(levelname)s] (%(module)s:%(lineno)d) %(message)s'
formatter = logging.Formatter(logFormat, '%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
level = logging.getLevelName(logLevel)
logger.setLevel(level)
return logger
###############################################################################
# Main Script
###############################################################################
if __name__ == "__main__":
    # Run as a script: emit one test message to verify the configuration.
    logger = createLogger()
    logger.info('Test log statement: %d', 5)
else:
    # Imported as a module: expose a ready-configured module-level logger.
    logger = createLogger()
29113,
29113,
7804,
4242,
21017,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
6530,
25,
2604,
13,
9078,
198,
2,
327,
12342,
25,
449,
23103,
24468,
198,
2,
12489,
25,
198,
2,
220,
220,
770,
8265,
4909,
257,
2446,
973,
2251,
257,
497... | 3.823285 | 481 |
# branching_iteration.py
# name = "tsofa"
# greeting = "hi " + name
# print(greeting)
# #text = raw_input("type anything please")
# text = str(input("type anything please"))
# print(5*text)
# # if true prog ends and doesn't look at other conditions.
# x = float(input("Enter a value for x: "))
# y = float(input("Enter value for y: "))
# if x == y :
# print("x and y are equal.")
# if y != 0:
# print("therefore, x / y is ", x/y)
# elif x < y:
# print("x is smaller")
# else:
# print("y is smaller")
# print("Thanks")
# n = raw_input("You're in the lost forest. Go right or left? ")
# while n == "right":
# n = raw_input("You're in the lost forest. Go right or left? ")
# print("Yaay. You got out of the lost forest. ")
# for loops
# syntax :
'''
for <varibale> in range(<some number):
expression
expression
...
--> each time through the loop, <variable> takes a value
--> first tiem, <varialbe> starts at the smallest value
--> next time , <variable> gets the prev value + 1
--> etc
--> range(start, stop, step)
--> default values are start = 0 and step = 1 and optional
--> loop until value is stop - 1
-->FYI: range() if u give it 1value, it's going to be for STOP, 2values will be STOP & START etc while
--> the missing ones will de by default
'''
# for loop examples
# mysum = 0
# for i in range(5, 7):
# mysum += i
# print(mysum) # 11
# mysum = 0
# for i in range(5, 11, 2):
# mysum += i
# print(mysum) #21
'''
break statements
--> immediately exits whatever loop it is in
--> skips remaining expressions in code block
--> exits only innermost loop!
'''
# break statement examples
# mysum = 0
# for i in range(5, 11, 2):
# mysum += i
# if mysum == 5:
# break
# mysum += 1
# print(mysum)
# for loops VS while loops
'''
--> know number of iterations --> unbounded number of iterations
--> uses a counter --> can use a counter but must initialize before loop
and increment it inside the loop
'''
# Demonstrate the two ways of combining values in print().
x = 1
print(x)
x_str = str(x)
print("my fav number is", x, ".", "x =", x) # using , in print
print("my fav number is " + x_str + ". " + "x = " + x_str) # using + in print
# when concatenating: it's your choice what to use, but know these
'''
--> , FYI - u can use mix object ie string and integers types BUT output will have spaces
--> + FYI - u cannot mix object - u use string types only BUT output will be nice.
'''
2,
49526,
62,
2676,
341,
13,
9078,
198,
198,
2,
1438,
796,
366,
83,
568,
13331,
1,
198,
2,
31933,
796,
366,
5303,
366,
1343,
1438,
198,
2,
3601,
7,
70,
2871,
278,
8,
198,
198,
2,
1303,
5239,
796,
8246,
62,
15414,
7203,
4906,
1... | 2.695887 | 924 |
"""
Holds event definitions to be used by services for generating events
"""
from .base import EventBase
from .types import (
ActivePolicyBundleContentChanged,
ActivePolicyBundleIdChanged,
FeedGroupSyncCompleted,
FeedGroupSyncFailed,
FeedGroupSyncStarted,
FeedSyncCompleted,
FeedSyncFailed,
FeedSyncStarted,
FeedSyncTaskCompleted,
FeedSyncTaskFailed,
FeedSyncTaskStarted,
ImageAnalysisFailed,
ImageAnalysisSuccess,
ImageArchived,
ImageArchiveDeleted,
ImageArchiveDeleteFailed,
ImageArchivingFailed,
ImageRegistryLookupFailed,
ImageRestored,
ImageRestoreFailed,
ListTagsFailed,
PolicyEngineLoadAnalysisFailed,
RandomWisdomEvent,
SaveAnalysisFailed,
ServiceAuthzPluginHealthCheckFailed,
ServiceDowned,
ServiceOrphaned,
ServiceRemoved,
TagManifestParseFailed,
TagPolicyEvaluationUpdated,
TagVulnerabilityUpdated,
UserAnalyzeImageCompleted,
UserAnalyzeImageFailed,
)
## TODO: Update refs in __init__ to types.py and fix code instances for invocation of events. Then, add API call.
| [
37811,
198,
39,
10119,
1785,
17336,
284,
307,
973,
416,
2594,
329,
15453,
2995,
198,
37811,
198,
198,
6738,
764,
8692,
1330,
8558,
14881,
198,
6738,
764,
19199,
1330,
357,
198,
220,
220,
220,
14199,
36727,
33,
31249,
19746,
31813,
11,
... | 2.885714 | 385 |
__all__ = ["Package"]
from resource_management.core.base import Resource, ForcedListArgument, ResourceArgument
| [
834,
439,
834,
796,
14631,
27813,
8973,
198,
198,
6738,
8271,
62,
27604,
13,
7295,
13,
8692,
1330,
20857,
11,
40731,
8053,
28100,
1713,
11,
20857,
28100,
1713,
628
] | 3.896552 | 29 |
'''This code is for Embedding face
author : Hyunah Oh
data : 2020.01.22
flow : Detection -> Alignment -> Normalization -> Embedding(load pretrained) -> Training Classification
'''
import numpy as np
import cv2
from data import load_metadata
from model import create_model
from align import AlignDlib
from sklearn.externals import joblib
# Load image metadata (paths/labels) from the 'images' directory.
metadata = load_metadata('images')

# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')

#### Detection & Alignment & Normalization ####
# One 128-dimensional embedding row per image.
embedded = np.zeros((metadata.shape[0], 128))

nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')

#### Embedding ####
# NOTE(review): load_image and align_image are not imported or defined in
# the visible portion of this file — presumably helpers from the data/align
# modules; confirm.
for i, m in enumerate(metadata):
    img = load_image(m.image_path())
    img = align_image(img)
    # scale RGB values to interval [0,1]
    img = (img / 255.).astype(np.float32)
    # obtain embedding vector for image
    embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]

# Persist the embedding matrix for the downstream classifier training step.
joblib.dump(embedded, 'models/embedded_images.pkl')
7061,
6,
1212,
2438,
318,
329,
13302,
6048,
278,
1986,
198,
1772,
1058,
6707,
403,
993,
3966,
198,
1366,
1058,
12131,
13,
486,
13,
1828,
198,
5202,
1058,
46254,
4613,
978,
16747,
4613,
14435,
1634,
4613,
13302,
6048,
278,
7,
2220,
218... | 2.927171 | 357 |
from threading import Thread
import time
import random | [
6738,
4704,
278,
1330,
14122,
198,
11748,
640,
198,
11748,
4738
] | 4.909091 | 11 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for pw_console.text_formatting"""
import unittest
from parameterized import parameterized # type: ignore
from pw_console.text_formatting import get_line_height
class TestTextFormatting(unittest.TestCase):
    """Tests for text_formatting functions."""
    # Each case is (name, text_width, screen_width, prefix_width,
    # expected_height, expected_trailing_characters).  The len(...) calls on
    # literal strings are a visual aid: each string pictures the screen
    # content whose width or wrapped height is being encoded.
    # NOTE(review): runs of spaces inside these pictorial string literals
    # appear to have been collapsed by text extraction — restore the original
    # padding from upstream before relying on the encoded widths.
    @parameterized.expand([
        (
            'with short prefix height 2',
            len('LINE that should be wrapped'), # text_width
            len('| |'), # screen_width
            len('--->'), # prefix_width
            ( 'LINE that should b\n'
              '--->e wrapped \n').count('\n'), # expected_height
            len( '_____'), # expected_trailing_characters
        ),
        (
            'with short prefix height 3',
            len('LINE that should be wrapped three times.'), # text_width
            len('| |'), # screen_width
            len('--->'), # prefix_width
            ( 'LINE that should b\n'
              '--->e wrapped thre\n'
              '--->e times. \n').count('\n'), # expected_height
            len( '______'), # expected_trailing_characters
        ),
        (
            'with short prefix height 4',
            len('LINE that should be wrapped even more times, say four.'),
            len('| |'), # screen_width
            len('--->'), # prefix_width
            ( 'LINE that should b\n'
              '--->e wrapped even\n'
              '---> more times, s\n'
              '--->ay four. \n').count('\n'), # expected_height
            len( '______'), # expected_trailing_characters
        ),
        (
            'no wrapping needed',
            len('LINE wrapped'), # text_width
            len('| |'), # screen_width
            len('--->'), # prefix_width
            ( 'LINE wrapped \n').count('\n'), # expected_height
            len( '______'), # expected_trailing_characters
        ),
        (
            'prefix is > screen width',
            len('LINE that should be wrapped'), # text_width
            len('| |'), # screen_width
            len('------------------>'), # prefix_width
            ( 'LINE that should b\n'
              'e wrapped \n').count('\n'), # expected_height
            len( '_________'), # expected_trailing_characters
        ),
        (
            'prefix is == screen width',
            len('LINE that should be wrapped'), # text_width
            len('| |'), # screen_width
            len('----------------->'), # prefix_width
            ( 'LINE that should b\n'
              'e wrapped \n').count('\n'), # expected_height
            len( '_________'), # expected_trailing_characters
        ),
    ]) # yapf: disable
    def test_get_line_height(self, _name, text_width, screen_width,
                             prefix_width, expected_height,
                             expected_trailing_characters) -> None:
        """Test line height calculations."""
        height, remaining_width = get_line_height(text_width, screen_width,
                                                  prefix_width)
        self.assertEqual(height, expected_height)
        self.assertEqual(remaining_width, expected_trailing_characters)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
2,
15069,
33448,
383,
23097,
39054,
46665,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
198,
2,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
... | 2.100845 | 1,894 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'amir.ui'
#
# Created by: PyQt5 UI code generator 5.14.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
    import sys
    # Standard PyQt5 bootstrap: create the application, build the main
    # window from the generated Ui class, and enter the event loop.
    # NOTE(review): Ui_MainWindow is not defined in the visible portion of
    # this generated file — it is normally emitted by pyuic5 above this
    # guard; confirm.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the last window closes; its status code is
    # propagated as the process exit code.
    sys.exit(app.exec_())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
321,
343,
13,
9019,
6,
201,
198,
2,
201,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
... | 2.308057 | 211 |
# Copyright 2016-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Commands for CPCs.
"""
from __future__ import absolute_import
import click
from tabulate import tabulate
import zhmcclient
from .zhmccli import cli
from ._helper import print_properties, print_resources, \
options_to_properties, original_options, COMMAND_OPTIONS_METAVAR, \
click_exception, add_options, LIST_OPTIONS, TABLE_FORMATS, hide_property
# Valid values for the --power-saving option of 'cpc set-power-save'.
POWER_SAVING_TYPES = ['high-performance', 'low-power', 'custom']
# Default used when --power-saving is not specified.
DEFAULT_POWER_SAVING_TYPE = 'high-performance'
# Valid values for the --power-capping-state option of 'cpc set-power-capping'.
POWER_CAPPING_STATES = ['disabled', 'enabled', 'custom']
def find_cpc(cmd_ctx, client, cpc_name):
    """
    Look up a CPC by name on the HMC and return its zhmcclient resource
    object.

    Any zhmcclient error (e.g. CPC not found) is converted into a click
    exception, using the error format configured on the command context.
    """
    try:
        return client.cpcs.find(name=cpc_name)
    except zhmcclient.Error as exc:
        raise click_exception(exc, cmd_ctx.error_format)
# Top-level 'zhmc cpc' command group; the subcommands below register
# themselves on it. NOTE: click renders the docstring as the CLI help text,
# so the docstring is left byte-for-byte unchanged.
@cli.group('cpc', options_metavar=COMMAND_OPTIONS_METAVAR)
def cpc_group():
    """
    Command group for managing CPCs.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
# 'zhmc cpc list' command.
@cpc_group.command('list', options_metavar=COMMAND_OPTIONS_METAVAR)
# Hidden flags, presumably kept for backwards compatibility; how they affect
# the output is decided in cmd_cpc_list() (not visible in this chunk) --
# verify there.
@click.option('--type', is_flag=True, required=False, hidden=True)
@click.option('--mach', is_flag=True, required=False, hidden=True)
@add_options(LIST_OPTIONS)
@click.pass_obj
def cpc_list(cmd_ctx, **options):
    """
    List the CPCs managed by the HMC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate the actual work to cmd_cpc_list() via the command context.
    cmd_ctx.execute_cmd(lambda: cmd_cpc_list(cmd_ctx, options))
# 'zhmc cpc show' command. The docstring doubles as the click help text
# (the \b markers suppress click's paragraph rewrapping), so it is left
# unchanged.
@cpc_group.command('show', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.option('--all', is_flag=True, required=False,
              help='Show all properties. Default: Hide some properties in '
              'table output formats')
@click.pass_obj
def cpc_show(cmd_ctx, cpc, **options):
    """
    Show details of a CPC.
    \b
    In table output formats, the following properties are hidden by default
    but can be shown by using the --all option:
    - auto-start-list
    - available-features-list
    - cpc-power-saving-state
    - ec-mcl-description
    - network1-ipv6-info
    - network2-ipv6-info
    - stp-configuration
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_show() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_show(cmd_ctx, cpc, options))
# 'zhmc cpc update' command: each option maps to one updatable CPC property;
# options that are not specified leave the corresponding property unchanged.
@cpc_group.command('update', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.option('--description', type=str, required=False,
              help='The new description of the CPC. '
              '(DPM mode only).')
@click.option('--acceptable-status', type=str, required=False,
              help='The new set of acceptable operational status values.')
# TODO: Support multiple values for acceptable-status
@click.option('--next-activation-profile', type=str, required=False,
              help='The name of the new next reset activation profile. '
              '(not in DPM mode).')
@click.option('--processor-time-slice', type=int, required=False,
              help='The new time slice (in ms) for logical processors. '
              'A value of 0 causes the time slice to be dynamically '
              'determined by the system. A positive value causes a constant '
              'time slice to be used. '
              '(not in DPM mode).')
# Boolean flag pair; default=None means "do not change this property".
@click.option('--wait-ends-slice/--no-wait-ends-slice', default=None,
              required=False,
              help='The new setting for making logical processors lose their '
              'time slice when they enter a wait state. '
              '(not in DPM mode).')
@click.pass_obj
def cpc_update(cmd_ctx, cpc, **options):
    """
    Update the properties of a CPC.
    Only the properties will be changed for which a corresponding option is
    specified, so the default for all options is not to change properties.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    \b
    Limitations:
      * The --acceptable-status option does not support multiple values.
    """
    # Delegate to cmd_cpc_update() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_update(cmd_ctx, cpc, options))
# 'zhmc cpc set-power-save' command.
@cpc_group.command('set-power-save', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
# The help text embeds the default so the CLI help stays in sync with
# DEFAULT_POWER_SAVING_TYPE.
@click.option('--power-saving', type=click.Choice(POWER_SAVING_TYPES),
              required=False, default=DEFAULT_POWER_SAVING_TYPE,
              help='Defines the type of power saving. Default: {pd}'.
              format(pd=DEFAULT_POWER_SAVING_TYPE))
@click.pass_obj
def set_power_save(cmd_ctx, cpc, **options):
    """
    Set the power save settings of a CPC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_set_power_save() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_set_power_save(cmd_ctx, cpc, options))
# 'zhmc cpc set-power-capping' command.
@cpc_group.command('set-power-capping',
                   options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.option('--power-capping-state', type=click.Choice(POWER_CAPPING_STATES),
              required=True,
              help='Defines the state of power capping.')
# Per the help text, required when --power-capping-state=enabled; that
# dependency is presumably enforced downstream -- verify in the worker.
@click.option('--power-cap-current', type=int, required=False,
              help='Specifies the current cap value for the CPC in watts (W). '
              'Required if power capping state is enabled.')
@click.pass_obj
def set_power_capping(cmd_ctx, cpc, **options):
    """
    Set the power capping settings of a CPC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_set_power_capping() (defined elsewhere).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_set_power_capping(cmd_ctx, cpc,
                                                          options))
# 'zhmc cpc get-em-data' command.
@cpc_group.command('get-em-data', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.pass_obj
def get_em_data(cmd_ctx, cpc):
    """
    Get all energy management data of a CPC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_get_em_data() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_get_em_data(cmd_ctx, cpc))
# Nested 'zhmc cpc autostart' command group; its subcommands manage the
# auto-start list of a CPC.
@cpc_group.group('autostart', options_metavar=COMMAND_OPTIONS_METAVAR)
def cpc_autostart_group():
    """
    Command group for managing the auto-start list of a CPC (in DPM mode).
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
# 'zhmc cpc autostart show' command.
@cpc_autostart_group.command('show', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.pass_obj
def cpc_autostart_show(cmd_ctx, cpc):
    """
    Show the auto-start list of a CPC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_autostart_show() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(lambda: cmd_cpc_autostart_show(cmd_ctx, cpc))
# 'zhmc cpc autostart add' command. PARTITIONS_DELAY is a click tuple
# argument: a partition-name string followed by an integer delay.
@cpc_autostart_group.command('add', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.argument('PARTITIONS_DELAY', type=(str, int), metavar='PARTITIONS DELAY')
@click.option('--group', type=str, metavar='GROUP',
              required=False,
              help='Add the partition(s) as a partition group with this name. '
              'Required when adding a group.')
@click.option('--description', type=str, metavar='TEXT',
              required=False,
              help='Description of partition group. '
              'Default: No description.')
@click.option('--before', type=str, metavar='PARTITION_OR_GROUP',
              required=False,
              help='Insert the new partition or group before this '
              'partition/group. '
              'Default: Append new partition or group to the end.')
@click.option('--after', type=str, metavar='PARTITION_OR_GROUP',
              required=False,
              help='Insert the new partition or group after this '
              'partition/group. '
              'Default: Append new partition or group to the end.')
@click.pass_obj
def cpc_autostart_add(cmd_ctx, cpc, partitions_delay, **options):
    """
    Add a partition or group to the auto-start list of a CPC.
    A partition group exists only in context of the auto-start list; it has
    nothing to do with Group objects.
    PARTITIONS is the partition name or a comma-separated list of partition
    names in case of adding a partition group.
    DELAY is the delay after starting this partition or group, in seconds.
    The updated auto-start list is shown.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_autostart_add() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(
        lambda: cmd_cpc_autostart_add(cmd_ctx, cpc, partitions_delay, options))
# 'zhmc cpc autostart delete' command.
@cpc_autostart_group.command('delete', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.argument('PARTITION_OR_GROUP', type=str, metavar='PARTITION_OR_GROUP')
@click.pass_obj
def cpc_autostart_delete(cmd_ctx, cpc, partition_or_group):
    """
    Delete a partition or group from the auto-start list of a CPC.
    The updated auto-start list is shown.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_autostart_delete() (defined elsewhere).
    cmd_ctx.execute_cmd(
        lambda: cmd_cpc_autostart_delete(cmd_ctx, cpc, partition_or_group))
# 'zhmc cpc autostart clear' command.
@cpc_autostart_group.command('clear', options_metavar=COMMAND_OPTIONS_METAVAR)
@click.argument('CPC', type=str, metavar='CPC')
@click.pass_obj
def cpc_autostart_clear(cmd_ctx, cpc):
    """
    Clear the auto-start list of a CPC.
    In addition to the command-specific options shown in this help text, the
    general options (see 'zhmc --help') can also be specified right after the
    'zhmc' command name.
    """
    # Delegate to cmd_cpc_autostart_clear() (defined elsewhere in this module).
    cmd_ctx.execute_cmd(
        lambda: cmd_cpc_autostart_clear(cmd_ctx, cpc))
def get_auto_start_list(cpc):
    """
    Helper function that converts the 'auto-start-list' property of a CPC
    to a list suitable for the zhmcclient.Cpc.set_auto_start_list() method.

    Parameters:
      cpc: CPC resource object whose 'auto-start-list' property is read.

    Returns:
      None - if the CPC is in classic mode
      list, with items that are one of:
      - tuple(partition, post_start_delay)
      - tuple(partition_list, name, description, post_start_delay)
    """
    auto_start_list = cpc.prop('auto-start-list', None)
    if auto_start_list is None:
        # CPC is in classic mode
        return None
    as_list = []
    for auto_start_item in auto_start_list:
        # The two item types are mutually exclusive, so use if/elif and
        # look the 'type' key up only as often as needed.
        if auto_start_item['type'] == 'partition':
            # item is a single partition
            uri = auto_start_item['partition-uri']
            delay = auto_start_item['post-start-delay']
            partition = cpc.partitions.resource_object(uri)
            as_list.append((partition, delay))
        elif auto_start_item['type'] == 'partition-group':
            # item is a partition group
            name = auto_start_item['name']
            description = auto_start_item['description']
            delay = auto_start_item['post-start-delay']
            partitions = [cpc.partitions.resource_object(uri)
                          for uri in auto_start_item['partition-uris']]
            as_list.append((partitions, name, description, delay))
    return as_list
def auto_start_table_str(as_list, output_format):
    """
    Render an auto-start list (as returned by get_auto_start_list()) as a
    table string in the specified tabulate output format.
    """
    headers = ['Partition/Group', 'Post start delay', 'Partitions in group',
               'Group description']
    rows = []
    for entry in as_list:
        if isinstance(entry[0], zhmcclient.Partition):
            # 2-tuple: a single partition and its delay
            partition, delay = entry
            rows.append([partition.name, delay])
        else:
            # 4-tuple: a partition group
            partitions, name, description, delay = entry
            joined_names = ', '.join(p.name for p in partitions)
            rows.append([name, delay, joined_names, description])
    return tabulate(rows, headers, tablefmt=output_format)
| [
2,
15069,
1584,
12,
23344,
19764,
11421,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.589144 | 5,306 |
from __future__ import print_function
import argparse
import sys
import thread
sys.path.append('..')
sys.path.append('gen-py')
from frugal.context import FContext
from frugal.provider import FScopeProvider
from frugal.server.http_server import FHttpServer
from frugal.tornado.transport import FNatsPublisherTransportFactory
from frugal.tornado.transport import FNatsSubscriberTransportFactory
from nats.io.client import Client as NATS
from tornado import gen, ioloop
from common.FrugalTestHandler import FrugalTestHandler
from common.utils import *
from frugal_test.f_FrugalTest import Processor
# Explicitly importing from gen_py_tornado
from gen_py_tornado.frugal_test.f_Events_publisher import EventsPublisher
from gen_py_tornado.frugal_test.f_Events_subscriber import EventsSubscriber
from gen_py_tornado.frugal_test.ttypes import Event
@gen.coroutine
# Use the tornado pub/sub since vanilla python code generation doesn't support it
# Clients in the cross language tests will fail if they try to publish and don't receive a response
# TODO: Modify the crossrunner to support running tests with or without scopes
@gen.coroutine
if __name__ == '__main__':
main()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
11748,
4704,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
492,
11537,
198,
17597,
13,
6978,
13,
33295,
10786,
5235,
12,
9078,
11537,
19... | 3.364672 | 351 |
# coding=utf-8
import math
import torch
from torch import Tensor
from torch import nn
from src.util.model_util import freeze_params
class Embeddings(nn.Module):
    """
    Token embedding layer.

    Wraps :class:`torch.nn.Embedding` and optionally applies the Transformer
    convention of scaling the looked-up vectors by sqrt(embedding_dim).
    """
    def __init__(self, embedding_dim: int = 64, scale: bool = False, vocab_size: int = 0,
                 padding_idx: int = 1, freeze: bool = False, **kwargs):
        """
        Create a new embedding table for the vocabulary.

        :param embedding_dim: dimensionality of each embedding vector
        :param scale: if True, multiply embeddings by sqrt(embedding_dim)
        :param vocab_size: number of entries in the table
        :param padding_idx: index whose embedding is kept at the zero vector
        :param freeze: if True, exclude the parameters from training
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.scale = scale
        self.vocab_size = vocab_size
        self.embedding = nn.Embedding(
            vocab_size, self.embedding_dim, padding_idx=padding_idx)
        if freeze:
            freeze_params(self)

    def forward(self, x: Tensor) -> Tensor:
        """
        Look up the embedding vectors for the token ids in `x`.
        """
        embedded = self.embedding(x)
        if not self.scale:
            return embedded
        # Transformer convention: scale embeddings by sqrt(d_model).
        return embedded * math.sqrt(self.embedding_dim)
class PositionalEncoding(nn.Module):
    """
    Pre-compute position encodings (PE).

    In forward pass, this adds the position-encodings to the
    input for as many time steps as necessary.

    Implementation based on OpenNMT-py.
    https://github.com/OpenNMT/OpenNMT-py
    """
    def __init__(self, size: int = 0, max_len: int = 5000):
        """
        Positional Encoding with maximum length max_len

        :param size: module dim (embedding dimension; must be even)
        :param max_len: maximum supported sequence length
        """
        if size % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(size))
        pe = torch.zeros(max_len, size)
        # BUG FIX: the previous code used torch.arange(1, max_len)
        # (max_len-1 rows) and a div_term of length `size` (step 1), which
        # made the slice assignments below fail with a shape mismatch at
        # construction time. The canonical OpenNMT-py formula uses
        # positions starting at 0 and a step of 2 over the dimensions.
        position = torch.arange(0, max_len).unsqueeze(1)  # (max_len, 1)
        div_term = torch.exp((torch.arange(0, size, 2, dtype=torch.float)
                              * -(math.log(10000.0) / size)))  # (size/2,)
        # `0::2` / `1::2` select the even/odd embedding dimensions (step 2).
        pe[:, 0::2] = torch.sin(position.float() * div_term)
        pe[:, 1::2] = torch.cos(position.float() * div_term)
        pe = pe.unsqueeze(0)  # shape [1, max_len, size]
        super().__init__()
        # Buffers won't be returned in module.parameters(), so the optimizer
        # won't update them; they are still saved in the module's state dict.
        self.register_buffer('pe', pe)
        self.dim = size

    def forward(self, emb):
        """
        Add position encodings to an embedded input sequence.

        :param emb (FloatTensor): sequence of word vectors,
            shape (batch_size, seq_len, self.dim)
        :return: emb plus position encodings, same shape
        """
        # Slice the pre-computed table to the current sequence length.
        return emb + self.pe[:, :emb.size(1)]
| [
2,
19617,
28,
40477,
12,
23,
201,
198,
11748,
10688,
201,
198,
11748,
28034,
201,
198,
6738,
28034,
1330,
309,
22854,
201,
198,
6738,
28034,
1330,
299,
77,
201,
198,
201,
198,
6738,
12351,
13,
22602,
13,
19849,
62,
22602,
1330,
16611,... | 2.087149 | 1,354 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from future.utils import native_str
import datetime
import re
import warnings
from obspy import Catalog, UTCDateTime
from obspy.core.event import Origin, Magnitude
from obspy.core.inventory import Inventory
from obspy.core.util.misc import to_int_or_zero
# Optional dependency handling for 'pyshp' (imported as `shapefile`).
# Sets module-level feature flags that the writer functions below check.
try:
    import shapefile
except ImportError as e:
    # pyshp is not installed: remember the reason so _write_shapefile() can
    # raise a helpful ImportError lazily, at first use.
    HAS_PYSHP = False
    PYSHP_VERSION = None
    PYSHP_VERSION_AT_LEAST_1_2_11 = False
    IMPORTERROR_MSG = str(e) + (
        ". ObsPy's write support for shapefiles requires the 'pyshp' module "
        "to be installed in addition to the general ObsPy dependencies.")
else:
    HAS_PYSHP = True
    try:
        # Parse e.g. "2.1.0" into [2, 1, 0] for list comparison.
        PYSHP_VERSION = list(map(to_int_or_zero,
                                 shapefile.__version__.split('.')))
    except AttributeError:
        # Very old pyshp without __version__; version stays unknown (None).
        PYSHP_VERSION = None
        PYSHP_VERSION_AT_LEAST_1_2_11 = False
    else:
        PYSHP_VERSION_AT_LEAST_1_2_11 = PYSHP_VERSION >= [1, 2, 11]
# Warning text emitted when an old, buggy pyshp version is detected.
PYSHP_VERSION_WARNING = (
    'pyshp versions < 1.2.11 are buggy, e.g. in writing numerical values to '
    'the dbf table, so e.g. timestamp float values might lack proper '
    'precision. You should update to a newer pyshp version.')
def _write_shapefile(obj, filename, **kwargs):
    """
    Write :class:`~obspy.core.inventory.inventory.Inventory` or
    :class:`~obspy.core.event.Catalog` object to a ESRI shapefile.
    :type obj: :class:`~obspy.core.event.Catalog` or
        :class:`~obspy.core.inventory.Inventory`
    :param obj: ObsPy object for shapefile output
    :type filename: str
    :param filename: Filename to write to. According to ESRI shapefile
        definition, multiple files with the following suffixes will be written:
        ".shp", ".shx", ".dbf", ".prj". If filename does not end with ".shp",
        it will be appended. Other files will be created with respective
        suffixes accordingly.
    :raises ImportError: if pyshp is not installed.
    :raises TypeError: if ``obj`` is neither a Catalog nor an Inventory.
    """
    if not HAS_PYSHP:
        raise ImportError(IMPORTERROR_MSG)
    if not PYSHP_VERSION_AT_LEAST_1_2_11:
        warnings.warn(PYSHP_VERSION_WARNING)
    if not filename.endswith(".shp"):
        filename += ".shp"
    # pyshp changed its Writer API in version 2: the target file is given to
    # the constructor and the writer is closed instead of saved.
    # NOTE(review): if pyshp is installed but exposes no __version__,
    # PYSHP_VERSION is None and this list comparison raises TypeError on
    # Python 3 -- confirm whether that case can occur in practice.
    if PYSHP_VERSION >= [2., 0, 0]:
        writer = shapefile.Writer(target=filename, shapeType=shapefile.POINT)
    else:
        writer = shapefile.Writer(shapeType=shapefile.POINT)
    # keep geometry and attribute records in sync automatically
    writer.autoBalance = 1
    # create the layer
    if isinstance(obj, Catalog):
        _add_catalog_layer(writer, obj)
    elif isinstance(obj, Inventory):
        _add_inventory_layer(writer, obj)
    else:
        msg = ("Object for shapefile output must be "
               "a Catalog or Inventory.")
        raise TypeError(msg)
    if PYSHP_VERSION >= [2.0, 0, 0]:
        writer.close()
    else:
        writer.save(filename)
    # Write the ".prj" projection sidecar file next to the ".shp" file.
    _save_projection_file(filename.rsplit('.', 1)[0] + '.prj')
def _add_catalog_layer(writer, catalog):
    """
    Add one point feature per event of a catalog to a shapefile writer.

    :type writer: :class:`shapefile.Writer`.
    :param writer: pyshp Writer object
    :type catalog: :class:`~obspy.core.event.Catalog`
    :param catalog: Event data to add as a new layer.
    """
    # [name, type, width, precision]
    # field name is 10 chars max
    # ESRI shapefile attributes are stored in dbf files, which can not
    # store datetimes, only dates, see:
    # http://www.gdal.org/drv_shapefile.html
    # use POSIX timestamp for exact origin time, set time of first pick
    # for events with no origin
    field_definitions = [
        ["EventID", 'C', 100, None],
        ["OriginID", 'C', 100, None],
        ["MagID", 'C', 100, None],
        ["Date", 'D', None, None],
        ["OriginTime", 'N', 20, 6],
        ["FirstPick", 'N', 20, 6],
        ["Longitude", 'N', 16, 10],
        ["Latitude", 'N', 16, 10],
        ["Depth", 'N', 8, 3],
        ["MinHorUncM", 'N', 12, 3],
        ["MaxHorUncM", 'N', 12, 3],
        ["MaxHorAzi", 'N', 7, 3],
        ["OriUncDesc", 'C', 40, None],
        ["Magnitude", 'N', 8, 3],
    ]
    # _create_layer() is defined elsewhere in this module (not in this chunk).
    _create_layer(writer, field_definitions)
    for event in catalog:
        # try to use preferred origin/magnitude, fall back to first or use
        # empty one with `None` values in it
        origin = (event.preferred_origin() or
                  event.origins and event.origins[0] or
                  Origin(force_resource_id=False))
        magnitude = (event.preferred_magnitude() or
                     event.magnitudes and event.magnitudes[0] or
                     Magnitude(force_resource_id=False))
        t_origin = origin.time
        pick_times = [pick.time for pick in event.picks
                      if pick.time is not None]
        t_pick = pick_times and min(pick_times) or None
        # prefer origin time; otherwise the earliest pick time
        date = t_origin or t_pick
        feature = {}
        # setting fields with `None` results in values of `0.000`
        # need to really omit setting values if they are `None`
        if event.resource_id is not None:
            feature["EventID"] = str(event.resource_id)
        if origin.resource_id is not None:
            feature["OriginID"] = str(origin.resource_id)
        if t_origin is not None:
            # Use timestamp for exact timing
            feature["OriginTime"] = t_origin.timestamp
        if t_pick is not None:
            # Use timestamp for exact timing
            feature["FirstPick"] = t_pick.timestamp
        if date is not None:
            # ESRI shapefile attributes are stored in dbf files, which can
            # not store datetimes, only dates. We still need to use the
            # GDAL API with precision up to seconds (aiming at other output
            # drivers of GDAL; `100` stands for GMT)
            feature["Date"] = date.datetime
        if origin.latitude is not None:
            feature["Latitude"] = origin.latitude
        if origin.longitude is not None:
            feature["Longitude"] = origin.longitude
        if origin.depth is not None:
            # depth is in meters in ObsPy events; store kilometers
            feature["Depth"] = origin.depth / 1e3
        if magnitude.mag is not None:
            feature["Magnitude"] = magnitude.mag
        if magnitude.resource_id is not None:
            feature["MagID"] = str(magnitude.resource_id)
        if origin.origin_uncertainty is not None:
            ou = origin.origin_uncertainty
            ou_description = ou.preferred_description
            if ou_description == 'uncertainty ellipse':
                feature["MinHorUncM"] = ou.min_horizontal_uncertainty
                feature["MaxHorUncM"] = ou.max_horizontal_uncertainty
                feature["MaxHorAzi"] = \
                    ou.azimuth_max_horizontal_uncertainty
                feature["OriUncDesc"] = ou_description
            elif ou_description == 'horizontal uncertainty':
                # isotropic uncertainty: min == max, azimuth irrelevant
                feature["MinHorUncM"] = ou.horizontal_uncertainty
                feature["MaxHorUncM"] = ou.horizontal_uncertainty
                feature["MaxHorAzi"] = 0.0
                feature["OriUncDesc"] = ou_description
            else:
                msg = ('Encountered an event with origin uncertainty '
                       'description of type "{}". This is not yet '
                       'implemented for output as shapefile. No origin '
                       'uncertainty will be added to shapefile for such '
                       'events.').format(ou_description)
                warnings.warn(msg)
        if origin.latitude is not None and origin.longitude is not None:
            writer.point(origin.longitude, origin.latitude)
        # write the attribute record (helper defined elsewhere); the record
        # is added even when no geometry was written for this event
        _add_record(writer, feature)
def _add_inventory_layer(writer, inventory):
    """
    Add one point feature per station of an inventory to a shapefile writer.

    :type writer: :class:`shapefile.Writer`.
    :param writer: pyshp Writer object
    :type inventory: :class:`~obspy.core.inventory.Inventory`
    :param inventory: Inventory data to add as a new layer.
    """
    # [name, type, width, precision]
    # field name is 10 chars max
    # ESRI shapefile attributes are stored in dbf files, which can not
    # store datetimes, only dates, see:
    # http://www.gdal.org/drv_shapefile.html
    # use POSIX timestamp for exact origin time, set time of first pick
    # for events with no origin
    field_definitions = [
        ["Network", 'C', 20, None],
        ["Station", 'C', 20, None],
        ["Longitude", 'N', 16, 10],
        ["Latitude", 'N', 16, 10],
        ["Elevation", 'N', 9, 3],
        ["StartDate", 'D', None, None],
        ["EndDate", 'D', None, None],
        ["Channels", 'C', 254, None],
    ]
    # _create_layer() is defined elsewhere in this module (not in this chunk).
    _create_layer(writer, field_definitions)
    for net in inventory:
        for sta in net:
            # e.g. "00.BHZ,00.BHN,00.BHE" -- one entry per channel
            channel_list = ",".join(["%s.%s" % (cha.location_code, cha.code)
                                     for cha in sta])
            feature = {}
            # setting fields with `None` results in values of `0.000`
            # need to really omit setting values if they are `None`
            if net.code is not None:
                feature["Network"] = net.code
            if sta.code is not None:
                feature["Station"] = sta.code
            if sta.latitude is not None:
                feature["Latitude"] = sta.latitude
            if sta.longitude is not None:
                feature["Longitude"] = sta.longitude
            if sta.elevation is not None:
                feature["Elevation"] = sta.elevation
            if sta.start_date is not None:
                # ESRI shapefile attributes are stored in dbf files, which
                # can not store datetimes, only dates. We still need to use
                # the GDAL API with precision up to seconds (aiming at
                # other output drivers of GDAL; `100` stands for GMT)
                feature["StartDate"] = sta.start_date.datetime
            if sta.end_date is not None:
                # ESRI shapefile attributes are stored in dbf files, which
                # can not store datetimes, only dates. We still need to use
                # the GDAL API with precision up to seconds (aiming at
                # other output drivers of GDAL; `100` stands for GMT)
                feature["EndDate"] = sta.end_date.datetime
            if channel_list:
                feature["Channels"] = channel_list
            if sta.latitude is not None and sta.longitude is not None:
                writer.point(sta.longitude, sta.latitude)
            # write the attribute record (helper defined elsewhere); the
            # record is added even when no geometry was written
            _add_record(writer, feature)
wgs84_wkt = \
"""
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
"""
wgs84_wkt = re.sub(r'\s+', '', wgs84_wkt)
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod(exclude_empty=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.218788 | 4,918 |
import urllib.request,json
from .models import NewsSource,NewsArticle
from datetime import date
# News API key; None at import time -- presumably populated at runtime from
# application configuration (verify against the app setup code).
api_key = None
# Base URL template for listing news sources; populated at runtime.
base_url = None
# URL template for fetching a source's articles; populated at runtime.
articles_url = None
def get_news_source(category):
    '''
    Fetch the news sources for a category from the News API and convert
    them to NewsSource objects.

    Returns None when the response contains no sources.
    '''
    request_url = base_url.format(category, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    sources = payload['sources']
    return process_news_source(sources) if sources else None
def process_news_source(news_source_list):
    '''
    Convert raw news-source dictionaries into NewsSource objects.

    Args:
        news_source_list: A list of dictionaries that contain sources details

    Returns:
        A list of NewsSource objects, one per input dictionary
    '''
    return [
        NewsSource(
            source.get('id'),
            source.get('name'),
            source.get('description'),
            source.get('url'),
            source.get('category'),
            source.get('country'),
        )
        for source in news_source_list
    ]
def get_articles(id):
    '''
    Fetch the articles of a news source from the News API and convert them
    to NewsArticle objects.

    Returns None when the response contains no articles.
    '''
    request_url = articles_url.format(id, api_key)
    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())
    articles = payload['articles']
    return process_news_article(articles) if articles else None
def process_news_article(news_article_list):
    '''
    Convert raw article dictionaries into NewsArticle objects.

    Articles without a 'urlToImage' value are skipped.
    '''
    articles = []
    for item in news_article_list:
        image = item.get('urlToImage')
        if not image:
            # skip articles that have no preview image
            continue
        articles.append(NewsArticle(
            item.get('id'),
            item.get('author'),
            item.get('title'),
            item.get('description'),
            item.get('url'),
            image,
            item.get('publishedAt'),
        ))
    return articles
11748,
2956,
297,
571,
13,
25927,
11,
17752,
198,
6738,
764,
27530,
1330,
3000,
7416,
11,
9980,
14906,
198,
6738,
4818,
8079,
1330,
3128,
198,
198,
2,
37210,
40391,
1994,
198,
15042,
62,
2539,
796,
6045,
198,
198,
2,
37210,
1705,
2779... | 2.965398 | 867 |
""" Code is generated by ucloud-model, DO NOT EDIT IT. """
import pytest
import logging
from ucloud.core import exc
from ucloud.testing import env, funcs, op, utest
logger = logging.getLogger(__name__)
scenario = utest.Scenario(207)
@pytest.mark.skipif(env.is_ut(), reason=env.get_skip_reason())
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeUDBType",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeUDBParamGroup",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeUDBInstancePrice",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CheckUDBInstanceAllowance",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CreateUDBInstance",
)
@scenario.step(
max_retries=50,
retry_interval=10,
startup_delay=30,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "State", "Running"),
],
action="DescribeUDBInstanceState",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=1,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="StopUDBInstance",
)
@scenario.step(
max_retries=20,
retry_interval=3,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
(
"str_eq",
"DataSet.0.Name",
funcs.concat(variables.get("DBName"), variables.get("DBTypeId")),
),
("str_eq", "DataSet.0.DBTypeId", variables.get("DBTypeId")),
("str_eq", "DataSet.0.State", "Shutoff"),
],
action="DescribeUDBInstance",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="DescribeUDBInstanceUpgradePrice",
)
@scenario.step(
max_retries=0,
retry_interval=0,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [("str_eq", "RetCode", 0)],
action="CheckUDBInstanceAllowance",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=2,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "ResizeUDBInstanceResponse"),
],
action="ResizeUDBInstance",
)
@scenario.step(
max_retries=100,
retry_interval=10,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
(
"str_eq",
"DataSet.0.Name",
funcs.concat(variables.get("DBName"), variables.get("DBTypeId")),
),
("str_eq", "DataSet.0.DBTypeId", variables.get("DBTypeId")),
("str_eq", "DataSet.0.State", "Running"),
("str_eq", "DataSet.0.MemoryLimit", variables.get("MemoryLimit") + 1),
("str_eq", "DataSet.0.DiskSpace", variables.get("DiskSpace") + 10),
],
action="DescribeUDBInstance",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "CheckUDBInstanceToHAAllowanceResponse"),
("str_eq", "Allowance", "Yes"),
],
action="CheckUDBInstanceToHAAllowance",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=10,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DescribePromoteToHAPriceResponse"),
],
action="DescribePromoteToHAPrice",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=0,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "PromoteUDBInstanceToHAResponse"),
],
action="PromoteUDBInstanceToHA",
)
@scenario.step(
max_retries=30,
retry_interval=10,
startup_delay=10,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DescribeUDBInstanceStateResponse"),
("str_eq", "State", "WaitForSwitch"),
],
action="DescribeUDBInstanceState",
)
@scenario.step(
max_retries=3,
retry_interval=10,
startup_delay=20,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "SwitchUDBInstanceToHAResponse"),
],
action="SwitchUDBInstanceToHA",
)
@scenario.step(
max_retries=30,
retry_interval=10,
startup_delay=60,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
(
"str_eq",
"DataSet.0.Name",
funcs.concat(variables.get("DBName"), variables.get("DBTypeId")),
),
("str_eq", "DataSet.0.VirtualIP", variables.get("VirtualIP")),
("str_eq", "DataSet.0.State", "Running"),
],
action="DescribeUDBInstance",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=60,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "StopUDBInstanceResponse"),
],
action="StopUDBInstance",
)
@scenario.step(
max_retries=6,
retry_interval=10,
startup_delay=10,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DescribeUDBInstanceResponse"),
("str_eq", "DataSet.0.State", "Shutoff"),
],
action="DescribeUDBInstance",
)
@scenario.step(
max_retries=3,
retry_interval=1,
startup_delay=5,
fast_fail=False,
validators=lambda variables: [
("str_eq", "RetCode", 0),
("str_eq", "Action", "DeleteUDBInstanceResponse"),
],
action="DeleteUDBInstance",
)
| [
37811,
6127,
318,
7560,
416,
334,
17721,
12,
19849,
11,
8410,
5626,
48483,
7283,
13,
37227,
198,
198,
11748,
12972,
9288,
198,
11748,
18931,
198,
198,
6738,
334,
17721,
13,
7295,
1330,
2859,
198,
6738,
334,
17721,
13,
33407,
1330,
17365... | 2.189033 | 2,936 |
#!/usr/bin/env python3
import argparse
from crfunctions import btcprice
from crfunctions import ltcprice
from crfunctions import ethprice
parser = argparse.ArgumentParser()
parser.add_argument('--btc', dest='btcprice', help='Displays the current price of Bitcoin.',default=False,
action='store_true')
parser.add_argument('--ltc', dest='ltcprice', help='Displays the current price of Litecoin.', default=False,
action='store_true')
parser.add_argument('--eth', dest='ethprice', help='Displays the current price of Ethereum.', default=False,
action='store_true')
args = parser.parse_args()
if args.btcprice:
btcprice()
elif args.ltcprice:
ltcprice()
elif args.ethprice:
ethprice()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
628,
198,
11748,
1822,
29572,
628,
198,
6738,
1067,
12543,
2733,
1330,
275,
23047,
20888,
198,
6738,
1067,
12543,
2733,
1330,
300,
23047,
20888,
198,
6738,
1067,
12543,
2733,
1330,
4555,
... | 2.669014 | 284 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
#!/usr/bin/python3
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198
] | 2.111111 | 9 |
# -*- coding:utf-8 -*_
"""
@author: Fang Wang
@date: 2017.02.25
@desc: mongodb client
"""
import pymongo
from pymongo import MongoClient
if __name__ == "__main__":
print insert_data({"a": 1})
print query_data({"a": 1})
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
62,
198,
198,
37811,
198,
31,
9800,
25,
24468,
15233,
198,
31,
4475,
25,
2177,
13,
2999,
13,
1495,
198,
31,
20147,
25,
285,
506,
375,
65,
5456,
198,
37811,
198,
11748,
279,
4948,
... | 2.397959 | 98 |
#!/usr/bin/env python
"""Simple Python script to generate iCalendar from the D&D content calendar.
This depends on BeautifulSoup and icalendar, both of which are easy_install-able."""
import urllib2
from BeautifulSoup import BeautifulSoup
from datetime import date, timedelta
from icalendar import Event, Calendar
base_url = "http://www.wizards.com/dnd/"
calendar_url = base_url + "calendar.aspx?Month=%0i&Year=%i"
first_month = (9,2009) # the first month with data
oneday = timedelta(days=1)
def event_from_row(row_soup, day):
"""Parse a CalendarEventRow into an iCalendar Event."""
ev = Event()
ev.add('dtstart', day)
ev.add('dtend', day+oneday)
insider = row_soup.find('img', {'class' : 'CalendarDnDIImage'}) is not None
prefix = ""
span = row_soup.find('span', {'class' : 'CalendarPrefix'})
if span is not None:
prefix += span.contents[0] + ' '
a = row_soup.find('a', {'class' : 'CalendarEvent'})
if a is not None:
url = base_url + a['href']
ev.add("url", url)
ev.add("description", url)
else:
a = row_soup.find('a', {'class' : 'CalendarEventNoLink'})
title = a.contents[0]
ev.add("summary", prefix+title)
return ev
def events_from_day(day_soup, month, year):
"""Generate a list of events from a CalendarCell element (a single day)."""
daydiv = day_soup.find('div', {'class' : 'CalendarDay'})
span = daydiv.find('span')
days = int(span.contents[0])
day = date(year, month, days)
event_rows = day_soup.findAll('div', {'class' : 'CalendarEventRow'})
events = []
for i,row in enumerate(event_rows):
try:
ev = event_from_row(row, day)
except:
print row
raise
else:
ev['uid'] = "%s-%i"%(str(day),i)
events.append(ev)
return events
def scrape_month(month,year):
"""Scrape the calendar page for a month, and return a list of all Events.
in that month."""
print "Scraping %02i/%i"%(month,year)
url = calendar_url%(month,year)
req = urllib2.urlopen(url)
if req.getcode() != 200:
raise "Failed to fetch, error %i"%req.getcode()
raw = req.read()
soup = BeautifulSoup(raw)
caldiv = soup.find('div', {'class':'CalendarContent'})
days = caldiv.findAll('div', {'class':'CalendarCell'})
events = []
for day in days:
events.extend(events_from_day(day, month, year))
return events
def generate_calendar(start=first_month, months=-1):
"""Generate an iCalendar containing all events on the D&D Content Calendar.
Default behavior will get all data ever, but it can be limited by the start
and months arguments."""
cal = Calendar()
cal.add('X-WR-CALNAME', 'D&D Content')
m,y = start
months = int(months)
while months != 0:
events = scrape_month(m,y)
months -= 1
if not events:
print "No events for %02i/%i"%(m,y)
break
for ev in events:
cal.add_component(ev)
m += 1
if m == 13:
m = 1
y += 1
return cal
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
outfile = sys.argv[1]
else:
outfile = None
cal = generate_calendar()
if outfile:
with open(outfile, 'w') as f:
f.write(cal.as_string())
else:
print cal.as_string()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
26437,
11361,
4226,
284,
7716,
1312,
9771,
9239,
422,
262,
360,
5,
35,
2695,
11845,
13,
198,
1212,
8338,
319,
23762,
50,
10486,
290,
220,
605,
9239,
11,
1111,
286,
543,
389,
2... | 2.276342 | 1,509 |
""" entry points to pastescript
"""
import sys
from .urldispatcher import URLDispatcher
def make_urldispatch_application(_, **settings):
""" paste.app_factory interface for URLDispatcher"""
patterns = [p.split("=", 1)
for p in settings['patterns'].split('\n')
if p]
application = URLDispatcher()
for pattern, app in patterns:
pattern = pattern.strip()
app = app.strip()
mod, obj = app.split(":", 1)
if mod not in sys.modules:
__import__(mod)
mod = sys.modules[mod]
obj = getattr(mod, obj)
application.add_url(app, pattern, obj)
return application
| [
37811,
5726,
2173,
284,
1613,
3798,
1968,
198,
37811,
198,
11748,
25064,
198,
6738,
764,
333,
335,
271,
8071,
2044,
1330,
10289,
7279,
8071,
2044,
628,
198,
4299,
787,
62,
333,
335,
8802,
963,
62,
31438,
28264,
11,
12429,
33692,
2599,
... | 2.337979 | 287 |
import torch
from torch.autograd import Variable
from ..torchkit import log_normal, SigmoidFlow
from .base_model import BaseModel
class FlowModel(BaseModel):
"""
Abstract class for normalizing flow model
"""
def compute_log_likelihood(self, x, weights, biases, extra_params,
detach=False, mask=None, regime=None):
"""
Return log-likelihood of the model for each example.
WARNING: This is really a joint distribution only if the DAGness constraint on the mask is satisfied.
Otherwise the joint does not integrate to one.
:param x: (batch_size, num_vars)
:param weights: list of tensor that are coherent with self.weights
:param biases: list of tensor that are coherent with self.biases
:param mask: tensor, shape=(batch_size, num_vars)
:param regime: np.ndarray, shape=(batch_size,)
:return: (batch_size, num_vars) log-likelihoods
"""
density_params = self.forward_given_params(x, weights, biases, mask, regime)
return self._log_likelihood(x, density_params)
| [
11748,
28034,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
6738,
11485,
13165,
354,
15813,
1330,
2604,
62,
11265,
11,
311,
17225,
1868,
37535,
198,
6738,
764,
8692,
62,
19849,
1330,
7308,
17633,
628,
198,
4871,
27782,
17633,
... | 2.658768 | 422 |
from enum import Enum, auto
| [
6738,
33829,
1330,
2039,
388,
11,
8295,
201,
198
] | 3.222222 | 9 |
from rest_framework import serializers
from rest_framework.fields import ReadOnlyField
from klazor.models import *
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
1334,
62,
30604,
13,
25747,
1330,
4149,
10049,
15878,
198,
198,
6738,
479,
75,
17725,
13,
27530,
1330,
1635,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628
] | 3.512821 | 39 |
import logging
import cv2
import numpy
import pytesseract
import requests
from config import DEAN_URL, STUDENT_ID, PASSWORD
DEAN_INDEX_PATH = "service/login.html"
POST_LOGIN_FORM_PATH = "vatuu/UserLoginAction"
POST_USER_LOADING_PATH = "vatuu/UserLoadingAction"
GET_CAPTCHA_IMG_PATH = "vatuu/GetRandomNumberToJPEG"
| [
11748,
18931,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
198,
11748,
12972,
83,
408,
263,
529,
198,
11748,
7007,
198,
198,
6738,
4566,
1330,
5550,
1565,
62,
21886,
11,
49348,
3525,
62,
2389,
11,
41752,
54,
12532,
198,
198,
... | 2.636364 | 121 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
class Zigzag:
"""
The Zigzag Cipher (Rail-Fence)
"""
def encrypt(self, text, key, alphabet=None):
"""
Encryption method
:param text: Text to encrypt
:param key: Encryption key
:param alphabet: unused
:type text: string
:type key: integer
:type alphabet: string
:return: text
:rtype: string
"""
if key <= 0:
return
key0 = key - 1
step = key0 << 1
# the first row
crypted = [text[::step]]
# next rows
textlen = len(text)
for row in range(1, key0):
right = step - row
for left in range(row, textlen, step):
crypted.append(text[left])
if right < textlen:
crypted.append(text[right])
right += step
# the last row
crypted.append(text[key0::step])
return "".join(crypted)
def decrypt(self, text, key, alphabet=None):
"""
Decryption method
:param text: Text to decrypt
:param key: Decryption key
:param alphabet: unused
:type text: string
:type key: integer
:type alphabet: string
:return: text
:rtype: string
"""
step = (key - 1) << 1
textlen = len(text)
decrypted = [None] * textlen
# first row
i = 0
for left in range(0, textlen, step):
decrypted[left] = text[i]
i += 1
# next rows
for row in range(1, key):
for left in range(row, textlen, step):
decrypted[left] = text[i]
i += 1
right = left + step - (row << 1)
if right < textlen and right != left:
decrypted[right] = text[i]
i += 1
return "".join(decrypted)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
628,
198,
4871,
24992,
50183,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
383,
24992,
50183,
44334,
357,
44631,
12,
37,
59... | 1.930118 | 1,016 |
__version__="0.5.3"
| [
834,
9641,
834,
2625,
15,
13,
20,
13,
18,
1,
198
] | 1.818182 | 11 |
from ..common import *
"""
### 使用者自定义视图模板并为此模板编辑逻辑的步骤:【后期补全】
"""
###
"""
### 路由默认写入根urls.py文件,也可用新的别名替代第一个名字(不推荐这么做,因为随之改动的地方会很多,除非你很熟悉Django,否则不建议更改urls.py在别名中的位置)。
### 存在隐患,在用户创建路由时应当判断路由明是否冲突,反向名称是否冲突。这将在未来版本中修复。
### urlpatterns参数必须要有一个空行,否则会错误处理。这将在未来版本中修复。
"""
LABEL_COL_LEN = 200
| [
6738,
11485,
11321,
1330,
1635,
198,
198,
37811,
198,
21017,
220,
45635,
18796,
101,
38519,
164,
229,
103,
22522,
248,
20046,
231,
164,
100,
228,
32368,
122,
162,
101,
94,
30266,
123,
33176,
114,
10310,
118,
29826,
97,
162,
101,
94,
3... | 0.701923 | 416 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'LexusLee'
"""
构造枚举类
"""
class Enum:
"""
枚举类
"""
if __name__ == '__main__':
print 'Test begin.'
print 'Test end.' | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
834,
9800,
834,
796,
705,
45117,
385,
24338,
6,
198,
37811,
198,
162,
252,
226,
34460,
254,
162,
252,
248,
10310,
122,
1... | 1.831776 | 107 |
# encoding: utf-8
"""
@author: Chong Li
@time: 2021/06/25 16:35
@desc:
"""
import torch
import numpy as np
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
31,
9800,
25,
46892,
7455,
198,
31,
2435,
25,
33448,
14,
3312,
14,
1495,
1467,
25,
2327,
198,
31,
20147,
25,
198,
37811,
198,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
... | 2.477273 | 44 |
'''
08 - Plotting time-series: putting it all together
In this exercise, you will plot two time-series with different scales on the
same Axes, and annotate the data from one of these series.
The CO2/temperatures data is provided as a DataFrame called climate_change. You
should also use the function that we have defined before, called plot_timeseries,
which takes an Axes object (as the axes argument) plots a time-series (provided as
x and y arguments), sets the labels for the x-axis and y-axis and sets the color for
the data, and for the y tick/axis labels:
`plot_timeseries(axes, x, y, color, xlabel, ylabel)`
Then, you will annotate with text an important time-point in the data: on 2015-10-06,
when the temperature first rose to above 1 degree over the average.
Instructions:
- Use the plot_timeseries function to plot CO2 levels against time. Set xlabel to "Time
(years)" ylabel to "CO2 levels" and color to 'blue'.
- Create ax2, as a twin of the first Axes.
- In ax2, plot temperature against time, setting the color ylabel to "Relative temp (Celsius)" and color to 'red'.
- Annotate the data using the ax2.annotate method. Place the text ">1 degree" in x=pd.Timestamp('2008-10-06'),
y=-0.2 pointing with a gray thin arrow to x=pd.Timestamp('2015-10-06'), y = 1.
'''
fig, ax = plt.subplots()
# Plot the CO2 levels time-series in blue
plot_timeseries(ax, climate_change.index, climate_change["co2"], 'blue', "Time (years)", "CO2 levels")
# Create an Axes object that shares the x-axis
ax2 = ax.twinx()
# Plot the relative temperature data in red
plot_timeseries(ax2, climate_change.index, climate_change['relative_temp'], 'red', "Time (years)", "Relative temp (Celsius)")
# Annotate the point with relative temperature >1 degree
ax2.annotate(">1 degree", xy=(pd.Timestamp('2015-10-06'), 1), xytext=(pd.Timestamp('2008-10-06'), -0.2), arrowprops={'arrowstyle':'->', 'color':'gray'})
plt.show()
| [
7061,
6,
198,
2919,
532,
28114,
889,
640,
12,
25076,
25,
5137,
340,
477,
1978,
198,
198,
818,
428,
5517,
11,
345,
481,
7110,
734,
640,
12,
25076,
351,
1180,
16252,
319,
262,
220,
198,
31642,
12176,
274,
11,
290,
24708,
378,
262,
1... | 3.173486 | 611 |
# Initialization file contains the server settings and config options. It also
# builds the app and initializes the server.
import os
import secrets
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
secret_key = secrets.token_hex(16)
app = Flask(__name__)
app.config['SECRET_KEY'] = secret_key
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('EMAIL_USER')
app.config['MAIL_PASSWORD'] = os.environ.get('EMAIL_PASS')
mail = Mail(app)
app.config['PDF_FILE_DUMP'] = "projectCode/static/pdf_gen/"
from projectCode import routes
| [
2,
20768,
1634,
2393,
4909,
262,
4382,
6460,
290,
4566,
3689,
13,
632,
635,
198,
2,
12188,
262,
598,
290,
4238,
4340,
262,
4382,
13,
198,
198,
11748,
28686,
198,
11748,
13141,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
25410... | 2.93921 | 329 |
"""
_/_/_/_/_/_/_/_/
_/ ver 1.0.2 _/
_/_/_/_/_/_/_/_/
"""
import asyncio
import discord
from discord.ext import commands
import re
import sys
import os
# TODO:os.environ['DISCORD_BOT_TOKEN']
TOKEN = os.environ['DISCORD_BOT_TOKEN']
# TODO:コマンド変更時
bot = commands.Bot(command_prefix='!')
Emcompre = '!'
# TODO:バージョン変更時
botver = '1.0.2'
flag = True
b_count = 0
o_flag = 0
m_count = 0
# 募集時メンバーリスト
MEMBER_LIST = []
# MEMBER_LIST結果出力用
MEMBER_DIS = []
# UNICODE
ONE = '\N{Large Red Circle}'
TWO = '\N{Large Blue Circle}'
THREE = '\N{Large Yellow Circle}'
FOUR = '\N{Large Green Circle}'
FIVE = '\N{Large Orange Circle}'
SIX = '\N{Large Purple Circle}'
SEVEN = '\N{Large Brown Circle}'
EIGHT = '\N{Medium Black Circle}'
NINE = '\N{Medium White Circle}'
CANCEL = '\N{SQUARED CL}'
ERROR = '\N{WARNING SIGN}'
# リアクションリスト
REACTION_LIST = [
ONE, TWO, THREE, FOUR, FIVE,
SIX, SEVEN, EIGHT, NINE]
# help_Embed
help = discord.Embed(
title='募集用bot 「@bot_chan」の使い方',
description='募集したい内容を、人数を設定して募集をかけることが出きるbotです。\n'
'各コマンドの使い方は以下を御参照ください。\n',
color=discord.Color.green())
# help ?at使い方
help.add_field(
name=':loudspeaker: 各コマンドの使い方\n',
value=':pushpin:***募集を募るコマンド***\n'
' 募集の際に使うこのbotの基本となるコマンド\n'
'\n'
' ***記述方法***\n'
' **' + Emcompre + 'at 「募集要項」 「人数」**\n'
'\n'
' ※各要素に必ず半角スペースを1つ設けてください。\n'
' ※鍵かっこをつける必要はありません。\n'
' ※合計9人まで募集をかけられます。\n'
' ※それぞれの参加ボタンが押された時点で募集を終了します。\n'
'\n'
':pushpin:***バグ対応用コマンド***\n'
' コマンド実行時などにバグが発生した際に一時的な対策として使うコマンド\n'
'\n'
' ***記述方法***\n'
' **' + Emcompre + 'atre**\n',
inline=False)
# help リアクションについて
help.add_field(
name=':loudspeaker: リアクションについて\n',
value='このbotではリアクションを用いて\n'
'__参加ボタン__を(例 :red_circle:)\n'
'__募集中止ボタン__を(:cl:)として扱っています。\n'
'\n'
':pushpin:参加ボタンについて\n'
' 人数に応じてボタンが追加されます。\n'
' 募集者や一度リアクションした人はボタンを押せなくなります。\n'
'\n'
':pushpin:募集中止ボタンについて\n'
' 募集中止ボタンは押した時点で__募集を取り消す__ことができます。\n')
# help developper info
help.set_footer(
text='made by Farrule\n'
'@bot_chan verstion: @bot_chan ' + botver,
icon_url='https://cdn.discordapp.com/'
'attachments/865123798813900820/865258524971106314/Farrule_logo2.jfif')
# update_Embed
# TODO: バージョンアップ時変更
update = discord.Embed(
title='アップデート内容',
color=discord.Color.red()
)
update.add_field(
name=':wrench: ver' + botver + 'アップデート\n',
value='プログラムの根幹部分を最適化\n'
'プログラムの修正を簡易化\n'
'!upコマンドの追加\n'
'!helpコマンドのコマンド名をを!hpに変更\n'
)
update.set_footer(
text='date 25, 7, 2021'
)
@bot.event # ? 起動時処理
# ? up コマンド入力処理
@bot.command()
# ? help コマンド入力時処理
@bot.command()
# ? atre コマンド入力時処理
@bot.command()
# ? at コマンド入力時処理
@bot.command()
# ? リアクションボタン メンバーリスト追加処理
@bot.event
# ? 各リアクション処理
@bot.event
bot.run(TOKEN)
| [
37811,
198,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
171,
120,
123,
14,
198,
171,
120,
123,
14,
220,
220,
220,
3326,
352,
13,
15,
13... | 1.398406 | 2,008 |
print(sum(map(max, zip(map(int, input().split()), map(int, input().split())))))
| [
4798,
7,
16345,
7,
8899,
7,
9806,
11,
19974,
7,
8899,
7,
600,
11,
5128,
22446,
35312,
3419,
828,
3975,
7,
600,
11,
5128,
22446,
35312,
3419,
4008,
22305,
201,
198
] | 2.612903 | 31 |