content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from time import sleep
from blinkt import *
from pythonosc import osc_message_builder
from pythonosc import udp_client
sender = udp_client.SimpleUDPClient('127.0.0.1', 4559)
set_all(255,0,0)
show()
while True:
sender.send_message('/play_this', 60)
sleep(.1)
set_brightness(1)
for i in range(5):
set_brightness((.6-i/10))
show()
sleep(.1)
| [
6738,
640,
1330,
3993,
198,
6738,
21019,
83,
1330,
1635,
198,
6738,
21015,
17500,
1330,
267,
1416,
62,
20500,
62,
38272,
198,
6738,
21015,
17500,
1330,
334,
26059,
62,
16366,
198,
198,
82,
2194,
796,
334,
26059,
62,
16366,
13,
26437,
... | 2.283133 | 166 |
from unittest import TestCase
from unittest.mock import patch, MagicMock
from word_vectorizer.models.model_data import ModelData
from word_vectorizer.models.model_data_loader import ModelDataLoader
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
6139,
44,
735,
198,
198,
6738,
1573,
62,
31364,
7509,
13,
27530,
13,
19849,
62,
7890,
1330,
9104,
6601,
198,
6738,
1573,
62,
31364,
7509,... | 3.571429 | 56 |
import tensorflow as tf
import numpy as np
from tommy2tommy.layers import attention
if __name__ == "__main__":
tf.test.main()
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
16667,
1820,
17,
39532,
1820,
13,
75,
6962,
1330,
3241,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
2... | 2.74 | 50 |
import streamlit as st
import math
import itertools as it
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from dataframe import df
def data_processing():
"""
Страница предварительного анализа исходных данных.
"""
st.title('Предварительный анализ')
st.write(df.head(10))
# Отрисовка гистограмм столбцов исходных данных
rows = math.ceil(len(df.columns) / 2)
cols = 2
fig, ax = plt.subplots(rows, cols, figsize=(4, 1.5*rows))
fig.tight_layout(h_pad=2.5)
coord_pairs = it.product(range(rows), range(cols))
for i, coords in enumerate(coord_pairs):
r, c = coords
ax[r, c].xaxis.set_visible(True)
for tick in ax[r, c].get_xticklabels():
tick.set_rotation(25)
sns.histplot(data=df, x=df.columns[i], ax=ax[r, c])
if i + 1 == len(df.columns):
coords = next(coord_pairs, None)
if coords:
r, c = coords
ax[r, c].axis('off')
break
st.pyplot(fig)
| [
11748,
4269,
18250,
355,
336,
198,
198,
11748,
10688,
198,
11748,
340,
861,
10141,
355,
340,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
384,
397,
1211,
355,
301... | 1.758224 | 608 |
import os
import yaml
import typer
import requests
from pathlib import Path
from yaspin import yaspin
import subprocess
from .config import config
from .server import Server
from .utils import get_file_sha1, init_storage, cleanup, pretty_error
app = typer.Typer()
@app.command("prepare")
if __name__ == "__main__":
app()
| [
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
1259,
525,
198,
11748,
7007,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
331,
5126,
259,
1330,
331,
5126,
259,
198,
11748,
850,
14681,
198,
198,
6738,
764,
11250,
1330,
4566,
198,
673... | 3.121495 | 107 |
'''
Created on 10.08.2014
@author: Philip Peter <philip.peter@justgeek.de>
As long as you retain this notice you can do whatever you want with this stuff.
If we meet some day, and you think this stuff is worth it, you can buy me a
beer in return
Philip Peter
'''
class IMDbQuery(object):
'''
A generic class for IMDb queries
Actual queries need to be derieved from this
'''
outputDir = "..\\data\\"
dataFileName = "generic.dat"
errorFileName = 'errors.txt'
debugEnabled = False
statList = []
query = """
SELECT
title.id as id,
title.production_year as year,
FROM
title
WHERE
title.kind_id in (1,3,4) AND
title.id NOT IN (SELECT movie_id FROM movie_info WHERE info_type_id = 3 AND info in ('Adult','Erotica','Reality-TV','News','Talk-Show','Game-Show'))
ORDER by year ASC
"""
def __init__(self):
'''
All common stuff
'''
self.conversionErrors = 0
def writeDataHeader(self):
'''
Lists the stat names in order at the beginning of the data file
'''
self.dataFile.write('# year')
for entry in self.statList:
self.dataFile.write(', ' + entry)
self.dataFile.write("\n\n")
self.dataFile.flush()
def debugMessage(self, debugMessage):
'''
Small wrapper to output debug messages
'''
if self.debugEnabled:
self.errorFile.write(debugMessage)
self.errorFile.write("\n")
self.errorFile.flush()
def convertRuntime(self, runtime):
'''
Parse most runtime formats
'''
originalTime = runtime
runtime.strip()
if runtime[0].isalpha():
# most likely first is name like USA:length
runtime = runtime.split(':', 1)
runtime = runtime[1]
runtime = runtime.replace(':', '.')
runtime = runtime.replace(',', '.')
runtime = runtime.replace("'", '.')
runtime = runtime.replace('"', '')
try:
result = float(runtime)
return result
except ValueError:
#Not a valid int
result = 0
self.conversionErrors += 1
self.debugMessage("! Runtime conversion error: " + str(originalTime) + "\t->\t" + str(runtime))
return 0
def endOfYear(self, year):
'''
Called once per year, write data to file
'''
results = {}
year = int(year)
for entry in self.statList:
results[entry] = 0
results = self.tally(results)
if year >= 2014 or year == '0':
''''Skip years after 2013, incomplete data'''
return
self.dataFile.write(str(year))
for entry in self.statList:
self.dataFile.write("\t" + str(round(results[entry], 2)))
self.dataFile.write("\n")
self.dataFile.flush()
if (year % 10) == 0:
print "\nProcessed year " + str(year),
else:
print '.',
def startQuery(self, cursor):
'''
Main query, does everything
'''
lastYear = '0'
self.dataFile = open(self.outputDir + self.dataFileName, 'w')
if(self.debugEnabled):
self.errorFile = open(self.outputDir + self.errorFileName, 'w')
self.writeDataHeader()
cursor.execute(self.query)
print "\tQuery executed"
row = cursor.fetchone()
while row is not None:
year = row['year']
if (year == None) or (year == 'None'):
year = '0';
year = str(year)
if year != '0':
if(year != lastYear):
self.endOfYear(lastYear);
lastYear = year
self.addRow(row)
row = cursor.fetchone()
self.closeFiles()
def addRow(self, row):
'''
Needs to be implemented in subclass
Process one individual row
'''
def tally(self, resultDict):
'''
Needs to be implemented in subclass
Called at the end of each year, returns dict with all stats
'''
return resultDict
class GenericSumQuery(IMDbQuery):
'''
Outputs just a sum add the end of everything
'''
def __init__(self):
'''
Constructor
'''
super(GenericSumQuery,self).__init__()
self.results = {}
for entry in self.statList:
self.results[entry] = 0
def writeDataHeader(self):
'''
Not necessary
'''
def endOfYear(self, year):
'''
Called once per year, doesn't do much
'''
self.results = self.tally(self.results)
if year >= 2014 or year == '0':
''''Skip years after 2013, incomplete data'''
return
if (year % 10) == 0:
print "\nProcessed year " + str(year),
else:
print '.',
def startQuery(self, cursor):
'''
Main query, does everything
'''
lastYear = '0'
self.dataFile = open(self.outputDir + self.dataFileName, 'w')
if(self.debugEnabled):
self.errorFile = open(self.outputDir + self.errorFileName, 'w')
self.writeDataHeader()
cursor.execute(self.query)
print "\tQuery executed"
row = cursor.fetchone()
while row is not None:
year = row['year']
if (year == None) or (year == 'None'):
year = '0';
year = str(year)
if year != '0':
if(year != lastYear):
self.endOfYear(lastYear);
lastYear = year
self.addRow(row)
row = cursor.fetchone()
self.writeResults(self.results)
self.closeFiles() | [
7061,
6,
198,
41972,
319,
838,
13,
2919,
13,
4967,
198,
198,
31,
9800,
25,
14576,
5613,
1279,
28864,
541,
13,
79,
2357,
31,
3137,
469,
988,
13,
2934,
29,
198,
198,
1722,
890,
355,
345,
12377,
428,
4003,
345,
460,
466,
4232,
345,
... | 2.386415 | 2,223 |
__version__ = '6.9.2'
| [
834,
9641,
834,
796,
705,
21,
13,
24,
13,
17,
6,
198
] | 1.833333 | 12 |
import sys
import time
import configparser
from datetime import datetime
from volt_meter import VoltMeter
from sense_client import SenseClient
from solar_edge_client import SolarEdgeClient
daily_power_meter = VoltMeter(25, .03, 2.35)
power_consumed_meter = VoltMeter(23, .03, 2.25)
solar_output_meter = VoltMeter(17, .03, 2.25)
config = configparser.ConfigParser()
config.read('config.txt')
sense_email = config['DEFAULT']['sense_email']
sense_password = config['DEFAULT']['sense_password']
solar_edge_api_key = config['DEFAULT']['solar_edge_api_key']
sense_client = SenseClient(sense_email, sense_password)
solar_edge_client = SolarEdgeClient(solar_edge_api_key)
if __name__ == '__main__':
try:
main_loop()
except KeyboardInterrupt:
print ('\nExiting by user request.\n')
sys.exit(0)
| [
11748,
25064,
198,
11748,
640,
198,
11748,
4566,
48610,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
13161,
62,
27231,
1330,
22702,
44,
2357,
198,
6738,
2565,
62,
16366,
1330,
24956,
11792,
198,
6738,
6591,
62,
14907,
62,
16366,
... | 2.767677 | 297 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/system_parameter.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/system_parameter.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\024SystemParameterProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
serialized_pb=b'\n!google/api/system_parameter.proto\x12\ngoogle.api"B\n\x10SystemParameters\x12.\n\x05rules\x18\x01 \x03(\x0b\x32\x1f.google.api.SystemParameterRule"X\n\x13SystemParameterRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12/\n\nparameters\x18\x02 \x03(\x0b\x32\x1b.google.api.SystemParameter"Q\n\x0fSystemParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bhttp_header\x18\x02 \x01(\t\x12\x1b\n\x13url_query_parameter\x18\x03 \x01(\tBv\n\x0e\x63om.google.apiB\x14SystemParameterProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_SYSTEMPARAMETERS = _descriptor.Descriptor(
name="SystemParameters",
full_name="google.api.SystemParameters",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="rules",
full_name="google.api.SystemParameters.rules",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=49,
serialized_end=115,
)
_SYSTEMPARAMETERRULE = _descriptor.Descriptor(
name="SystemParameterRule",
full_name="google.api.SystemParameterRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="selector",
full_name="google.api.SystemParameterRule.selector",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="parameters",
full_name="google.api.SystemParameterRule.parameters",
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=117,
serialized_end=205,
)
_SYSTEMPARAMETER = _descriptor.Descriptor(
name="SystemParameter",
full_name="google.api.SystemParameter",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.api.SystemParameter.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="http_header",
full_name="google.api.SystemParameter.http_header",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="url_query_parameter",
full_name="google.api.SystemParameter.url_query_parameter",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=207,
serialized_end=288,
)
_SYSTEMPARAMETERS.fields_by_name["rules"].message_type = _SYSTEMPARAMETERRULE
_SYSTEMPARAMETERRULE.fields_by_name["parameters"].message_type = _SYSTEMPARAMETER
DESCRIPTOR.message_types_by_name["SystemParameters"] = _SYSTEMPARAMETERS
DESCRIPTOR.message_types_by_name["SystemParameterRule"] = _SYSTEMPARAMETERRULE
DESCRIPTOR.message_types_by_name["SystemParameter"] = _SYSTEMPARAMETER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SystemParameters = _reflection.GeneratedProtocolMessageType(
"SystemParameters",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETERS,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameters)
},
)
_sym_db.RegisterMessage(SystemParameters)
SystemParameterRule = _reflection.GeneratedProtocolMessageType(
"SystemParameterRule",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETERRULE,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameterRule)
},
)
_sym_db.RegisterMessage(SystemParameterRule)
SystemParameter = _reflection.GeneratedProtocolMessageType(
"SystemParameter",
(_message.Message,),
{
"DESCRIPTOR": _SYSTEMPARAMETER,
"__module__": "google.api.system_parameter_pb2"
# @@protoc_insertion_point(class_scope:google.api.SystemParameter)
},
)
_sym_db.RegisterMessage(SystemParameter)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,
15069,
12131,
3012,
11419,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
... | 2.049015 | 4,060 |
# grid relative
from .aws import *
from .azure import *
from .gcp import *
from .provider import *
| [
2,
10706,
3585,
198,
6738,
764,
8356,
1330,
1635,
198,
6738,
764,
1031,
495,
1330,
1635,
198,
6738,
764,
70,
13155,
1330,
1635,
198,
6738,
764,
15234,
1304,
1330,
1635,
198
] | 3.193548 | 31 |
from torch import nn
from lib.layers.Replicate_unit import Replicate_unit1d
from lib.layers.FC_LSTM_unit import FC_lstm_stacked
from lib.layers.Conv_LSTM_unit import CONV_lstm_unit
import random
import torch.nn.functional as F
import torch
from pysc2.lib import actions
import numpy as np
if __name__ == "__main__":
# basic sanity test
# feature_fc = torch.randn((1, 63))
# feature_screen = torch.randn((1, 8, 84, 84))
# feature_minimap = torch.randn((1, 3, 64, 64))
# model = scNet()
# out_fc, out_screen, out_minimap = model(feature_fc, feature_screen, feature_minimap)
#
# print(out_fc.size(), out_screen.size(), out_minimap.size())
# # cuda test
# feature_fc = torch.randn((1, 63)).cuda()
# feature_screen = torch.randn((1, 8, 84, 84)).cuda()
# feature_minimap = torch.randn((1, 3, 64, 64)).cuda()
# model = scNet()
# model.cuda()
# model_output = model(feature_fc, feature_screen, feature_minimap)
# scNetOutput2candidateAction(model_output)
# robust interation test
model = scNet()
model.cuda()
for s in range(1000):
feature_fc = torch.randn((1, 63)).cuda()
feature_screen = torch.randn((1, 8, 84, 84)).cuda()
feature_minimap = torch.randn((1, 3, 64, 64)).cuda()
model = scNet()
model.cuda()
out_fc, out_screen, out_minimap = model(feature_fc, feature_screen, feature_minimap)
model.dump_hidden(model.get_hidden())
print(out_fc.size(), out_screen.size(), out_minimap.size())
| [
6738,
28034,
1330,
299,
77,
198,
6738,
9195,
13,
75,
6962,
13,
39232,
5344,
62,
20850,
1330,
18407,
5344,
62,
20850,
16,
67,
198,
6738,
9195,
13,
75,
6962,
13,
4851,
62,
43,
2257,
44,
62,
20850,
1330,
10029,
62,
75,
301,
76,
62,
... | 2.391236 | 639 |
import tensorflow as tf
import sonnet as snt
from tensorflow_probability import distributions
from crazycar.algos_tf.common import make_mlp
from crazycar.algos_tf.base import BaseModel, BaseNetwork
EPS = 1e-16
class Actor(BaseNetwork):
"""
Actor for DDPG
Args:
encoder: class from crazycar.encoder
act_dim: number of action
hiddens: NO. units for each layers
"""
@tf.function
@tf.function
class Critic(BaseNetwork):
"""
Double Q for DDPG
Args:
encoder: class from crazycar.encoder
act_dim: number of action
hiddens: NO. units for each layers
"""
@tf.function
class SAC(BaseModel):
"""
Soft Actor-Critic
Args:
...
"""
@tf.function
def actor_loss(self, batch):
"""
L(s) = -E[Q(s, a)| a~u(s)]
Where,
Q is a soft-Q: Q - alpha * log_prob
"""
act, log_prob = self.actor.sample(batch['obs'])
q1, q2 = self.critic(batch['obs'], act)
min_q = tf.minimum(q1, q2)
loss = tf.reduce_mean(tf.exp(self.log_alpha) * log_prob - min_q)
return loss
@tf.function
def critic_v_loss(self, batch):
"""
...
"""
v1, v2 = self.critic_v(batch['obs'])
v = tf.minimum(v1, v2)
act, log_prob = self.actor.sample(batch['obs'])
q1, q2 = self.critic(batch['obs'], act)
min_q = tf.minimum(q1, q2)
target_v = tf.stop_gradient(min_q - tf.exp(self.log_alpha) * log_prob)
td_v = tf.reduce_mean((target_v - v) ** 2)
# print(batch)
return td_v
@tf.function
def critic_loss(self, batch):
"""
L(s, a) = (y - Q(s,a))^2
Where,
Q is a soft-Q: Q - alpha * log_prob
y(s, a) = r(s, a) + (1 - done) * gamma * Q'(s', a'); a' ~ u'(s')
"""
q1, q2 = self.critic(batch['obs'], batch['act'])
next_v_target1, next_v_target2 = self.critic_v(batch['next_obs'])
next_v_target = tf.minimum(next_v_target1, next_v_target2)
target_q = tf.stop_gradient(
batch['rew'] + (1 - batch['done']) * self.gamma * next_v_target
)
td_q1 = tf.reduce_mean((target_q - q1) ** 2)
td_q2 = tf.reduce_mean((target_q - q2) ** 2)
return td_q1 + td_q2
@tf.function
def alpha_loss(self, batch):
"""
L = -(alpha * log_prob + target_entropy)
"""
act, log_prob = self.actor.sample(batch['obs'])
# print(act, log_prob)
loss = -tf.reduce_mean(self.log_alpha * tf.stop_gradient(log_prob + self.target_entropy))
return loss
@tf.function
@tf.function
# @tf.function
if __name__ == "__main__":
from crazycar.algos_tf.encoder import Sensor
from crazycar.utils import set_seed
from crazycar.agents.constants import SENSOR_SHAPE, CAMERA_SHAPE
set_seed()
agent = SAC(Sensor, 5)
tmp = {
"sensor": tf.ones((1, ) + SENSOR_SHAPE),
"image": tf.ones((1, ) + CAMERA_SHAPE)
}
print(agent.predict(tmp))
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
3367,
3262,
355,
264,
429,
198,
198,
6738,
11192,
273,
11125,
62,
1676,
65,
1799,
1330,
24570,
198,
198,
6738,
7165,
7718,
13,
14016,
418,
62,
27110,
13,
11321,
1330,
787,
62,
4029,
79... | 2.029335 | 1,534 |
import random
import os
import struct
from functools import partial
MAX_U64 = 1 << 64
shuffle = partial(
random.shuffle, random=lambda: struct.unpack("Q", os.urandom(8))[0] / MAX_U64
)
| [
11748,
4738,
198,
11748,
28686,
198,
11748,
2878,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
22921,
62,
52,
2414,
796,
352,
9959,
5598,
198,
198,
1477,
18137,
796,
13027,
7,
198,
220,
220,
220,
4738,
13,
1477,
18137,
11,
4738... | 2.728571 | 70 |
import os
from contextlib import contextmanager
from shutil import rmtree
from tempfile import mkdtemp
from galaxy.dependencies import ConditionalDependencies
AZURE_BLOB_TEST_CONFIG = """<object_store type="azure_blob">
blah...
</object_store>
"""
AZURE_BLOB_TEST_CONFIG_YAML = """
type: azure_blob
other_attributes: blah
"""
DISTRIBUTED_WITH_AZURE_CONFIG_YAML = """
type: distributed
backends:
- id: files1
type: azure_blob
"""
FILES_SOURCES_DROPBOX = """
- type: webdav
- type: dropbox
"""
JOB_CONF_YAML = """
runners:
runner1:
load: job_runner_A
"""
VAULT_CONF_CUSTOS = """
type: custos
"""
VAULT_CONF_HASHICORP = """
type: hashicorp
"""
@contextmanager
| [
11748,
28686,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
4423,
346,
1330,
374,
16762,
631,
198,
6738,
20218,
7753,
1330,
33480,
67,
29510,
198,
198,
6738,
16161,
13,
45841,
3976,
1330,
9724,
1859,
35,
2690,
3976,
198,
198,
227... | 2.469534 | 279 |
# Imports needed for evaluating expected result.
from datetime import datetime, date, timedelta
from decimal import Decimal
from robot.api.deco import keyword
from robot.utils import unicode
@keyword(types=['Integer']) # type always is given as str
@keyword(types=[u'INT']) # type given as unicode on Python 2
@keyword(types={'argument': 'lOnG'}) # type always given as str
@keyword(types={u'argument': u'Float'}) # type given as unicode on Python 2
@keyword(types=['Double'])
@keyword(types=['DECIMAL'])
@keyword(types=['Boolean'])
@keyword(types=['Bool'])
@keyword(types=['String'])
@keyword(types=['BYTES'])
@keyword(types=['ByteArray'])
@keyword(types=['DateTime'])
@keyword(types=['Date'])
@keyword(types=['TimeDelta'])
@keyword(types=['List'])
@keyword(types=['TUPLE'])
@keyword(types=['Dictionary'])
@keyword(types=['Dict'])
@keyword(types=['Map'])
@keyword(types=['Set'])
@keyword(types=['FrozenSet'])
| [
2,
1846,
3742,
2622,
329,
22232,
2938,
1255,
13,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
11,
28805,
12514,
198,
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
9379,
13,
15042,
13,
12501,
78,
1330,
21179,
198,
6738,
9379,
13... | 2.502513 | 398 |
import torch
from . import misc
| [
11748,
28034,
198,
198,
6738,
764,
1330,
12747,
628
] | 3.777778 | 9 |
from django.urls import reverse
from common.utils.tests import TestCaseUtils
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
2219,
13,
26791,
13,
41989,
1330,
6208,
20448,
18274,
4487,
628
] | 3.590909 | 22 |
import os
from pathlib import Path
import pytest
import snakemake
@pytest.mark.skipif(
os.environ.get("GITHUB_ACTIONS", "false") == "true", reason="Broken on GitHub Actions"
)
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
12972,
9288,
198,
11748,
17522,
15883,
628,
198,
31,
9078,
9288,
13,
4102,
13,
48267,
361,
7,
198,
220,
220,
220,
28686,
13,
268,
2268,
13,
1136,
7203,
38,
10554,
1052... | 2.859375 | 64 |
from typing import Union
import numpy as np
| [
6738,
19720,
1330,
4479,
198,
11748,
299,
32152,
355,
45941,
198
] | 4 | 11 |
# -*- coding:utf-8 -*-
# @Time:2020/6/21 16:22
# @Author:TimVan
# @File:20. Valid Parentheses.py
# @Software:PyCharm
# 20. Valid Parentheses.py
# Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
# determine if the input string is valid.
# An input string is valid if:
# Open brackets must be closed by the same type of brackets.
# Open brackets must be closed in the correct order.
# Note that an empty string is also considered valid.
#
# Example 1:
# Input: "()"
# Output: true
#
# Example 2:
# Input: "()[]{}"
# Output: true
#
# Example 3:
# Input: "(]"
# Output: false
#
# Example 4:
# Input: "([)]"
# Output: false
#
# Example 5:
# Input: "{[]}"
# Output: true
solution = Solution()
inputStrArr = [
"{[]}()[]{}"
, ""
, "})"
, "{()}[])"
, "([)]"
, "{[]}()["
]
for one in inputStrArr:
print(solution.isValid(one))
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
25,
42334,
14,
21,
14,
2481,
1467,
25,
1828,
198,
2,
2488,
13838,
25,
14967,
25298,
198,
2,
2488,
8979,
25,
1238,
13,
48951,
16774,
39815,
13,
9078,
198,
... | 2.597633 | 338 |
import DS1302
from machine import I2C, Pin
from esp8266_i2c_lcd import I2cLcd
import time
DEFAULT_I2C_ADDR = 0x27
i2c = I2C(0,sda=Pin(0),scl=Pin(1),freq=400000)
lcd = I2cLcd(i2c, DEFAULT_I2C_ADDR, 2, 16)
ds = DS1302.DS1302(Pin(2),Pin(3),Pin(4))
while True:
d=ds.DateTime()
date1=str(d[0])+"-"+str(d[1])+"-"+str(d[2])+" "+str(d[4])+":"+str(d[5])
lcd.putstr(date1)
time.sleep(1)
lcd.clear()
d=ds.DateTime()
date2=str(d[0])+"-"+str(d[1])+"-"+str(d[2])+" "+str(d[4])+" "+str(d[5])
lcd.putstr(date2)
time.sleep(1)
lcd.clear()
| [
11748,
17400,
12952,
17,
198,
6738,
4572,
1330,
314,
17,
34,
11,
13727,
198,
6738,
15024,
23,
25540,
62,
72,
17,
66,
62,
75,
10210,
1330,
314,
17,
66,
43,
10210,
198,
11748,
640,
198,
7206,
38865,
62,
40,
17,
34,
62,
2885,
7707,
... | 1.737654 | 324 |
#!/usr/bin/python
#
# Filename:
#
# JsonToCsvConverter.py
#
#
# Basic Usage:
#
# python JsonToCsvConverter.py /directory/containing/datastream/json/files
#
# Utilities
import sys, os, re, glob, json
if __name__ == '__main__':
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
7066,
12453,
25,
220,
198,
2,
198,
2,
220,
220,
220,
220,
449,
1559,
2514,
34,
21370,
3103,
332,
353,
13,
9078,
198,
2,
198,
2,
198,
2,
14392,
29566,
25,
198,
2,
198,
2,
... | 2.278261 | 115 |
from matplotlib.pyplot import plot, show, figure, title
import matplotlib as plt
import numpy as np
from numpy import linalg as LA
from scipy.fftpack import dct, idct, fft, ifft
from scipy.sparse import coo_matrix
from sklearn.linear_model import Lasso
import sys, random
Fs = 40e3 #Sample rate
duration = 1./8
N_samps = np.floor(duration*Fs)
M = 250 # Number of compressed "basis" functions - we're going from N_samps to M samps.
f1 = 200
f2 = 3950
print "Compression ratio {0}".format(M/N_samps)
t = np.linspace(0,duration,N_samps)
X = np.sin(2*np.pi*f1*t) + np.sin(2*np.pi*f2*t)
q = range(0,int(N_samps))
random.shuffle(q);
yi = q[0:M];
yi = np.sort(yi)
#Y = X[yi]
D = fft(np.eye(N_samps))
D_inv = LA.inv(D)
y = np.dot(D, X)
A = D_inv[yi, :]
Y = np.dot(A, y)
Y = X[yi]
lasso = Lasso(alpha=0.01)
lasso.fit(A,Y)
# plot(lasso.coef_)
print lasso.coef_.shape
Xhat = np.dot(D_inv, lasso.coef_).real
print Xhat.shape
figure(figsize=[12,6])
plot(t,X)
show()
plot(t,Xhat)
show()
| [
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
7110,
11,
905,
11,
3785,
11,
3670,
198,
11748,
2603,
29487,
8019,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
300,
1292,
70,
355,
9131,
198,
6738,
629,... | 2.165217 | 460 |
# Portal & Balloon Msgs for Evan Intro | Dream World: Dream Forest Entrance (900010000)
# Author: Tiger
# "What was that? I heard something!"
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/evanTutorial/evanBalloon01")
| [
2,
25663,
1222,
47821,
6997,
14542,
329,
21523,
37219,
930,
7610,
2159,
25,
7610,
9115,
7232,
8132,
357,
12865,
486,
2388,
8,
198,
2,
6434,
25,
17030,
198,
198,
2,
366,
2061,
373,
326,
30,
314,
2982,
1223,
2474,
198,
5796,
13,
615,
... | 3.111111 | 72 |
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test service.
Handles test related functionality.
"""
from __future__ import print_function
import os
import re
import shutil
from chromite.cbuildbot import commands
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import failures_lib
from chromite.lib import image_lib
from chromite.lib import moblab_vm
from chromite.lib import osutils
from chromite.lib import portage_util
class Error(Exception):
"""The module's base error class."""
class BuildTargetUnitTestResult(object):
"""Result value object."""
def __init__(self, return_code, failed_cpvs):
"""Init method.
Args:
return_code (int): The return code from the command execution.
failed_cpvs (list[package_info.CPV]|None): List of packages whose tests
failed.
"""
self.return_code = return_code
self.failed_cpvs = failed_cpvs or []
@property
def BuildTargetUnitTest(build_target,
chroot,
packages=None,
blocklist=None,
was_built=True,
code_coverage=False,
testable_packages_optional=False):
"""Run the ebuild unit tests for the target.
Args:
build_target (build_target_lib.BuildTarget): The build target.
chroot (chroot_lib.Chroot): The chroot where the tests are running.
packages (list[str]|None): Packages to be tested. If none, uses all testable
packages.
blocklist (list[str]|None): Tests to skip.
was_built (bool): Whether packages were built.
code_coverage (bool): Whether to produce code coverage data.
testable_packages_optional (bool): Whether to allow no testable packages to
be found.
Returns:
BuildTargetUnitTestResult
"""
# TODO(saklein) Refactor commands.RunUnitTests to use this/the API.
# TODO(crbug.com/960805) Move cros_run_unit_tests logic here.
cmd = ['cros_run_unit_tests', '--board', build_target.name]
if packages:
cmd.extend(['--packages', ' '.join(packages)])
if blocklist:
cmd.extend(['--blacklist_packages', ' '.join(blocklist)])
if testable_packages_optional:
cmd.append('--no-testable-packages-ok')
if not was_built:
cmd.append('--assume-empty-sysroot')
extra_env = chroot.env
if code_coverage:
use_flags = extra_env.get('USE', '').split()
if 'coverage' not in use_flags:
use_flags.append('coverage')
extra_env['USE'] = ' '.join(use_flags)
# Set up the failed package status file.
with chroot.tempdir() as tempdir:
extra_env[constants.CROS_METRICS_DIR_ENVVAR] = chroot.chroot_path(tempdir)
result = cros_build_lib.run(cmd, enter_chroot=True,
extra_env=extra_env,
chroot_args=chroot.get_enter_args(),
check=False)
failed_pkgs = portage_util.ParseDieHookStatusFile(tempdir)
return BuildTargetUnitTestResult(result.returncode, failed_pkgs)
def BuildTargetUnitTestTarball(chroot, sysroot, result_path):
"""Build the unittest tarball.
Args:
chroot (chroot_lib.Chroot): Chroot where the tests were run.
sysroot (sysroot_lib.Sysroot): The sysroot where the tests were run.
result_path (str): The directory where the archive should be created.
"""
tarball = 'unit_tests.tar'
tarball_path = os.path.join(result_path, tarball)
cwd = chroot.full_path(sysroot.path, constants.UNITTEST_PKG_PATH)
if not os.path.exists(cwd):
return None
result = cros_build_lib.CreateTarball(tarball_path, cwd, chroot=chroot.path,
compression=cros_build_lib.COMP_NONE,
check=False)
return tarball_path if result.returncode == 0 else None
def DebugInfoTest(sysroot_path):
"""Run the debug info tests.
Args:
sysroot_path (str): The sysroot being tested.
Returns:
bool: True iff all tests passed, False otherwise.
"""
cmd = ['debug_info_test', os.path.join(sysroot_path, 'usr/lib/debug')]
result = cros_build_lib.run(cmd, enter_chroot=True, check=False)
return result.returncode == 0
def ChromitePytest():
"""Run Pytest tests in Chromite.
Returns:
bool: True iff all tests passed, False otherwise.
"""
cmd = [
os.path.join(constants.CHROMITE_SCRIPTS_DIR, 'run_pytest'),
constants.CHROMITE_DIR,
]
result = cros_build_lib.run(cmd, check=False)
return result.returncode == 0
def CreateMoblabVm(workspace_dir, chroot_dir, image_dir):
  """Create the moblab VMs.

  Assumes that image_dir is in exactly the state it was after building
  a test image and then converting it to a VM image.

  Args:
    workspace_dir (str): Workspace for the moblab VM.
    chroot_dir (str): Directory containing the chroot for the moblab VM.
    image_dir (str): Directory containing the VM image.

  Returns:
    MoblabVm: The resulting VM.
  """
  vms = moblab_vm.MoblabVm(workspace_dir, chroot_dir=chroot_dir)
  # The same image directory serves as both the moblab host image and the
  # DUT image; VM disk images are not regenerated (image_dir is already a
  # converted VM image per the assumption above).
  vms.Create(image_dir, dut_image_dir=image_dir, create_vm_images=False)
  return vms
def PrepareMoblabVmImageCache(vms, builder, payload_dirs):
  """Preload the given payloads into the moblab VM image cache.

  Args:
    vms (MoblabVm): The Moblab VM.
    builder (str): The builder path, used to name the cache dir.
    payload_dirs (list[str]): List of payload directories to load.

  Returns:
    str: Absolute path to the image cache path.
  """
  with vms.MountedMoblabDiskContext() as disk_dir:
    image_cache_root = os.path.join(disk_dir, 'static/prefetched')
    # If by any chance this path exists, the permission bits are surely
    # nonsense, since 'moblab' user doesn't exist on the host system.
    osutils.RmDir(image_cache_root, ignore_missing=True, sudo=True)

    image_cache_dir = os.path.join(image_cache_root, builder)
    osutils.SafeMakedirsNonRoot(image_cache_dir)
    for payload_dir in payload_dirs:
      osutils.CopyDirContents(payload_dir, image_cache_dir, allow_nonempty=True)

  # Rewrite the host-side path into the path the moblab guest will see:
  # strip the host mount prefix and re-root it under /mnt/moblab.
  image_cache_rel_dir = image_cache_dir[len(disk_dir):].strip('/')
  return os.path.join('/', 'mnt/moblab', image_cache_rel_dir)
def RunMoblabVmTest(chroot, vms, builder, image_cache_dir, results_dir):
  """Run Moblab VM tests.

  Args:
    chroot (chroot_lib.Chroot): The chroot in which to run tests.
    vms (MoblabVm): The Moblab VMs to test.
    builder (str): The builder path, used to find artifacts on GS.
    image_cache_dir (str): Path to artifacts cache.
    results_dir (str): Path to output test results.
  """
  with vms.RunVmsContext():
    # TODO(evanhernandez): Move many of these arguments to test config.
    test_args = [
        # moblab in VM takes longer to bring up all upstart services on first
        # boot than on physical machines.
        'services_init_timeout_m=10',
        'target_build="%s"' % builder,
        'test_timeout_hint_m=90',
        'clear_devserver_cache=False',
        # Trailing slash is required; rstrip first so we never emit '//'.
        'image_storage_server="%s"' % (image_cache_dir.rstrip('/') + '/'),
    ]
    cros_build_lib.run(
        [
            'test_that',
            '--no-quickmerge',
            '--results_dir', results_dir,
            '-b', 'moblab-generic-vm',
            'localhost:%s' % vms.moblab_ssh_port,
            'moblab_DummyServerNoSspSuite',
            '--args', ' '.join(test_args),
        ],
        enter_chroot=True,
        chroot_args=chroot.get_enter_args(),
    )
def SimpleChromeWorkflowTest(sysroot_path, build_target_name, chrome_root,
                             goma):
  """Execute SimpleChrome workflow tests

  Runs the full SDK cycle: init the chrome-sdk, build Chrome, stage a
  deployment, and run the VM test.

  Args:
    sysroot_path (str): The sysroot path for testing Chrome.
    build_target_name (str): Board build target
    chrome_root (str): Path to Chrome source root.
    goma (goma_util.Goma): Goma object (or None).
  """
  board_dir = 'out_%s' % build_target_name
  out_board_dir = os.path.join(chrome_root, board_dir, 'Release')
  use_goma = goma is not None
  extra_args = []

  with osutils.TempDir(prefix='chrome-sdk-cache') as tempdir:
    sdk_cmd = _InitSimpleChromeSDK(tempdir, build_target_name, sysroot_path,
                                   chrome_root, use_goma)

    if goma:
      # NOTE(review): extra_args is populated here but never passed to
      # sdk_cmd or any helper, so these goma flags have no effect --
      # presumably they were meant for _InitSimpleChromeSDK; confirm intent
      # before removing or wiring them through.
      extra_args.extend(['--nostart-goma', '--gomadir', goma.linux_goma_dir])

    _BuildChrome(sdk_cmd, chrome_root, out_board_dir, goma)
    _TestDeployChrome(sdk_cmd, out_board_dir)
    _VMTestChrome(build_target_name, sdk_cmd)
def _InitSimpleChromeSDK(tempdir, build_target_name, sysroot_path, chrome_root,
                         use_goma):
  """Create ChromeSDK object for executing 'cros chrome-sdk' commands.

  Args:
    tempdir (string): Tempdir for command execution.
    build_target_name (string): Board build target.
    sysroot_path (string): Sysroot for Chrome to use.
    chrome_root (string): Path to Chrome.
    use_goma (bool): Whether to use goma.

  Returns:
    A ChromeSDK object.
  """
  extra_args = ['--cwd', chrome_root, '--sdk-path', sysroot_path]
  # The SDK cache lives under the caller-provided tempdir, so it is discarded
  # together with the tempdir after the workflow test finishes.
  cache_dir = os.path.join(tempdir, 'cache')

  sdk_cmd = commands.ChromeSDK(
      constants.SOURCE_ROOT, build_target_name, chrome_src=chrome_root,
      goma=use_goma, extra_args=extra_args, cache_dir=cache_dir)
  return sdk_cmd
def _VerifySDKEnvironment(out_board_dir):
  """Make sure the SDK environment is set up properly.

  Args:
    out_board_dir (str): Output SDK dir for board.

  Raises:
    AssertionError: If the output directory was never created.
  """
  if not os.path.exists(out_board_dir):
    raise AssertionError('%s not created!' % out_board_dir)
  # Log the generated GN arguments for post-mortem debugging.
  gn_args = osutils.ReadFile(os.path.join(out_board_dir, 'args.gn'))
  logging.info('ARGS.GN=\n%s', gn_args)
def _BuildChrome(sdk_cmd, chrome_root, out_board_dir, goma):
  """Build Chrome with SimpleChrome environment.

  Args:
    sdk_cmd (ChromeSDK object): sdk_cmd to run cros chrome-sdk commands.
    chrome_root (string): Path to Chrome.
    out_board_dir (string): Path to board directory.
    goma (goma_util.Goma): Goma object
  """
  # Validate fetching of the SDK and setting everything up.
  sdk_cmd.Run(['true'])
  sdk_cmd.Run(['gclient', 'runhooks'])

  # Generate args.gn and ninja files.
  gn_cmd = os.path.join(chrome_root, 'buildtools', 'linux64', 'gn')
  # $GN_ARGS is provided by the chrome-sdk shell environment.
  gn_gen_cmd = '%s gen "%s" --args="$GN_ARGS"' % (gn_cmd, out_board_dir)
  sdk_cmd.Run(['bash', '-c', gn_gen_cmd])

  _VerifySDKEnvironment(out_board_dir)

  if goma:
    # If goma is enabled, start the goma compiler_proxy here, and record
    # some information just before the Chrome build is started.
    goma.Start()
    extra_env = goma.GetExtraEnv()
    ninja_env_path = os.path.join(goma.goma_log_dir, 'ninja_env')
    sdk_cmd.Run(['env', '--null'],
                run_args={'extra_env': extra_env,
                          'stdout': ninja_env_path})
    osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_cwd'),
                      sdk_cmd.cwd)
    osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_command'),
                      cros_build_lib.CmdToStr(sdk_cmd.GetNinjaCommand()))
  else:
    extra_env = None

  result = None
  try:
    # Build chromium.
    result = sdk_cmd.Ninja(run_args={'extra_env': extra_env})
  finally:
    # In teardown, if goma is enabled, stop the goma compiler proxy,
    # and record/copy some information to log directory, which will be
    # uploaded to the goma's server in a later stage.
    if goma:
      goma.Stop()
      ninja_log_path = os.path.join(chrome_root,
                                    sdk_cmd.GetNinjaLogPath())
      if os.path.exists(ninja_log_path):
        shutil.copy2(ninja_log_path,
                     os.path.join(goma.goma_log_dir, 'ninja_log'))
      # result stays None when Ninja raised, so the exit file is only
      # written for runs that actually completed.
      if result:
        osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_exit'),
                          str(result.returncode))
def _TestDeployChrome(sdk_cmd, out_board_dir):
  """Test SDK deployment.

  Args:
    sdk_cmd (ChromeSDK object): sdk_cmd to run cros chrome-sdk commands.
    out_board_dir (string): Path to board directory.

  Raises:
    AssertionError: If deploy_chrome did not stage a 'chrome' binary.
  """
  with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
    # Use the TOT deploy_chrome.
    script_path = os.path.join(
        constants.SOURCE_ROOT, constants.CHROMITE_BIN_SUBDIR, 'deploy_chrome')
    # --staging-only: stage into tempdir instead of deploying to a device.
    sdk_cmd.Run([script_path, '--build-dir', out_board_dir,
                 '--staging-only', '--staging-dir', tempdir])

    # Verify chrome is deployed.
    chromepath = os.path.join(tempdir, 'chrome')
    if not os.path.exists(chromepath):
      raise AssertionError(
          'deploy_chrome did not run successfully! Searched %s' % (chromepath))
def _VMTestChrome(board, sdk_cmd):
  """Run cros_run_test.

  Args:
    board (str): Board build target name.
    sdk_cmd (ChromeSDK object): sdk_cmd used to launch the VM test.
  """
  image_dir_symlink = image_lib.GetLatestImageLink(board)
  image_path = os.path.join(image_dir_symlink,
                            constants.VM_IMAGE_BIN)

  # Run VM test for boards where we've built a VM.
  if image_path and os.path.exists(image_path):
    sdk_cmd.VMTest(image_path)
def ValidateMoblabVmTest(results_dir):
  """Determine if the VM test passed or not.

  Args:
    results_dir (str): Path to directory containing test_that results.

  Raises:
    failures_lib.TestFailure: If dummy_PassServer did not run or failed.
  """
  log_file = os.path.join(results_dir, 'debug', 'test_that.INFO')
  if not os.path.isfile(log_file):
    raise failures_lib.TestFailure('Found no test_that logs at %s' % log_file)

  log_file_contents = osutils.ReadFile(log_file)
  # BUG FIX: re.match() only anchors at the very start of the string, so the
  # PASSED line -- which appears somewhere inside the log -- was never found
  # and every run was reported as a failure. re.search() scans the whole log.
  if not re.search(r'dummy_PassServer\s*\[\s*PASSED\s*]', log_file_contents):
    raise failures_lib.TestFailure('Moblab run_suite succeeded, but did '
                                   'not successfully run dummy_PassServer.')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
383,
18255,
1505,
7294,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
4... | 2.457842 | 5,598 |
# URL configuration for the shop application: one explicit carousel route
# plus three DRF viewsets registered on a shared prefix.
from django.conf import settings
from django.urls import path
from rest_framework.routers import DefaultRouter

from shop_app.apis.carousel_api import CarouselApiView
from shop_app.apis.display_api import CommodityCardDisplay, CommodityDetailDisplay, CommodityCategoryDisplay

app_name = 'shop_app'

urlpatterns = [
    # NOTE(review): 'crousel' looks like a typo for 'carousel', but it is a
    # live URL path -- confirm with API consumers before renaming.
    path(f'{settings.URL_PREFIX}/crousel/', CarouselApiView.as_view()),
    # path('add-into-shop-cart/', AddShopCartOperation.as_view(), name='add-into-shop-cart'),
    # path('add-into-favorites-chsc-api/', AddFavoritesOperation.as_view(), name='add-into-favorites-chsc-api'),
]

# DRF viewset registration.
router = DefaultRouter()
router.register(f'{settings.URL_PREFIX}', CommodityCardDisplay, basename='commodity-card')  # keyword search / commodity card display
router.register(f'{settings.URL_PREFIX}', CommodityDetailDisplay, basename='commodity-detail')  # commodity detail display
# NOTE(review): the trailing comma below wraps the call in a 1-tuple;
# harmless at runtime but almost certainly unintended.
router.register(f'{settings.URL_PREFIX}', CommodityCategoryDisplay, basename='commodity-category'),  # commodity category
urlpatterns += router.urls
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
1334,
62,
30604,
13,
472,
1010,
1330,
15161,
49,
39605,
198,
198,
6738,
6128,
62,
1324,
13,
499,
271,
13,
7718,
48355,
62,
15042,... | 2.391727 | 411 |
# Register the local `model` and `data` modules with yamlargs so the objects
# they define can be referenced/constructed from YAML configuration files.
import model
import data

from yamlargs import expose_module

expose_module(model)
expose_module(data)
| [
11748,
2746,
198,
11748,
1366,
198,
198,
6738,
331,
321,
15521,
82,
1330,
15651,
62,
21412,
198,
198,
1069,
3455,
62,
21412,
7,
19849,
8,
198,
1069,
3455,
62,
21412,
7,
7890,
8,
198
] | 3.029412 | 34 |
#!/usr/bin/python
# -*-coding=utf-8
from __future__ import print_function, division

import unittest

from onvif import ONVIFCamera, ONVIFError

# Connection settings for the ONVIF camera under test.
CAM_HOST = '10.1.3.10'
CAM_PORT = 80
CAM_USER = 'root'
CAM_PASS = 'password'
# Enable for verbose test output.
DEBUG = False

if __name__ == '__main__':
    unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
66,
7656,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
11748,
555,
715,
395,
198,
198,
6738,
319,
85,
361,
1330,
6177,
12861,
4851,... | 2.371901 | 121 |
# CMSSW FWLite configuration for the jet-plot analyzers.
import FWCore.ParameterSet.Config as cms

process = cms.Process("FWLitePlots")

#input stuff for Run/Lumi selection with the "JSON"-formatted files from the PVT group
import FWCore.PythonUtilities.LumiList as LumiList

# setup process
# NOTE(review): this re-creates the Process assigned above, making the first
# assignment redundant -- confirm before removing either line.
process = cms.Process("FWLitePlots")

# get JSON file correctly parced
JSONfile = 'DCSTRONLY_132440-140388'
myList = LumiList.LumiList (filename = JSONfile).getCMSSWString().split(',')

# Set up the parameters for the calo jet analyzer
process.jetStudies = cms.PSet(
    # input parameter sets
    jetSrc = cms.InputTag('selectedPatJets'),
    pfJetSrc = cms.InputTag('selectedPatJetsAK5PF'),
    metSrc = cms.InputTag('patMETs'),
    pfMetSrc = cms.InputTag('patMETsPF'),
    useCalo = cms.bool(True)
    )

# Set up the parameters for the PF jet analyzer
process.pfJetStudies = process.jetStudies.clone( useCalo = cms.bool(False) )

# Jet ID selectors for both calo and particle-flow jets.
process.load('PhysicsTools.SelectorUtils.pfJetIDSelector_cfi')
process.load('PhysicsTools.SelectorUtils.jetIDSelector_cfi')

process.plotParameters = cms.PSet (
    doTracks = cms.bool(False),
    useMC = cms.bool(False)
    )

# Input files plus the run/lumi mask derived from the JSON above.
process.inputs = cms.PSet (
    fileNames = cms.vstring(
        'reco_7TeV_380_pat.root'
        ),
    lumisToProcess = cms.untracked.VLuminosityBlockRange( myList )
    )

process.outputs = cms.PSet (
    outputName = cms.string('jetPlots.root')
    )
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
14681,
796,
269,
907,
13,
18709,
7203,
24160,
43,
578,
3646,
1747,
4943,
198,
198,
2,
15414,
3404,
329,
5660,
14,
43,
12994,
6356,
351,
262,
366,
40386,
26793,
... | 2.575816 | 521 |
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
| [
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
628
] | 4 | 28 |
from ipypdf.widgets.node_tools import AutoTools
def test_parse_text(app, pdf_nodes):
    """
    Check that the ocr model runs and adds content properly

    `app` and `pdf_nodes` are pytest fixtures; the first PDF node is used
    as the document under test.
    """
    doc_node = pdf_nodes[0]
    app.tree.remove_children(doc_node)  # clear any previously defined nodes
    AutoTools(doc_node).extract_text()
    assert len(doc_node.data['children']) > 0, "Layoutparser could not find any nodes"

    # Cleanup
    app.tree.remove_children(doc_node)
| [
6738,
20966,
4464,
7568,
13,
28029,
11407,
13,
17440,
62,
31391,
1330,
11160,
33637,
628,
198,
4299,
1332,
62,
29572,
62,
5239,
7,
1324,
11,
37124,
62,
77,
4147,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
6822,
326,
262,
2... | 2.78882 | 161 |
'''
Dalvik Executable (dex) parser.
References:
- http://www.dalvikvm.com/
- http://code.google.com/p/androguard/source/browse/core/bytecodes/dvm.py
- http://androguard.googlecode.com/hg/specs/dalvik/dex-format.html
Author: Robert Xiao
Creation Date: May 29, 2011
'''
from hachoir_parser import HachoirParser
from hachoir_core.field import (SeekableFieldSet, RootSeekableFieldSet, FieldSet, ParserError,
String, RawBytes, GenericVector,
UInt8, UInt16, UInt32, NullBits, Bit)
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.program.java import eat_descriptor
# modified from java.py
# Maps a Dalvik/JVM primitive type-descriptor character to its readable
# type name; 'L' marks an object (class reference) descriptor.
code_to_type_name = dict([
    ('B', "byte"),
    ('C', "char"),
    ('D', "double"),
    ('F', "float"),
    ('I', "int"),
    ('J', "long"),
    ('L', "object"),
    ('S', "short"),
    ('Z', "boolean"),
])
| [
7061,
6,
198,
35,
282,
28930,
8393,
18187,
357,
67,
1069,
8,
30751,
13,
198,
198,
19927,
25,
198,
12,
2638,
1378,
2503,
13,
31748,
28930,
14761,
13,
785,
14,
198,
12,
2638,
1378,
8189,
13,
13297,
13,
785,
14,
79,
14,
392,
3828,
... | 2.470423 | 355 |
from django.http import JsonResponse
| [
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
628,
198
] | 3.545455 | 11 |
# Example py_collector runner backed by MongoDB (pymodm/pymongo).
from py_collector import Scheduler, Manager, Collector
from datetime import datetime
try:
    from pymongo.write_concern import WriteConcern
    from pymodm.connection import connect
    from pymodm import MongoModel, fields
    from bs4 import BeautifulSoup
    import requests
except:
    raise ImportError('Please make sure pymongo, pymodm, bs4,and requests\
 are installed to run this example')

# Register the MongoDB connection under the alias used by the models.
connect("mongodb://localhost:27017/myDatabase", alias="my-app")

# NOTE(review): TikTok is not defined or imported anywhere in this module's
# visible source -- presumably a Collector subclass defined elsewhere;
# confirm where it comes from before running.
if __name__ =='__main__':
    TikTok().monitor()
6738,
12972,
62,
33327,
273,
1330,
27774,
18173,
11,
9142,
11,
17573,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
28311,
25,
198,
220,
220,
220,
422,
279,
4948,
25162,
13,
13564,
62,
1102,
30903,
1330,
19430,
3103,
30903,
198,
220,
... | 2.97191 | 178 |
#!/usr/bin/env python
# coding: utf-8

# # Preparation file for "Chapter 1: Image Classification"
#
# - This file creates the folders used in Chapter 1 and downloads the
#   required data files.
#

# In[1]:

import os
import urllib.request
import zipfile

# In[2]:

# Create the folder "data" if it does not exist yet.
data_dir = "./data/"
if not os.path.exists(data_dir):
    os.mkdir(data_dir)

# In[3]:

# Download ImageNet's class_index mapping.
# This is the file bundled with Keras:
# https://github.com/fchollet/deep-learning-models/blob/master/imagenet_utils.py
url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
save_path = os.path.join(data_dir, "imagenet_class_index.json")

if not os.path.exists(save_path):
    urllib.request.urlretrieve(url, save_path)

# In[4]:

# Download and unpack the ant/bee image data used in section 1.3.
# This is the dataset bundled with the PyTorch transfer-learning tutorial:
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
url = "https://download.pytorch.org/tutorial/hymenoptera_data.zip"
save_path = os.path.join(data_dir, "hymenoptera_data.zip")

if not os.path.exists(save_path):
    urllib.request.urlretrieve(url, save_path)
    print("if inner")

    # Open the ZIP file.
    # NOTE(review): 'zip' shadows the builtin of the same name; harmless in
    # this short script but worth renaming.
    zip = zipfile.ZipFile(save_path)
    zip.extractall(data_dir)  # extract the ZIP contents
    zip.close()  # close the ZIP file

    # Delete the downloaded ZIP archive.
    os.remove(save_path)

# In[5]:

# [* (already done manually)]
# Download the golden retriever image by hand:
# https://pixabay.com/ja/photos/goldenretriever-%E7%8A%AC-3724972/
# using the 640x426 size.
# (Image rights: CC0 Creative Commons, free for commercial use, no
# attribution required.)
# Place it directly under the "data" folder.
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
40283,
163,
105,
105,
16,
44165,
254,
13328,
242,
119,
161,
225,
237,
26344,
228,
165,
94,
252,
13700,
27032,
118,
244,
43636,
24... | 1.655093 | 864 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
from odoo import api, fields
@api.model
def TimezoneSelection(*args, **kwargs):
    """A selection field for installed timezones.

    All positional and keyword arguments are forwarded to
    ``fields.Selection``.

    NOTE(review): ``_tz_get`` is not defined or imported in the visible part
    of this module -- presumably Odoo's timezone-list provider; verify the
    import.
    """
    return fields.Selection(_tz_get, *args, **kwargs)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
30934,
198,
2,
15069,
357,
66,
8,
34414,
786,
5231,
260,
434,
685,
93,
36165,
14,
93,
60,
290,
25767,
669... | 3.342857 | 140 |
# -*- coding: utf-8 -*-
"""Sphinx documentation plugin used to document tasks.
Introduction
============
Usage
-----
Add the extension to your :file:`docs/conf.py` configuration module:
.. code-block:: python
extensions = (...,
'celery.contrib.sphinx')
If you'd like to change the prefix for tasks in reference documentation
then you can change the ``celery_task_prefix`` configuration value:
.. code-block:: python
celery_task_prefix = '(task)' # < default
With the extension installed `autodoc` will automatically find
task decorated objects and generate the correct (as well as
add a ``(task)`` prefix), and you can also refer to the tasks
using `:task:proj.tasks.add` syntax.
Use ``.. autotask::`` to manually document a task.
"""
from __future__ import absolute_import, unicode_literals
from inspect import formatargspec
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter
from celery.app.task import BaseTask
from celery.five import getfullargspec
class TaskDocumenter(FunctionDocumenter):
"""Document task definitions."""
objtype = 'task'
member_order = 11
@classmethod
class TaskDirective(PyModulelevel):
"""Sphinx task directive."""
def setup(app):
    """Setup Sphinx extension.

    Registers the task autodocumenter, installs the ``task`` directive in
    the existing Python domain, and declares the ``celery_task_prefix``
    config value (default ``(task)``).
    """
    app.add_autodocumenter(TaskDocumenter)
    app.domains['py'].directives['task'] = TaskDirective
    app.add_config_value('celery_task_prefix', '(task)', True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
50,
746,
28413,
10314,
13877,
973,
284,
3188,
8861,
13,
198,
198,
21906,
198,
25609,
198,
198,
28350,
198,
30934,
198,
198,
4550,
262,
7552,
284,
534,
1058,
7753,... | 3.180043 | 461 |
from flask import render_template, request, url_for, flash, redirect
from flask_login import login_user, logout_user, login_required, current_user
from . import auth
from .. import db
from ..models import User
from .forms import LoginForm, RegistrationForm
from ..email import send_email
@auth.before_app_request
@auth.route('/unconfirmed')
@auth.route('/login', methods=['GET', 'POST'])
@auth.route('/register', methods=['GET', 'POST'])
@auth.route('/confirm/<token>')
@login_required
@auth.route('/logout')
@login_required
@auth.route('/confirm')
@login_required
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
11,
19016,
62,
1640,
11,
7644,
11,
18941,
201,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
7220,
11,
2604,
448,
62,
7220,
11,
17594,
62,
35827,
11,
1459,
62,
7220,
201,
198,
6738,
764,... | 2.779817 | 218 |
"""GenericModel class and its child classes.
The GenericModel class enables flattening of the model parameters for tracking.
MLP and LeNet are example models. Add your own PyTorch model by inheriting
from GenericModel and organizing it into the pytorch lightning style.
"""
# pylint: disable = no-member
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import SGD, Adam, Adagrad, RMSprop
class GenericModel(pl.LightningModule):
    """GenericModel class that enables flattening of the model parameters."""

    def __init__(self, optimizer, learning_rate, custom_optimizer=None, gpus=0):
        """Init a new GenericModel.

        Args:
            optimizer: optimizer to use, such as "adam", "sgd", etc.
            learning_rate: learning rate to use.
            custom_optimizer (optional): Custom optimizer object. Defaults to None.
            gpus (optional): GPUs to use for training. Defaults to 0.
        """
        super().__init__()
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.custom_optimizer = custom_optimizer
        self.gpus = gpus
        # Child classes append flattened-weight snapshots here from their
        # training_epoch_end hooks.
        self.optim_path = []
        self.accuracy = pl.metrics.Accuracy()

    def configure_optimizers(self):
        """Configure the optimizer for Pytorch Lightning.

        Raises:
            Exception: Optimizer not recognized.
        """
        if self.custom_optimizer:
            return self.custom_optimizer(self.parameters(), self.learning_rate)
        elif self.optimizer == "adam":
            return Adam(self.parameters(), self.learning_rate)
        elif self.optimizer == "sgd":
            return SGD(self.parameters(), self.learning_rate)
        elif self.optimizer == "adagrad":
            return Adagrad(self.parameters(), self.learning_rate)
        elif self.optimizer == "rmsprop":
            return RMSprop(self.parameters(), self.learning_rate)
        else:
            # BUG FIX: this branch is reached when the *named* optimizer
            # string is unrecognized (custom_optimizer is falsy here), yet
            # the message used to report self.custom_optimizer, which is
            # None in that case.  Report the offending optimizer name.
            raise Exception(
                f"optimizer supplied is not supported: {self.optimizer}"
            )

    def get_flat_params(self):
        """Get flattened and concatenated params of the model."""
        # NOTE(review): _get_params, _get_param_shapes and
        # _unflatten_to_state_dict are not defined in this class in the
        # visible source -- presumably provided elsewhere; confirm.
        params = self._get_params()
        flat_params = torch.Tensor()
        if torch.cuda.is_available() and self.gpus > 0:
            flat_params = flat_params.cuda()
        for _, param in params.items():
            flat_params = torch.cat((flat_params, torch.flatten(param)))
        return flat_params

    def init_from_flat_params(self, flat_params):
        """Set all model parameters from the flattened form."""
        if not isinstance(flat_params, torch.Tensor):
            raise AttributeError(
                "Argument to init_from_flat_params() must be torch.Tensor"
            )
        shapes = self._get_param_shapes()
        state_dict = self._unflatten_to_state_dict(flat_params, shapes)
        self.load_state_dict(state_dict, strict=True)
class MLP(GenericModel):
    """A Multilayer Perceptron model.

    Default is 1 hidden layer with 50 neurons.
    """

    def __init__(
        self,
        input_dim,
        num_classes,
        learning_rate,
        num_hidden_layers=1,
        hidden_dim=50,
        optimizer="adam",
        custom_optimizer=None,
        gpus=0,
    ):
        """Init an MLP model.

        Args:
            input_dim: Number of input dimensions.
            num_classes: Number of classes or output dimensions.
            learning_rate: The learning rate to use.
            num_hidden_layers (optional): Number of hidden layers. Defaults to 1.
            hidden_dim (optional): Number of neurons in each layer. Defaults to 50.
            optimizer (optional): The optimizer to use. Defaults to "adam".
            custom_optimizer (optional): The custom optimizer to use. Defaults to None.
            gpus (optional): GPUs to use if available. Defaults to 0.
        """
        super().__init__(
            optimizer=optimizer,
            learning_rate=learning_rate,
            custom_optimizer=custom_optimizer,
            gpus=gpus,
        )
        # NOTE: nn.ModuleList is not the same as Sequential,
        # the former doesn't have forward implemented
        if num_hidden_layers == 0:
            # No hidden layers: a single linear map is the whole model.
            self.layers = nn.Linear(input_dim, num_classes)
        else:
            self.layers = nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.ReLU())
            # Sequential submodules are named by index; keep appending
            # Linear+ReLU pairs with consecutive numeric names.
            n_layers = 2
            for _ in range(num_hidden_layers - 1):
                self.layers.add_module(
                    name=f"{n_layers}", module=nn.Linear(hidden_dim, hidden_dim)
                )
                self.layers.add_module(name=f"{n_layers+1}", module=nn.ReLU())
                n_layers += 2
            # Final projection to class logits (no activation).
            self.layers.add_module(
                name=f"{n_layers}", module=nn.Linear(hidden_dim, num_classes)
            )

    def forward(self, x_in, apply_softmax=False):
        """Forward pass."""
        # Pytorch lightning recommends using forward for inference, not training
        y_pred = self.layers(x_in)
        if apply_softmax:
            y_pred = F.softmax(y_pred, dim=1)
        return y_pred

    def loss_fn(self, y_pred, y):
        """Loss function."""
        return F.cross_entropy(y_pred, y)

    def training_step(self, batch, batch_idx):
        """Training step for a batch of data.

        The model computes the loss and save it along with the flattened model params.
        """
        X, y = batch
        y_pred = self(X)
        # Get model weights flattened here to append to optim_path later
        flat_w = self.get_flat_params()
        loss = self.loss_fn(y_pred, y)
        preds = y_pred.max(dim=1)[1]  # class
        accuracy = self.accuracy(preds, y)
        self.log(
            "train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True
        )
        self.log(
            "train_acc",
            accuracy,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return {"loss": loss, "accuracy": accuracy, "flat_w": flat_w}

    def training_epoch_end(self, training_step_outputs):
        """Only save the last step in each epoch.

        Args:
            training_step_outputs: all the steps in this epoch.
        """
        # Only record the last step in each epoch
        self.optim_path.append(training_step_outputs[-1])
class LeNet(GenericModel):
    """LeNet-5 convolutional neural network."""

    def __init__(self, learning_rate, optimizer="adam", custom_optimizer=None, gpus=0):
        """Init a LeNet model.

        Args:
            learning_rate: learning rate to use.
            optimizer (optional): optimizer to use. Defaults to "adam".
            custom_optimizer (optional): custom optimizer to use. Defaults to None.
            gpus (optional): Number of GPUs for training if available. Defaults to 0.
        """
        super().__init__(optimizer, learning_rate, custom_optimizer, gpus=gpus)
        self.relu = nn.ReLU()
        self.pool = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
        # Classic LeNet-5 stack: 6 -> 16 -> 120 feature maps with 5x5
        # kernels; assumes 1x32x32 input so conv3 produces 120x1x1 --
        # TODO confirm input size against the data pipeline.
        self.conv1 = nn.Conv2d(
            in_channels=1,
            out_channels=6,
            kernel_size=(5, 5),
            stride=(1, 1),
            padding=(0, 0),
        )
        self.conv2 = nn.Conv2d(
            in_channels=6,
            out_channels=16,
            kernel_size=(5, 5),
            stride=(1, 1),
            padding=(0, 0),
        )
        self.conv3 = nn.Conv2d(
            in_channels=16,
            out_channels=120,
            kernel_size=(5, 5),
            stride=(1, 1),
            padding=(0, 0),
        )
        self.fc1 = nn.Linear(120, 84)
        self.fc2 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass."""
        x = self.relu(self.conv1(x))
        x = self.pool(x)
        x = self.relu(self.conv2(x))
        x = self.pool(x)
        x = self.relu(self.conv3(x))  # (n_examples, 120, 1, 1) -> (n_examples, 120)
        x = x.reshape(x.shape[0], -1)
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def loss_fn(self, y_pred, y):
        """Loss function."""
        return F.cross_entropy(y_pred, y)

    def training_step(self, batch, batch_idx):
        """Training step for a batch of data.

        The model computes the loss and save it along with the flattened model params.
        """
        X, y = batch
        y_pred = self(X)
        # Get model weights flattened here to append to optim_path later
        flat_w = self.get_flat_params()
        loss = self.loss_fn(y_pred, y)
        preds = y_pred.max(dim=1)[1]  # class
        accuracy = self.accuracy(preds, y)
        self.log(
            "train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True
        )
        self.log(
            "train_acc",
            accuracy,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return {"loss": loss, "accuracy": accuracy, "flat_w": flat_w}

    def training_epoch_end(self, training_step_outputs):
        """Only save the last step in each epoch.

        Args:
            training_step_outputs: all the steps in this epoch.
        """
        # Unlike MLP, every step of the epoch is recorded here.
        self.optim_path.extend(training_step_outputs)
class CNNNet(GenericModel):
    """Simple convolutional neural network."""

    def __init__(self, learning_rate, optimizer="adam", custom_optimizer=None, gpus=0):
        """Init a CNN model.

        Args:
            learning_rate: learning rate to use.
            optimizer (optional): optimizer to use. Defaults to "adam".
            custom_optimizer (optional): custom optimizer to use. Defaults to None.
            gpus (optional): Number of GPUs for training if available. Defaults to 0.
        """
        super().__init__(optimizer, learning_rate, custom_optimizer, gpus=gpus)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2)
        # Three 3x3 same-padding conv layers with 8 channels each; fc1's
        # 8*4*4 input implies a 3x32x32 input image (halved by 3 pools) --
        # TODO confirm against the data pipeline.
        self.conv1 = nn.Conv2d(
            in_channels=3,
            out_channels=8,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv2 = nn.Conv2d(
            in_channels=8,
            out_channels=8,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv3 = nn.Conv2d(
            in_channels=8,
            out_channels=8,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.fc1 = nn.Linear(8*4*4, 32)
        self.fc2 = nn.Linear(32, 10)

    def forward(self, x):
        """Forward pass."""
        x = self.relu(self.conv1(x))
        x = self.pool(x)
        x = self.relu(self.conv2(x))
        x = self.pool(x)
        # (Previous comment claimed 120 channels -- that was copied from
        # LeNet; conv3 here outputs 8 channels.)
        x = self.relu(self.conv3(x))
        x = self.pool(x)
        x = x.view(x.shape[0], -1)
        # print(x.shape)
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def loss_fn(self, y_pred, y):
        """Loss function."""
        return F.cross_entropy(y_pred, y)

    def training_step(self, batch, batch_idx):
        """Training step for a batch of data.

        The model computes the loss and save it along with the flattened model params.
        """
        X, y = batch
        y_pred = self(X)
        # Get model weights flattened here to append to optim_path later
        flat_w = self.get_flat_params()
        loss = self.loss_fn(y_pred, y)
        preds = y_pred.max(dim=1)[1]  # class
        accuracy = self.accuracy(preds, y)
        self.log(
            "train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, logger=True
        )
        self.log(
            "train_acc",
            accuracy,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
            logger=True,
        )
        return {"loss": loss, "accuracy": accuracy, "flat_w": flat_w}

    def training_epoch_end(self, training_step_outputs):
        """Only save the last step in each epoch.

        Args:
            training_step_outputs: all the steps in this epoch.
        """
        # Every step of the epoch is recorded (cf. MLP, which keeps one).
        self.optim_path.extend(training_step_outputs)
| [
37811,
46189,
17633,
1398,
290,
663,
1200,
6097,
13,
198,
198,
464,
42044,
17633,
1398,
13536,
27172,
3101,
286,
262,
2746,
10007,
329,
9646,
13,
198,
5805,
47,
290,
1004,
7934,
389,
1672,
4981,
13,
3060,
534,
898,
9485,
15884,
354,
2... | 2.073674 | 5,918 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from exchange import injective_insurance_rpc_pb2 as exchange_dot_injective__insurance__rpc__pb2
class InjectiveInsuranceRPCStub(object):
"""InjectiveInsuranceRPC defines gRPC API of Insurance provider.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Funds = channel.unary_unary(
'/injective_insurance_rpc.InjectiveInsuranceRPC/Funds',
request_serializer=exchange_dot_injective__insurance__rpc__pb2.FundsRequest.SerializeToString,
response_deserializer=exchange_dot_injective__insurance__rpc__pb2.FundsResponse.FromString,
)
self.Redemptions = channel.unary_unary(
'/injective_insurance_rpc.InjectiveInsuranceRPC/Redemptions',
request_serializer=exchange_dot_injective__insurance__rpc__pb2.RedemptionsRequest.SerializeToString,
response_deserializer=exchange_dot_injective__insurance__rpc__pb2.RedemptionsResponse.FromString,
)
class InjectiveInsuranceRPCServicer(object):
"""InjectiveInsuranceRPC defines gRPC API of Insurance provider.
"""
def Funds(self, request, context):
"""Funds lists all insurance funds.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Redemptions(self, request, context):
"""PendingRedemptions lists all pending redemptions according to a filter
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
# This class is part of an EXPERIMENTAL API.
class InjectiveInsuranceRPC(object):
"""InjectiveInsuranceRPC defines gRPC API of Insurance provider.
"""
@staticmethod
@staticmethod
| [
2,
2980,
515,
416,
262,
308,
49,
5662,
11361,
8435,
17050,
13877,
13,
8410,
5626,
48483,
0,
198,
37811,
11792,
290,
4382,
6097,
11188,
284,
1237,
672,
3046,
12,
23211,
2594,
526,
15931,
198,
11748,
1036,
14751,
198,
198,
6738,
5163,
1... | 2.509434 | 848 |
'''
Created on May 27, 2016
@author: cesar
'''
from scipy.stats import norm, pareto, lognorm, gamma, weibull_min, weibull_max, gengamma, expon
import numpy as np
from scipy.stats import kstest
import sys
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy.integrate import quadrature
from scipy.stats import expon, lognorm, pareto
def bestFit(X,plot=False,distToTest=["norm","pareto","lognorm","gamma","expon","weibull_min","weibull_max"],numberOfBins=100):
"""
Version of August 2015
X: data
return
"""
xMin = min(X)
xMax = max(X)
dX = (xMax - xMin)*0.01
support = np.arange(xMin,xMax,dX)
bestFit = []
error = []
if("norm" in distToTest):
try:
dist_param = norm.fit(X)
myDist = norm(*dist_param)
kt, p_value = kstest(X,"norm",dist_param)
bestFit.append({"distribution":"norm","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="norm",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("norm_err",sys.exc_info()))
if("pareto" in distToTest):
try:
dist_param = pareto.fit(X)
myDist = pareto(*dist_param)
kt, p_value = kstest(X,"pareto",dist_param)
bestFit.append({"distribution":"pareto","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="pareto",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("pareto_err",sys.exc_info()))
if("lognorm" in distToTest):
try:
dist_param = lognorm.fit(X)
myDist = lognorm(*dist_param)
kt, p_value = kstest(X,"lognorm",dist_param)
bestFit.append({"distribution":"lognorm","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="lognorm",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("lognorm_err",sys.exc_info()))
if("gamma" in distToTest):
try:
dist_param = gamma.fit(X)
myDist = gamma(*dist_param)
kt, p_value = kstest(X,"gamma",dist_param)
bestFit.append({"distribution":"gamma","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="gamma",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("gamma_err",sys.exc_info()))
if("weibull_min" in distToTest):
try:
dist_param = weibull_min.fit(X)
myDist = weibull_min(*dist_param)
kt, p_value = kstest(X,"weibull_min",dist_param)
bestFit.append({"distribution":"weibull_min","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="weibull_min",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("weibull_min_err",sys.exc_info()))
if("weibull_max" in distToTest):
try:
dist_param = weibull_max.fit(X)
myDist = weibull_max(*dist_param)
kt, p_value = kstest(X,"weibull_max",dist_param)
bestFit.append({"distribution":"weibull_max","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="weibull_max",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("weibull_max_err",sys.exc_info()))
if("expon" in distToTest):
try:
dist_param = expon.fit(X)
myDist = expon(*dist_param)
kt, p_value = kstest(X,"expon",dist_param)
bestFit.append({"distribution":"expon","ktest":kt,"pvalue":p_value,"parameters":dist_param})
if(plot):
Y = myDist.pdf(support)
plt.plot(support,Y,label="expon",linewidth=2.0)
stuff = plt.hist(X,bins=numberOfBins,normed=True)
except:
error.append(("expon_err",sys.exc_info()))
#FINISH PLOT
if(plot):
plt.legend(loc="best")
plt.show()
return (bestFit,error)
def realBestFit(X,plot=False,distToTest=["norm","pareto","expon","lognorm","gamma","weibull_min","weibull_max"],numberOfBins=100):
"""
"""
bF = bestFit(X,False,distToTest,numberOfBins)[0]
a = [(b["ktest"],b["distribution"]) for b in bF]
a.sort()
return a[0][1]
def plotBlockCoocurrance(coOccurranceMatrix,listOfNodes,whereToPrint):
"""
"""
plt.clf()
fig = plt.figure()
fig.set_size_inches(10, 10)
#plt.matshow(dat,cmap = plt.get_cmap('jet'))
plt.matshow(coOccurranceMatrix,cmap = plt.get_cmap('hot'))
plt.colorbar(shrink=.80)
plt.xticks(np.arange(len(listOfNodes)),listOfNodes, rotation=90)
plt.yticks(np.arange(len(listOfNodes)),listOfNodes)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 7}
matplotlib.rc('font', **font)
plt.savefig(whereToPrint+"coocurrances.pdf",bbox_inches='tight')
| [
7061,
6,
198,
41972,
319,
1737,
2681,
11,
1584,
198,
198,
31,
9800,
25,
269,
18964,
198,
7061,
6,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593,
11,
279,
533,
1462,
11,
300,
2360,
579,
11,
34236,
11,
356,
571,
724,
62,
1084,
11... | 1.891247 | 3,016 |
#!/usr/bin/env python3
# Copyright 2020, Esri. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
LiveFeedsHealthCheck
1) Authenticate GIS profile
2) Setup
3) Validation and Health check
4) Process results
5) Create/Update RSS files
6) Save output
"""
try:
import arcgis
import arcpy
import html
import json
import os
import EventsManager as EventsManager
import FileManager as FileManager
import LoggingUtils as LoggingUtils
import QueryEngine as QueryEngine
import RSSManager as RSSManager
import ServiceValidator as ServiceValidator
import StatusManager as StatusManager
import TimeUtils as TimeUtils
import version as version
from ConfigManager import ConfigManager
from UserUtils import User
except ImportError as e:
print(f"Import Error: {e}")
class ItemCountNotInRangeError(Exception):
"""Exception raised for errors when the input item count is 0
Attributes:
item_count -- input item count which caused the error
message -- explanation of the error
"""
class InputFileNotFoundError(Exception):
"""Exception raised for errors in the input file is not found.
Attributes:
file -- input file
message -- explanation of the error
"""
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
15069,
12131,
11,
8678,
380,
13,
220,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2... | 3.362847 | 576 |
"""
Units Manged By Systemctl (services)
====================================
Parsers included in this module are:
ListUnits - command ``/bin/systemctl list-units``
-------------------------------------------------
UnitFiles - command ``/bin/systemctl list-unit-files``
------------------------------------------------------
"""
from .. import get_active_lines
from ... import Parser, parser
from insights.specs import Specs
@parser(Specs.systemctl_list_unit_files)
class UnitFiles(Parser):
"""
The UnitFiles class parses the output of ``/bin/systemctl list-unit-files`` and provides
information about enabled services.
Output of Command::
mariadb.service enabled
neutron-openvswitch-agent.service enabled
neutron-ovs-cleanup.service enabled
neutron-server.service enabled
runlevel0.target disabled
runlevel1.target disabled
runlevel2.target enabled
Example:
>>> conf = shared[UnitFiles]
>>> conf.is_on('existing-enabled-service.service')
True
>>> conf.is_on('existing-disabled-service.service')
False
>>> conf.is_on('nonexistent-service.service')
False
>>> conf.exists('existing-enabled-service.service')
True
>>> conf.exists('existing-disabled-service.service')
True
>>> conf.exists('nonexistent-service.service')
False
>>> 'existing-enabled-service.service' in conf.services
True
>>> 'existing-disabled-service.service' in conf.services
True
>>> 'nonexistent-service.service' in conf.services
False
>>> conf.services['existing-enabled-service.service']
True
>>> conf.services['existing-disabled-service.service']
False
>>> conf.services['nonexistent-service.service']
KeyError: 'nonexistent-service.service'
"""
def parse_content(self, content):
"""
Main parsing class method which stores all interesting data from the content.
Args:
content (context.content): Parser context content
"""
# 'static' means 'on' to fulfill dependency of something else that is on
# man systemctl - "is-enabled" knows these states
valid_states = set(['enabled', 'enabled-runtime', 'linked', 'linked-runtime', 'masked',
'masked-runtime', 'static', 'indirect', 'disabled', 'generated',
'transient', 'bad', 'invalid'])
# man systemctl - "is-enabled" considers these to be enabled
on_states = set(['enabled', 'enabled-runtime', 'static', 'indirect', 'generated', 'transient'])
for line in get_active_lines(content):
parts = line.split(None) # AWK like split, strips whitespaces
if len(parts) == 2 and any(part in valid_states for part in parts):
service, state = parts
enabled = state in on_states
self.services[service] = enabled
self.parsed_lines[service] = line
self.service_list.append(service)
def is_on(self, service_name):
"""
Checks if the service is enabled in systemctl.
Args:
service_name (str): service name including '.service'
Returns:
Union[bool, None]: True if service is enabled, False if it is disabled. None if the
service doesn't exist.
"""
return self.services.get(service_name, None)
def exists(self, service_name):
"""
Checks if the service is listed in systemctl.
Args:
service_name (str): service name including '.service'
Returns:
bool: True if service exists, False otherwise.
"""
return service_name in self.service_list
@parser(Specs.systemctl_list_units)
class ListUnits(Parser):
"""
The ListUnits class parses the output of ``/bin/systemctl list-units`` and provides
information about all the services listed under it.
Output of Command::
sockets.target loaded active active Sockets
swap.target loaded active active Swap
systemd-shutdownd.socket loaded active listening Delayed Shutdown Socket
neutron-dhcp-agent.service loaded active running OpenStack Neutron DHCP Agent
neutron-openvswitch-agent.service loaded active running OpenStack Neutron Open vSwitch Agent
Example:
>>> units.get_service_details('swap.target')
{'LOAD': 'loaded', 'ACTIVE': 'active', 'SUB': 'active', 'UNIT': 'swap.target'}
>>> units.unit_list['swap.target']
{'LOAD': 'loaded', 'ACTIVE': 'active', 'SUB': 'active', 'UNIT': 'swap.target'}
>>> units.is_active('swap.target')
True
>>> units.unit_list['random.service']
{'LOAD': None, 'ACTIVE': None, 'SUB': None, 'UNIT': None}
"""
def parse_content(self, content):
"""
Main parsing class method which stores all interesting data from the content.
Args:
content (context.content): Parser context content
"""
BULLET_CHAR_U = u'\u25CF'
BULLET_CHAR_B = b"\xe2\x97\x8f"
for line in get_active_lines(content):
parts = line.split(None) # AWK like split, strips whitespaces
if parts[0] == BULLET_CHAR_U or parts[0].encode('utf-8') == BULLET_CHAR_B or parts[0] == '*':
self.unit_list[parts[1]] = self.parse_service_details(parts[1:])
else:
self.unit_list[parts[0]] = self.parse_service_details(parts)
def get_service_details(self, service_name):
"""
Return the service details collected by systemctl.
Args:
service_name (str): service name including its extension.
Returns:
dict: Dictionary containing details for the service.
if service is not present dictonary values will be `None`::
{'LOAD': 'loaded', 'ACTIVE': 'active', 'SUB': 'running', 'UNIT': 'neutron-dhcp-agent.service'}
"""
empty_details = {'LOAD': None, 'ACTIVE': None, 'SUB': None, 'UNIT': None}
return self.unit_list.get(service_name, empty_details)
def is_loaded(self, service_name):
"""
Return the LOAD state of service managed by systemd.
Args:
service_name (str): service name including its extension.
Returns:
bool: True if service is loaded False if not loaded
"""
return self.get_service_details(service_name)['LOAD'] == 'loaded'
def is_active(self, service_name):
"""
Return the ACTIVE state of service managed by systemd.
Args:
service_name (str): service name including its extension.
Returns:
bool: True if service is active False if inactive
"""
return self.get_service_details(service_name)['ACTIVE'] == 'active'
def is_running(self, service_name):
"""
Return the SUB state of service managed by systemd.
Args:
service_name (str): service name including its extension.
Returns:
bool: True if service is running False in all other states.
"""
return self.get_service_details(service_name)['SUB'] == 'running'
| [
37811,
198,
3118,
896,
337,
5102,
2750,
4482,
34168,
357,
30416,
8,
198,
10052,
1421,
198,
198,
47,
945,
364,
3017,
287,
428,
8265,
389,
25,
198,
198,
8053,
3118,
896,
532,
3141,
7559,
14,
8800,
14,
10057,
34168,
1351,
12,
41667,
15... | 2.390252 | 3,180 |
from app1.models import *
from app1.util.utils import *
def teachCourse(request):
'''
get:
http://127.0.0.1:8000/app3/teachCourse?tno=001
调用参数:
tno:工号
'''
tid=request.GET.get("tno")
result=Inventory.objects.filter(teachPlan__teacher__tno=tid).values("teachPlan__teacher__tname","teachPlan__course__cname","teachPlan__department__dname","teachPlan__teach_date","teachPlan__credit","student__sname")
return showJsonresult(result) | [
6738,
598,
16,
13,
27530,
1330,
1635,
198,
6738,
598,
16,
13,
22602,
13,
26791,
1330,
1635,
198,
198,
4299,
4545,
49046,
7,
25927,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
220,
220,
220,
220,
651,
25,
198,
220,
220... | 2.210046 | 219 |
import math
import os
import random
import re
import sys
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
s = input()
result = sherlockAndAnagrams(s)
fptr.write(str(result) + '\n')
fptr.close()
| [
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
302,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
277,
20692,
796,
1280,
7,
418,
13,
268,
2268,
17816,
2606,
... | 2.20438 | 137 |
# -*- coding: utf-8 -*-
model = {
u'ан ': 0,
u'ен ': 1,
u'ың ': 2,
u' қа': 3,
u' ба': 4,
u'ай ': 5,
u'нда': 6,
u'ын ': 7,
u' са': 8,
u' ал': 9,
u'ді ': 10,
u'ары': 11,
u'ды ': 12,
u'ып ': 13,
u' мұ': 14,
u' бі': 15,
u'асы': 16,
u'да ': 17,
u'най': 18,
u' жа': 19,
u'мұн': 20,
u'ста': 21,
u'ған': 22,
u'н б': 23,
u'ұна': 24,
u' бо': 25,
u'ның': 26,
u'ін ': 27,
u'лар': 28,
u'сын': 29,
u' де': 30,
u'аға': 31,
u'тан': 32,
u' кө': 33,
u'бір': 34,
u'ер ': 35,
u'мен': 36,
u'аза': 37,
u'ынд': 38,
u'ыны': 39,
u' ме': 40,
u'анд': 41,
u'ері': 42,
u'бол': 43,
u'дың': 44,
u'қаз': 45,
u'аты': 46,
u'сы ': 47,
u'тын': 48,
u'ғы ': 49,
u' ке': 50,
u'ар ': 51,
u'зақ': 52,
u'ық ': 53,
u'ала': 54,
u'алы': 55,
u'аны': 56,
u'ара': 57,
u'ағы': 58,
u'ген': 59,
u'тар': 60,
u'тер': 61,
u'тыр': 62,
u'айд': 63,
u'ард': 64,
u'де ': 65,
u'ға ': 66,
u' қо': 67,
u'бар': 68,
u'ің ': 69,
u'қан': 70,
u' бе': 71,
u' қы': 72,
u'ақс': 73,
u'гер': 74,
u'дан': 75,
u'дар': 76,
u'лық': 77,
u'лға': 78,
u'ына': 79,
u'ір ': 80,
u'ірі': 81,
u'ғас': 82,
u' та': 83,
u'а б': 84,
u'гі ': 85,
u'еді': 86,
u'еле': 87,
u'йды': 88,
u'н к': 89,
u'н т': 90,
u'ола': 91,
u'рын': 92,
u'іп ': 93,
u'қст': 94,
u'қта': 95,
u'ң б': 96,
u' ай': 97,
u' ол': 98,
u' со': 99,
u'айт': 100,
u'дағ': 101,
u'иге': 102,
u'лер': 103,
u'лып': 104,
u'н а': 105,
u'ік ': 106,
u'ақт': 107,
u'бағ': 108,
u'кен': 109,
u'н қ': 110,
u'ны ': 111,
u'рге': 112,
u'рға': 113,
u'ыр ': 114,
u' ар': 115,
u'алғ': 116,
u'аса': 117,
u'бас': 118,
u'бер': 119,
u'ге ': 120,
u'еті': 121,
u'на ': 122,
u'нде': 123,
u'не ': 124,
u'ниг': 125,
u'рды': 126,
u'ры ': 127,
u'сай': 128,
u' ау': 129,
u' кү': 130,
u' ни': 131,
u' от': 132,
u' өз': 133,
u'ауд': 134,
u'еп ': 135,
u'иял': 136,
u'лты': 137,
u'н ж': 138,
u'н о': 139,
u'осы': 140,
u'оты': 141,
u'рып': 142,
u'рі ': 143,
u'тке': 144,
u'ты ': 145,
u'ы б': 146,
u'ы ж': 147,
u'ылы': 148,
u'ысы': 149,
u'і с': 150,
u'қар': 151,
u' бұ': 152,
u' да': 153,
u' же': 154,
u' тұ': 155,
u' құ': 156,
u'ады': 157,
u'айл': 158,
u'ап ': 159,
u'ата': 160,
u'ені': 161,
u'йла': 162,
u'н м': 163,
u'н с': 164,
u'нды': 165,
u'нді': 166,
u'р м': 167,
u'тай': 168,
u'тін': 169,
u'ы т': 170,
u'ыс ': 171,
u'інд': 172,
u' би': 173,
u'а ж': 174,
u'ауы': 175,
u'деп': 176,
u'дің': 177,
u'еке': 178,
u'ери': 179,
u'йын': 180,
u'кел': 181,
u'лды': 182,
u'ма ': 183,
u'нан': 184,
u'оны': 185,
u'п ж': 186,
u'п о': 187,
u'р б': 188,
u'рия': 189,
u'рла': 190,
u'уда': 191,
u'шыл': 192,
u'ы а': 193,
u'ықт': 194,
u'і а': 195,
u'і б': 196,
u'із ': 197,
u'ілі': 198,
u'ң қ': 199,
u' ас': 200,
u' ек': 201,
u' жо': 202,
u' мә': 203,
u' ос': 204,
u' ре': 205,
u' се': 206,
u'алд': 207,
u'дал': 208,
u'дег': 209,
u'дей': 210,
u'е б': 211,
u'ет ': 212,
u'жас': 213,
u'й б': 214,
u'лау': 215,
u'лда': 216,
u'мет': 217,
u'нын': 218,
u'сар': 219,
u'сі ': 220,
u'ті ': 221,
u'ыры': 222,
u'ыта': 223,
u'ісі': 224,
u'ң а': 225,
u'өте': 226,
u' ат': 227,
u' ел': 228,
u' жү': 229,
u' ма': 230,
u' то': 231,
u' шы': 232,
u'а а': 233,
u'алт': 234,
u'ама': 235,
u'арл': 236,
u'аст': 237,
u'бұл': 238,
u'дай': 239,
u'дық': 240,
u'ек ': 241,
u'ель': 242,
u'есі': 243,
u'зді': 244,
u'көт': 245,
u'лем': 246,
u'ль ': 247,
u'н е': 248,
u'п а': 249,
u'р а': 250,
u'рес': 251,
u'са ': 252,
u'та ': 253,
u'тте': 254,
u'тұр': 255,
u'шы ': 256,
u'ы д': 257,
u'ы қ': 258,
u'ыз ': 259,
u'қыт': 260,
u' ко': 261,
u' не': 262,
u' ой': 263,
u' ор': 264,
u' сұ': 265,
u' тү': 266,
u'аль': 267,
u'аре': 268,
u'атт': 269,
u'дір': 270,
u'ев ': 271,
u'егі': 272,
u'еда': 273,
u'екі': 274,
u'елд': 275,
u'ерг': 276,
u'ерд': 277,
u'ияд': 278,
u'кер': 279,
u'кет': 280,
u'лыс': 281,
u'ліс': 282,
u'мед': 283,
u'мпи': 284,
u'н д': 285,
u'ні ': 286,
u'нін': 287,
u'п т': 288,
u'пек': 289,
u'рел': 290,
u'рта': 291,
u'ріл': 292,
u'рін': 293,
u'сен': 294,
u'тал': 295,
u'шіл': 296,
u'ы к': 297,
u'ы м': 298,
u'ыст': 299,
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
19849,
796,
1391,
198,
334,
6,
16142,
22177,
705,
25,
657,
11,
198,
334,
6,
16843,
22177,
705,
25,
352,
11,
198,
334,
6,
45035,
142,
96,
705,
25,
362,
11,
198,
334... | 1.43563 | 2,874 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
import unittest
from cortex_serving_client.printable_chars import remove_non_printable
| [
11748,
555,
715,
395,
198,
198,
6738,
20223,
62,
31293,
62,
16366,
13,
4798,
540,
62,
354,
945,
1330,
4781,
62,
13159,
62,
4798,
540,
628
] | 3.423077 | 26 |
import termios
import socket
import select
import enum
import sys
import tty
from paramiko.py3compat import u
| [
11748,
3381,
4267,
198,
11748,
17802,
198,
11748,
2922,
198,
11748,
33829,
198,
11748,
25064,
198,
11748,
256,
774,
198,
198,
6738,
5772,
12125,
13,
9078,
18,
5589,
265,
1330,
334,
198
] | 3.46875 | 32 |
from ..apibase import SlobsService
from .selectionbase import SelectionBase
from .selection import Selection # For the side-effects
| [
6738,
11485,
499,
571,
589,
1330,
3454,
8158,
16177,
198,
6738,
764,
49283,
8692,
1330,
29538,
14881,
198,
6738,
764,
49283,
1330,
29538,
220,
1303,
1114,
262,
1735,
12,
34435,
628
] | 4.322581 | 31 |
#!/usr/bin/env python
"""Estimate parameters of models that predict SchTime using available info: SchDep, SchArr, Carrier,
Date, Distance, Origin and Dest coordinates."""
import numpy as np
import pandas as pd
from sklearn import linear_model
from analysis.filter import get_jetstream
def regression(year, month, df):
"""OLS regression modeling SchTime with Distance and Jetstream info."""
#Obtain relevant dataframe and calculate Jetstream.
if(len(df) == 0):
df = get_jetstream(year, month)
#Run OLS regression.
x = df[['Distance', 'Jetstream']]
y = df['SchTime']
ols = linear_model.LinearRegression()
ols.fit(x,y)
params = list(ols.coef_)
params.append(ols.intercept_)
score = ols.score(x,y)
#Calculate standard error.
mean_sqerr = np.mean((y - ols.predict(x).T)**2) #Mean Square Error.
x['ones'] = 1
stderror = np.sqrt(mean_sqerr * np.diag(np.linalg.pinv(np.dot(x.T, x))))
return (params, score, stderror)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
22362,
1920,
10007,
286,
4981,
326,
4331,
3059,
7575,
1262,
1695,
7508,
25,
3059,
12156,
11,
3059,
3163,
81,
11,
30252,
11,
220,
198,
220,
220,
220,
7536,
11,
34600,
11,
19349,
... | 2.613158 | 380 |
import re
text=input()
lookForVars=re.compile(r"\b([_])([A-Za-z]+)\b")
varNames=lookForVars.findall(text)
output=""
for k in range(0,len(varNames)):
output+=varNames[k][1]
if not k==len(varNames)-1:
output+=","
print(output)
| [
11748,
302,
198,
5239,
28,
15414,
3419,
198,
5460,
1890,
53,
945,
28,
260,
13,
5589,
576,
7,
81,
1,
59,
65,
26933,
62,
60,
5769,
58,
32,
12,
57,
64,
12,
89,
48688,
19415,
65,
4943,
198,
7785,
36690,
28,
5460,
1890,
53,
945,
13... | 2.132743 | 113 |
##########################################
# Author: Feroz Farazi (msff2@cam.ac.uk) #
# Date: 07 Dec 2020 #
##########################################
import configparser
import pkg_resources
import os
ENTRDF_PROP_FILE = pkg_resources.resource_filename(__name__,os.path.join('conf','EntityRDFizer.properties'))
config = configparser.RawConfigParser()
config.read(ENTRDF_PROP_FILE)
"""Created variables to be used globally to maintain the values
read from a property file and to update the variables if users set
new values via setter functions"""
tboxIRI=''
aboxIRI=''
aboxFileName=''
aboxFileExtension=''
instanceLabelCreationOption=''
if __name__ == '__main__':
"""Shows the default values available in the property file"""
print(readInstanceLabelCreationOption())
print(readABoxFileExtension())
"""Sets new values to update the ones read from the property file"""
setTBoxIRI("http://a/test/tbox/iri")
setABoxIRI("http://a/test/abox/iri")
setABoxFileName("a-test-a-box-file-name")
setInstanceLabelCreationOption("no")
setABoxFileExtension("a-test-a-box-file-extension")
"""Shows the new values set via setter functions above"""
print(getTBoxIRI())
print(getABoxIRI())
print(getABoxFileName())
print(getInstanceLabelCreationOption())
print(getABoxFileExtension())
| [
29113,
7804,
2235,
198,
2,
6434,
25,
376,
3529,
89,
6755,
7761,
357,
907,
487,
17,
31,
20991,
13,
330,
13,
2724,
8,
1303,
198,
2,
7536,
25,
8753,
4280,
12131,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.930586 | 461 |
BABELRC_CONTENT = u'''{
"presets": [
"@babel/preset-env", "@babel/preset-react"
]
}'''
WEBPACK_CONFIG_JS_CONTENT = u'''module.exports = {
watch: true,
module: {
rules: [
{
test: /\.js$/,
exclude: /node_modules/,
use: {
loader: "babel-loader"
}
}
]
}
}'''
INDEX_HTML_DJANGO_CONTENT = u'''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Pycra Application</title>
</head>
<body>
<div id="app">
<!-- React will load here -->
</div>
</body>
<!-- DJANGO_PLACEHOLDER -->
{% load static %}
<script src="{% static '{self.app_name}/js/main.js' %}"></script>
</html>'''
INDEX_HTML_FLASK_CONTENT = u'''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Pycra Application</title>
</head>
<body>
<div id="app">
<!-- React will load here -->
</div>
</body>
<script src="{{ url_for('static', filename='js/main.js') }}"></script>
</html>'''
APP_JS_CONTENT = '''import React, { Component } from "react";
import { render } from "react-dom";
class App extends Component {
constructor(props) {
super(props);
this.state = {};
}
render() {
return (
<div>
<h1 style={{textAlign: 'center'}}>Hello World</h1>
</div>
);
}
}
export default App;
const container = document.getElementById("app");
render(<App />, container);'''
INDEX_JS_CONTENT = u'''import App from "./components/App";'''
| [
4339,
33,
3698,
7397,
62,
37815,
3525,
796,
334,
7061,
6,
90,
198,
220,
220,
220,
366,
18302,
1039,
1298,
685,
198,
220,
220,
220,
220,
220,
220,
220,
44212,
65,
9608,
14,
18302,
316,
12,
24330,
1600,
44212,
65,
9608,
14,
18302,
3... | 2.083434 | 827 |
##############
# Setup Django
import django
django.setup()
#############
# Test proper
import threading
import time
import pytest
from django.db import DatabaseError, connection, transaction
from django.db.models import F, Subquery
from app.models import Sock
@pytest.mark.django_db
| [
7804,
4242,
2235,
198,
2,
31122,
37770,
198,
198,
11748,
42625,
14208,
198,
28241,
14208,
13,
40406,
3419,
628,
198,
7804,
4242,
2,
198,
2,
6208,
1774,
198,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
12972,
9288,
198,
6738,
4... | 3.360465 | 86 |
import requests
#from urlparse import urljoin
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urljoin
# curl -X POST "http://192.168.0.3:5000/api/v1/image"
# -H "accept: application/json"
# -H "Authorization: DEADBEEF"
# -H "content-type: multipart/form-data"
# -F "file=@linux;type="
# -F "q={ "description": "Example image",
# "type": "Kernel",
# "arch": "arm64",
# "public": false,
# "known_good": true } "
headers = {'Authorization': '/T9kmICCxhhk0Ec6kCqudgXwwWNTzNrrqmuCTCAwA2U='}
url = urljoin("http://192.168.0.3:5000", "/api/v1/image")
files = {'file': open('../../../builds/debian-staging/476/linux', 'rb')}
data = {'q': '{"name": "drue test 476", "description": "", "type": "Kernel", "arch": "arm64"}'
}
r = requests.post(url, files=files, data=data, headers=headers)
if r.status_code != 200:
print('Error posting {}, HTTP {}, {}'.format(url,
r.status_code, r.reason))
print(r.status_code)
print(r.json())
import pdb; pdb.set_trace()
| [
11748,
7007,
198,
2,
6738,
19016,
29572,
1330,
19016,
22179,
198,
6738,
2003,
13,
20307,
62,
32016,
1330,
2721,
62,
7344,
1386,
198,
17350,
62,
7344,
1386,
3419,
198,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
... | 2.34292 | 452 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os, sys, requests
from bs4 import BeautifulSoup
from biplist import *
#python在安装时,默认的编码是ascii,当程序中出现非ascii编码时,python的处理常常会报类似这样的错误。
#python没办法处理非ascii编码的,此时需要自己设置将python的默认编码,一般设置为utf8的编码格式。
reload(sys)
sys.setdefaultencoding('utf8')
#创建文件夹路劲
#保存Html文件
#解析用户列表
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
11,
25064,
11,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
220,
198,
6738,
3182,
489,
396,
1330,
1... | 1.054487 | 312 |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="bearx",
version="0.1",
scripts=['bearx'],
author="Bartek Szmelczynski",
author_email="bartek.szmelczynski@gmail.com",
description=("deep learning library created in order to get in ",
"depth knownledge about neural nets"),
long_description=long_description,
url="https://github.com/bartekkz/BearX",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
4943,
355,
277,
71,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
71,
13,
961,
3419,
628,
198,
40406,
... | 2.596154 | 260 |
"""
sensitivity.py -- Driver to calculate the gradient of a workflow, and return
it as a driver output.
SensitivityDriver includes a differentiator slot where the differentiation
method can be plugged. Fake finite difference is supported.
"""
# pylint: disable-msg=C0103
#public symbols
__all__ = ['SensitivityDriver']
import logging
from openmdao.main.numpy_fallback import zeros
from openmdao.main.datatypes.api import Array, List
from openmdao.main.driver_uses_derivatives import DriverUsesDerivatives
from openmdao.main.hasconstraints import HasConstraints
from openmdao.main.hasparameters import HasParameters
from openmdao.main.hasobjective import HasObjectives
from openmdao.util.decorators import add_delegate
from openmdao.main.interfaces import IHasParameters, IHasObjectives, IHasConstraints, implements
@add_delegate(HasParameters, HasObjectives, HasConstraints)
class SensitivityDriver(DriverUsesDerivatives):
"""Driver to calculate the gradient of a workflow, and return
it as a driver output. The gradient is calculated from all
inputs (Parameters) to all outputs (Objectives and Constraints).
SensitivityDriver includes a differentiator slot where the differentiation
method can be plugged. Fake finite difference is supported.
"""
implements(IHasParameters, IHasObjectives, IHasConstraints)
dF = Array(zeros((0, 0),'d'), iotype='out', desc='Sensitivity of the '
'objectives withrespect to the parameters. Index 1 is the '
'objective output, while index 2 is the parameter input')
dG = Array(zeros((0, 0),'d'), iotype='out', desc='Sensitivity of the '
'constraints withrespect to the parameters. Index 1 is the '
'constraint output, while index 2 is the parameter input')
F = Array(zeros((0, 0),'d'), iotype='out', desc='Values of the objectives '
'which sensitivities are taken around.')
G = Array(zeros((0, 0),'d'), iotype='out', desc='Values of the constraints '
'which sensitivities are taken around.')
dF_names = List([], iotype='out', desc='Objective names that'
'correspond to our array indices')
dG_names = List([], iotype='out', desc='Constraint names that'
'correspond to our array indices')
dx_names = List([], iotype='out', desc='Parameter names that'
'correspond to our array indices')
F = Array(zeros(0,'d'), iotype='out', desc='Objective baseline values '
'where sensitivity is evaluated.')
G = Array(zeros(0,'d'), iotype='out', desc='Constraint baseline values '
'where sensitivity is evaluated.')
x = Array(zeros(0,'d'), iotype='out', desc='Parameter baseline values '
'where sensitivity is evaluated.')
def execute(self):
"""Calculate the gradient of the workflow."""
self._check()
# Calculate gradient of the workflow
self.calc_derivatives(first=True)
self.ffd_order = 1
self.differentiator.calc_gradient()
self.ffd_order = 0
inputs = self.get_parameters().keys()
objs = self.get_objectives().keys()
constraints = list(self.get_eq_constraints().keys() + \
self.get_ineq_constraints().keys())
self.dF = zeros((len(objs), len(inputs)), 'd')
self.dG = zeros((len(constraints), len(inputs)), 'd')
self.F = zeros(len(objs), 'd')
self.G = zeros(len(constraints), 'd')
self.x = zeros(len(inputs), 'd')
self.dF_names = []
self.dG_names = []
self.dx_names = []
for i, input_name in enumerate(inputs):
self.dx_names.append(input_name)
self.x[i] = self.differentiator.base_param[input_name]
for j, output_name in enumerate(objs):
self.dF[j][i] = self.differentiator.get_derivative(output_name,
wrt=input_name)
self.dF_names.append(output_name)
self.F[j] = self.differentiator.base_data[output_name]
for j, output_name in enumerate(constraints):
self.dG[j][i] = self.differentiator.get_derivative(output_name,
wrt=input_name)
self.dG_names.append(output_name)
self.G[j] = self.differentiator.base_data[output_name]
# Sensitivity is sometimes run sequentially using different submodels,
# so we need to return the state to the baseline value.
self.differentiator.reset_state()
def _check(self):
"""Make sure we aren't missing inputs or outputs"""
if len(self.get_parameters().values()) < 1:
msg = "Missing inputs for gradient calculation"
self.raise_exception(msg, ValueError)
if len(self.get_objectives().values()) + \
len(self.get_eq_constraints().values()) + \
len(self.get_ineq_constraints().values()) < 1:
msg = "Missing outputs for gradient calculation"
self.raise_exception(msg, ValueError)
| [
37811,
198,
220,
220,
220,
14233,
13,
9078,
1377,
12434,
284,
15284,
262,
31312,
286,
257,
30798,
11,
290,
1441,
198,
220,
220,
220,
340,
355,
257,
4639,
5072,
13,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
14173,
11365,
3210... | 2.290131 | 2,361 |
import checkdata
from archive.utils.mock_di_api import mock_api
from archive.utils.operator_test import operator_test
api = mock_api(__file__) # class instance of mock_api
mock_api.print_send_msg = True # set class variable for printing api.send
optest = operator_test(__file__)
# config parameter
msg = optest.get_msgtable('bytetest.csv')
msg.body = [ [b[0],b[1],b[2].encode('cp1250'),b[3],b[4]] for b in msg.body ]
msg.attributes['table']['columns'][2]['type']['hana'] = 'VARBINARY'
checkdata.on_input(msg) | [
11748,
2198,
7890,
198,
6738,
15424,
13,
26791,
13,
76,
735,
62,
10989,
62,
15042,
1330,
15290,
62,
15042,
198,
6738,
15424,
13,
26791,
13,
46616,
62,
9288,
1330,
10088,
62,
9288,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
15... | 2.565854 | 205 |
'''
Descrição: Interface para visualizar as especificações de hardware do computador
Desenvolvedor: William Hoeflich
Versão: 0.1
'''
# Importando as bibliotecas utilizadas no projeto
from ui import Interface
if __name__ == "__main__":
main() | [
7061,
6,
201,
198,
24564,
380,
16175,
28749,
25,
26491,
31215,
5874,
528,
283,
355,
1658,
431,
7790,
64,
16175,
127,
113,
274,
390,
6890,
466,
2653,
7079,
201,
198,
5960,
24330,
5634,
273,
25,
3977,
9544,
891,
33467,
201,
198,
34947,
... | 2.59596 | 99 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import pgl
import paddle.nn as nn
from .base_model import ScalableGNN
| [
2,
15069,
357,
66,
8,
33448,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845... | 3.727273 | 187 |
#!/usr/bin/python
"""Inspect path to locate possible completions for cmd.
Walk through the entries in the current PATH env var and look for
things that might match a cmd. Argument is a text string S; we dump
out any program on the path that contains S.
If SHELL is set to bash, then we also test to see if any
bash functions currently defined match S.
"""
import locale
import os
import re
import subprocess
import sys
import script_utils as u
flag_text = ""
def parse_args(argv):
"""Parse command line arguments for the script."""
global flag_text
inprog = 0
pa = []
arg = argv.pop(0)
while argv:
arg = argv.pop(0)
u.verbose(3, "parse_args: arg is " + arg)
if inprog == 1:
pa.append(arg)
elif arg == "-d":
u.increment_verbosity()
u.verbose(1, "debug level now %d" % u.verbosity_level())
else:
pa.append(arg)
nargs = len(pa)
if nargs != 1:
u.error("supply single text string to match")
flag_text = pa.pop()
u.verbose(1, "+ search text: " + flag_text)
def inspect_path():
"""Inspect path components."""
if "PATH" not in os.environ:
u.error("no definition for PATH in environment (?)")
path = os.environ["PATH"]
u.verbose(1, "PATH set to: %s" % path)
path_directories = path.split(":")
matcher = re.compile(r"^.*%s.*$" % flag_text)
for d in path_directories:
u.verbose(2, "+ considering dir %s" % d)
if os.path.isdir(d):
for filename in os.listdir(d):
m = matcher.match(filename)
if m is not None:
print "%s/%s" % (d, filename)
def shell_is_bash():
"""Return TRUE if the shell being used is bash."""
if "SHELL" not in os.environ:
u.warning("no definition for SHELL in environment (?)")
return False
shell = os.environ["SHELL"]
u.verbose(1, "SHELL set to: %s" % shell)
matcher = re.compile(r"^.*/bash$")
m = matcher.match(shell)
if m is not None:
return True
return False
def inspect_bash_functions():
"""Examine declared bash functions to see if any of them match."""
cmd = "echo typeset -F | bash -i 2>&1"
mypipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
pout, perr = mypipe.communicate()
if mypipe.returncode != 0:
u.error("command failed (rc=%d): cmd was %s: "
"err=%s" % (mypipe.returncode, cmd, perr))
encoding = locale.getdefaultlocale()[1]
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
matcher = re.compile(r"^declare\s+\S+\s+(%s)\s*$" % flag_text)
for line in lines:
u.verbose(3, "+ considering bash declaration %s" % line)
m = matcher.match(line)
if m is not None:
frag = m.group(1)
print "bash function %s" % frag
#----------------------------------------------------------------------
#
# Main portion of script
#
parse_args(sys.argv)
u.setdeflanglocale()
inspect_path()
if shell_is_bash():
inspect_bash_functions()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
818,
4443,
3108,
284,
17276,
1744,
1224,
45240,
329,
23991,
13,
198,
198,
35963,
832,
262,
12784,
287,
262,
1459,
46490,
17365,
1401,
290,
804,
329,
198,
27971,
326,
1244,
2872,
257,
2... | 2.612816 | 1,108 |
# Copyright (c) 2012 iQIYI Inc.
# Copyright (c) 2013 Tencent Inc.
# All rights reserved.
#
# Author: Jingxu Chen <chenjingxu@qiyi.com>
# Feng Chen <chen3feng@gmail.com>
# Date: October 13, 2012
"""
A helper class to get the files generated from thrift IDL files.
"""
import os
import blade
import configparse
import console
import build_rules
import java_jar_target
import py_targets
from blade_util import var_to_list
from cc_targets import CcTarget
from thrift_helper import ThriftHelper
class ThriftLibrary(CcTarget):
"""A scons thrift library target subclass.
This class is derived from CcTarget.
"""
def __init__(self,
name,
srcs,
deps,
optimize,
deprecated,
blade,
kwargs):
"""Init method.
Init the thrift target.
"""
srcs = var_to_list(srcs)
self._check_thrift_srcs_name(srcs)
CcTarget.__init__(self,
name,
'thrift_library',
srcs,
deps,
'',
[], [], [], optimize, [], [],
blade,
kwargs)
self.data['python_vars'] = []
self.data['python_sources'] = []
thrift_config = configparse.blade_config.get_config('thrift_config')
thrift_lib = var_to_list(thrift_config['thrift_libs'])
thrift_bin = thrift_config['thrift']
if thrift_bin.startswith("//"):
dkey = self._convert_string_to_target_helper(thrift_bin)
if dkey not in self.expanded_deps:
self.expanded_deps.append(dkey)
if dkey not in self.deps:
self.deps.append(dkey)
# Hardcode deps rule to thrift libraries.
self._add_hardcode_library(thrift_lib)
# Link all the symbols by default
self.data['link_all_symbols'] = True
self.data['deprecated'] = deprecated
self.data['java_sources_explict_dependency'] = []
# For each thrift file initialize a ThriftHelper, which will be used
# to get the source files generated from thrift file.
self.thrift_helpers = {}
for src in srcs:
self.thrift_helpers[src] = ThriftHelper(
os.path.join(self.path, src))
def _check_thrift_srcs_name(self, srcs):
"""_check_thrift_srcs_name.
Checks whether the thrift file's name ends with 'thrift'.
"""
error = 0
for src in srcs:
base_name = os.path.basename(src)
pos = base_name.rfind('.')
if pos == -1:
console.error('invalid thrift file name %s' % src)
error += 1
file_suffix = base_name[pos + 1:]
if file_suffix != 'thrift':
console.error('invalid thrift file name %s' % src)
error += 1
if error > 0:
console.error_exit('invalid thrift file names found.')
def _generate_header_files(self):
"""Whether this target generates header files during building."""
return True
def _thrift_gen_cpp_files(self, path, src):
"""_thrift_gen_cpp_files.
Get the c++ files generated from thrift file.
"""
return [self._target_file_path(path, f)
for f in self.thrift_helpers[src].get_generated_cpp_files()]
def _thrift_gen_py_files(self, path, src):
"""_thrift_gen_py_files.
Get the python files generated from thrift file.
"""
return [self._target_file_path(path, f)
for f in self.thrift_helpers[src].get_generated_py_files()]
def _thrift_gen_java_files(self, path, src):
"""_thrift_gen_java_files.
Get the java files generated from thrift file.
"""
return [self._target_file_path(path, f)
for f in self.thrift_helpers[src].get_generated_java_files()]
def _thrift_java_rules(self):
"""_thrift_java_rules.
Generate scons rules for the java files from thrift file.
"""
for src in self.srcs:
src_path = os.path.join(self.path, src)
thrift_java_src_files = self._thrift_gen_java_files(self.path,
src)
self._write_rule('%s.ThriftJava(%s, "%s")' % (
self._env_name(),
str(thrift_java_src_files),
src_path))
self.data['java_sources'] = (
os.path.dirname(thrift_java_src_files[0]),
os.path.join(self.build_path, self.path),
self.name)
self.data['java_sources_explict_dependency'] += thrift_java_src_files
def _thrift_python_rules(self):
"""_thrift_python_rules.
Generate python files.
"""
for src in self.srcs:
src_path = os.path.join(self.path, src)
thrift_py_src_files = self._thrift_gen_py_files(self.path, src)
py_cmd_var = '%s_python' % self._generate_variable_name(
self.path, self.name)
self._write_rule('%s = %s.ThriftPython(%s, "%s")' % (
py_cmd_var,
self._env_name(),
str(thrift_py_src_files),
src_path))
self.data['python_vars'].append(py_cmd_var)
self.data['python_sources'] += thrift_py_src_files
def scons_rules(self):
"""scons_rules.
It outputs the scons rules according to user options.
"""
self._prepare_to_generate_rule()
# Build java source according to its option
env_name = self._env_name()
self.options = self.blade.get_options()
self.direct_targets = self.blade.get_direct_targets()
if (getattr(self.options, 'generate_java', False) or
self.data.get('generate_java') or
self.key in self.direct_targets):
self._thrift_java_rules()
if (getattr(self.options, 'generate_python', False) or
self.data.get('generate_python') or
self.key in self.direct_targets):
self._thrift_python_rules()
self._setup_cc_flags()
sources = []
obj_names = []
for src in self.srcs:
thrift_cpp_files = self._thrift_gen_cpp_files(self.path, src)
thrift_cpp_src_files = [f for f in thrift_cpp_files if f.endswith('.cpp')]
self._write_rule('%s.Thrift(%s, "%s")' % (
env_name,
str(thrift_cpp_files),
os.path.join(self.path, src)))
for thrift_cpp_src in thrift_cpp_src_files:
obj_name = '%s_object' % self._generate_variable_name(
self.path, thrift_cpp_src)
obj_names.append(obj_name)
self._write_rule(
'%s = %s.SharedObject(target="%s" + top_env["OBJSUFFIX"], '
'source="%s")' % (obj_name,
env_name,
thrift_cpp_src,
thrift_cpp_src))
sources.append(thrift_cpp_src)
self._write_rule('%s = [%s]' % (self._objs_name(), ','.join(obj_names)))
self._write_rule('%s.Depends(%s, %s)' % (
env_name, self._objs_name(), sources))
self._cc_library()
options = self.blade.get_options()
if (getattr(options, 'generate_dynamic', False) or
self.data.get('build_dynamic')):
self._dynamic_cc_library()
def thrift_library(name,
srcs=[],
deps=[],
optimize=[],
deprecated=False,
**kwargs):
"""thrift_library target. """
thrift_library_target = ThriftLibrary(name,
srcs,
deps,
optimize,
deprecated,
blade.blade,
kwargs)
blade.blade.register_target(thrift_library_target)
build_rules.register_function(thrift_library)
| [
2,
15069,
357,
66,
8,
2321,
1312,
48,
40,
56,
40,
3457,
13,
198,
2,
15069,
357,
66,
8,
2211,
9368,
1087,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
6434,
25,
42279,
87,
84,
12555,
1279,
6607,
49940,
87,
84,
31,
... | 1.873543 | 4,547 |
import sys
import os
from glob import glob
from PIL import Image
from tqdm import trange
from util import util
import shutil
image_path = sys.argv[1]
path_trainA = os.path.join(image_path,'trainA')
path_trainB = os.path.join(image_path,'trainB')
path_testA = os.path.join(image_path,'testA')
path_testB = os.path.join(image_path,'testB')
path = [path_trainA, path_trainB, path_testA, path_testB]
filenamesTrain = glob("{}/train/*.jpg".format(image_path))
filenamesTest = glob("{}/val/*.jpg".format(image_path))
filenamesTrain.sort()
filenamesTest.sort()
image = Image.open(filenamesTrain[0])
w,h = image.size
region_A = (0,0,w//2,h)
region_B = (w//2, 0, w, h)
files = [filenamesTrain, filenamesTest]
for i in trange(2):
util.mkdirs(path[2*i])
util.mkdirs(path[2*i+1])
for j in trange(len(files[i])):
img = Image.open(files[i][j])
A = img.crop(region_A)
A.save(os.path.join(path[2*i],'{}.jpg'.format(j)))
B = img.crop(region_B)
B.save(os.path.join(path[2*i+1],'{}.jpg'.format(j))) | [
11748,
25064,
198,
11748,
28686,
198,
6738,
15095,
1330,
15095,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
256,
80,
36020,
1330,
491,
858,
198,
6738,
7736,
1330,
7736,
198,
11748,
4423,
346,
198,
198,
9060,
62,
6978,
796,
25064,
13,
... | 2.253275 | 458 |
#!/usr/bin/env python
#
# Copyright @2014 blackshirtmuslim@yahoo.com
# Licensed: see Python license
"""Module to handle json services."""
import datetime
import json
import peewee
import tornado.web
import tornado.escape
from dompetku.handler import base
from dompetku.utils import jsonify
from dompetku.model import Transaksi, User
from dompetku.form import TransaksiForm
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
15069,
2488,
4967,
2042,
15600,
14664,
2475,
31,
40774,
13,
785,
198,
2,
49962,
25,
766,
11361,
5964,
198,
198,
37811,
26796,
284,
5412,
33918,
2594,
526,
15931,
198,
198,
... | 3.445455 | 110 |
"""
Test all test files.
"""
# import glob
# import unittest
#
# test_files = glob.glob('test_*.py')
# module_strings = [test_file[0:len(test_file)-3] for test_file in test_files]
# suites = [unittest.defaultTestLoader.loadTestsFromName(test_file) for test_file in module_strings]
# testSuite = unittest.TestSuite(suites)
# text_runner = unittest.TextTestRunner().run(testSuite)
| [
37811,
198,
14402,
477,
1332,
3696,
13,
198,
37811,
198,
198,
2,
1330,
15095,
198,
2,
1330,
555,
715,
395,
198,
2,
198,
2,
1332,
62,
16624,
796,
15095,
13,
4743,
672,
10786,
9288,
62,
24620,
9078,
11537,
198,
2,
8265,
62,
37336,
7... | 2.753623 | 138 |
YStepsPerBlock = 225
XStepsPerBlock = 227
Delay = 0.0008
| [
56,
8600,
82,
5990,
12235,
796,
18500,
198,
55,
8600,
82,
5990,
12235,
796,
30989,
198,
13856,
323,
796,
657,
13,
830,
23,
198,
220,
220,
220,
220
] | 2.178571 | 28 |
"""
@brief test tree node (time=5s)
"""
import sys
import os
import unittest
import warnings
import time
try:
from pstats import SortKey
except ImportError:
# python < 3.7
from pyquickhelper.pycode.profiling import SortKey
import pandas
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pandashelper import df2rst
from pyquickhelper import __file__ as rootfile
from pyquickhelper.pycode.profiling import (
profile, profile2df, profile2graph, ProfileNode)
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
31,
65,
3796,
220,
220,
220,
220,
220,
1332,
5509,
10139,
357,
2435,
28,
20,
82,
8,
198,
37811,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
11748,
14601,
198,
11748,
640,
198,
28311,
25,
198,
22... | 2.903226 | 186 |
upper_leg = RigidBody('Upper Leg', upper_leg_mass_center, upper_leg_frame,
upper_leg_mass, upper_leg_central_inertia)
torso = RigidBody('Torso', torso_mass_center, torso_frame,
torso_mass, torso_central_inertia) | [
45828,
62,
1455,
796,
24666,
312,
25842,
10786,
52,
2848,
3564,
3256,
6727,
62,
1455,
62,
22208,
62,
16159,
11,
6727,
62,
1455,
62,
14535,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.172414 | 116 |
from substance import (Engine, Command)
from substance.exceptions import (LinkCommandError)
# Disable parsing for this command
| [
6738,
9136,
1330,
357,
13798,
11,
9455,
8,
198,
6738,
9136,
13,
1069,
11755,
1330,
357,
11280,
21575,
12331,
8,
628,
198,
220,
220,
220,
1303,
31529,
32096,
329,
428,
3141,
198
] | 4.15625 | 32 |
from django.dispatch import receiver
from django.urls import resolve, reverse
from django.utils.translation import gettext_lazy as _
from django.template.loader import get_template
from pretix.base.middleware import _parse_csp, _merge_csp, _render_csp
from pretix.presale.signals import (
html_head,
process_response,
)
from pretix.base.signals import (
logentry_display,
register_payment_providers,
register_data_exporters,
)
from pretix.control.signals import (
event_dashboard_widgets,
nav_event_settings,
)
from .exporter import EthereumOrdersExporter
from . import models
NUM_WIDGET = '<div class="numwidget"><span class="num">{num}</span><span class="text">{text}</span></div>' # noqa: E501
@receiver(process_response, dispatch_uid="payment_eth_add_question_type_csp")
@receiver(html_head, dispatch_uid="payment_eth_add_question_type_javascript")
@receiver(event_dashboard_widgets)
@receiver(register_payment_providers, dispatch_uid="payment_eth")
@receiver(nav_event_settings, dispatch_uid='pretix_eth_nav_wallet_address_upload')
@receiver(signal=logentry_display)
@receiver(register_data_exporters, dispatch_uid='single_event_eth_orders')
| [
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
10568,
11,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
28243,... | 2.919315 | 409 |
from typing import List
| [
6738,
19720,
1330,
7343,
628,
198
] | 4.333333 | 6 |
#!/usr/bin/env python3
# Copyright (c) 2021, DeepX-inc
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# @author Krishneel Chaudhary
"""Python Xnode Decorators."""
from .xnode_builder import node_init
rosnode = node_init()
def register(globals_params: dict = None):
"""
Spin the rosnode on the executor.
Args:
----
globals_params (dict): Dictonary of module globals()
"""
if globals_params is not None:
assert isinstance(
globals_params, dict
), f'Expected {dict} but received {type(globals_params)}'
__keys__: list = ['__name__', 'rosnode']
for key in __keys__:
if key not in globals_params.keys():
raise KeyError(f'Key {key} is required')
name = globals_params['__name__']
if name not in ['__main__']:
print(
'\033[33m__main__ not found in the globals\033[0m'
)
return
if rosnode.node is None:
raise RuntimeError('Please initialize the node')
rosnode.spin()
def register_node(func_or_dict):
"""Registor Node in XNode."""
if isinstance(func_or_dict, dict):
globals_params = func_or_dict
return _register
else:
globals_params = None
return _register(func_or_dict)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
33448,
11,
10766,
55,
12,
1939,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
1043... | 2.788306 | 992 |
# elements_constraints_discovery.py
from __future__ import print_function
import pandas as pd
from tdda.constraints.pdconstraints import discover_constraints, verify_df
df = pd.read_csv('../testdata/elements92.csv')
constraints = discover_constraints(df)
with open('elements92.tdda', 'w') as f:
f.write(constraints.to_json())
print('Written elements92.tdda')
print(verify_df(df, 'elements92.tdda'))
| [
2,
4847,
62,
1102,
2536,
6003,
62,
67,
40821,
13,
9078,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
256,
1860,
64,
13,
1102,
2536,
6003,
13,
30094,
1102,
2536,
6003,... | 2.645161 | 155 |
"""
The class that handles model IO
"""
import numpy as np
import antimony as sb
class ModelIO:
"""
Class for loading and parsing models
Parameters
---------
model_contents : str
Either the model string or the file path
content_type : str, {"ModelString", "ModelFile"}
The type of the model
Attributes
----------
react_stoic: (ns, nr) ndarray
A 2D array of the stoichiometric coefficients of the reactants.
Reactions are columns and species are rows.
prod_stoic: (ns, nr) ndarray
A 2D array of the stoichiometric coefficients of the products.
Reactions are columns and species are rows.
init_state: (ns,) ndarray
A 1D array representing the initial state of the system.
k_det: (nr,) ndarray
A 1D array representing the deterministic rate constants of the
system.
volume: float, optional
The volume of the reactor vessel which is important for second
and higher order reactions. Defaults to 1 arbitrary units.
chem_flag: bool, optional
If True, divide by Na (Avogadro's constant) while calculating
stochastic rate constants. Defaults to ``False``.
"""
@staticmethod
def _create_stoic_mat(ns, nr, name_list, stoic_tuple, species_names):
""" Function to create the stoichiometric matrix """
stoic_mat = np.zeros([ns, nr], dtype=np.int)
for index, (names, stoics) in enumerate(zip(name_list, stoic_tuple)):
for a_name, a_stoic in zip(names, stoics):
species_index = species_names.index(a_name)
stoic_mat[species_index, index] += int(a_stoic)
return stoic_mat
def _parse_model(self):
""" Parse model contents """
react_stoic_tuple = sb.getReactantStoichiometries(self.sb_module)
react_names = sb.getReactantNames(self.sb_module)
prod_names = sb.getProductNames(self.sb_module)
prod_stoic_tuple = sb.getProductStoichiometries(self.sb_module)
# 0:all, 1:speciescounts, 2:rateconstants, 6:rxnrateequations, 9:compartmentvols
species_names = sb.getSymbolNamesOfType(self.sb_module, 1)
self.species_names = species_names
rxn_names = sb.getSymbolNamesOfType(self.sb_module, 6)
self.rxn_names = rxn_names
ns = len(species_names)
nr = sb.getNumReactions(self.sb_module)
# Stochastic matrices
self.react_stoic = self._create_stoic_mat(
ns, nr, react_names, react_stoic_tuple, species_names
)
self.prod_stoic = self._create_stoic_mat(
ns, nr, prod_names, prod_stoic_tuple, species_names
)
# Initial states
init_state_values = sb.getSymbolInitialAssignmentsOfType(self.sb_module, 1)
if "" in init_state_values:
raise InitialStateError("Missing initial value for one of the species.")
self.init_state = np.array(init_state_values, dtype=np.int64)
# Rate constants
rxn_rateeqns = sb.getSymbolEquationsOfType(self.sb_module, 6)
rxn_rate_names = list(sb.getSymbolNamesOfType(self.sb_module, 2))
rxn_rate_values = sb.getSymbolInitialAssignmentsOfType(self.sb_module, 2)
rxn_rate_dict = dict(zip(rxn_rate_names, rxn_rate_values))
# Chem flag
try:
self.chem_flag = True if rxn_rate_dict["chem_flag"] == "true" else False
except KeyError:
raise ChemFlagError("The chem flag was not specified in the model.")
# Check rate constant specifications
del rxn_rate_dict["chem_flag"]
rxn_rate_names.remove("chem_flag")
if "" in rxn_rateeqns:
raise RateConstantError("Missing rate constant for one of the reactions.")
for rxn_rateeqn in rxn_rateeqns:
if rxn_rateeqn not in rxn_rate_names:
raise RateConstantError(
f"{rxn_rateeqn} doesn't match any rate constant."
)
# kdet
try:
self.k_det = np.array(
[rxn_rate_dict[rre] for rre in rxn_rateeqns], dtype=float
)
except KeyError:
raise RateConstantError(
"You are missing a numerical value for one of the rate constants."
)
# Volume
try:
self.volume = int(
sb.getSymbolInitialAssignmentsOfType(self.sb_module, 9)[0]
)
except IndexError:
raise VolumeError("Missing compartment information")
@property
def args(self):
""" Returns the attributes of the ModelIO class """
return (
self.species_names,
self.rxn_names,
self.react_stoic,
self.prod_stoic,
self.init_state,
self.k_det,
self.chem_flag,
self.volume,
)
@classmethod
def translate_sbml(cls, sbml_file: str):
"""
Translate SBML file to Antimony model specification.
cayenne's model specification is loosely based on Antimony's model
specification.
"""
er_code = sb.loadSBMLFile(sbml_file)
if er_code == -1:
raise ModelError("Error while parsing model")
sb_module = sb.getMainModuleName()
sb_string = sb.getAntimonyString(sb_module)
return sb_string
| [
37811,
198,
220,
220,
220,
383,
1398,
326,
17105,
2746,
24418,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
1885,
33969,
355,
264,
65,
628,
628,
628,
198,
198,
4871,
9104,
9399,
25,
198,
220,
220,
220,
37227,
... | 2.144742 | 2,577 |
import torch
import torch.nn as nn
class EMA:
"""
Modified version of class fairseq.models.ema.EMAModule.
Args:
model (nn.Module):
cfg (DictConfig):
device (str):
skip_keys (list): The keys to skip assigning averaged weights to.
"""
def step(self, new_model: nn.Module):
"""
One EMA step
Args:
new_model (nn.Module): Online model to fetch new weights from
"""
ema_state_dict = {}
ema_params = self.model.state_dict()
for key, param in new_model.state_dict().items():
ema_param = ema_params[key].float()
if key in self.skip_keys:
ema_param = param.to(dtype=ema_param.dtype).clone()
else:
ema_param.mul_(self.decay)
ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - self.decay)
ema_state_dict[key] = ema_param
self.model.load_state_dict(ema_state_dict, strict=False)
self.num_updates += 1
def restore(self, model: nn.Module):
"""
Reassign weights from another model
Args:
model (nn.Module): model to load weights from.
Returns:
model with new weights
"""
d = self.model.state_dict()
model.load_state_dict(d, strict=False)
return model
@staticmethod
def get_annealed_rate(start, end, curr_step, total_steps):
"""
Calculate EMA annealing rate
"""
r = end - start
pct_remaining = 1 - curr_step / total_steps
return end - r * pct_remaining
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198,
4871,
412,
5673,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
40499,
2196,
286,
1398,
3148,
41068,
13,
27530,
13,
19687,
13,
3620,
2390,
375,
2261,
13,
628,
2... | 2.071066 | 788 |
# -*- coding: utf-8 -*-
"""
package.module
~~~~~~~~~~~~~~
A brief description goes here.
:copyright: (c) 2016 by chenxiaofeng.
:license: LICENSE_NAME, see LICENSE_FILE for more details.
"""
from datetime import datetime
from .base import db
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
5301,
13,
21412,
198,
220,
220,
220,
220,
15116,
8728,
4907,
628,
220,
220,
220,
317,
4506,
6764,
2925,
994,
13,
628,
220,
220,
220,
1058,
... | 2.75 | 96 |
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# Imports
from BoundaryConditions.Simulation.SimulationData import getSimData
from Controller.Cell.CHP_SystemThermal import CtrlDefault
from GenericModel.Design import _check_pBTypes, generateGenericCell
from GenericModel.PARAMETER import PBTYPES_NOW as pBTypes
from SystemComponentsFast import simulate, CellChpSystemThermal
from PostProcesing import plots
from plotly.subplots import make_subplots
import plotly.graph_objs as go
import numpy as np
import logging
# %%
FORMAT = ("%(levelname)s %(name)s %(asctime)-15s "
"%(filename)s:%(lineno)d %(message)s")
logging.basicConfig(format=FORMAT)
logging.getLogger().setLevel(logging.WARNING)
# %%
# set parameters
# time
start = '01.01.2020'
end = '01.01.2021'
# seperate agents
nSepBSLagents = 100
pAgricultureBSLsep = 0.7
# pHH buildings
nBuildings = {'FSH': 505, 'REH': 1010, 'SAH': 680, 'BAH': 100}
pAgents = {'FSH': 0.9, 'REH': 0.9, 'SAH': 0.85, 'BAH': 0.75}
pPHHagents = {'FSH': 0.8, 'REH': 0.8, 'SAH': 0.6, 'BAH': 0.9}
pAgriculture = {'FSH': 0.2, 'REH': 0.2, 'SAH': 0.0, 'BAH': 0.0}
# district heating and PV
pDHN = {'FSH': 0.1, 'REH': 0.1, 'SAH': 0.1, 'BAH': 0.1}
pPVplants = 0.2
pHeatpumps = {'class_1': 0, 'class_2': 0,
'class_3': 0, 'class_4': 0.12,
'class_5': 0.27}
pCHP = 0.1
# buildings are imported
# environment
region = "East"
# set controller to use it or set variable to None
controller = CtrlDefault()
# %%
# prepare simulation
nSteps, time, SLP, HWP, Weather, Solar = getSimData(start, end, region)
# %%
# generate cell
cell = generateGenericCell(nBuildings, pAgents,
pPHHagents, pAgriculture,
pDHN, pPVplants, pHeatpumps, pCHP, pBTypes,
nSepBSLagents, pAgricultureBSLsep,
region, nSteps)
# get dhn demand
demand = cell.get_thermal_demand(True)
# generate chp system with storage
chpSystem = CellChpSystemThermal(demand, 0.35, 2*demand, 0.05,
0.98, 0.98, nSteps)
# configure controller
chpSystem.controller = None#controller
# add chp system to cell
cell.add_chp_thermal(chpSystem)
# %%
# run the simulation
simulate(cell, nSteps, SLP.to_dict('list'), HWP, Weather.to_dict('list'),
Solar.to_dict('list'))
# %%
plots.cellPowerBalance(cell, time)
# %%
plots.cellEnergyBalance(cell, time)
# %%
chpSystem = cell.get_thermal_chp_system()
chp_gen_e = np.array(chpSystem.chp.gen_e.get_memory())
CHPstate = chp_gen_e > 0.
fig = go.Figure()
fig.add_trace(go.Scatter(x=time, y=CHPstate,
line={'color': 'rgba(100, 149, 237, 0.5)',
'width': 1},
name="CHP state")
)
fig.update_layout(height=600, width=600,
title_text="CHP operation")
fig.update_xaxes(title_text="Time")
fig.update_yaxes(title_text="On/Off")
# %%
plots.chargeState(chpSystem.storage, time)
| [
2,
1675,
751,
257,
649,
2685,
11,
2099,
705,
2,
43313,
6,
198,
2,
1675,
751,
257,
649,
1317,
2902,
2685,
11,
2099,
705,
2,
43313,
685,
4102,
2902,
49946,
198,
2,
43313,
198,
2,
1846,
3742,
198,
6738,
30149,
560,
25559,
1756,
13,
... | 2.178803 | 1,387 |
# Hey Dear , the convention of plotting the graph is different from mathematical representation
# x-axis is same as x-axis and y-axis is opposite to y-axis
import cv2
img = cv2.imread("/Users/abhishekraj/Downloads/xa.jpeg")
# Image is just the array of pixel or matrix
print(img.shape)
# Resize the image
imgResize = cv2.resize(img, (300, 200))
# Cropping an image ,No requirement of cv2 here
# Note in cropping height comes first then width comes
imgCropped =img[0:200,200:500] # img[height,width]
cv2.imshow("Output", img)
cv2.imshow("Resize", imgResize)
cv2.imshow("Cropped", imgCropped)
cv2.waitKey(0)
| [
2,
14690,
23420,
837,
262,
9831,
286,
29353,
262,
4823,
318,
1180,
422,
18069,
10552,
198,
2,
2124,
12,
22704,
318,
976,
355,
2124,
12,
22704,
290,
331,
12,
22704,
318,
6697,
284,
331,
12,
22704,
198,
11748,
269,
85,
17,
198,
198,
... | 2.837963 | 216 |
from PIL import Image
def quarter(filename):
"""
:param filename: the filename
:return: redefined image
"""
image = Image.open(filename)
width, height = image.size
quarter_sized = image.resize((int(width/2), int(height/2)))
quarter_sized.save('quarter_sized_%s.png' % filename.strip('.png'))
svelte_image = image.resize((width, height+300))
svelte_image.save('svelte_%s.png' % filename.strip('.png'))
#quarter('zophie.png')
def rotate(filename):
"""
:param filename:
:return:
"""
image = Image.open(filename)
image.rotate(90).save('rotated90.png')
image.rotate(180).save('rotate180.png')
image.rotate(270).save('rotate270.png')
#rotate('zophie.png')
image = Image.open('zophie.png')
image.rotate(6, expand=True).save('rotate_expanded.png')
def mirror_flip(filename):
    """Write mirrored copies of an image.

    :param filename: path of the source image
    :return: None; saves 'horizontal_flip.png' and 'vertical_flip.png'
    """
    source = Image.open(filename)
    flips = (
        (Image.FLIP_LEFT_RIGHT, 'horizontal_flip.png'),
        (Image.FLIP_TOP_BOTTOM, 'vertical_flip.png'),
    )
    for direction, outname in flips:
        source.transpose(direction).save(outname)
mirror_flip('zophie.png')
| [
6738,
350,
4146,
1330,
7412,
628,
198,
4299,
3860,
7,
34345,
2599,
198,
220,
220,
220,
37227,
628,
220,
220,
220,
1058,
17143,
29472,
25,
262,
29472,
198,
220,
220,
220,
1058,
7783,
25,
2266,
18156,
2939,
198,
220,
220,
220,
37227,
... | 2.453333 | 450 |
import unittest
from conans.test.utils.tools import TestClient
from conans.paths import CONANINFO
from conans.util.files import load
import os
| [
11748,
555,
715,
395,
198,
6738,
369,
504,
13,
9288,
13,
26791,
13,
31391,
1330,
6208,
11792,
198,
6738,
369,
504,
13,
6978,
82,
1330,
7102,
1565,
10778,
198,
6738,
369,
504,
13,
22602,
13,
16624,
1330,
3440,
198,
11748,
28686,
628
] | 3.428571 | 42 |
"""
Module to run a simulation of photodetection signals and test various filters.
Classes
-------
Toy : the main class to run the simulations
Noise : abstract class to generate noise, see concrete subclasses
WhiteNoise : generate white noise
DataCycleNoise : generate noise copying it from a source array
Filter : class to apply filters
Functions
---------
downsample : downsample by averaging in groups
"""
import abc
import numpy as np
from matplotlib import pyplot as plt
import numba
import tqdm
import uproot
from scipy import signal
import integrate
from single_filter_analysis import single_filter_analysis
import readwav
import textbox
import npzload
import template as _template
import colormap
def downsample(a, n, axis=-1, dtype=None):
    """
    Downsample an array by averaging nearby elements.

    Parameters
    ----------
    a : array
        The array to downsample.
    n : int
        The number of averaged elements per group.
    axis : int
        The axis along which the averaging is computed. Default last.
    dtype : data-type, optional
        The data type of the output.

    Return
    ------
    da : array
        The downsampled array. The shape is the same as `a` apart from the
        specified axis, which has size a.shape[axis] // n.
    """
    if n == 1:
        # Nothing to average; just honor the requested dtype.
        return np.asarray(a, dtype=dtype)
    ndim = len(a.shape)
    axis %= ndim
    # Drop the tail elements that do not fill a complete group of n.
    usable = a.shape[axis] - a.shape[axis] % n
    selector = [slice(None)] * ndim
    selector[axis] = slice(0, usable)
    # Split the trimmed axis into (groups, n) and average over the n axis.
    grouped_shape = a.shape[:axis] + (usable // n, n) + a.shape[axis + 1:]
    grouped = np.reshape(a[tuple(selector)], grouped_shape)
    return np.mean(grouped, axis=axis + 1, dtype=dtype)
class Noise(abc.ABC):
    """
    Abstract base class for generating noise for simulations.

    Concrete subclasses
    -------------------
    WhiteNoise
    DataCycleNoise

    Methods
    -------
    generate : generate an array of noise
    """

    def __init__(self, timebase=8):
        """
        Parameters
        ----------
        timebase : int
            The duration of samples in nanoseconds. The default of 8
            corresponds to a 125 MSa/s sampling frequency for the waveforms
            returned by `generate`.
        """
        self.timebase = timebase

    @abc.abstractmethod
    def generate(self, nevents, event_length, generator=None):
        """
        Generate noise with unitary variance.

        Parameters
        ----------
        nevents : int
            Number of events, i.e. independent chunks of simulated data.
        event_length : int
            Number of samples in each event.
        generator : np.random.Generator, optional
            Random number generator to draw from.

        Return
        ------
        events : array (nevents, event_length)
            The simulated noise.
        """
        pass
class WhiteNoise(Noise):
"""
Class to generate white noise.
Methods
-------
generate
"""
@numba.jit(cache=True, nopython=True)
@numba.jit(cache=True, nopython=True)
_correlate = _correlate2
def run_sliced(fun, ntot, n=None):
    """
    Call a given function with consecutive slices until a range is covered,
    printing a progressbar.

    Parameters
    ----------
    fun : function
        A function with a single parameter which is a slice object.
    ntot : int
        The end of the range covered by the sequence of slices.
    n : int, optional
        The length of each slice (the last slice may be shorter). If None,
        the function is called once with the slice 0:ntot (and no
        progressbar is shown).
    """
    if n is None:
        fun(slice(0, ntot))
        return
    nchunks = ntot // n + bool(ntot % n)  # ceiling division
    for start in tqdm.tqdm(range(0, nchunks * n, n)):
        fun(slice(start, min(start + n, ntot)))
| [
37811,
198,
26796,
284,
1057,
257,
18640,
286,
2825,
375,
316,
3213,
10425,
290,
1332,
2972,
16628,
13,
198,
198,
9487,
274,
198,
26866,
198,
48236,
1058,
262,
1388,
1398,
284,
1057,
262,
27785,
198,
2949,
786,
1058,
12531,
1398,
284,
... | 2.48 | 1,625 |
# Demonstration of attribute lookup along a three-level inheritance chain
# (Parent -> Child -> SubChild; the classes are defined elsewhere, not shown
# here). The expected console output is listed at the bottom.
# Child class is able to access parent class methods and variables
# When one sub-child class is extending child which extends Parent class
P = Parent()
P.displayP()
#P.displayC() #output error as Parent doesn't inherit child methods
C = Child()
C.displayC() #Child class display
C.displayP() #Inherited from parent
SC = SubChild()
SC.displayP()
SC.displayC()
SC.displaySC()
#Output
#Parent constructor
#Parent Display
#Parent constructor
#Child constructor
#Child Display
#Parent Display
#Parent constructor
#Child constructor
#SubChild Constructor
#Parent Display
#Child Display
#SubChild Display
2,
5932,
1398,
318,
1498,
284,
1895,
2560,
1398,
5050,
290,
9633,
198,
2,
1649,
530,
850,
12,
9410,
1398,
318,
16610,
1200,
543,
14582,
16774,
1398,
198,
220,
220,
220,
220,
198,
198,
47,
796,
16774,
3419,
198,
47,
13,
13812,
47,
... | 3.561404 | 171 |
import torch
import torchvision
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch import optim
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
# Set device: use the GPU when CUDA is available, otherwise the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
sequence_length = 28  # each 28x28 MNIST image is fed as 28 time steps
input_size = 28       # features per time step (one image row)
hidden_size = 256     # hidden state width passed to the LSTM model
num_layers = 2        # stacked recurrent layers passed to the model
num_classes = 10      # digits 0-9
learning_rate = 0.005
batch_size = 64
num_epochs = 3
class LSTM(nn.Module):
    '''Many-to-one recurrent classifier: an LSTM whose final hidden state is
    mapped to class scores by the fully connected layer ``self.fc``.
    '''
    def forward(self, x):
        '''Run the batch through the LSTM and classify the last time step.

        x has shape (batch, seq_len, input_size); the LSTM output has shape
        (batch, seq_len, hidden_size).
        '''
        hidden_seq, _ = self.lstm(x)
        # Keep only the hidden state of the final time step.
        last_hidden = hidden_seq[:, -1, :]
        return self.fc(last_hidden)
def check_accuracy(loader, model):
    '''Compute the fraction of correct predictions of `model` over `loader`.

    Returns correct/seen as a tensor. The model is switched to eval mode
    during scoring and back to train mode afterwards.
    '''
    correct = 0
    seen = 0
    model.eval()
    with torch.no_grad():
        for inputs, labels in loader:
            # Drop the channel dimension: (B, 1, 28, 28) -> (B, 28, 28).
            inputs = inputs.to(device=device).squeeze(1)
            labels = labels.to(device=device)
            logits = model(inputs)
            preds = logits.argmax(dim=1)
            correct += (preds == labels).sum()
            seen += preds.size(0)
    model.train()
    return correct / seen
# Load Data: MNIST train/test splits as tensors (downloaded on first use).
train_dataset = datasets.MNIST(root="mnist/MNIST", train=True,
                               transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root="mnist/MNIST", train=False,
                              transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
# Initialize network (try out just using simple RNN, or GRU, and then compare with LSTM)
model = LSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# model = BLSTM(input_size, hidden_size, num_layers, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train Network
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
        # Shapes: data (64, 1, 28, 28), targets (64,).
        # Move to the chosen device and drop the channel dimension:
        # (64, 1, 28, 28) -> (64, 28, 28), i.e. (batch, seq_len, input_size).
        data = data.to(device=device).squeeze(1)
        targets = targets.to(device=device)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        # backward
        optimizer.zero_grad()
        loss.backward()
        # gradient descent update step/adam step
        optimizer.step()
# Bug fix: the training-set print used ':2f' (minimum field *width* 2, full
# six-decimal output) instead of ':.2f' (two decimal places) as on the test
# line below; both now print with two decimals.
print(f"Accuracy on training set: {check_accuracy(train_loader, model)*100:.2f}")
print(f"Accuracy on test set: {check_accuracy(test_loader, model)*100:.2f}")
| [
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
10178,
13,
19608,
292,
1039,
355,
40522,
198,
11748,
28034,
10178,
13,
7645,
23914,
355,
31408,
220,
198,
6738,
28034,
1330,
643... | 2.48583 | 1,235 |
#!/usr/bin/env python3
import itertools as it, operator as op, functools as ft
from collections import OrderedDict, defaultdict, deque, namedtuple
from contextlib import contextmanager
import os, sys, io, re, time, logging, configparser
import base64, hashlib, unicodedata, math
import signal, threading
from pulsectl import ( Pulse,
PulseEventTypeEnum as ev_t, PulseEventFacilityEnum as ev_fac, PulseEventMaskEnum as ev_m,
PulseLoopStop, PulseDisconnected, PulseIndexError )
get_logger = lambda name: LogStyleAdapter(logging.getLogger(name))
conf_read.path_default = '~/.pulseaudio-mixer-cli.cfg'
PAMixerUIFit = namedtuple('PAMixerUIFit', 'rows controls')
if __name__ == '__main__': sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
340,
861,
10141,
355,
340,
11,
10088,
355,
1034,
11,
1257,
310,
10141,
355,
10117,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
11,
4277,
11600,
11,
390,
4188,
11,
3... | 2.971193 | 243 |
# PRIME NUMBERS
# Read an integer and report whether or not it is prime.
num = int(input('Digite um numero inteiro: '))
tot = 0  # count of divisors found
for c in range(1, num + 1):
    if num % c == 0:
        print('\033[33m', end='')  # ANSI yellow: c divides num
        tot += 1
    else:
        print('\033[31m', end='')  # ANSI red: c is not a divisor
    print(f'{c} ', end='')
print(f'\n\033[mo numero {num} foi divisivel {tot} vezes')
# Exactly two divisors (1 and itself) means the number is prime.
if tot == 2:
    print('E por isso ele é primo')
else:
    print('E pos isso ele não é primo')
| [
2,
399,
127,
248,
29296,
2640,
4810,
3955,
2640,
198,
2,
18350,
50041,
23781,
1430,
64,
8358,
443,
544,
23781,
299,
21356,
647,
78,
493,
68,
7058,
304,
3100,
64,
384,
9766,
38251,
267,
84,
299,
28749,
23781,
299,
21356,
647,
78,
268... | 2.046025 | 239 |
import click
@click.group()
import profiles
import auth
import orgs
import projects
import resources
| [
11748,
3904,
628,
198,
31,
12976,
13,
8094,
3419,
628,
198,
11748,
16545,
198,
11748,
6284,
198,
11748,
8745,
82,
198,
11748,
4493,
198,
11748,
4133,
198
] | 3.888889 | 27 |
"""
x2sys_init - Initialize a new x2sys track database.
"""
from pygmt.clib import Session
from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias
@fmt_docstring
@use_alias(
    D="fmtfile",
    E="suffix",
    F="force",
    G="discontinuity",
    I="spacing",
    N="units",
    R="region",
    V="verbose",
    W="gap",
    j="distcalc",
)
@kwargs_to_strings(I="sequence", R="sequence")
def x2sys_init(tag, **kwargs):
    r"""
    Initialize a new x2sys track database.

    Serves as the starting point for x2sys and initializes a set of data bases
    that are particular to one kind of track data. These data, their associated
    data bases, and key parameters are given a short-hand notation called an
    x2sys TAG. The TAG keeps track of settings such as file format, whether the
    data are geographic or not, and the binning resolution for track indices.

    Before you can run :meth:`pygmt.x2sys_init` you must set the environmental
    parameter X2SYS_HOME to a directory where you have write permission, which
    is where x2sys can keep track of your settings.

    Full option list at :gmt-docs:`supplements/x2sys/x2sys_init.html`

    {aliases}

    Parameters
    ----------
    tag : str
        The unique name of this data type x2sys TAG.
    fmtfile : str
        Format definition file prefix for this data set (see
        :gmt-docs:`GMT's Format Definition Files
        <supplements/x2sys/x2sys_init.html#format-definition-files>`
        for more information). Specify full path if the file is not in the
        current directory.

        Some file formats already have definition files premade. These include:

        - **mgd77** (for plain ASCII MGD77 data files)
        - **mgd77+** (for enhanced MGD77+ netCDF files)
        - **gmt** (for old mgg supplement binary files)
        - **xy** (for plain ASCII x, y tables)
        - **xyz** (same, with one z-column)
        - **geo** (for plain ASCII longitude, latitude files)
        - **geoz** (same, with one z-column).
    suffix : str
        Specifies the file extension (suffix) for these data files. If not
        given we use the format definition file prefix as the suffix (see
        ``fmtfile``).
    discontinuity : str
        **d**\|\ **g**.
        Selects geographical coordinates. Append **d** for discontinuity at the
        Dateline (makes longitude go from -180 to +180) or **g** for
        discontinuity at Greenwich (makes longitude go from 0 to 360
        [Default]). If not given we assume the data are Cartesian.
    spacing : str or list
        *dx*\[/*dy*].
        *dx* and optionally *dy* is the grid spacing. Append **m** to
        indicate minutes or **s** to indicate seconds for geographic data.
        These spacings refer to the binning used in the track bin-index data
        base.
    units : str or list
        **d**\|\ **s**\ *unit*.
        Sets the units used for distance and speed when requested by other
        programs. Append **d** for distance or **s** for speed, then give the
        desired *unit* as:

        - **c** - Cartesian userdist or userdist/usertime
        - **e** - meters or m/s
        - **f** - feet or feet/s
        - **k** - km or km/hr
        - **m** - miles or miles/hr
        - **n** - nautical miles or knots
        - **u** - survey feet or survey feet/s

        [Default is ``units=["dk", "se"]`` (km and m/s) if ``discontinuity`` is
        set, and ``units=["dc", "sc"]`` otherwise (e.g., for Cartesian units)].
    {R}
    {V}
    gap : str or list
        **t**\|\ **d**\ *gap*.
        Give **t** or **d** and append the corresponding maximum time gap (in
        user units; this is typically seconds [Default is infinity]), or
        distance (for units, see ``units``) gap [Default is infinity]) allowed
        between the two data points immediately on either side of a crossover.
        If these limits are exceeded then a data gap is assumed and no COE will
        be determined.
    {j}
    """
    # Assemble the module argument string "TAG -D... -E... ..." and pass it
    # to GMT's x2sys_init through an API session.
    module_args = " ".join([tag, build_arg_string(kwargs)])
    with Session() as lib:
        lib.call_module(module="x2sys_init", args=module_args)
| [
37811,
198,
87,
17,
17597,
62,
15003,
532,
20768,
1096,
257,
649,
2124,
17,
17597,
2610,
6831,
13,
198,
37811,
198,
6738,
12972,
70,
16762,
13,
565,
571,
1330,
23575,
198,
6738,
12972,
70,
16762,
13,
16794,
364,
1330,
1382,
62,
853,
... | 2.622961 | 1,594 |
"""
MIT License
Copyright (c) 2016 Santi Dsp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Here are the main options to interpolate Ahocoder features
(either can be lf0 or voided-frequency).
"""
from __future__ import print_function
from subprocess import run, PIPE
import numpy as np
import struct
import os | [
37811,
198,
36393,
13789,
198,
15269,
357,
66,
8,
1584,
10844,
72,
360,
2777,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
198,
1659,
428,
3788,
290,
3917,
10314,
3696,
357,
1169,
366,
... | 3.855856 | 333 |
import igraph as ig
from igraph import Graph
import numpy as np
import sys, os
from GraphPackage.AssistantObjects import Feature_Workhouse
from GraphPackage.AssistantObjects import DynDomEntry
from csb.bio.utils import rmsd
from mainPackage.PathAndDir import Dir2TmpFile
#from GraphPackage.Graph_Config import Path2ViterbiJar
from mainPackage.PathAndDir import Path2ViterbiJar
from Utils.MyIO import ReadViterbiOutFile
#if __name__=='__main__':
# idx_num = 2
# Path2GraphStructure = "../TextFolder/Graph_{}.txt".format(str(idx_num))
# Path2GraphCluster = "../TextFolder/Graph_{}_Cluster.txt".format(str(idx_num))
# Path2SynthesisFeature = "../TextFolder/Graph_{}_Feature.txt".format(str(idx_num))
# Path2ViterbiFeature = "../TextFolder/Graph_{}_Feature_Viterbi.txt".format(str(idx_num))
# Path2ViterbiOut = "../TextFolder/Graph_{}_Feature_Out.txt".format(str(idx_num))
# from GraphAssistFunc import loadD_Graph
# G = loadD_Graph(Path2GraphCluster, Path2GraphStructure)
# n, eds = G.constructLineGraph()
# LineG = D_LineGraph(G, n, eds)
#
# matFeature = np.loadtxt(Path2SynthesisFeature)
# ver_idx = np.abs(matFeature[:,2])==1
# ed_idx = np.abs(matFeature[:,2])==2
# mat_ver = matFeature[ver_idx, :]
# mat_ed = matFeature[ed_idx, :]
# from sklearn.preprocessing import QuantileTransformer
# scaler = QuantileTransformer()
# scaler.fit(np.matrix(mat_ver[:,3]).transpose())
# val_ver = scaler.transform(np.matrix(mat_ver[:,3]).transpose())
# matFeature[ver_idx,3] = val_ver.transpose()
# scaler = QuantileTransformer()
# scaler.fit(np.matrix(mat_ed[:,3]).transpose())
# val_ed = scaler.transform(np.matrix(mat_ed[:,3]).transpose())
# matFeature[ed_idx,3] = val_ed.transpose()
#
# squareMatFeature = np.ones((n,n))*-1
# for vec in matFeature.tolist():
# squareMatFeature[int(vec[0]), int(vec[1])] = vec[3]
# squareMatFeature[int(vec[1]), int(vec[0])] = vec[3]
#
# LineG.setFeature(squareMatFeature)
# LineG.WriteFeature(squareMatFeature, Path2ViterbiFeature)
# LineG.runViterbi("../Script/ViterbiJar/ViterbiAlgorithm.jar", Path2ViterbiFeature, Path2ViterbiOut)
#
# from MyIO import ReadViterbiOutFile
# ViterbiLabel = ReadViterbiOutFile(Path2ViterbiOut)
# ViterbiLabel = [i if i >0 else -1 for i in ViterbiLabel]
# print ("Viterbi Label\n", ViterbiLabel)
# print (LineG.calculateLogScore(ViterbiLabel))
# TrueLabels = LineG.vs['TrueLabel']
# print ("True Label\n", TrueLabels)
# print (LineG.calculateLogScore(TrueLabels))
| [
11748,
45329,
1470,
355,
45329,
198,
6738,
45329,
1470,
1330,
29681,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
25064,
11,
28686,
198,
6738,
29681,
27813,
13,
48902,
10267,
82,
1330,
27018,
62,
12468,
4803,
198,
6738,
29681,
27813,
1... | 2.484762 | 1,050 |
import sys
# sys.exit() raises SystemExit, terminating the script here, so the
# following print statement is never reached.
sys.exit()
print('after exit()')
| [
11748,
25064,
198,
17597,
13,
37023,
3419,
198,
4798,
10786,
8499,
8420,
3419,
11537,
628
] | 3 | 15 |
"""
tests for time conversions relevant to MSISE00
"""
from __future__ import annotations
import datetime
import typing
import numpy as np
from pytest import approx
import sciencedates as sd
# Fixture: related representations of 2013-07-02 12:00:00 for the conversion
# tests (datetime, date -- which drops the time of day -- numpy.datetime64,
# and ISO string).
T: list[typing.Any] = [datetime.datetime(2013, 7, 2, 12, 0, 0)]
T.append(T[0].date())
T.append(np.datetime64(T[0]))
T.append(str(T[0]))
| [
37811,
198,
41989,
329,
640,
32626,
5981,
284,
6579,
24352,
405,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
11748,
4818,
8079,
198,
11748,
19720,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
12972,
9288,
1330,... | 2.739837 | 123 |
from .serializers import PokemonsSerializer,SinglePokemonSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
import json
import requests
# PokeList view allows the whole pokemon list to be returned.
# SinglePoke view ensures that the pokemon whose id is given will return
| [
6738,
764,
46911,
11341,
1330,
41163,
11567,
32634,
7509,
11,
28008,
48034,
32634,
7509,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
11748,
33918,
198,
11748,
7007... | 4.02439 | 82 |
import os
# Identifiers to add to the cart, one Node.js invocation each.
add_list = ["DBAG0K-A9009ZRQC-000", "DBAG0K-1900AZBJY-000"]
total = len(add_list)  # loop-invariant, hoisted out of the loop
for item_id in add_list:
    # Delegates the actual add-to-cart work to the Node.js script.
    os.system("node multi_add_cart.js {} {}".format(item_id, total))
| [
11748,
28686,
198,
2860,
62,
4868,
796,
14631,
35,
4339,
38,
15,
42,
12,
32,
12865,
24,
57,
49,
48,
34,
12,
830,
2430,
35,
4339,
38,
15,
42,
12,
48104,
22778,
33,
41,
56,
12,
830,
8973,
198,
198,
1640,
6376,
287,
751,
62,
4868... | 2.168831 | 77 |
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_middleware import request_id
from pecan import hooks
from neutron import context
class ContextHook(hooks.PecanHook):
"""Configures a request context and attaches it to the request.
The following HTTP request headers are used:
X-User-Id or X-User:
Used for context.user_id.
X-Project-Id:
Used for context.tenant_id.
X-Project-Name:
Used for context.tenant_name.
X-Auth-Token:
Used for context.auth_token.
X-Roles:
Used for setting context.is_admin flag to either True or False.
The flag is set to True, if X-Roles contains either an administrator
or admin substring. Otherwise it is set to False.
"""
priority = 95
| [
2,
15069,
2321,
968,
7610,
7311,
11,
11419,
357,
30571,
17932,
8,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,... | 3.004386 | 456 |
from app.extensions import sched
from elasticapm import Client
from flask import current_app
def register_apm(name=None):
"""This decorator wraps a passed function with a call to the app's registered Elastic APM instance
:param object: Function to be wrapped and decorator parameters by calling
:type object: object
:raises e: Client connection exception.
:return: Wrapped function
:rtype: func
"""
return wrap | [
6738,
598,
13,
2302,
5736,
1330,
6038,
198,
6738,
27468,
499,
76,
1330,
20985,
198,
6738,
42903,
1330,
1459,
62,
1324,
628,
198,
4299,
7881,
62,
499,
76,
7,
3672,
28,
14202,
2599,
198,
220,
220,
220,
37227,
1212,
11705,
1352,
27521,
... | 3.492188 | 128 |
from boost_histogram import Histogram
| [
6738,
5750,
62,
10034,
21857,
1330,
5590,
21857,
201,
198,
201,
198
] | 3.416667 | 12 |
#!/usr/bin/python3
import numpy
from tqdm import tqdm
from collections import defaultdict
from amr_coref.utils.logging import setup_logging, ERROR
from amr_coref.coref.coref_mention_data import CorefMentionData
from amr_coref.utils.data_utils import load_json
from amr_coref.coref.vocab_embeddings import load_word_set
if __name__ == '__main__':
    setup_logging(level=ERROR)
    # Input corpus (gzipped json) and the mention-token whitelist file.
    coref_fpath = 'data/tdata/train.json.gz'
    #coref_fpath = 'data/tdata/test.json.gz'
    men_set_fn = 'data/tdata/mention_tokens.txt'
    # Cap on how far back antecedents are considered (effectively unlimited).
    max_dist = 999999
    print('Loading and testing', coref_fpath)
    mention_set = load_word_set(men_set_fn)
    cr_data = load_json(coref_fpath)
    mdata = CorefMentionData(cr_data, mention_set)
    print('There are {:,} documents'.format(len(mdata.mentions.keys())))
    print()
    # Stats for max anaphor to antecedent distances
    distances = []
    mlist_lens = []
    pair_count = 0
    for doc_name, mlist in mdata.mentions.items():
        mlist_lens.append(len(mlist))
        for midx in range(len(mlist)):
            mention = mlist[midx]
            # All earlier mentions in the document are candidate antecedents.
            antecedents = mlist[:midx]
            pair_count += len(antecedents)
            if mention.cluster_id:
                for antecedent in antecedents:
                    if mention.cluster_id == antecedent.cluster_id:
                        # Distance in mentions between anaphor and antecedent.
                        dist = mention.mdata_idx-antecedent.mdata_idx
                        distances.append( dist )
    # Sanity check: every recorded distance should be positive.
    assert numpy.min(distances) > 0
    print('Mention list lengths go from %d to %d' % (min(mlist_lens), max(mlist_lens)))
    print('There are {:,} total (potential) anaphor to antecedent pairs'.format(pair_count))
    print('with {:,} pairs in clusters {:.1f}%'.format(len(distances), 100.*len(distances)/pair_count))
    print('Max distance is {:,} and the average is {:.1f} with a stdev of {:.1f}'.format(\
        numpy.max(distances), numpy.mean(distances), numpy.std(distances)))
    print()
    print('Stats for features')
    stats = defaultdict(list)
    for doc_name, mlist in tqdm(mdata.mentions.items(), ncols=100, leave=False):
        for midx in range(len(mlist)):
            mention = mlist[midx]
            antecedents = mlist[:midx]
            antecedents = antecedents[-max_dist:]
            # Accumulate data for statistics
            stats['sent_idx'].append(mention.sent_idx)
            stats['tok_idx'].append(mention.tok_idx)
            # NOTE(review): '=' overwrites 'sidx_diff' on every mention, while
            # 'doc_idx_diff'/'men_idx_diff' below accumulate with '+='.
            # Looks like this should also be '+=' -- confirm intent.
            stats['sidx_diff'] = [mention.sent_idx - a.sent_idx for a in antecedents]
            doc_idx = mdata.get_doc_tok_idx(mention)
            stats['doc_idx_diff'] += [doc_idx - mdata.get_doc_tok_idx(a) for a in antecedents]
            stats['men_idx_diff'] += [mention.mdata_idx - a.mdata_idx for a in antecedents]
    for key, values in stats.items():
        mean = numpy.mean(values)
        std = numpy.std(values)
        # 95%CI printed as mean + 2 standard deviations.
        print('%-12s mean=%5d max=%5d stdev=%7.1f 95%%CI=%7.1f' % (key, mean, numpy.max(values),
            std, mean+2*std))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
299,
32152,
198,
6738,
220,
220,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
220,
220,
17268,
1330,
4277,
11600,
198,
6738,
220,
220,
716,
81,
62,
7295,
69,
13,
26791,
13,
... | 2.142344 | 1,391 |
# +1
# +1
# This class is very simple because it does not actually generate features itself.
# Instead, it simply uses the features generated by the trigger model. Currently, to represent a pair of event mentions,
# it uses just the embeddings associated with the two anchors (with optional window size)
| [
198,
198,
2,
1343,
16,
628,
198,
2,
1343,
16,
198,
2,
770,
1398,
318,
845,
2829,
780,
340,
857,
407,
1682,
7716,
3033,
2346,
13,
198,
2,
5455,
11,
340,
2391,
3544,
262,
3033,
7560,
416,
262,
7616,
2746,
13,
16888,
11,
284,
2380,... | 4.246575 | 73 |