content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
# -*-coding:utf-8 -*-
import os
import time
from colorama import init
from termcolor import cprint
init()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
640,
198,
6738,
3124,
1689,
1330,
2315,
198,
6738,
3381,
8043,
1330,
269,
4798,
628,
198,... | 2.808511 | 47 |
# Import the rebound module
import sys
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.colors import LogNorm
import rebound
import numpy as np
import time
from rebound.interruptible_pool import InterruptiblePool
import warnings
Ngrid = 500
#3dt = 100.23
orbit = 11.8618*1.*np.pi
dt = orbit/3000.
tmax = orbit*1e2 # Maximum integration time.
integrators = ["whfast-nocor", "whfast"]
#integrators = ["mercury","swifter-whm","whfast-nocor", "whfast"]
colors = {
'whfast-nocor': "#FF0000",
'whfast': "#00AA00",
'mercury': "#6E6E6E",
'swifter-whm': "#444444",
'swifter-helio':"#AABBBB",
'swifter-tu4': "#FFAAAA",
'ias15': "g",
}
trials = 4
parameters = [(inte,i*trials+j,j) for i,inte in enumerate(integrators) for j in xrange(trials)]
if len(sys.argv)!=2:
pool = InterruptiblePool()
print("Running %d simulations" % (len(parameters)))
res = np.array(pool.map(simulation,parameters)).reshape(len(integrators),trials,2,Ngrid)
np.save("res.npy",res)
else:
print("Loading %d simulations" % (len(parameters)))
print(sys.argv[1])
res = np.load(sys.argv[1])
f,axarr = plt.subplots(1,1,figsize=(13,4))
extent=[res[:,:,0,:].min()/orbit, res[:,:,0,:].max()/orbit, 1e-16, 1e-5]
axarr.set_xlim(extent[0], extent[1])
axarr.set_ylim(extent[2], extent[3])
axarr.set_xlabel(r"time [orbits]")
axarr.set_ylabel(r"relative energy error")
plt.xscale('log', nonposy='clip')
plt.yscale('log', nonposy='clip')
plt.grid(True)
res_mean = np.mean(res,axis=1)
for i in xrange(len(res)):
for j in xrange(trials):
res_trial = res[i,j,:,:]
im1 = axarr.plot(res_trial[0]/orbit,res_trial[1], color=colors[integrators[i]],alpha=0.2)
im1 = axarr.plot(res_mean[i][0]/orbit,res_mean[i][1], label=integrators[i].upper(),color=colors[integrators[i]], linewidth=2.0)
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
lgd = plt.legend(loc="upper center", bbox_to_anchor=(0.5, -0.2), prop = fontP,ncol=3,frameon=False, numpoints=1, scatterpoints=1 , handletextpad = 0.2, markerscale=2.)
plt.savefig("longtermtest.pdf", bbox_extra_artists=(lgd,), bbox_inches='tight')
from sys import platform as _platform
if _platform == "darwin":
import os
os.system("open longtermtest.pdf")
| [
2,
17267,
262,
23623,
8265,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
26,
2603,
29487,
8019,
13,
1904,
7203,
12315,
4943,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
2603,
29487,
8019,
1330,
4378,
26... | 2.335294 | 1,020 |
from typing import Any, Dict, Optional
import torch
import numpy as np
from theseus.base.metrics.metric_template import Metric
class DiceScore(Metric):
""" Dice score metric for segmentation
num_classes: `int`
number of classes
eps: `float`
epsilon to avoid zero division
thresh: `float`
threshold for binary segmentation
"""
def update(self, outputs: torch.Tensor, batch: Dict[str, Any]):
"""
Perform calculation based on prediction and targets
"""
# outputs: (batch, num_classes, W, H)
# targets: (batch, num_classes, W, H)
targets = batch['targets']
assert len(targets.shape) == 4, "Wrong shape for targets"
assert len(outputs.shape) == 4, "Wrong shape for targets"
self.sample_size += outputs.shape[0]
if self.pred_type == 'binary':
predicts = (outputs > self.thresh).float()
elif self.pred_type =='multi':
predicts = torch.argmax(outputs, dim=1)
predicts = predicts.detach().cpu()
one_hot_predicts = torch.nn.functional.one_hot(
predicts.long(),
num_classes=self.num_classes).permute(0, 3, 1, 2)
for cl in range(self.num_classes):
cl_pred = one_hot_predicts[:,cl,:,:]
cl_target = targets[:,cl,:,:]
score = self.binary_compute(cl_pred, cl_target)
self.scores_list[cl] += sum(score)
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
198,
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
777,
385,
13,
8692,
13,
4164,
10466,
13,
4164,
1173,
62,
28243,
1330,
3395,
1173,
198,
198,
4871,
34381,
26595,
7,
917... | 2.240909 | 660 |
"""
monodromy/io/lrcalc.py
Extracts quantum Littlewood-Richardson coefficients from the package `lrcalc`.
This package is cumbersome to install, so we provide a prebaked copy of this
table in `qlr_table.py`.
"""
from copy import copy
import lrcalc
def qlr(r, k, a, b):
"""
Computes the quantum Littlewood-Richardson coefficients N_{ab}^{c, d} in the
small quantum cohomology ring of the Grassmannian Gr(r, k). For supplied
a and b, this computes the set of c and d for which N = 1.
Returns a dictionary of the form {c: d} over values where N_ab^{c, d} = 1.
"""
return {
tuple(list(c) + [0]*(r - len(c))):
(sum(a) + sum(b) - sum(c)) // (r + k)
for c, value in lrcalc.mult_quantum(a, b, r, k).items() if value == 1
}
def displacements(r, k, skip_to=None):
"""
Iterates over the ordered sequence of partitions of total size `r + k` into
`r` parts, presented as displacements from the terminal partiiton.
If `skip_to` is supplied, start enumeration from this element.
"""
def normalize(p, r, k):
"""
Roll the odometer `p` over until it becomes a legal (`r`, `k`)
displacement.
"""
if p[0] > k:
return None
for index, (item, next_item) in enumerate(zip(p, p[1:])):
if next_item > item:
p[index+1] = 0
p[index] += 1
return normalize(p, r, k)
return p
ok = skip_to is None
p = [0 for j in range(0, r)]
while p is not None:
if p == skip_to:
ok = True
if ok:
yield copy(p)
p[-1] += 1
p = normalize(p, r, k)
def regenerate_qlr_table():
"""
Uses `lrcalc` to rebuild the table stored in `qlr_table.py`.
"""
qlr_table = [] # [[r, k, [*a], [*b], [*c], d], ...]
for r in range(1, 4):
k = 4 - r
# r bounds the length; k bounds the contents
for a in displacements(r, k):
for b in displacements(r, k, skip_to=a):
for c, d in qlr(r, k, a, b).items():
qlr_table.append([r, k, a, b, list(c), d])
return qlr_table
| [
37811,
198,
2144,
375,
50228,
14,
952,
14,
75,
6015,
282,
66,
13,
9078,
198,
198,
11627,
974,
82,
14821,
7703,
3822,
12,
14868,
1371,
261,
44036,
422,
262,
5301,
4600,
75,
6015,
282,
66,
44646,
198,
198,
1212,
5301,
318,
44491,
284,... | 2.142018 | 1,021 |
import pandas as pd
import numpy as np
import datetime
import os
import re
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import seaborn as sns
@profile
def date_time_converter(date_time_list):
"""
This function gets the numpy array with date_time in matlab format
and returns a numpy array with date_time in human readable format.
"""
# Empty array to hold the results
date_time_human = []
for i in date_time_list:
date_time_human.append(datetime.datetime.fromordinal(int(i)) +
datetime.timedelta(days=i%1) - datetime.timedelta(days = 366))
return date_time_human
@profile
def PL_samples_file_joiner(data_dir, file_name_format, ignore_file_indices):
"""
This function reads in the data for PL Samples experiment and returns a
nice dataframe with cycles in ascending order.
Args:
data_dir (string): This is the absolute path to the data directory.
file_name_format (string): Format of the filename, used to deduce other files.
ignore_file_indices (list, int): This list of ints tells which to ignore.
Returns:
The complete test data in a dataframe with extra column for capacity in Ah.
"""
# Raise an exception if the type of the inputs is not correct
if not isinstance(data_dir, str):
raise TypeError('data_dir is not of type string')
if not isinstance(file_name_format, str):
raise TypeError('file_name_format is not of type string')
if not isinstance(ignore_file_indices, list):
raise TypeError("ignore_file_indices should be a list")
for i in range(len(ignore_file_indices)):
if not isinstance(ignore_file_indices[i], int):
raise TypeError("""ignore_file_indices elements should be
of type integer""")
if not os.path.exists(join(data_dir, file_name_format)):
raise FileNotFoundError("File {} not found in the location {}"
.format(file_name_format, data_dir))
# get the list of files in the directory
onlyfiles = [f for f in listdir(data_dir) if isfile(join(data_dir, f))]
# Extract the experiment name from the file_name_format
exp_name = file_name_format[0:4]
# Empty dictionary to hold all the dataframe for various files
dict_files = {}
# Iterate over all the files of certain type and get the file number from them
for filename in onlyfiles:
if exp_name in filename:
# Extract the filenumber from the name
file_number = re.search(exp_name + '\((.+?)\).csv', filename).group(1)
# Give a value of dataframe to each key
dict_files[int(file_number)] = pd.read_csv(join(data_dir, filename))
# Empty dictionary to hold the ordered dictionaries
dict_ordered = {}
# Sort the dictionary based on keys
for key in sorted(dict_files.keys()):
dict_ordered[key] = dict_files[key]
# Keys with files to keep, remove the ignore indices from all keys
wanted_keys = np.array(list(set(dict_ordered.keys()) - set(ignore_file_indices)))
# Remove the ignored dataframes for characterization
dict_ord_cycling_data = {k : dict_ordered[k] for k in wanted_keys}
# Concatenate the dataframes to create the total dataframe
df_out = None
for k in wanted_keys:
if df_out is None:
df_next = dict_ord_cycling_data[k]
df_out = pd.DataFrame(data=None, columns=df_next.columns)
df_out = pd.concat([df_out, df_next])
else:
df_next = dict_ord_cycling_data[k]
df_next['Cycle'] = np.array(df_next['Cycle']) + max(np.array(df_out['Cycle']))
df_next['Time_sec'] = np.array(df_next['Time_sec']) + max(np.array(df_out['Time_sec']))
df_next['Charge_Ah'] = np.array(df_next['Charge_Ah']) + max(np.array(df_out['Charge_Ah']))
df_next['Discharge_Ah'] = np.array(df_next['Discharge_Ah']) + max(np.array(df_out['Discharge_Ah']))
df_out = pd.concat([df_out, df_next])
####
# This has been commented out for performance, as we do not need date_time
####
# Convert the Date_Time from matlab datenum to human readable Date_Time
# First convert the series into a numpy array
# date_time_matlab = df_out['Date_Time'].tolist()
# # Apply the conversion to the numpy array
# df_out['Date_Time_new'] = date_time_converter(date_time_matlab)
# Reset the index and drop the old index
df_out_indexed = df_out.reset_index(drop=True)
# Proceed further with correcting the capacity
df_grouped = df_out_indexed.groupby(['Cycle']).count()
# Get the indices when a cycle starts
cycle_start_indices = df_grouped['Time_sec'].cumsum()
# Get the charge_Ah per cycle
# Create numpy array to store the old charge_Ah row, and then
# perform transformation on it, rather than in the pandas series
# this is a lot faster in this case
charge_cycle_ah = np.array(df_out_indexed['Charge_Ah'])
charge_ah = np.array(df_out_indexed['Charge_Ah'])
for i in range(1, len(cycle_start_indices)):
a = cycle_start_indices.iloc[i-1]
b = cycle_start_indices.iloc[i]
charge_cycle_ah[a:b] = charge_ah[a:b] - charge_ah[a-1]
df_out_indexed['charge_cycle_ah'] = charge_cycle_ah
# Get the discharge_Ah per cycle
discharge_cycle_ah = np.array(df_out_indexed['Discharge_Ah'])
discharge_ah = np.array(df_out_indexed['Discharge_Ah'])
for i in range(1, len(cycle_start_indices)):
a = cycle_start_indices.iloc[i-1]
b = cycle_start_indices.iloc[i]
discharge_cycle_ah[a:b] = discharge_ah[a:b] - discharge_ah[a-1]
df_out_indexed['discharge_cycle_ah'] = discharge_cycle_ah
# This is the data column we can use for prediction.
# This is not totally accurate, as this still has some points that go negative,
# due to incorrect discharge_Ah values every few cycles.
# But the machine learning algorithm should consider these as outliers and
# hopefully get over it. We can come back and correct this.
df_out_indexed['capacity_ah'] = charge_ah - discharge_ah
return df_out_indexed
def PL_samples_capacity_cycles(pl_df):
"""
This function finds the capacity in each cycle from the cumulative capacity
in the original file.
Args:
Returns:
"""
return
data_dir = '/home/chintan/uwdirect/chintan/BattDeg/data/PL 12,14'
fnf = 'PL12(4).csv'
ignore_indices = [1, 2, 3]
out_df = PL_samples_file_joiner(data_dir, fnf, ignore_indices) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
28686,
1330,
1351,
15908,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
11,
4654,
198,
11748,
260... | 2.489472 | 2,707 |
# -*- coding: utf-8 -*-
from gi.repository import GObject as gobject, Gst as gst, Gtk as gtk, GdkX11, GstVideo
import platform
import logging
log = logging.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
308,
72,
13,
260,
1930,
37765,
1330,
402,
10267,
355,
48484,
752,
11,
402,
301,
355,
308,
301,
11,
402,
30488,
355,
308,
30488,
11,
402,
34388,
55,
1157,
... | 2.557143 | 70 |
import unittest
import requests_mock
import rxv
FAKE_IP = '10.0.0.0'
DESC_XML = 'http://%s/YamahaRemoteControl/desc.xml' % FAKE_IP
CTRL_URL = 'http://%s/YamahaRemoteControl/ctrl' % FAKE_IP
| [
11748,
555,
715,
395,
198,
11748,
7007,
62,
76,
735,
198,
11748,
374,
87,
85,
198,
198,
7708,
7336,
62,
4061,
796,
705,
940,
13,
15,
13,
15,
13,
15,
6,
198,
30910,
34,
62,
55,
5805,
796,
705,
4023,
1378,
4,
82,
14,
56,
321,
... | 2.232558 | 86 |
# Copyright (c) 2016 CEF Python, see the Authors file.
# All rights reserved. Licensed under BSD 3-clause license.
# Project website: https://github.com/cztomczak/cefpython
"""Create Table of contents (TOC) for a single .md file or for a directory.
Usage:
toc.py FILE
toc.py DIR
To ignore file when generating TOC, put an empty line just before H1.
"""
import os
import sys
import re
import glob
API_DIR = os.path.join(os.path.dirname(__file__), "..", "api")
def main():
"""Main entry point."""
if len(sys.argv) == 1:
sys.argv.append(API_DIR)
if (len(sys.argv) == 1 or
"-h" in sys.argv or
"--help" in sys.argv or
"/?" in sys.argv):
print(__doc__.strip())
sys.exit(0)
arg1 = sys.argv[1]
if os.path.isdir(arg1):
(modified, warnings) = toc_dir(arg1)
if modified:
print("Done")
else:
print("No changes to TOCs. Files not modified.")
else:
(modified, warnings) = toc_file(arg1)
if modified:
print("Done")
else:
print("No changes to TOC. File not modified.")
if warnings:
print("Warnings: "+str(warnings))
def toc_file(file_):
"""A single file was passed to doctoc. Return bool whether modified
and the number of warnings."""
with open(file_, "rb") as fo:
orig_contents = fo.read().decode("utf-8", "ignore")
# Fix new lines just in case. Not using Python's "rU",
# it is causing strange issues.
orig_contents = re.sub(r"(\r\n|\r|\n)", os.linesep, orig_contents)
(tocsize, contents, warnings) = create_toc(orig_contents, file_)
if contents != orig_contents:
with open(file_, "wb") as fo:
fo.write(contents.encode("utf-8"))
tocsize_str = ("TOC size: "+str(tocsize) if tocsize
else "TOC removed")
print("Modified: "+file_+" ("+tocsize_str+")")
return True, warnings
else:
return False, warnings
def toc_dir(dir_):
"""A directory was passed to doctoc. Return bool whether any file was
modified and the number of warnings."""
files = glob.glob(os.path.join(dir_, "*.md"))
modified_any = False
warnings = 0
for file_ in files:
if "API-categories.md" in file_ or "API-index.md" in file_:
continue
(modified, warnings) = toc_file(file_)
if not modified_any:
modified_any = True if modified else False
return modified_any, warnings
def create_toc(contents, file_):
"""Create or modify TOC for the document contents."""
match = re.search(r"Table of contents:%s(\s*\* \[[^\]]+\]\([^)]+\)%s){2,}"
% (os.linesep, os.linesep), contents)
oldtoc = match.group(0) if match else None
(tocsize, toc, warnings) = parse_headings(contents, file_)
if oldtoc:
if not toc:
# If toc removed need to remove an additional new lines
# that was inserted after toc.
contents = contents.replace(oldtoc+os.linesep, toc)
else:
contents = contents.replace(oldtoc, toc)
elif tocsize:
# Insert after H1, but if there is text directly after H1
# then insert after that text.
first_line = False
if not re.search(r"^#\s+", contents):
print("WARNING: missing H1 on first line. Ignoring file: "+file_)
return 0, contents, warnings+1
lines = contents.splitlines()
contents = ""
toc_inserted = False
for line in lines:
if not first_line:
first_line = True
else:
if not toc_inserted and re.search(r"^(##|###)", line):
contents = contents[0:-len(os.linesep)]
contents += os.linesep + toc + os.linesep + os.linesep
toc_inserted = True
contents += line + os.linesep
# Special case for README.md - remove Quick Links toc for subheadings
re_find = (r" \* \[Docs\]\(#docs\)[\r\n]+"
r" \* \[API categories\]\(#api-categories\)[\r\n]+"
r" \* \[API index\]\(#api-index\)\r?\n?")
contents = re.sub(re_find, "", contents)
return tocsize, contents, warnings
def parse_headings(raw_contents, file_):
"""Parse contents looking for headings. Return a tuple with number
of TOC elements, the TOC fragment and the number of warnings."""
# Remove code blocks
parsable_contents = re.sub(r"```[\s\S]+?```", "", raw_contents)
# Parse H1,H2,H3
headings = re.findall(r"^(#|##|###)\s+(.*)", parsable_contents,
re.MULTILINE)
toc = "Table of contents:" + os.linesep
tocsize = 0
warnings = 0
count_h1 = 0
count_h2 = 0
for heading in headings:
level = heading[0]
level = (1 if level == "#" else
2 if level == "##" else
3 if level == "###" else None)
assert level is not None
title = heading[1].strip()
if level == 1:
count_h1 += 1
if count_h1 > 1:
warnings += 1
print("WARNING: found more than one H1 in "+file_)
continue
if level == 2:
count_h2 += 1
hash_ = headinghash(title)
indent = ""
if level == 3:
if count_h2:
# If there was no H2 yet then H3 shouldn't have indent.
indent = " " * 2
toc += indent + "* [%s](#%s)" % (title, hash_) + os.linesep
tocsize += 1
if tocsize <= 1:
# If there is only one H2/H3 heading do not create TOC.
toc = ""
tocsize = 0
return tocsize, toc, warnings
def headinghash(title):
"""Get a link hash for a heading H1,H2,H3."""
hash_ = title.lower()
hash_ = hash_.replace(" - ", "specialcase1")
hash_ = hash_.replace(" / ", "specialcase2")
hash_ = re.sub(r"[^a-z0-9_\- ]+", r"", hash_)
hash_ = hash_.replace(" ", "-")
hash_ = re.sub(r"[-]+", r"-", hash_)
hash_ = re.sub(r"-$", r"", hash_)
hash_ = hash_.replace("specialcase1", "---")
hash_ = hash_.replace("specialcase2", "--")
return hash_
if __name__ == "__main__":
main()
| [
2,
15069,
357,
66,
8,
1584,
18671,
37,
11361,
11,
766,
262,
46665,
2393,
13,
198,
2,
1439,
2489,
10395,
13,
49962,
739,
347,
10305,
513,
12,
565,
682,
5964,
13,
198,
2,
4935,
3052,
25,
3740,
1378,
12567,
13,
785,
14,
26691,
39532,... | 2.15522 | 2,912 |
from mythic_payloadtype_container.MythicCommandBase import *
import json
| [
6738,
7918,
291,
62,
15577,
2220,
4906,
62,
34924,
13,
41444,
291,
21575,
14881,
1330,
1635,
198,
11748,
33918,
628,
198
] | 3.571429 | 21 |
'''
jpg.py
<alexmichael@uchicago.edu>
Description: Prototype of polyglot generator.
TODO:
- [ ] Build the least significant bit layer injector
'''
import struct
import re
import random
if __name__ == '__main__':
main()
# read()
# test() | [
7061,
6,
198,
9479,
13,
9078,
198,
27,
1000,
87,
76,
40302,
31,
794,
4549,
13,
15532,
29,
198,
198,
11828,
25,
48954,
286,
7514,
4743,
313,
17301,
13,
198,
198,
51,
3727,
46,
25,
198,
197,
12,
685,
2361,
10934,
262,
1551,
2383,
... | 2.788889 | 90 |
'''Winny Node.
'''
#
# Copyright (c) 2006 Pyny Project.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id: node.py 15 2006-12-10 06:23:36Z fuktommy $
#
from . import rc4
from . import config
from . import nyconnection
from .nyexcept import *
from .conv import hexstr, binary
__all__ = ['Node']
__version__ = '$Revision: 15 $'
level = {'Hi': 16, 'Middle': 4, 'Low': 1}
class Node:
'''Winny Node.
Variables:
- isknown
- priority (0<=priority<=0xFF)
- correlation (it is not priority, 0<=correction<=0xFF)
- major Application name
- minor Version
- addr IPv4 address
- port Port number
- reported_address IPv4 address node reporting
- clustering Clustering keywords (len(clustering)<=3)
- nodetype Raw, NAT, DDNS, or Port0
- speed
- sortkey
'''
def can_upstream(self, speed):
'''Check this node can be upstream node.
Argument speed is self-node's.
The constant 4.3 may not work.
'''
return (speed * 0.8 <= self.speed) and (self.speed <= speed * 4.3)
def can_downstream(self, speed):
'''Check this node can be upstream node.
Argument speed is self-node's.
The constant 4.3 may not work.
'''
return not self.can_upstream(speed)
# End of Node
def strnode(s):
'''Make node from string.
Sample:
>>> node = strnode('123.1.2.3:1234')
>>> str(node)
'123.1.2.3:1234'
>>> node = strnode('@ba9582a383c7d6e79cd5d8c71f7347')
>>> str(node)
'123.1.2.3:1234'
'''
if s.startswith('@'):
s = unpack_hash(s)
addr, port = s.split(':')
node = Node()
node.addr, node.port = addr, int(port)
return node
def RC4Key(checksum):
'''RC4 key virtual class.
'''
magic = '\x6f\x70\x69\x65\x77\x66\x36\x61\x73\x63\x78\x6c\x76'
return chr(checksum) + magic[1:]
def pack_hash(inetaddrss):
'''Pack internet address.
n.n.n.n:s -> @xxxx....
sample:
>>> pack_hash('123.1.2.3:1234')
'@ba9582a383c7d6e79cd5d8c71f7347'
'''
checksum = 0
for i in inetaddrss:
checksum = (checksum + ord(i)) & 0xFF
rc4key = RC4Key(checksum)
hash = '@' + hexstr(chr(checksum) + rc4.crypt(rc4key, inetaddrss))
return hash
def unpack_hash(hash):
'''Unpack winny node format.
@xxxx.... -> n.n.n.n:s
sample:
>>> unpack_hash('@ba9582a383c7d6e79cd5d8c71f7347')
'123.1.2.3:1234'
'''
if len(hash) < 20: # len('@^') + len('0.0.0.0:0') * 2 = 20
raise NodeFormatError('Specified hash-string is too small')
elif not hash.startswith('@'):
raise NodeFormatError('Specified hash-string is not hash-string of NodeAddress')
sum = binary(hash[1:3])
encoded = binary(hash[3:])
rc4key = RC4Key(ord(sum))
unpackedstr = rc4.crypt(rc4key, encoded)
checksum = 0
for i in unpackedstr:
checksum += ord(i)
if (checksum & 0xFF) != ord(sum):
raise NodeFormatError('sum check error')
return unpackedstr
if __name__ == '__main__':
_test()
| [
7061,
6,
16643,
3281,
19081,
13,
198,
7061,
6,
198,
2,
198,
2,
15069,
357,
66,
8,
4793,
350,
2047,
88,
4935,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
3... | 2.410115 | 1,819 |
# This is the program to run ping test on local network
# XC.Li @ Apr.10 2018
# read in ping_list and ping_config
ping_list, ping_config = start_up("ping_list.txt", "ping_config.txt")
print(ping_list)
print(ping_config)
| [
2,
770,
318,
262,
1430,
284,
1057,
29400,
1332,
319,
1957,
3127,
201,
198,
2,
1395,
34,
13,
32304,
2488,
2758,
13,
940,
2864,
201,
198,
201,
198,
201,
198,
2,
1100,
287,
29400,
62,
4868,
290,
29400,
62,
11250,
201,
198,
201,
198,
... | 2.593407 | 91 |
import sys
from schools3.config import base_config
config = base_config.Config()
config.num_features = 10
sys.modules[__name__] = config
| [
11748,
25064,
198,
6738,
4266,
18,
13,
11250,
1330,
2779,
62,
11250,
198,
198,
11250,
796,
2779,
62,
11250,
13,
16934,
3419,
198,
198,
11250,
13,
22510,
62,
40890,
796,
838,
198,
198,
17597,
13,
18170,
58,
834,
3672,
834,
60,
796,
4... | 3.181818 | 44 |
#!/usr/bin/env python
import argparse
import math
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
import boto3
from tqdm.auto import tqdm
client = boto3.client("s3")
objects = []
parser = argparse.ArgumentParser(description="Delete files from S3")
parser.add_argument("--bucket", help="The bucket to query/delete from.", required=True)
parser.add_argument(
"--infile",
help="The file containing the list of objects/prefixes to query or delete.",
)
parser.add_argument(
"--prefix", help="The prefix to list all files in and optionally delete."
)
parser.add_argument(
"--versionsfile",
help="A file with key,versionid listing to delete, generated by the querying of this script.",
)
parser.add_argument(
"--delete",
help="Acutally try to delete after querying",
dest="delete",
action="store_true",
)
parser.add_argument(
"--workers",
help="Number of parallel workers when getting versions from an 'infile'",
type=int,
default=5,
)
parser.set_defaults(delete=False)
args = parser.parse_args()
batch = 1000
if len(list(filter(None, [args.infile, args.prefix, args.versionsfile]))) > 1:
sys.exit("Stopping: please set only one of --infile, --prefix, or --versionsfile!")
if args.infile:
with ThreadPoolExecutor(max_workers=args.workers) as executor:
futures = dict()
with open(args.infile, "r") as input_list:
for line in tqdm(input_list.readlines()):
key = line.strip()
futures[
executor.submit(get_versions, client, args.bucket, key, False)
] = key
with open(args.infile + ".list", "w") as output_list:
for future in tqdm(as_completed(futures)):
key = futures[future]
try:
versions = future.result()
objects += versions
for version in versions:
output_list.write(f"{key},{version['VersionId']}\n")
except Exception as exc:
print("%r generated an exception: %s" % (key, exc))
elif args.prefix:
objects = get_versions(client, args.bucket, args.prefix)
elif args.versionsfile:
with open(args.versionsfile, "r") as input_list:
for line in tqdm(input_list.readlines()):
key, version = line.strip().split(",")
objects += [{"Key": key, "VersionId": version}]
num_objects_to_delete = len(objects)
print(
f"Number of objects (keys/version) in the found for deletion: {num_objects_to_delete}"
)
if args.delete:
if num_objects_to_delete > 0:
number_confirmation = int(
input("Please enter the number of objects to delete to continue: ")
)
if number_confirmation != len(objects):
sys.exit("Delete confirmation failed!")
rounds = math.ceil(len(objects) / batch)
for i in tqdm(range(rounds)):
thisbatch = objects[i * batch : (i + 1) * batch]
response = client.delete_objects(
Bucket=args.bucket, Delete={"Objects": thisbatch, "Quiet": False},
)
# print(response)
print("Done delete")
else:
print("Finished!")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
25064,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
11,
355,
62,
785,
16838,
198,
198,
11748,
275,
2069,
... | 2.3922 | 1,359 |
#!/usr/bin/env python3
import configparser
import getpass
import logging
import os
import sys
from dataclasses import InitVar, dataclass
from datetime import date, datetime, timedelta
from pathlib import Path
import pandas as pd
import pyodbc
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from pytz import timezone
from app import keeper_of_the_state, now_adjusted, predictor
from app.constants import DAY_OF_WEEK, UNENFORCED_DAYS
from app.model import ParkingAvailabilityModel
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
if sys.stdout.isatty():
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
DIRNAME = Path(__file__).parent.absolute()
@dataclass
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
4566,
48610,
198,
11748,
651,
6603,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
4818,
330,
28958,
1330,
44707,
19852,
11,
4818,
330,
31172,
198,
6... | 3.052209 | 249 |
# -*- coding: utf-8 -*-
"""
This module is used for testing the functions within the pyhpeimc.plat.system module.
"""
from unittest import TestCase
from pyhpeimc.plat.system import *
from test_machine import auth
# get_system_category
# get_system_device_models
# get_system_series
# Section deals with device authentication templates. Get and Set functions available apply to
# IMC 7.3 and later
class TestCreateTelnetTemplate(TestCase):
"""
Class to test create_telnet_template functions
"""
def test_fail_create_existing_tempalte(self):
"""
Test to ensure can't create the same telnet template twice
:return: integer of 404
rtype int
"""
template = {
"type": "0",
"name": "User_with_Enable",
"authType": "3",
"userName": "",
"userPassword": "password",
"superPassword": "password",
"authTypeStr": "Password + Super/Manager Password (No Operator)",
"timeout": "4",
"retries": "1",
"port": "23",
"version": "1",
"creator": "admin",
"accessType": "1",
"operatorGroupStr": ""
}
output = create_telnet_template(auth.creds, auth.url, template)
output = create_telnet_template(auth.creds, auth.url, template)
#self.assertEqual(output, my_values)
class TestDeleteTelnetTemplate(TestCase):
"""
Tests for delete_telnet_template function
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
1212,
8265,
318,
973,
329,
4856,
262,
5499,
1626,
262,
12972,
71,
431,
320,
66,
13,
489,
265,
13,
10057,
8265,
13,
198,
198,
37811,
198,
198,
6738,
555,
... | 2.350769 | 650 |
import argparse
import math
import os
import subprocess
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
850,
14681,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.911765 | 34 |
from flask import Flask, render_template, redirect, url_for, flash
from flask_bootstrap import Bootstrap
from flask_ckeditor import CKEditor
from flask_login import login_user, LoginManager, current_user, logout_user, login_required
from werkzeug.security import generate_password_hash, check_password_hash
from dotenv import load_dotenv
from database import db
from models import User, ToDoItem
from forms import RegisterForm, LoginForm, TODOItemForm
import requests
import os
app = Flask(__name__)
load_dotenv()
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY", "APP_SECRET_KEY")
app.config["RECAPTCHA_PUBLIC_KEY"] = os.environ.get("RECAPTCHA_PUBLIC_KEY")
app.config["RECAPTCHA_PRIVATE_KEY"] = os.environ.get("RECAPTCHA_PRIVATE_KEY")
Bootstrap(app)
ckeditor = CKEditor(app)
# Database Connection
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URL", "sqlite:///todolist.db")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.app = app
db.init_app(app)
db.create_all()
# Login Manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
@app.route("/")
@app.route("/register", methods=["GET", "POST"])
@app.route("/login", methods=["GET", "POST"])
@app.route("/logout")
@login_required
@app.route("/dashboard")
@login_required
@app.route("/add-item", methods=["GET", "POST"])
@login_required
@app.route("/delete-item/<int:item_id>")
@login_required
@app.route("/complete-item/<int:item_id>")
@login_required
if __name__ == '__main__':
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
198,
6738,
42903,
62,
18769,
26418,
1330,
18892,
26418,
198,
6738,
42903,
62,
694,
35352,
1330,
327,
7336,
67,
2072,
198,
6738,
42903,
62,
38235,
... | 2.745234 | 577 |
#coding: utf-8
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
from colorama import init
from colorama import Fore, Style
import os, sys, time, traceback, pickle, random, colorama
clear()
logo()
maxi = 0
start = '/@'
end = '/video/'
views = ' '
boosted_link = input('{}\n[>] {}TikTok Video Link ?: {}'.format(Fore.RESET, Fore.LIGHTBLUE_EX, Fore.RESET))
print(' ')
username = boosted_link[boosted_link.find(start)+len(start):boosted_link.rfind(end)]
options = webdriver.ChromeOptions()
options.add_argument('window-size=1000,900')
options.add_experimental_option("excludeSwitches", ["enable-logging"])
browser = webdriver.Chrome(options=options, executable_path=r"chromedriver.exe")
browser.minimize_window()
wait = WebDriverWait(browser, 2)
from selenium import webdriver
init()
os.system('title ' + ' TikTok Booster made by Teilaw#0001 - Boost: @{}'.format(username))
start() | [
2,
66,
7656,
25,
3384,
69,
12,
23,
201,
198,
201,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
6738,
3992,
26230,
62,
37153,
13,
46659,
1330,
13282,
32103,
13511,
201,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
... | 2.913525 | 451 |
from functools import partial
from unittest import TestCase, main
from expects import expect, equal
from questions_three.exceptions import InvalidConfiguration
from twin_sister.expects_matchers import complain
from twin_sister import open_dependency_context
from questions_three.module_cfg.module_cfg import ModuleCfg
UNSET = object()
if "__main__" == __name__:
main()
| [
6738,
1257,
310,
10141,
1330,
13027,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
11,
1388,
198,
198,
6738,
13423,
1330,
1607,
11,
4961,
198,
6738,
2683,
62,
15542,
13,
1069,
11755,
1330,
17665,
38149,
198,
6738,
15203,
62,
82,
1694,
... | 3.518519 | 108 |
# 找到字符串中所有字母异位词 | [
2,
10545,
231,
122,
26344,
108,
27764,
245,
163,
105,
99,
10310,
110,
40792,
33699,
222,
17312,
231,
27764,
245,
162,
107,
235,
28156,
224,
19526,
235,
46237,
235
] | 0.517241 | 29 |
import torch
import torch.nn as nn
import torch.nn.functional as F
# from pytorch_pretrained_bert import BertModel
from transformers.modeling_bert import BertModel
from accuracy_tool import single_label_top1_accuracy
#20200621, bert -> dropout -> conv -> linear -> bn -> softmax: acc=14.42
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
2,
422,
12972,
13165,
354,
62,
5310,
13363,
62,
4835,
1330,
22108,
17633,
198,
6738,
6121,
364,
13,
4666,
10809,
62,
483... | 3.280899 | 89 |
"""Redis Semaphore lock."""
import os
import time
SYSTEM_LOCK_ID = 'SYSTEM_LOCK'
class Semaphore(object):
"""Semaphore lock using Redis ZSET."""
def __init__(self, redis, name, lock_id, timeout, max_locks=1):
"""
Semaphore lock.
Semaphore logic is implemented in the lua/semaphore.lua script.
Individual locks within the semaphore are managed inside a ZSET
using scores to track when they expire.
Arguments:
redis: Redis client
name: Name of lock. Used as ZSET key.
lock_id: Lock ID
timeout: Timeout in seconds
max_locks: Maximum number of locks allowed for this semaphore
"""
self.redis = redis
self.name = name
self.lock_id = lock_id
self.max_locks = max_locks
self.timeout = timeout
with open(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'lua/semaphore.lua',
)
) as f:
self._semaphore = self.redis.register_script(f.read())
@classmethod
def get_system_lock(cls, redis, name):
"""
Get system lock timeout for the semaphore.
Arguments:
redis: Redis client
name: Name of lock. Used as ZSET key.
Returns: Time system lock expires or None if lock does not exist
"""
return redis.zscore(name, SYSTEM_LOCK_ID)
@classmethod
def set_system_lock(cls, redis, name, timeout):
"""
Set system lock for the semaphore.
Sets a system lock that will expire in timeout seconds. This
overrides all other locks. Existing locks cannot be renewed
and no new locks will be permitted until the system lock
expires.
Arguments:
redis: Redis client
name: Name of lock. Used as ZSET key.
timeout: Timeout in seconds for system lock
"""
pipeline = redis.pipeline()
pipeline.zadd(name, {SYSTEM_LOCK_ID: time.time() + timeout})
pipeline.expire(
name, timeout + 10
) # timeout plus buffer for troubleshooting
pipeline.execute()
def release(self):
"""Release semaphore."""
self.redis.zrem(self.name, self.lock_id)
def acquire(self):
"""
Obtain a semaphore lock.
Returns: Tuple that contains True/False if the lock was acquired and number of
locks in semaphore.
"""
acquired, locks = self._semaphore(
keys=[self.name],
args=[self.lock_id, self.max_locks, self.timeout, time.time()],
)
# Convert Lua boolean returns to Python booleans
acquired = True if acquired == 1 else False
return acquired, locks
def renew(self):
"""
Attempt to renew semaphore.
Technically this doesn't know the difference between losing the lock
but then successfully getting a new lock versus renewing your lock
before the timeout. Both will return True.
"""
return self.acquire()
| [
37811,
7738,
271,
12449,
6570,
382,
5793,
526,
15931,
198,
198,
11748,
28686,
198,
11748,
640,
198,
198,
23060,
25361,
62,
36840,
62,
2389,
796,
705,
23060,
25361,
62,
36840,
6,
628,
198,
4871,
12449,
6570,
382,
7,
15252,
2599,
198,
2... | 2.307692 | 1,365 |
__title__ = 'htms_low_api'
__version__ = '2.3.1'
__author__ = 'Arslan Aliev'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright (c) 2018-2021 Arslan S. Aliev'
from .htms_par_low import (
PAGESIZE1,
NUMPAGES1,
MAXSTRLEN1,
PAGESIZE2,
NUMPAGES2,
SERVER_IP_DNS,
MAIN_SERVER_PORT,
HTDB_ROOT,
MAX_FILE_NAME_LEN,
MAX_LEN_FILE_DESCRIPTOR,
MAX_LEN_FILE_BODY,
CAGE_SERVER,
DEBUG_UPDATE_CF_1,
DEBUG_UPDATE_CF_2,
DEBUG_UPDATE_RAM,
JWTOKEN,
CAGE_SERVER_WWW
)
from .htms_par_low import HTMS_Low_Err
from .data_types import Types_htms
from .funcs import match, links_dump, ht_dump
from .ht import HT, rename_ht, delete_ht, get_maf, compress_ht, deepcopy_ht
from .maf import MAF
__all__ = (
"HT",
"rename_ht",
"delete_ht",
"compress_ht",
"deepcopy_ht",
"MAF",
"get_maf",
"Types_htms",
"match",
"links_dump",
"ht_dump",
"HTMS_Low_Err",
"PAGESIZE1",
"NUMPAGES1",
"MAXSTRLEN1",
"PAGESIZE2",
"NUMPAGES2",
"SERVER_IP_DNS",
"MAIN_SERVER_PORT",
"HTDB_ROOT",
"MAX_FILE_NAME_LEN",
"MAX_LEN_FILE_DESCRIPTOR",
"MAX_LEN_FILE_BODY",
"CAGE_SERVER",
"DEBUG_UPDATE_CF_1",
"DEBUG_UPDATE_CF_2",
"DEBUG_UPDATE_RAM",
"JWTOKEN",
"CAGE_SERVER_WWW"
)
| [
198,
834,
7839,
834,
796,
705,
4352,
907,
62,
9319,
62,
15042,
6,
198,
834,
9641,
834,
796,
705,
17,
13,
18,
13,
16,
6,
198,
834,
9800,
834,
796,
705,
3163,
6649,
272,
978,
11203,
6,
198,
834,
43085,
834,
796,
705,
25189,
4891,
... | 1.9125 | 640 |
import re
from datetime import date, datetime
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
| [
11748,
302,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
198,
198,
11748,
15881,
88,
198,
198,
6738,
308,
1031,
5857,
13,
23814,
1330,
38772,
198,
6738,
308,
1031,
5857,
13,
2777,
4157,
13,
8692,
1330,
7308,
38,
1031,
5857,
4129... | 3.44186 | 43 |
#
# Copyright (c) 2021 Takeshi Yamazaki
# This software is released under the MIT License, see LICENSE.
#
import json
import os
from device_check import get_device_id, get_format, get_device_list
import gst_builder
| [
2,
198,
2,
15069,
357,
66,
8,
33448,
33687,
5303,
14063,
32276,
198,
2,
770,
3788,
318,
2716,
739,
262,
17168,
13789,
11,
766,
38559,
24290,
13,
198,
2,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
3335,
62,
9122,
1330,
651... | 2.471154 | 104 |
#Escreva um programa que leia dois números inteiros e compare-os
# indicando qual o maior número, menor ou se são iguais
num1 = int(input('Digite um número: '))
num2 = int(input('Digite outro número: '))
if num1 > num2:
print(f'O número {num1} é maior que o número {num2}')
elif num2 > num1:
print(f'O número: {num2} é maior que o número {num1}')
else:
print('os dois valores são IGUAIS') | [
2,
47051,
260,
6862,
23781,
1430,
64,
8358,
443,
544,
466,
271,
299,
21356,
647,
418,
493,
20295,
4951,
304,
8996,
12,
418,
198,
2,
2699,
25440,
4140,
267,
17266,
1504,
299,
21356,
647,
78,
11,
1450,
273,
267,
84,
384,
264,
28749,
... | 2.172973 | 185 |
import torch, argparse, sys
from torch import nn, optim
from torchvision import datasets, transforms, models
from my_model import MyClassifier, save_checkpoint
parser = argparse.ArgumentParser(description='IMAGE CLASSIFICATION TRAINER, PLEASE PROVIDE DIRECTORY WITH IMAGE DATA')
parser.add_argument('data_dir', type=str, default='flowers', help='Directory of input image data')
parser.add_argument('--save_dir', type=str, default='checkpoint.pth', help='Directory to save trained model')
parser.add_argument('--arch', type=str, default='densenet121', help='Model architecture options: densenet121 / vgg11 / alexnet')
parser.add_argument('--hidden_units', type=int, default=256, help='Number of perceptron units in hidden layer')
parser.add_argument('--epochs', type=int, default=2, help='Number of iterations')
parser.add_argument('--learning_rate', type=float, default=0.004, help='Learning rate per iteration')
parser.add_argument('--gpu', action='store_true', default=False, help='Set GPU required')
args = parser.parse_args()
data_dir = args.data_dir
save_dir = args.save_dir
model_arch = args.arch
n_hidden = args.hidden_units
epochs = args.epochs
learn_rate = args.learning_rate
gpu = args.gpu
traindir = data_dir + '/train'
validdir = data_dir + '/valid'
testdir = data_dir + '/test'
traintransform = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
validtransform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
trainset = datasets.ImageFolder(traindir, transform=traintransform)
validset = datasets.ImageFolder(validdir, transform=validtransform)
testset = datasets.ImageFolder(testdir, transform=validtransform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(validset, batch_size=64)
testloader = torch.utils.data.DataLoader(testset, batch_size=64)
if model_arch == 'densenet121':
model = models.densenet121(pretrained=True)
n_input = 1024
elif model_arch == 'vgg11':
model = models.vgg11(pretrained=True)
n_input = 25088
elif model_arch == 'alexnet':
model = models.alexnet(pretrained=True)
n_input == 9216
else:
print('Error: Architecture not supported... Available options: densenet121 / vgg11 / alexnet')
sys.exit()
for param in model.parameters():
param.requires_grad = False
model.classifier = MyClassifier(n_input=n_input, n_hidden=n_hidden)
device = 'cuda' if gpu else 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learn_rate)
print_steps = 10
train_losses = []
valid_losses = []
train_loss_sum = 0
for e in range(epochs):
for step, (images, labels) in enumerate(trainloader):
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
log_prob = model.forward(images)
loss = criterion(log_prob, labels)
loss.backward()
optimizer.step()
train_loss_sum += loss.item()
if (step+1) % print_steps == 0:
train_losses.append(train_loss_sum / (e*len(trainloader) + step+1))
valid_loss_sum = 0
valid_equality = torch.Tensor()
with torch.no_grad():
model.eval()
for images, labels in validloader:
images, labels = images.to(device), labels.to(device)
log_prob = model.forward(images)
loss = criterion(log_prob, labels)
valid_loss_sum += loss.item()
prob = torch.exp(log_prob)
top_prob, top_class = prob.topk(1, dim=1)
equality = (top_class == labels.view(*top_class.shape)).type(torch.FloatTensor)
valid_equality = torch.cat([valid_equality, equality], dim=0)
model.train()
valid_losses.append(valid_loss_sum / len(validloader))
accuracy = torch.mean(valid_equality).item()
print(f'Epoch: {e+1}/{epochs}, Batch Step: {step+1}/{len(trainloader)}, Training Loss: {round(train_losses[-1], 2)}, Validation Loss: {round(valid_losses[-1], 2)}, Accuracy: {round(accuracy*100)}%')
if len(valid_losses) > 1:
if valid_losses[-1] > valid_losses[-2]:
print('Training failed... Loss increasing!')
break
elif valid_losses[-1] > 0.995*valid_losses[-2]:
print('Training stopped... Gradient vanishing!')
break
if accuracy > 0.8:
print('Training succeed... Accuracy is sufficiently high!')
break
else:
continue
break
save_checkpoint(model, trainset, optimizer, save_dir, model_arch=model_arch, n_hidden=n_hidden)
print('Trained model saved to: {}'.format(save_dir))
| [
11748,
28034,
11,
1822,
29572,
11,
25064,
198,
6738,
28034,
1330,
299,
77,
11,
6436,
198,
6738,
28034,
10178,
1330,
40522,
11,
31408,
11,
4981,
198,
6738,
616,
62,
19849,
1330,
2011,
9487,
7483,
11,
3613,
62,
9122,
4122,
198,
198,
486... | 2.687465 | 1,779 |
"""The package indicator for serve.push.search."""
| [
37811,
464,
5301,
16916,
329,
4691,
13,
14689,
13,
12947,
526,
15931,
628
] | 4 | 13 |
"""empty message
Revision ID: fd05599e10a6
Revises: 8a04dc559206
Create Date: 2017-08-22 14:26:50.486659
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fd05599e10a6'
down_revision = '8a04dc559206'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
277,
67,
2713,
43452,
68,
940,
64,
21,
198,
18009,
2696,
25,
807,
64,
3023,
17896,
38605,
22136,
198,
16447,
7536,
25,
2177,
12,
2919,
12,
1828,
1478,
25,
2075,
25,
1120,
13,
2780... | 2.533898 | 118 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 功能测试
Case Name : Jsonb额外支持的操作符:? 键/元素字符串是否存在于 JSON 值的顶层,右侧操作数为text
Description :
1.键/元素字符串是否存在于 JSON 值的顶层
2.键/元素字符串是否不存在于 JSON 值的顶层
3.其他格式:null_json,bool_json,unm_json
Expect :
1.键/元素字符串存在于 JSON 值的顶层
2.键/元素字符串不存在于 JSON 值的顶层
3.其他格式:null_jsonb,bool_jsonb,num_jsonb
History :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
| [
37811,
198,
15269,
357,
66,
8,
33160,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
198,
9654,
35389,
1046,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
1639,
460,
779,
428,
3788,
1864,
284,
262,
2846,
290,
3403,
286,
262... | 1.788632 | 563 |
from flask_restplus import reqparse
from flask_restplus import fields
from server import api
from server.status import APIStatus, to_http_status
request_parser = reqparse.RequestParser()
request_parser.add_argument('token', type=str, required=True, location='headers')
request_token = api.expect(request_parser, validate=True)
| [
6738,
42903,
62,
2118,
9541,
1330,
43089,
29572,
198,
6738,
42903,
62,
2118,
9541,
1330,
7032,
198,
198,
6738,
4382,
1330,
40391,
198,
6738,
4382,
13,
13376,
1330,
7824,
19580,
11,
284,
62,
4023,
62,
13376,
628,
198,
198,
25927,
62,
4... | 3.637363 | 91 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to list file entries."""
import abc
import argparse
import logging
import sys
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import fvde_analyzer_helper
from dfvfs.helpers import command_line
from dfvfs.helpers import volume_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.lib import errors
from dfvfs.resolver import resolver
from scripts import helpers
try:
# Disable experimental FVDE support.
analyzer.Analyzer.DeregisterHelper(fvde_analyzer_helper.FVDEAnalyzerHelper())
except KeyError:
pass
class FileEntryLister(volume_scanner.VolumeScanner):
"""File entry lister."""
_NON_PRINTABLE_CHARACTERS = list(range(0, 0x20)) + list(range(0x7f, 0xa0))
_ESCAPE_CHARACTERS = str.maketrans({
value: '\\x{0:02x}'.format(value)
for value in _NON_PRINTABLE_CHARACTERS})
def __init__(self, mediator=None):
"""Initializes a file entry lister.
Args:
mediator (VolumeScannerMediator): a volume scanner mediator.
"""
super(FileEntryLister, self).__init__(mediator=mediator)
self._list_only_files = False
def _GetDisplayPath(self, path_spec, path_segments, data_stream_name):
"""Retrieves a path to display.
Args:
path_spec (dfvfs.PathSpec): path specification of the file entry.
path_segments (list[str]): path segments of the full path of the file
entry.
data_stream_name (str): name of the data stream.
Returns:
str: path to display.
"""
display_path = ''
if path_spec.HasParent():
parent_path_spec = path_spec.parent
if parent_path_spec and parent_path_spec.type_indicator == (
dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
display_path = ''.join([display_path, parent_path_spec.location])
path_segments = [
segment.translate(self._ESCAPE_CHARACTERS) for segment in path_segments]
display_path = ''.join([display_path, '/'.join(path_segments)])
if data_stream_name:
data_stream_name = data_stream_name.translate(self._ESCAPE_CHARACTERS)
display_path = ':'.join([display_path, data_stream_name])
return display_path or '/'
def _ListFileEntry(
self, file_system, file_entry, parent_path_segments, output_writer):
"""Lists a file entry.
Args:
file_system (dfvfs.FileSystem): file system that contains the file entry.
file_entry (dfvfs.FileEntry): file entry to list.
parent_path_segments (str): path segments of the full path of the parent
file entry.
output_writer (StdoutWriter): output writer.
"""
path_segments = parent_path_segments + [file_entry.name]
display_path = self._GetDisplayPath(file_entry.path_spec, path_segments, '')
if not self._list_only_files or file_entry.IsFile():
output_writer.WriteFileEntry(display_path)
# TODO: print data stream names.
for sub_file_entry in file_entry.sub_file_entries:
self._ListFileEntry(
file_system, sub_file_entry, path_segments, output_writer)
def ListFileEntries(self, base_path_specs, output_writer):
"""Lists file entries in the base path specification.
Args:
base_path_specs (list[dfvfs.PathSpec]): source path specification.
output_writer (StdoutWriter): output writer.
"""
for base_path_spec in base_path_specs:
file_system = resolver.Resolver.OpenFileSystem(base_path_spec)
file_entry = resolver.Resolver.OpenFileEntry(base_path_spec)
if file_entry is None:
path_specification_string = helpers.GetPathSpecificationString(
base_path_spec)
logging.warning(
'Unable to open base path specification:\n{0:s}'.format(
path_specification_string))
return
self._ListFileEntry(file_system, file_entry, [], output_writer)
class OutputWriter(object):
"""Output writer interface."""
def __init__(self, encoding='utf-8'):
"""Initializes an output writer.
Args:
encoding (Optional[str]): input encoding.
"""
super(OutputWriter, self).__init__()
self._encoding = encoding
self._errors = 'strict'
def _EncodeString(self, string):
"""Encodes the string.
Args:
string (str): string to encode.
Returns:
bytes: encoded string.
"""
try:
# Note that encode() will first convert string into a Unicode string
# if necessary.
encoded_string = string.encode(self._encoding, errors=self._errors)
except UnicodeEncodeError:
if self._errors == 'strict':
logging.error(
'Unable to properly write output due to encoding error. '
'Switching to error tolerant encoding which can result in '
'non Basic Latin (C0) characters to be replaced with "?" or '
'"\\ufffd".')
self._errors = 'replace'
encoded_string = string.encode(self._encoding, errors=self._errors)
return encoded_string
@abc.abstractmethod
def Close(self):
"""Closes the output writer object."""
@abc.abstractmethod
def Open(self):
"""Opens the output writer object."""
@abc.abstractmethod
def WriteFileEntry(self, path):
"""Writes the file path.
Args:
path (str): path of the file.
"""
class FileOutputWriter(OutputWriter):
"""Output writer that writes to a file."""
def __init__(self, path, encoding='utf-8'):
"""Initializes an output writer.
Args:
path (str): name of the path.
encoding (Optional[str]): input encoding.
"""
super(FileOutputWriter, self).__init__(encoding=encoding)
self._file_object = None
self._path = path
def Close(self):
"""Closes the output writer object."""
self._file_object.close()
def Open(self):
"""Opens the output writer object."""
# Using binary mode to make sure to write Unix end of lines, so we can
# compare output files cross-platform.
self._file_object = open(self._path, 'wb') # pylint: disable=consider-using-with
def WriteFileEntry(self, path):
"""Writes the file path to file.
Args:
path (str): path of the file.
"""
string = '{0:s}\n'.format(path)
encoded_string = self._EncodeString(string)
self._file_object.write(encoded_string)
class StdoutWriter(OutputWriter):
"""Output writer that writes to stdout."""
def Close(self):
"""Closes the output writer object."""
def Open(self):
"""Opens the output writer object."""
def WriteFileEntry(self, path):
"""Writes the file path to stdout.
Args:
path (str): path of the file.
"""
print(path)
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Lists file entries in a directory or storage media image.'))
argument_parser.add_argument(
'--back_end', '--back-end', dest='back_end', action='store',
metavar='NTFS', default=None, help='preferred dfVFS back-end.')
argument_parser.add_argument(
'--output_file', '--output-file', dest='output_file', action='store',
metavar='source.hashes', default=None, help=(
'path of the output file, default is to output to stdout.'))
argument_parser.add_argument(
'--partitions', '--partition', dest='partitions', action='store',
type=str, default=None, help=(
'Define partitions to be processed. A range of '
'partitions can be defined as: "3..5". Multiple partitions can '
'be defined as: "1,3,5" (a list of comma separated values). '
'Ranges and lists can also be combined as: "1,3..5". The first '
'partition is 1. All partitions can be specified with: "all".'))
argument_parser.add_argument(
'--snapshots', '--snapshot', dest='snapshots', action='store', type=str,
default=None, help=(
'Define snapshots to be processed. A range of snapshots can be '
'defined as: "3..5". Multiple snapshots can be defined as: "1,3,5" '
'(a list of comma separated values). Ranges and lists can also be '
'combined as: "1,3..5". The first snapshot is 1. All snapshots can '
'be specified with: "all".'))
argument_parser.add_argument(
'--volumes', '--volume', dest='volumes', action='store', type=str,
default=None, help=(
'Define volumes to be processed. A range of volumes can be defined '
'as: "3..5". Multiple volumes can be defined as: "1,3,5" (a list '
'of comma separated values). Ranges and lists can also be combined '
'as: "1,3..5". The first volume is 1. All volumes can be specified '
'with: "all".'))
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='image.raw',
default=None, help='path of the directory or storage media image.')
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
helpers.SetDFVFSBackEnd(options.back_end)
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
if options.output_file:
output_writer = FileOutputWriter(options.output_file)
else:
output_writer = StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}.'.format(
exception))
print('')
return False
mediator = command_line.CLIVolumeScannerMediator()
file_entry_lister = FileEntryLister(mediator=mediator)
volume_scanner_options = volume_scanner.VolumeScannerOptions()
volume_scanner_options.partitions = mediator.ParseVolumeIdentifiersString(
options.partitions)
if options.snapshots == 'none':
volume_scanner_options.snapshots = ['none']
else:
volume_scanner_options.snapshots = mediator.ParseVolumeIdentifiersString(
options.snapshots)
volume_scanner_options.volumes = mediator.ParseVolumeIdentifiersString(
options.volumes)
return_value = True
try:
base_path_specs = file_entry_lister.GetBasePathSpecs(
options.source, options=volume_scanner_options)
if not base_path_specs:
print('No supported file system found in source.')
print('')
return False
file_entry_lister.ListFileEntries(base_path_specs, output_writer)
print('')
print('Completed.')
except errors.ScannerError as exception:
return_value = False
print('')
print('[ERROR] {0!s}'.format(exception))
except KeyboardInterrupt:
return_value = False
print('')
print('Aborted by user.')
output_writer.Close()
return return_value
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
7391,
284,
1351,
2393,
12784,
526,
15931,
198,
198,
11748,
450,
66,
198,
11748,
1822,
29572,
198,
11748,
18931,
... | 2.684562 | 4,042 |
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao.
YOUR DESCRIPTION HERE
This is a breakout game.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120 # 120 frames per second
NUM_LIVES = 3 # Number of attempts
if __name__ == '__main__':
main()
| [
37811,
198,
14192,
10669,
12243,
448,
4935,
198,
48003,
276,
422,
7651,
10918,
338,
12243,
448,
416,
198,
31056,
6592,
5030,
12,
40728,
11,
39859,
494,
449,
518,
11,
8047,
38774,
11,
198,
392,
13075,
406,
13481,
13,
198,
198,
56,
1169... | 3.117188 | 128 |
from typing import Dict
import json
from beagle.logging import timer
@timer
| [
6738,
19720,
1330,
360,
713,
198,
11748,
33918,
198,
6738,
307,
19345,
13,
6404,
2667,
1330,
19781,
628,
198,
198,
31,
45016,
198
] | 3.434783 | 23 |
#! /usr/bin/python
#M3 -- Meka Robotics Robot Components
#Copyright (c) 2010 Meka Robotics
#Author: edsinger@mekabot.com (Aaron Edsinger)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with m3. If not, see <http://www.gnu.org/licenses/>.
import os
from m3.eeprom import *
import m3.toolbox as m3t
import time
from struct import pack
print 'Use this tool to overwrite the EEPROM serial number of an EtherCAT slave'
config_path=os.environ['M3_ROBOT']+'/robot_config/eeprom/'
print '--------------------------- Slaves -------------------------------'
os.system('sudo ethercat slaves')
print '------------------------------------------------------------------'
print 'Enter slave id'
sid=m3t.get_int()
print 'Enter serial number'
sn=m3t.get_int()
print 'Enter slave name (eg MA1J0 or EL4132)'
name=m3t.get_string()
#Read in eeprom
cmd='sudo ethercat sii_read -p'+str(sid)
stdout_handle = os.popen(cmd, 'r')
eep=stdout_handle.read()
if len(eep)==0:
print 'Unable to read slave EEPROM.'
exit()
#Write orig to file
fn=config_path+'eeprom_'+name+'_sn_'+str(sn)+'_orig.hex'
out_port = open_binary_output_file(fn)
for c in eep:
write_char(out_port,c)
out_port.close()
#Update binary sn field
hsn=pack('H',sn)
eep2=eep[:28]+hsn+eep[30:]
#Write to file
fn2=config_path+'eeprom_'+name+'_sn_'+str(sn)+'.hex'
out_port = open_binary_output_file(fn2)
for c in eep2:
write_char(out_port,c)
out_port.close()
#Write to slave
print 'Write to slave [y]?'
if m3t.get_yes_no('y'):
cmd='sudo ethercat -p '+str(sid)+' sii_write '+fn2
print 'Executing: ',cmd
os.system(cmd)
print 'Power cycle and hit return'
raw_input()
time.sleep(4.0)
print '--------------------------- Slaves -------------------------------'
os.system('sudo ethercat slaves')
print '------------------------------------------------------------------'
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
198,
198,
2,
44,
18,
1377,
337,
38001,
47061,
16071,
36109,
198,
2,
15269,
357,
66,
8,
3050,
337,
38001,
47061,
198,
2,
13838,
25,
1225,
82,
3889,
31,
76,
988,
397,
313,
13,
785,
357,
3445... | 2.94311 | 791 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from logging import Logger
from typing import NamedTuple, Optional
from idb.grpc.idb_grpc import CompanionServiceStub
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
... | 3.617978 | 89 |
import math
from datetime import datetime, timedelta
from django.shortcuts import get_object_or_404, render
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.db.models import Q, F, Sum
from django.utils.encoding import smart_str
from django.db import transaction
from django.views.decorators.csrf import csrf_exempt
from djangobb_forum.util import build_form, paginate, set_language
from djangobb_forum.models import Category, Forum, Topic, Post, Profile, Reputation,\
Attachment, PostTracking
from djangobb_forum.forms import AddPostForm, EditPostForm, UserSearchForm,\
PostSearchForm, ReputationForm, MailToForm, EssentialsProfileForm,\
PersonalProfileForm, MessagingProfileForm, PersonalityProfileForm,\
DisplayProfileForm, PrivacyProfileForm, ReportForm, UploadAvatarForm
from djangobb_forum.templatetags import forum_extras
from djangobb_forum import settings as forum_settings
from djangobb_forum.util import smiles, convert_text_to_html
from djangobb_forum.templatetags.forum_extras import forum_moderated_by
from djangobb_forum.decorators import require_unbanned_user
from djangobb_forum.auth import unbanned_user_requirement, isa_forum_moderator
from django.utils.translation import get_language
from django.contrib.sites.models import get_current_site
from haystack.query import SearchQuerySet, SQ
# NOTE(review): this long run of repeated decorators looks like an extraction
# artifact -- each (@transaction.commit_on_success, @login_required,
# @require_unbanned_user) triplet presumably decorated a separate view function
# that was stripped from this chunk.  As the file stands, *every* decorator
# below applies to ``post_preview`` (stacking the same decorator is legal but
# redundant).  Confirm against the full file before cleaning up.
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@transaction.commit_on_success
@login_required
@require_unbanned_user
@login_required
@require_unbanned_user
@csrf_exempt
def post_preview(request):
    """Render a live HTML preview of a post body for the MarkItUp editor.

    Reads the raw text from ``request.POST['data']``, converts it to HTML
    with the markup language configured on the requesting user's forum
    profile, optionally substitutes smiley images, and renders the preview
    template.
    """
    # Markup language (e.g. bbcode/markdown) chosen in the user's profile.
    markup = request.user.forum_profile.markup
    data = request.POST.get('data', '')
    data = convert_text_to_html(data, markup)
    if forum_settings.SMILES_SUPPORT:
        # Replace smiley codes with image tags when the feature is enabled.
        data = smiles(data)
    return render(request, 'djangobb_forum/post_preview.html', {'data': data})
| [
11748,
10688,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
220,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
651,
62,
15252,
62,
273,
62,
26429,
11,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429... | 3.057114 | 998 |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import numpy as np
import pytest
import torch
import torch.distributed as dist
from mmcv.cnn.bricks import ConvModule
from mmcv.cnn.utils import revert_sync_batchnorm
if platform.system() == 'Windows':
import regex as re
else:
import re
@pytest.mark.skipif(
torch.__version__ == 'parrots', reason='not supported in parrots now')
| [
2,
15069,
357,
66,
8,
4946,
44,
5805,
397,
13,
1439,
2489,
10395,
13,
198,
11748,
28686,
198,
11748,
3859,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
11748,
28034,
198,
11748,
28034,
13,
17080,
6169,
355,
1... | 3.118519 | 135 |
from typing import Union
import numpy as np
def hamming_from_elements(
    array: np.ndarray, elements: np.ndarray, weight: Union[float, int, np.ndarray] = 1
) -> np.ndarray:
    """Return the pairwise hamming distances between observations and points.

    The hamming distance quantifies the distance between non-numerical
    elements: for two items with K features, ``weight`` is added to the
    distance for every non-identical feature.

    Parameters
    ----------
    array : np.ndarray
        Data of shape (N, K) -- N observations with K features each.  A 1-D
        array is treated as a single observation set with K = 1
        (N = len(array)).
    elements : np.ndarray
        Reference points of shape (M, K), M arrays of K values each.
    weight : Union[float, int, np.ndarray], optional
        Penalty per mismatching feature.  A scalar applies uniformly to every
        feature; an array of length K weights each feature individually.

    Returns
    -------
    np.ndarray
        Distance matrix of shape (N, M): entry (x, y) is the hamming distance
        of observation x from reference point y.
    """
    number_of_points = elements.shape[0]
    # To handle the case where 1-D array is passed
    if array.ndim == 1:
        n_distances = 1  # If array is 1-D just one observation -> 1 distance
    else:
        n_distances = array.shape[0]  # Else: N observations -> N distances
    # BUG FIX: ``np.float`` was a deprecated alias of the builtin ``float``
    # (deprecated in NumPy 1.20, removed in 1.24) and raises AttributeError
    # on modern NumPy; use the builtin type directly.
    result = np.zeros((n_distances, number_of_points), dtype=float)
    for i in range(number_of_points):
        result[:, i] = hamming_from_element(array, elements[i], weight).squeeze()
    return result
def hamming_from_element(
    array: np.ndarray, element: np.ndarray, weight: Union[float, int, np.ndarray] = 1
) -> np.ndarray:
    """Return the hamming distances between each observation and one item.

    For every observation in ``array``, ``weight`` is accumulated once per
    feature that differs from the corresponding feature of ``element``.

    Parameters
    ----------
    array : np.ndarray
        Data of shape (N, K) -- N observations with K features each -- or a
        1-D array, interpreted as N observations with K = 1.
    element : np.ndarray
        1-D array of length K identifying the item compared against.
    weight : Union[float, int, np.ndarray], optional
        Penalty per mismatching feature; a scalar applies uniformly, an array
        of length K weights each feature individually.

    Returns
    -------
    np.ndarray
        The per-observation distances (shape (N,) for 2-D input).
    """
    # Shortcut: 1-D data compared against a single scalar item -- the
    # distance is simply the weighted mismatch indicator per entry.
    if array.ndim == 1 and element.ndim == 0:
        return (array != element).astype(int) * weight
    # Weighted elementwise mismatch mask; NumPy broadcasting lines the item
    # up against every observation row.
    weighted_mismatches = (array != element) * weight
    if array.ndim == 1:
        # Single observation: collapse across all of its features.
        return weighted_mismatches.sum()
    # One row per observation: collapse across the feature axis only.
    return weighted_mismatches.sum(axis=1)
| [
6738,
19720,
1330,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
8891,
2229,
62,
6738,
62,
68,
3639,
7,
198,
220,
220,
220,
7177,
25,
45941,
13,
358,
18747,
11,
4847,
25,
45941,
13,
358,
18747,
11,
3463,
25,
4479,
... | 2.937098 | 1,399 |
import os
import time
# Interactive menu for an FTP brute-force helper ("Bozkurt Ftp Bf Tool").
# NOTE(review): this shells out to ``hydra``, a password-cracking tool --
# for authorized security testing only.  All user-facing prompts are in
# Turkish and are runtime strings, so they are preserved verbatim.
os.system("clear")
print("################################")
print("              ")
print("      Bozkurt Ftp Bf Toolu      ")
print("              ")
print("################################")
print()
# Menu: 1 = launch the external ``hydra`` binary, 2 = show the usage guide
# ("Kullanım Kılavuzu" = user manual).
print("1.Ftp Brute Force")
print("2.Kullanım Kılavuzu")
işlem = input("İşlem =")
if işlem =="1":
	x = input("Bu işlemi Yapmadan önce Lütfen Kullanım kılavuzunu okuyunuz Devam etmek için enter'a basınız")
	time.sleep(2)
	os.system("hydra")
if işlem =="2":
	x = input("Toolu indirdiniz indirdiğiniz tool klasörunu açınız içinde user.txt ve pass.txt olcaktır o ikisini tutup masaüstune atınız klasörden cıkarıp sonra toolu python3 ile çalıştırın kolay gelsin")
| [
11748,
28686,
201,
198,
11748,
640,
201,
198,
201,
198,
418,
13,
10057,
7203,
20063,
4943,
201,
198,
201,
198,
4798,
7203,
29113,
4943,
201,
198,
4798,
7203,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.934343 | 396 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# create Noah-MP case
# author: Hui ZHENG
import os.path
import string
import shutil
# Template used to generate the model's runtime namelist file.
NOAHMP_NML = 'namelist.hrldas.template'
# Parameter lookup tables the Noah-MP land-surface model reads at start-up.
NOAHMP_TBLS = ['GENPARM.TBL', 'MPTABLE.TBL', 'SOILPARM.TBL', 'VEGPARM.TBL']
# Filename of the Noah-MP/HRLDAS executable (presumably copied/linked into
# each case directory by code elsewhere in this file -- TODO confirm).
NOAHMP_EXE = 'noahmp_hrldas.exe'
import argparse
import dateutil.parser
if __name__ == '__main__':
    # Command-line driver: parse options and hand off to ``main`` (defined
    # elsewhere in the full file; not visible in this chunk).
    parser = argparse.ArgumentParser(description='Create Noah-MP case.')
    # BUG FIX: argparse silently ignores ``default=`` on a *required*
    # positional argument -- without ``nargs='?'`` the advertised default
    # (the current working directory) was never applied and the argument was
    # mandatory.  ``nargs='?'`` makes it optional, honoring the default.
    parser.add_argument('caseroot', type=str, nargs='?',
                        default=os.getcwd(), help='case root directory')
    parser.add_argument('-m', '--modelroot', type=str,
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'noahmp'),
                        help='Noah-MP root directory')
    parser.add_argument('-n', '--namelist', type=str,
                        help='noahmp namelist template location. (default: MODELROOT/namelist.template)')
    parser.add_argument('-f', '--forcing', type=str,
                        help='top-level directory under which the forcing files are stored. (default: CASEROOT/ldasin)')
    parser.add_argument('-i', '--wrfinput', type=str,
                        help='location of wrfinput file. (default: CASEROOT/wrfinput_d01)')
    parser.add_argument('-b', '--begtime', required=True,
                        help='start date and time', type=str)
    parser.add_argument('-e', '--endtime', required=True,
                        help='end date and time (exclusive)', type=str)
    parser.add_argument('-bs', '--begtimespinup',
                        help='start date and time of spinup', type=str)
    parser.add_argument('-l', '--nloop',
                        help='number of spinup loops (default: 0 or 1 for no spinup)',
                        default=1, type=int)
    args = parser.parse_args()

    # Date strings are parsed leniently via dateutil; the spin-up start is
    # optional and passed through as None when absent.
    main(modelroot=args.modelroot,
         caseroot=args.caseroot,
         dtbeg_s=dateutil.parser.parse(args.begtimespinup) if args.begtimespinup is not None else None,
         dtbeg=dateutil.parser.parse(args.begtime),
         dtend=dateutil.parser.parse(args.endtime),
         nloop=args.nloop,
         namelist_template=args.namelist,
         forcing=args.forcing,
         wrfinput=args.wrfinput)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
2251,
18394,
12,
7378,
1339,
198,
198,
2,
1772,
25,
367,
9019,
1168,
39,
26808,
198,
198,
11748,
28686,
... | 2.143407 | 1,039 |
# Test-only single-sign-on configuration: placeholder key material and an
# example server URL -- not real credentials.
TEST_SSO_SETTINGS = {
    "enable_sso": True,
    "sso_private_key": "priv1",
    "sso_public_key": "fakeSsoPublicKey",
    "sso_url": "http://example.com/server/",
}
| [
51,
6465,
62,
5432,
46,
62,
28480,
51,
20754,
796,
1391,
198,
220,
220,
220,
366,
21633,
62,
82,
568,
1298,
6407,
11,
198,
220,
220,
220,
366,
82,
568,
62,
19734,
62,
2539,
1298,
366,
13776,
16,
1600,
198,
220,
220,
220,
366,
82... | 2.061728 | 81 |
import functools
from typing import List
import pytest
from sqlalchemy.orm import Session
from nonbonded.backend.database import models
from nonbonded.backend.database.crud.datasets import DataSetCRUD
from nonbonded.backend.database.crud.projects import BenchmarkCRUD, OptimizationCRUD
from nonbonded.backend.database.crud.results import (
BenchmarkResultCRUD,
OptimizationResultCRUD,
)
from nonbonded.backend.database.utilities.exceptions import (
BenchmarkNotFoundError,
BenchmarkResultExistsError,
BenchmarkResultNotFoundError,
DataSetEntryNotFound,
ForceFieldExistsError,
OptimizationNotFoundError,
OptimizationResultExistsError,
OptimizationResultNotFoundError,
TargetNotFoundError,
TargetResultNotFoundError,
TargetResultTypeError,
UnableToDeleteError,
UnableToUpdateError,
)
from nonbonded.library.models.forcefield import ForceField
from nonbonded.library.models.projects import Optimization
from nonbonded.library.models.results import (
DataSetResult,
DataSetResultEntry,
DataSetStatistic,
EvaluatorTargetResult,
RechargeTargetResult,
)
from nonbonded.library.statistics.statistics import StatisticType
from nonbonded.tests.backend.crud.utilities import BaseCRUDTest, create_dependencies
from nonbonded.tests.utilities.comparison import compare_pydantic_models, does_not_raise
from nonbonded.tests.utilities.factory import (
create_benchmark,
create_benchmark_result,
create_data_set,
create_data_set_statistic,
create_evaluator_target,
create_force_field,
create_optimization,
create_optimization_result,
create_statistic,
)
| [
11748,
1257,
310,
10141,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
12972,
9288,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
23575,
198,
198,
6738,
1729,
65,
623,
276,
13,
1891,
437,
13,
48806,
1330,
4981,
198,
6738,
1729,
65,
62... | 2.971326 | 558 |
# coding=utf-8
from django.conf.urls import url
from modules.finance.social_security_audit.code.add_social_security_audit import *
from modules.finance.social_security_audit.code.del_social_security_audit import *
from modules.finance.social_security_audit.code.edit_social_security_audit import *
from modules.finance.social_security_audit.code.export_social_security_audit import *
from modules.finance.social_security_audit.code.list_social_security_audit import *
# Finance - social security audit
urlpatterns = [
    url(r'^add$', SocialSecurityAuditCreate.as_view(), name="add_socialsecurityaudit"),  # create
    url(r'^(?P<pk>[0-9]+)/delete$', SocialSecurityAuditDelete.as_view(), name="del_socialsecurityaudit"),  # delete
    url(r'^list$', SocialSecurityAuditListView.as_view(), name="list_socialsecurityaudit"),  # list/query
    url(r'^(?P<pk>[0-9]+)/edit$', SocialSecurityAuditUpdate.as_view(), name="edit_socialsecurityaudit"),  # update
    url(r'^export$', SocialSecurityAuditExportView.as_view(), name="export_socialsecurityaudit"),  # export
]
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
13103,
13,
69,
14149,
13,
14557,
62,
12961,
62,
3885,
270,
13,
8189,
13,
2860,
62,
14557,
62,
12961,
62,
3885,
270,
1330,
... | 2.675603 | 373 |
import sys
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtGui as qtg
from PyQt5 import QtSql as qts
"""
TODO:
- Adding new reviews and new coffees
"""
# NOTE(review): ``CoffeeForm`` appears truncated in this chunk -- its body is
# only a docstring, so the widget-building methods were presumably stripped
# by extraction.  Verify against the full file.
class CoffeeForm(qtw.QWidget):
    """Form to display/edit all info about a coffee"""
if __name__ == '__main__':
    app = qtw.QApplication(sys.argv)
    # A reference to the main window must be kept; if it goes out of scope,
    # Qt destroys the widget.  ``MainWindow`` is not defined in this chunk --
    # presumably declared elsewhere in the full file (TODO confirm).
    mw = MainWindow()
    sys.exit(app.exec())
| [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
54,
312,
11407,
355,
10662,
4246,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
355,
10662,
23047,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
8205,
72,
355,
10662,
25297,
198,
... | 2.663158 | 190 |
#!/usr/bin/env python3.5
import asyncio
import logging
import sys
sys.path.append("lib")
from charms.layer.basic import activate_venv # noqa: E402
activate_venv()
from aiosmtpd.controller import Controller # noqa: E402
from aiosmtpd.handlers import Message # noqa: E402
from aiozmq import rpc # noqa: E402
from charmhelpers.core import hookenv # noqa: E402
import zmq # noqa: E402
if __name__ == "__main__":
    # Juju action entry point: dispatch to the SMTP/RPC server or a client
    # command based on the requested operation.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("asyncio").setLevel(logging.INFO)
    op = hookenv.action_get("operation")
    loop = asyncio.get_event_loop()
    try:
        # ``server``/``client`` are presumably defined elsewhere in the full
        # file; they are not visible in this chunk.
        loop.run_until_complete(server() if op == "start" else client(op))
    except KeyboardInterrupt:
        pass
    except Exception as e:
        # Report the failure back through the Juju action framework.
        msg = str(e)
        hookenv.action_fail(msg)
        hookenv.log(msg, level=hookenv.ERROR)
    finally:
        # Cancel any outstanding tasks before tearing the loop down.
        # NOTE(review): ``asyncio.Task.all_tasks()`` was removed in Python
        # 3.9 -- acceptable for the python3.5 shebang of this script, but it
        # will break on modern interpreters.
        for task in asyncio.Task.all_tasks():
            task.cancel()
        loop.stop()
        loop.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
13,
20,
198,
11748,
30351,
952,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
17597,
13,
6978,
13,
33295,
7203,
8019,
4943,
198,
6738,
41700,
13,
29289,
13,
35487,
1330,
15155,
62,
57... | 2.432099 | 405 |
# BSD 3-Clause License
#
# Copyright (c) 2020, Boris FX
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mocha.project import *
from mocha.ui import find_widget
# If v6, use Pyside2. If V5 or earlier use Pyside
try:
from PySide.QtGui import QLineEdit
from PySide.QtGui import QSlider
except ImportError:
from PySide2.QtWidgets import QLineEdit
from PySide2.QtWidgets import QSlider
| [
2,
347,
10305,
513,
12,
2601,
682,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
12131,
11,
25026,
19534,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198... | 3.325044 | 563 |
from typing import List
import numpy
from grid.Servicepoint import Servicepoint
from grid.Configuration import Configuration
import tensorflow as tf
from utils.utils import Utils, Storage
| [
6738,
19720,
1330,
7343,
198,
11748,
299,
32152,
198,
6738,
10706,
13,
16177,
4122,
1330,
4809,
4122,
198,
6738,
10706,
13,
38149,
1330,
28373,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
3384,
4487,
13,
26791,
1330,
7273,
4487,... | 4.318182 | 44 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198,
220,
220,
220,
220,
198
] | 2.166667 | 36 |
from . import interpreter, network, process
def ufw_reset():
    """Reset UFW rules."""
    # Thin wrapper delegating to the network module's implementation.
    network.ufw_reset()
def main():
    """Entry point for the application script."""
    # We only allow a single instance
    process.ensure_single_instance('velarium')
    # Hand control to the interactive interpreter loop (presumably blocks
    # until the user exits -- TODO confirm in the interpreter module).
    interpreter.run()
| [
6738,
764,
1330,
28846,
11,
3127,
11,
1429,
628,
198,
4299,
334,
44482,
62,
42503,
33529,
198,
220,
220,
220,
37227,
4965,
316,
471,
24160,
3173,
526,
15931,
198,
220,
220,
220,
3127,
13,
3046,
86,
62,
42503,
3419,
628,
198,
4299,
1... | 3.075269 | 93 |
# Web processing errors
# User Actions
| [
2,
5313,
7587,
8563,
628,
198,
2,
11787,
24439,
628
] | 4.2 | 10 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes html2js and appends goog.provide.
https://www.npmjs.com/package/html2js
"""
import subprocess
import sys
if __name__ == '__main__':
    # Pass raw argv through (including the program name).  ``main`` is not
    # visible in this chunk -- presumably defined elsewhere in the full file.
    main(sys.argv)
| [
2,
15069,
2177,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.474886 | 219 |
from django.contrib.auth import get_user_model
from rest_framework import serializers
from datetime import datetime
import re
from ..models import SwitchGameUS, SwitchGame
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
302,
198,
198,
6738,
11485,
27530,
1330,
14645,... | 3.744681 | 47 |
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
material = {
"red": {
0: "#ffebee",
1: "#ffcdd2",
2: "#ef9a9a",
3: "#e57373",
4: "#ef5350",
5: "#f44336",
6: "#e53935",
7: "#d32f2f",
8: "#c62828",
9: "#b71c1c",
},
"pink": {
0: "#fce4ec",
1: "#f8bbd0",
2: "#f48fb1",
3: "#f06292",
4: "#ec407a",
5: "#e91e63",
6: "#d81b60",
7: "#c2185b",
8: "#ad1457",
9: "#880e4f",
},
"purple": {
0: "#f3e5f5",
1: "#e1bee7",
2: "#ce93d8",
3: "#ba68c8",
4: "#ab47bc",
5: "#9c27b0",
6: "#8e24aa",
7: "#7b1fa2",
8: "#6a1b9a",
9: "#4a148c",
},
"deep purple": {
0: "#ede7f6",
1: "#d1c4e9",
2: "#b39ddb",
3: "#9575cd",
4: "#7e57c2",
5: "#673ab7",
6: "#5e35b1",
7: "#512da8",
8: "#4527a0",
9: "#311b92",
},
"indigo": {
0: "#e8eaf6",
1: "#c5cae9",
2: "#9fa8da",
3: "#7986cb",
4: "#5c6bc0",
5: "#3f51b5",
6: "#3949ab",
7: "#303f9f",
8: "#283593",
9: "#1a237e",
},
"blue": {
0: "#e3f2fd",
1: "#bbdefb",
2: "#90caf9",
3: "#64b5f6",
4: "#42a5f5",
5: "#2196f3",
6: "#1e88e5",
7: "#1976d2",
8: "#1565c0",
9: "#0d47a1",
},
"light blue": {
0: "#e1f5fe",
1: "#b3e5fc",
2: "#81d4fa",
3: "#4fc3f7",
4: "#29b6f6",
5: "#03a9f4",
6: "#039be5",
7: "#0288d1",
8: "#0277bd",
9: "#01579b",
},
"cyan": {
0: "#e0f7fa",
1: "#b2ebf2",
2: "#80deea",
3: "#4dd0e1",
4: "#26c6da",
5: "#00bcd4",
6: "#00acc1",
7: "#0097a7",
8: "#00838f",
9: "#006064",
},
"teal": {
0: "#e0f2f1",
1: "#b2dfdb",
2: "#80cbc4",
3: "#4db6ac",
4: "#26a69a",
5: "#009688",
6: "#00897b",
7: "#00796b",
8: "#00695c",
9: "#004d40",
},
"green": {
0: "#e8f5e9",
1: "#c8e6c9",
2: "#a5d6a7",
3: "#81c784",
4: "#66bb6a",
5: "#4caf50",
6: "#43a047",
7: "#388e3c",
8: "#2e7d32",
9: "#1b5e20",
},
"light green": {
0: "#f1f8e9",
1: "#dcedc8",
2: "#c5e1a5",
3: "#aed581",
4: "#9ccc65",
5: "#8bc34a",
6: "#7cb342",
7: "#689f38",
8: "#558b2f",
9: "#33691e",
},
"lime": {
0: "#f9fbe7",
1: "#f0f4c3",
2: "#e6ee9c",
3: "#dce775",
4: "#d4e157",
5: "#cddc39",
6: "#c0ca33",
7: "#afb42b",
8: "#9e9d24",
9: "#827717",
},
"yellow": {
0: "#fffde7",
1: "#fff9c4",
2: "#fff59d",
3: "#fff176",
4: "#ffee58",
5: "#ffeb3b",
6: "#fdd835",
7: "#fbc02d",
8: "#f9a825",
9: "#f57f17",
},
"amber": {
0: "#fff8e1",
1: "#ffecb3",
2: "#ffe082",
3: "#ffd54f",
4: "#ffca28",
5: "#ffc107",
6: "#ffb300",
7: "#ffa000",
8: "#ff8f00",
9: "#ff6f00",
},
"orange": {
0: "#fff3e0",
1: "#ffe0b2",
2: "#ffcc80",
3: "#ffb74d",
4: "#ffa726",
5: "#ff9800",
6: "#fb8c00",
7: "#f57c00",
8: "#ef6c00",
9: "#e65100",
},
"deep orange": {
0: "#fbe9e7",
1: "#ffccbc",
2: "#ffab91",
3: "#ff8a65",
4: "#ff7043",
5: "#ff5722",
6: "#f4511e",
7: "#e64a19",
8: "#d84315",
9: "#bf360c",
},
"brown": {
0: "#efebe9",
1: "#d7ccc8",
2: "#bcaaa4",
3: "#a1887f",
4: "#8d6e63",
5: "#795548",
6: "#6d4c41",
7: "#5d4037",
8: "#4e342e",
9: "#3e2723",
},
"grey": {
0: "#fafafa",
1: "#f5f5f5",
2: "#eeeeee",
3: "#e0e0e0",
4: "#bdbdbd",
5: "#9e9e9e",
6: "#757575",
7: "#616161",
8: "#424242",
9: "#212121",
},
"blue grey": {
0: "#eceff1",
1: "#cfd8dc",
2: "#b0bec5",
3: "#90a4ae",
4: "#78909c",
5: "#607d8b",
6: "#546e7a",
7: "#455a64",
8: "#37474f",
9: "#263238",
},
}
np.random.seed(123)  # deterministic bar heights for a reproducible figure
plt.figure(figsize=(8, 4))
ax = plt.subplot(1, 1, 1, frameon=False)
# Dashed reference lines at the 50% and 100% marks, drawn behind the bars.
ax.axhline(50, 0.05, 1, color="0.5", linewidth=0.5, linestyle="--", zorder=-10)
ax.axhline(100, 0.05, 1, color="0.5", linewidth=0.5, linestyle="--", zorder=-10)
n = 3
# NOTE(review): ``bars`` is not defined anywhere in this chunk -- presumably
# a helper (drawing ``n`` grouped bars per category at the given x offset)
# that lives in a part of the file not shown here.  Verify before running.
bars(0 * (n + 2), "red", n, "A")
bars(1 * (n + 2), "indigo", n, "B")
bars(2 * (n + 2), "orange", n, "C")
bars(3 * (n + 2), "teal", n, "D")
bars(4 * (n + 2), "pink", n, "E")
bars(5 * (n + 2), "cyan", n, "F")
# Six groups of n bars with a 2-unit gap between groups.
ax.set_xlim(-2, 6 * (n + 2) - 1.5)
ax.set_xticks([])
ax.set_ylim(0, 111)
ax.set_yticks([0, 50, 100])
ax.set_yticklabels(["0%", "50%", "100%"])
plt.legend(
    # bbox_to_anchor=(0.0, 1.0, 1.0, 0.1),
    loc="upper right",
    borderaxespad=0.0,
    ncol=3,
    handles=[
        mpatches.Patch(color=material["blue grey"][1], label="2018"),
        mpatches.Patch(color=material["blue grey"][3], label="2019"),
        mpatches.Patch(color=material["blue grey"][5], label="2020"),
    ],
)
plt.tight_layout()
plt.savefig("../../figures/colors/colored-hist.pdf")
plt.show()
| [
2,
16529,
10541,
198,
2,
11851,
25,
220,
220,
22060,
15612,
5612,
532,
11361,
1222,
6550,
29487,
8019,
198,
2,
6434,
25,
220,
29737,
350,
13,
13876,
70,
959,
198,
2,
13789,
25,
347,
10305,
198,
2,
16529,
10541,
198,
11748,
299,
3215... | 1.554239 | 3,881 |
#!/usr/bin/python
# -*-coding:utf-8 -*
import os
import pickle
from random import choice
from data import *
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
532,
9,
198,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
6738,
4738,
1330,
3572,
198,
6738,
1366,
1330,
1635,
628
] | 2.820513 | 39 |
from model_test import TransformerNet
import torch.nn.functional as F
from PIL import Image
from tqdm import tqdm
from utils import *
import torch
import argparse
import os
import time
import math
import sys
sys.path.append("..")
from tools import unpadding, preprocess
from thumb_instance_norm import init_thumbnail_instance_norm
if __name__ == "__main__":
    # Command-line driver for patch-wise (URST) or whole-image style transfer.
    parser = argparse.ArgumentParser()
    parser.add_argument("--content", type=str, required=True, help="path to content image")
    parser.add_argument("--model", type=str, required=True, help="path to checkpoint model")
    parser.add_argument('--patch_size', type=int, default=1000, help='patch size')
    parser.add_argument('--thumb_size', type=int, default=1024, help='thumbnail size')
    parser.add_argument('--padding', type=int, default=32, help='padding size')
    parser.add_argument('--test_speed', action="store_true", help='test the speed')
    parser.add_argument('--outf', type=str, default="output", help='path to save')
    parser.add_argument('--URST', action="store_true", help='use URST framework')
    parser.add_argument("--device", type=str, default="cuda", help="device")
    args = parser.parse_args()
    print(args)

    device = torch.device(args.device)
    os.makedirs(args.outf, exist_ok=True)
    transform = style_transform()

    # Define model and load model checkpoint.  Keys are stripped of the
    # DataParallel "module." prefix; tensors whose key contains "norm" are
    # reshaped to (1, -1, 1, 1) -- presumably to match the shape the model's
    # norm layers expect (TODO confirm against thumb_instance_norm).
    net = TransformerNet().to(device)
    checkpoint = torch.load(args.model)
    new_checkpoint = dict()
    for k, v in checkpoint.items():
        if "norm" not in k:  # idiomatic form of ``not "norm" in k``
            new_checkpoint[k.replace("module.", "")] = v
        else:
            new_checkpoint[k.replace("module.", "")] = v.reshape(1, -1, 1, 1)
    checkpoint = new_checkpoint
    net.load_state_dict(checkpoint)
    net.eval()

    # When benchmarking, repeat the run and average the later iterations.
    repeat = 15 if args.test_speed else 1
    time_list = []
    for i in range(repeat):
        PATCH_SIZE = args.patch_size
        PADDING = args.padding
        image = Image.open(args.content)
        IMAGE_WIDTH, IMAGE_HEIGHT = image.size
        torch.cuda.synchronize()
        start_time = time.time()
        if args.URST:
            # URST path: stylize a downscaled thumbnail plus padded patches.
            aspect_ratio = IMAGE_WIDTH / IMAGE_HEIGHT
            thumbnail = image.resize((int(aspect_ratio * args.thumb_size), args.thumb_size))
            thumbnail = transform(thumbnail).unsqueeze(0).to(device)
            patches = preprocess(image, padding=PADDING, transform=transform, patch_size=PATCH_SIZE, cuda=False)
            print("patch:", patches.shape)
            print("thumbnail:", thumbnail.shape)
            with torch.no_grad():
                # ``save=not args.test_speed``: skip writing files when only
                # timing (was ``False if args.test_speed else True``).
                style_transfer_thumbnail(thumbnail, save_path=os.path.join(args.outf, "thumb-%d.jpg" % args.thumb_size),
                                         save=not args.test_speed)
                style_transfer_high_resolution(patches, padding=PADDING, collection=False,
                                               save_path=os.path.join(args.outf, "ours-patch%d-padding%d.jpg"%(PATCH_SIZE, PADDING)),
                                               save=not args.test_speed)
                # style_transfer_high_resolution(patches, padding=PADDING, collection=True,
                #                                save_path=os.path.join(args.outf, "baseline-width%d-padding%d.jpg"%(PATCH_SIZE, PADDING))
                #                                )
        else:
            # Plain path: stylize the full image in one pass.
            image = transform(image).unsqueeze(0).to(device)
            print("image:", image.shape)
            with torch.no_grad():
                style_transfer_thumbnail(image, save_path=os.path.join(args.outf, "original_result.jpg"),
                                         save=not args.test_speed)
        torch.cuda.synchronize()
        time_list.append(time.time()-start_time)
    # Average the last 10 timings to skip warm-up iterations.
    print("time: %.2fs" % np.mean(time_list[-10:]))
    # print("Max GPU memory allocated: %.4f GB" % (torch.cuda.max_memory_allocated(device=0) / 1024. / 1024. / 1024.))
    # print("Total memory of the current GPU: %.4f GB" % (
    #     torch.cuda.get_device_properties(device=0).total_memory / 1024. / 1024 / 1024))
| [
6738,
2746,
62,
9288,
1330,
3602,
16354,
7934,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
3384,
4487,
1330,
1635,
198,
11748,
28034,
... | 2.27525 | 1,802 |
import os
import pandas as pd
from glob import glob
import pickle
from fbprophet import Prophet
from matplotlib import pyplot as plt
import seaborn as sns
import pickle
import numpy as np
from tslearn.utils import to_time_series_dataset
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn.clustering import TimeSeriesKMeans
# Load the market reference CSVs (read but not used within this chunk).
bull_market_prices = pd.read_csv('../data/bull-market.csv')
prices_2008 = pd.read_csv('../data/2008-market.csv')
prices_2018 = pd.read_csv('../data/2018-market.csv')
folders = os.listdir('./results')
print(folders)
for folder in folders:
    if os.path.isdir('./results/' + folder):
        # Pair each pickled seasonality dict with its fitted k-means model.
        seasonalities = glob('./results/' + folder + '/*-seasonalities.p')
        models = glob('./results/' + folder + '/*-model.p')
        for i, seasonality in enumerate(seasonalities):
            with open(seasonality, 'rb') as f:
                data = pickle.load(f)
            clf = TimeSeriesKMeans().from_pickle(models[i])
            tickers = list(data.keys())
            industries = []
            # ``get_prices`` is not defined in this chunk -- presumably a
            # helper elsewhere in the file returning rows with a 'gind'
            # (industry code) column.  TODO confirm.
            for ticker in tickers:
                industries.append(get_prices(ticker, seasonality.split('-')[1]).iloc[0]['gind'])
            dataset = []
            for value in data:
                dataset.append(data[value]['seasonality_vector'])
            X = to_time_series_dataset(dataset)
            X = TimeSeriesScalerMeanVariance().fit_transform(X)
            y = clf.predict(X)
            # Bucket the industry code of every series by predicted cluster.
            clusters = {}
            for yi in range(4):
                clusters[yi] = []
                for xx in X[y == yi]:
                    # NOTE(review): this rebinds ``i``, shadowing the
                    # enumerate index above (the index is re-assigned on the
                    # next iteration, so no cross-iteration bug, but it is a
                    # readability hazard).
                    i = np.where(X == xx)[0][0]
                    clusters[yi].append(industries[i])
            fig = plt.figure(figsize=(20, 20))
            split_file = seasonality.split('-')
            for yi in range(4):
                plt.subplot(3, 3, yi + 1)
                if yi == 1:
                    title = plt.title("Industry Code by Cluster for Sector {} ({})".format(split_file[0].split('/')[1], split_file[1]))
                    title.set_position([.5, 1.05])
                plt.xlabel('Industrial Code')
                plt.ylabel('Count')
                plt.xticks(rotation=45, ha="right")
                sns.countplot(clusters[yi], palette="RdBu")
            fig.savefig('{}-{}-industries-by-cluster.png'.format(split_file[0], split_file[1]), bbox_inches='tight', dpi=250)
        # NOTE(review): as indented this is a for/else -- the ``continue``
        # runs only after the inner loop finishes and then skips to the next
        # folder (a no-op).  If the intent was "skip non-directories", the
        # else should align with the ``if`` above.  TODO confirm intent.
        else:
            continue
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
15095,
1330,
15095,
198,
11748,
2298,
293,
198,
6738,
277,
65,
22930,
3202,
1330,
13583,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
384,
397... | 2.038884 | 1,183 |
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2014-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
import argparse
import screed
import sys
import khmer
if __name__ == '__main__':
    # ``main`` is presumably defined earlier in the full file; it is not
    # visible in this chunk.
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
770,
2393,
318,
636,
286,
44081,
647,
11,
3740,
1378,
12567,
13,
785,
14,
67,
571,
12,
23912,
14,
14636,
647,
47454,
290,
318,
198,
2,
15069,
357,
34,
8,
1946,
12,
4626,
11,... | 3.28125 | 576 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import json
import numpy as np
import os
from collections import defaultdict
import cv2
import tqdm
from fvcore.common.file_io import PathManager
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, BoxMode, Instances
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets import register_coco_instances
register_coco_instances("smd_train", {}, "/root/data/zq/data/SMD/annotations/Training/SMD_VIS_skip_2_train.json",
"/root/data/zq/data/SMD/train")
register_coco_instances("smd_val", {}, "/root/data/zq/data/SMD/annotations/Training/SMD_VIS_skip_2_val.json",
"/root/data/zq/data/SMD/train")
register_coco_instances("smd_test", {}, "/root/data/zq/data/SMD/annotations/Test/SMD_VIS_skip_10.json",
"/root/data/zq/data/SMD/test")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script that visualizes the json predictions from COCO or LVIS dataset."
)
parser.add_argument("--input", required=True, help="JSON file produced by the model")
parser.add_argument("--output", required=True, help="output directory")
parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
args = parser.parse_args()
logger = setup_logger()
with PathManager.open(args.input, "r") as f:
predictions = json.load(f)
pred_by_image = defaultdict(list)
for p in predictions:
pred_by_image[p["image_id"]].append(p)
dicts = list(DatasetCatalog.get(args.dataset))
metadata = MetadataCatalog.get(args.dataset)
if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
elif "lvis" in args.dataset:
# LVIS results are in the same format as COCO results, but have a different
# mapping from dataset category id to contiguous category id in [0, #categories - 1]
else:
raise ValueError("Unsupported dataset: {}".format(args.dataset))
os.makedirs(args.output, exist_ok=True)
for dic in tqdm.tqdm(dicts):
img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
basename = os.path.basename(dic["file_name"])
predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
vis = Visualizer(img, metadata)
vis_pred = vis.draw_instance_predictions(predictions).get_image()
vis = Visualizer(img, metadata)
vis_gt = vis.draw_dataset_dict(dic).get_image()
concat = np.concatenate((vis_pred, vis_gt), axis=1)
cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686... | 2.517271 | 1,158 |
import logging
from fnmatch import fnmatch
import six
from nose.plugins.base import Plugin
from nose.selector import Selector
log = logging.getLogger(__name__)
class MockPlugins(object):
'''Mock "plugins" that does nothing to avoid infinite recursion calls
from Selector'''
class NoseSelectPlugin(Plugin):
"""Selects test to run based on tests names matching a pattern."""
def options(self, parser, env):
"""Register command line options"""
parser.add_option("-t", "--select-tests",
dest="selection_criteria", action="append",
default=list(),
metavar="SELECT",
help="Only run tests with a name matching a case-insensitive glob pattern (See fnmatch)")
def _is_selected(self, test_obj):
"""Return True if a test object should be selected based on criteria pattern."""
if not test_obj:
return
if isinstance(test_obj, six.string_types):
name = test_obj
else:
name = objname(test_obj)
#log.debug('object name: %r' % name)
if name:
name = name.lower()
selected = any(fnmatch(name, pat) for pat in self.selection_criteria)
#log.debug('selected:%r name: %r' % (selected, name,))
return selected
else:
return False
def objname(obj):
'''Return the context qualified name of a function, method or class obj'''
if hasattr(obj, 'name'):
return obj.name
# name proper
if hasattr(obj, '__name__'):
names = [obj.__name__]
else:
#this is a class?
names = [obj.__class__.__name__]
# parent class if unbound method
cls = None
if six.PY2:
if hasattr(obj, 'im_class'):
# this is a method
cls = obj.im_class
else:
if getattr(obj, '__self__', None) is not None:
if getattr(obj.__self__, 'cls', None) is not None:
cls = obj.__self__.cls
# parent class if bound method
if getattr(obj, '__self__', None) is not None:
if getattr(obj.__self__, '__class__', None) is not None:
cls = obj.__self__.__class__
if cls is not None:
names.insert(0, cls.__name__)
# module, but ignore __main__ module
if cls is not None:
obj = cls
if hasattr(obj, '__module__') and not obj.__module__.startswith('_'):
names.insert(0, obj.__module__)
name = '.'.join(names)
return name
| [
198,
11748,
18931,
198,
6738,
24714,
15699,
1330,
24714,
15699,
198,
198,
11748,
2237,
198,
198,
6738,
9686,
13,
37390,
13,
8692,
1330,
42636,
198,
6738,
9686,
13,
19738,
273,
1330,
9683,
273,
198,
198,
6404,
796,
18931,
13,
1136,
11187... | 2.232112 | 1,146 |
import os
from argparse import ArgumentParser
import h5py
root_dir = os.path.dirname(os.path.realpath(__file__))
root_parser = ArgumentParser(add_help=False)
root_parser.add_argument('--hdf5_file', type=str, required=True, help='path to the datasets to combine', action="append")
root_parser.add_argument('--save_file', type=str, default=os.path.abspath(os.path.join(root_dir, "combined_dataset.hdf5")))
params = root_parser.parse_args()
assert len(params.hdf5_file) >= 2, "Need at least two datasets to combine"
files = [h5py.File(path, mode="r") for path in params.hdf5_file]
keys = [list(h5.keys()) for h5 in files]
_ = [h5.close() for h5 in files]
h5_all = h5py.File(params.save_file, mode='w')
s_idx = 0
for dataset, path in zip(keys, params.hdf5_file):
for key in dataset:
h5_all[str("s{:03d}".format(s_idx))] = h5py.ExternalLink(path, key)
s_idx += 1
h5_all.flush()
h5_all.close()
| [
11748,
28686,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
11748,
289,
20,
9078,
198,
198,
15763,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
5305,
6978,
7,
834,
7753,
834,
4008,
198,
198,
15763,
... | 2.475676 | 370 |
# -*- coding: utf-8 -*-
#
# Cipher/DES3.py : DES3
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Triple DES symmetric cipher
`Triple DES`__ (or TDES or TDEA or 3DES) is a symmetric block cipher standardized by NIST_.
It has a fixed data block size of 8 bytes. Its keys are 128 (*Option 1*) or 192
bits (*Option 2*) long.
However, 1 out of 8 bits is used for redundancy and do not contribute to
security. The effective key length is respectively 112 or 168 bits.
TDES consists of the concatenation of 3 simple `DES` ciphers.
The plaintext is first DES encrypted with *K1*, then decrypted with *K2*,
and finally encrypted again with *K3*. The ciphertext is decrypted in the reverse manner.
The 192 bit key is a bundle of three 64 bit independent subkeys: *K1*, *K2*, and *K3*.
The 128 bit key is split into *K1* and *K2*, whereas *K1=K3*.
It is important that all subkeys are different, otherwise TDES would degrade to
single `DES`.
TDES is cryptographically secure, even though it is neither as secure nor as fast
as `AES`.
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import DES
>>> from Crypto import Random
>>> from Crypto.Util import Counter
>>>
>>> key = b'-8B key-'
>>> nonce = Random.new().read(DES.block_size/2)
>>> ctr = Counter.new(DES.block_size*8/2, prefix=nonce)
>>> cipher = DES.new(key, DES.MODE_CTR, counter=ctr)
>>> plaintext = b'We are no longer the knights who say ni!'
>>> msg = nonce + cipher.encrypt(plaintext)
.. __: http://en.wikipedia.org/wiki/Triple_DES
.. _NIST: http://csrc.nist.gov/publications/nistpubs/800-67/SP800-67.pdf
:undocumented: __revision__, __package__
"""
__revision__ = "$Id$"
from Crypto.Cipher import blockalgo
from Crypto.Cipher import _DES3
class DES3Cipher(blockalgo.BlockAlgo):
"""TDES cipher object"""
def __init__(self, key, *args, **kwargs):
"""Initialize a TDES cipher object
See also `new()` at the module level."""
blockalgo.BlockAlgo.__init__(self, _DES3, key, *args, **kwargs)
def new(key, *args, **kwargs):
"""Create a new TDES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
It must be 16 or 24 bytes long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
Default is `MODE_ECB`.
IV : byte string
The initialization vector to use for encryption or decryption.
It is ignored for `MODE_ECB` and `MODE_CTR`.
For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption
and `block_size` +2 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
It is mandatory.
For all other modes, it must be `block_size` bytes longs. It is optional and
when not present it will be given a default value of all zeroes.
counter : callable
(*Only* `MODE_CTR`). A stateful function that returns the next
*counter block*, which is a byte string of `block_size` bytes.
For better performance, use `Crypto.Util.Counter`.
segment_size : integer
(*Only* `MODE_CFB`).The number of bits the plaintext and ciphertext
are segmented in.
It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.
:Attention: it is important that all 8 byte subkeys are different,
otherwise TDES would degrade to single `DES`.
:Return: an `DES3Cipher` object
"""
return DES3Cipher(key, *args, **kwargs)
#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`.
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`.
MODE_CTR = 6
#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`.
MODE_OPENPGP = 7
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = ( 16, 24 )
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
220,
44334,
14,
30910,
18,
13,
9078,
1058,
22196,
18,
198,
2,
198,
2,
38093,
855,
198,
2,
383,
10154,
286,
428,
2393,
389,
7256,
284,
262,
1171,
7386,
1... | 2.923077 | 1,755 |
from __future__ import absolute_import
from ipyvolume._version import __version__
from ipyvolume import styles
from ipyvolume import examples
from ipyvolume import datasets
from ipyvolume import embed
from ipyvolume.widgets import (Mesh,
Scatter,
Volume,
Figure,
quickquiver,
quickscatter,
quickvolshow)
from ipyvolume.transferfunction import (TransferFunction,
TransferFunctionJsBumps,
TransferFunctionWidgetJs3,
TransferFunctionWidget3)
from ipyvolume.pylab import (current,
clear,
controls_light,
figure,
gcf,
xlim,
ylim,
zlim,
xyzlim,
squarelim,
plot_trisurf,
plot_surface,
plot_wireframe,
plot_mesh,
plot,
scatter,
quiver,
show,
animate_glyphs,
animation_control,
gcc,
transfer_function,
plot_isosurface,
volshow,
save,
movie,
screenshot,
savefig,
xlabel,
ylabel,
zlabel,
xyzlabel,
view,
style,
plot_plane,
selector_default)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
20966,
88,
29048,
13557,
9641,
1330,
11593,
9641,
834,
198,
6738,
20966,
88,
29048,
1330,
12186,
198,
6738,
20966,
88,
29048,
1330,
6096,
198,
6738,
20966,
88,
29048,
1330,
... | 1.377721 | 1,562 |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = 'NanumBarunGothic'
plt.rcParams["font.size"] = 12
mpl.rcParams['axes.unicode_minus'] = False
# 모든 상태의 개수 (종료 상태 제외)
N_STATES = 19
# 감가율
GAMMA = 1
# 종료 상태를 제외한 모든 상태
STATES = np.arange(1, N_STATES + 1)
# 초기 상태 지정
START_STATE = 10
# 두 개의 종료 상태 지정
# 맨 왼쪽의 종료 상태(0)으로 이동하는 행동은 -1의 보상 발생
# 맨 오른쪽의 종료 상태(20)으로 이동하는 행동은 1의 보상 발생
TERMINAL_STATES = [0, N_STATES + 1]
# 벨만 방정식으로 유도된 올바른 상태 가치 값
TRUE_VALUE = np.arange(-20, 22, 2) / 20.0
TRUE_VALUE[0] = TRUE_VALUE[-1] = 0
print(TRUE_VALUE)
# n-스텝 TD 방법
# @value: 본 함수에 의하여 갱신될 각 상태의 가치
# @n: n-스텝 TD 방법의 n
# @alpha: 스텝 사이즈
if __name__ == '__main__':
n_step_td_for_random_walk()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
489,
83,
13,
6015,
10044,
4105,
14692,
10331,
13,
17989,
8973,
796,
705,
45,
272,
388... | 1.099554 | 673 |
'''
Author: Shuailin Chen
Created Date: 2021-05-18
Last Modified: 2021-05-19
content: adapt from Song Hui's matlab code
'''
import os.path as osp
import numpy as np
from mylib import polSAR_utils as psr
def determinant(A):
''' Calculate determinant of a C3 matrix
Args:
A (ndarray): PolSAR data
Returns:
det (ndarray): determint
'''
A = psr.as_format(A, 'complex_vector_9')
det = A[0, ...] * A[4, ...] * A[8, ...] + A[1, ...] * A[5, ...] \
* A[6, ...] + A[2, ...] * A[3, ...] * A[7, ...] - A[2, ...] \
* A[4, ...] * A[6, ...] - A[1, ...] * A[3, ...] * A[8, ...] \
- A[0, ...] * A[5, ...] * A[7, ...]
return det
def inverse(A):
''' Calculate inverse matrix of a C3 matrix
Args:
A (ndarray): PolSAR data
Returns:
inv (ndarray): inverse matrix
'''
A = psr.as_format(A, 'complex_vector_9')
confA = np.zeros_like(A)
confA[0, ...] = A[4, ...] * A[8, ...] - A[5, ...] * A[7, ...]
confA[1, ...] = -(A[3, ...] * A[8, ...] - A[5, ...] * A[6, ...])
confA[2, ...] = A[3, ...] * A[7, ...] - A[4, ...] * A[6, ...]
confA[3, ...] = -(A[1, ...] * A[8, ...] - A[2, ...] * A[7, ...])
confA[4, ...] = A[0, ...] * A[8, ...] - A[2, ...] * A[6, ...]
confA[5, ...] = -(A[0, ...] * A[7, ...] - A[1, ...] * A[6, ...])
confA[6, ...] = A[1, ...] * A[5, ...] - A[2, ...] * A[4, ...]
confA[7, ...] = -(A[0, ...] * A[5, ...] - A[2, ...] * A[3, ...])
confA[8, ...] = A[0, ...] * A[4, ...] - A[1, ...] * A[3, ...]
adjA = np.zeros_like(A)
for m in range(1, 4):
for n in range(1, 4):
adjA[(m-1)*3+n-1, ...] = confA[(n-1)*3+m-1, ...]
det = determinant(A)
P = 9
inv = adjA / np.tile(det[np.newaxis], (P, 1, 1))
return inv
def distance_by_c3(A, B, type):
''' Pixel-Level Difference Map, by calculating the pixelwise similarities of C3 data between two PolSAR images
Args:
A/B (ndarray): PolSAR data
type (str): distance metric type, 'Bartlett' or 'rw' (revised Wishart)
or 'srw' (symmetric revised Wishart)
Returns:
difference map, in shape like Arg A's
'''
q = 3
A = psr.as_format(A, 'complex_vector_9')
B = psr.as_format(B, 'complex_vector_9')
if type == 'Bartlett':
logdetA = 0.5*np.real(np.log(np.abs(determinant(A))))
logdetB = 0.5*np.real(np.log(np.abs(determinant(B))))
D = np.log(np.abs(determinant((A+B)))) - (logdetA+logdetB)
elif type in ('srw', 'symmetric revised Wishart'):
iA = inverse(A)
iB = inverse(B)
D = np.real(
np.sum(iA * B[[0, 3, 6, 1, 4, 7, 2, 5, 8], ...], axis=0) \
+ np.sum(iB * A[[0, 3, 6, 1, 4, 7, 2, 5, 8], ...], axis=0)
)
D = 0.5*D - q
elif type in ('rw', 'revised Wishart'):
logdetB = np.real(np.log(np.abs(determinant(B))))
logdetA = np.real(np.log(np.abs(determinant(A))))
iB = inverse(B)
iB = iB[[0, 3, 6, 1, 4, 7, 2, 5, 8], ...]
D = logdetB - logdetA + np.sum(iB*A, 0) - q
D = np.real(D)
return D
if __name__ == '__main__':
fa = r'data/2009_SUB_SUB/C3'
fb = r'data/2010_SUB_SUB/C3'
c31 = psr.read_c3(fa)
c32 = psr.read_c3(fb)
# print(determinant(np.expand_dims(c31[:, :15, 0], 2)))
# print(np.squeeze(inverse(np.expand_dims(c31[:, :15, 0], 2)).T))
c31 = np.expand_dims(c31[:, :15, 0], 2)
c32 = np.expand_dims(c32[:, :15, 0], 2)
print(distance_by_c3(c31, c32, 'srw')) | [
7061,
6,
198,
13838,
25,
32344,
603,
259,
12555,
198,
41972,
7536,
25,
33448,
12,
2713,
12,
1507,
198,
5956,
40499,
25,
33448,
12,
2713,
12,
1129,
198,
197,
11299,
25,
6068,
422,
10940,
367,
9019,
338,
2603,
23912,
2438,
198,
7061,
... | 1.883598 | 1,890 |
from evosoro.tools.utils import xml_format
import numpy as np
# TODO: classes should hold dictionaries of variables, vxa tags and values
# TODO: remove most of the hard coded text from read_write_voxelyze.py and replace with a few loops
# TODO: add method to VoxCadParams for organizing (nested) subsections in vxa files
class VoxCadParams(object):
"""Container for VoxCad parameters."""
class Sim(VoxCadParams):
"""Container for VoxCad simulation parameters."""
class Env(VoxCadParams):
"""Container for VoxCad environment parameters."""
class Material(VoxCadParams):
"""Container for VoxCad material parameters."""
# TODO: this class is currently not used
class ObjectiveDict(dict):
"""A dictionary describing the objectives for optimization. See self.add_objective()."""
# def __setitem__(self, key, value):
# # only allow adding entries through add_objective()
# raise SyntaxError
# TODO: want to restrict input but this prevents deep copying: maybe instead just make object with embedded dict
def add_objective(self, name, maximize, tag, node_func=None, output_node_name=None, logging_only=False):
"""Add an optimization objective to the dictionary.
Objectives must be added in order of importance, however fitness is fixed to be the most important.
The keys of an ObjectiveDict correspond to the objective's rank or importance. The ranks are set via the order
in which objectives are added (fitness will auto-correct to rank 0).
For each rank key, starting with 0, the corresponding value is another dictionary with three components:
name, maximized, tag.
Parameters
----------
name : str
The associated individual-level attribute name
maximize : bool
Whether superior individuals maximized (True) or minimize (False) the objective.
tag : str or None
The tag used in parsing the resulting output from a VoxCad simulation.
If this is None then the attribute is calculated outside of VoxCad (in Python only).
node_func : function
If tag is None then the objective is not computed in VoxCad and is instead calculated on an output of a
network.
output_node_name : str
The output node which node_func operates on.
logging_only : bool
If True then don't use as objective, only to track statistics from the simulation.
"""
curr_rank = self.max_rank
# if fitness is not added first, shift every other objective "down" in importance
if name == "fitness" and self.max_rank > 0:
curr_rank = 0 # change the key to rank 0
for rank in reversed(range(len(self))):
self[rank+1] = self[rank]
super(ObjectiveDict, self).__setitem__(curr_rank, {"name": name,
"maximize": maximize,
"tag": xml_format(tag) if tag is not None else None,
"worst_value": -10e6 if maximize else 10e6,
#"worst_value": 0,
"node_func": node_func,
"output_node_name": output_node_name,
"logging_only": logging_only})
# TODO: logging_only 'objectives' should be a separate 'SimStats' class
self.max_rank += 1
| [
6738,
819,
418,
16522,
13,
31391,
13,
26791,
1330,
35555,
62,
18982,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
16926,
46,
25,
6097,
815,
1745,
48589,
3166,
286,
9633,
11,
410,
27865,
15940,
290,
3815,
198,
2,
16926,
46,
25,
4... | 2.413975 | 1,517 |
from django.db.models import F
from django.utils import timezone
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.decorators import list_route, detail_route
from rest_framework.response import Response
from rest_framework.mixins import RetrieveModelMixin
from api.models import ScheduledTest
from api.serializers import ScheduledTestListSerializer, ScheduledTestDetailsSerializer, SubmittedTestListSerializer
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
376,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
1334,
62,
30604,
13,
1177,
28709,
1330,
9104,
7680,
7248,
11,
3582,
7248,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
... | 3.946429 | 112 |
from sklearn.kernel_ridge import KernelRidge
from sklearn.grid_search import GridSearchCV
import numpy as np
import csv
rawData = np.zeros((4104,4))
with open('Processed Time Dependent Data.csv', 'rb') as f:
reader = csv.reader(f)
labels = reader.next()
i = 0
for row in reader:
for j in range(4):
rawData[i,j] = float(row[j])
i = i+1
kr = KernelRidge(alpha=1, kernel='rbf', gamma=None, degree=3, coef0=1, kernel_params=None)
kr.fit(rawData[:,0:3],rawData[:,3])
samplePoints = np.zeros((6000,4))
with open('Sample Points 2.csv', 'rb') as dat:
readDat = csv.reader(dat)
i = 0
for row in readDat:
for j in range(3):
samplePoints[i,j] = float(row[j])
i = i+1
for iterno in range(1,13):
samplePoints[:,0] = iterno
predictedPoints = kr.predict(samplePoints[:,0:3])
samplePoints[:,3] = predictedPoints
filename = 'Output Data ' + str(iterno) +'.csv'
with open(filename, 'wb') as output:
writer = csv.writer(output, delimiter=',')
for row in samplePoints:
writer.writerow(row)
| [
6738,
1341,
35720,
13,
33885,
62,
12818,
1330,
32169,
49,
3130,
198,
6738,
1341,
35720,
13,
25928,
62,
12947,
1330,
24846,
18243,
33538,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
21370,
198,
198,
1831,
6601,
796,
45941,
13,
9... | 2.303719 | 484 |
import sys
import attr
import parse
from contextlib import redirect_stdout
original_stdout = sys.stdout
@attr.s()
def parse_stdout(patterns, *callbacks):
"""Parses each stdout line in line with `patterns` argument and sequentially calls callbacks
Args:
patterns: tuple of patterns compatible with `parse.search` function
*callbacks:
any functions or functors to be called from result of pattern search,
order matters, as callbacks can modify the pattern search dictionary
Returns:
Context manager redirecting stdout and parsing it line-by-line
Examples:
>>> import minikts.api as kts
>>> from lightgbm import LGBMClassifier
>>> with kts.parse_stdout(kts.patterns.lightgbm, kts.MatplotlibCallback(interval=50)):
... model = LGBMClassifier(n_estimators=1000)
... model.fit(x_train, y_train, eval_set=[(x_train, y_train), (x_test, y_test)])
"""
return redirect_stdout(StreamParser(patterns, callbacks=callbacks))
class Patterns:
"""Common patterns for stdout parsing
Examples:
>>> import minikts.api as kts
>>> from lightgbm import LGBMClassifier
>>> with kts.parse_stdout(kts.patterns.lightgbm, kts.MatplotlibCallback(interval=50)):
... model = LGBMClassifier(n_estimators=1000)
... model.fit(x_train, y_train, eval_set=[(x_train, y_train), (x_test, y_test)])
>>> with kts.parse_stdout(kts.patterns.lightgbm_valid_only, kts.MatplotlibCallback(interval=50)):
... model = LGBMClassifier(n_estimators=1000)
... model.fit(x_train, y_train, eval_set=[(x_test, y_test)])
>>> from catboost import CatBoostClassifier
>>> with kts.parse_stdout(kts.patterns.catboost, kts.MatplotlibCallback(interval=50)):
... model = CatBoostClassifier(n_estimators=1000)
... model.fit(x_train, y_train, eval_set=(x_test, y_test)])
>>> with kts.parse_stdout(kts.patterns.catboost, kts.MatplotlibCallback(interval=50)):
... model = CatBoostClassifier(n_estimators=1000)
... model.fit(x_train, y_train])
>>> from lightfm import LightFM
>>> with kts.parse_stdout(kts.patterns.lightfm, kts.MatplotlibCallback(interval=2)):
... model = LightFM()
... model.fit(interactions, epochs=100, verbose=True)
"""
catboost = ("learn: {train:g}", "test: {valid:g}", "{step:d}:")
xgboost_valid_only = ("valid{}:{valid:g}", "[{step:d}]")
lightgbm_valid_only = ("valid{}:{valid:g}", "[{step:d}]")
xgboost = ("valid{}0{}:{train:g}", "valid{}1{}:{valid:g}", "[{step:d}]")
lightgbm = ("valid{}0{}:{train:g}", "valid{}1{}:{valid:g}", "[{step:d}]")
lightfm = ("{}{epoch:d}", "{}{step:d}")
| [
11748,
25064,
198,
11748,
708,
81,
198,
11748,
21136,
198,
6738,
4732,
8019,
1330,
18941,
62,
19282,
448,
198,
198,
14986,
62,
19282,
448,
796,
25064,
13,
19282,
448,
198,
198,
31,
35226,
13,
82,
3419,
198,
198,
4299,
21136,
62,
19282... | 2.325021 | 1,203 |
import unittest
import pathlib
from diary_tool_main.diary_tool import set_questions_file_path
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
3108,
8019,
198,
6738,
26339,
62,
25981,
62,
12417,
13,
67,
8042,
62,
25981,
1330,
900,
62,
6138,
507,
62,
7753,
62,
6978,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
19... | 2.716981 | 53 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
import logging
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
__all__ = ['CWT']
def gauss_kernel(radius, n_sigmas=8):
"""Normalized 2D gauss kernel array.
"""
sizex = int(n_sigmas * radius)
sizey = int(n_sigmas * radius)
radius = float(radius)
xc = 0.5 * sizex
yc = 0.5 * sizey
y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]
x = x - xc
y = y - yc
x = x / radius
y = y / radius
g = np.exp(-0.5 * (x ** 2 + y ** 2))
return g / (2 * np.pi * radius ** 2) # g.sum()
def difference_of_gauss_kernel(radius, scale_step, n_sigmas=8):
"""Difference of 2 Gaussians (i.e. Mexican hat) kernel array.
"""
sizex = int(n_sigmas * scale_step * radius)
sizey = int(n_sigmas * scale_step * radius)
radius = float(radius)
xc = 0.5 * sizex
yc = 0.5 * sizey
y, x = np.mgrid[0:sizey - 1, 0:sizex - 1]
x = x - xc
y = y - yc
x1 = x / radius
y1 = y / radius
g1 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))
g1 = g1 / (2 * np.pi * radius ** 2) # g1.sum()
x1 = x1 / scale_step
y1 = y1 / scale_step
g2 = np.exp(-0.5 * (x1 ** 2 + y1 ** 2))
g2 = g2 / (2 * np.pi * radius ** 2 * scale_step ** 2) # g2.sum()
return g1 - g2
class CWT(object):
"""Continuous wavelet transform.
TODO: describe algorithm
TODO: give references
Initialization of wavelet family.
Parameters
----------
min_scale : float
first scale used
nscales : int
number of scales considered
scale_step : float
base scaling factor
"""
def set_data(self, image, background):
"""Set input images."""
# TODO: check that image and background are consistent
self.image = image - 0.0
self.nx, self.ny = self.image.shape
self.filter = np.zeros((self.nx, self.ny))
self.background = background - 0.0 # hack because of some bug with old version of fft in numpy
self.model = np.zeros((self.nx, self.ny))
self.approx = np.zeros((self.nx, self.ny))
self.transform = np.zeros((self.nscale, self.nx, self.ny))
self.error = np.zeros((self.nscale, self.nx, self.ny))
self.support = np.zeros((self.nscale, self.nx, self.ny))
def set_file(self, filename):
"""Set input images from FITS file"""
# TODO: check the existence of extensions
# Open fits files
hdulist = fits.open(filename)
# TODO: don't hardcode extension numbers and names here ... pass on from gp-cwt
self.set_data(hdulist[0].data, hdulist['NormOffMap'].data)
self.header = hdulist[0].header
self.wcs = WCS(self.header)
def do_transform(self):
"""Do the transform itself."""
# TODO: after unit tests are added switch to astropy fftconvolve here.
from scipy.signal import fftconvolve
total_background = self.model + self.background + self.approx
excess = self.image - total_background
for key, kern in self.kernbase.items():
self.transform[key] = fftconvolve(excess, kern, mode='same')
self.error[key] = np.sqrt(fftconvolve(total_background, kern ** 2, mode='same'))
self.approx = fftconvolve(self.image - self.model - self.bkg,
self.kern_approx, mode='same')
self.approx_bkg = fftconvolve(self.bkg, self.kern_approx, mode='same')
def compute_support_peak(self, nsigma=2.0, nsigmap=4.0, remove_isolated=True):
"""Compute the multiresolution support with hard sigma clipping.
Imposing a minimum significance on a connex region of significant pixels
(i.e. source detection)
"""
from scipy.ndimage import label
# TODO: check that transform has been performed
sig = self.transform / self.error
for key in self.scales.keys():
tmp = sig[key] > nsigma
# produce a list of connex structures in the support
l, n = label(tmp)
for id in range(1, n):
index = np.where(l == id)
if remove_isolated:
if index[0].size == 1:
tmp[index] *= 0.0 # Remove isolated pixels from support
signif = sig[key][index]
if signif.max() < nsigmap: # Remove significant pixels island from support
tmp[index] *= 0.0 # if max does not reach maximal significance
self.support[key] += tmp
self.support[key] = self.support[key] > 0.
def inverse_transform(self):
"""Do the inverse transform (reconstruct the image)."""
res = np.sum(self.support * self.transform, 0)
self.filter += res * (res > 0)
self.model = self.filter
return res
def iterative_filter_peak(self, nsigma=3.0, nsigmap=4.0, niter=2, convergence=1e-5):
"""Run iterative filter peak algorithm."""
var_ratio = 0.0
for iiter in range(niter):
self.do_transform()
self.compute_support_peak(nsigma, nsigmap)
res = self.inverse_transform()
residual = self.image - (self.model + self.approx)
tmp_var = residual.var()
if iiter > 0:
var_ratio = abs((self.residual_var - tmp_var) / self.residual_var)
if var_ratio < convergence:
logging.info("Convergence reached at iteration {0}".format(iiter + 1))
return res
self.residual_var = tmp_var
logging.info("Convergence not formally reached at iteration {0}".format(iiter + 1))
logging.info("Final convergence parameter {0}. Objective was {1}."
"".format(convergence, var_ratio))
return res
def max_scale_image(self):
"""Compute the maximum scale image."""
maximum = np.argmax(self.transform, 0)
return self.scale_array[maximum] * (self.support.sum(0) > 0)
def save_filter(self, filename, clobber=False):
"""Save filter to file."""
hdu = fits.PrimaryHDU(self.filter, self.header)
hdu.writeto(filename, clobber=clobber)
fits.append(filename, self.approx, self.header)
fits.append(filename, self.filter + self.approx, self.header)
fits.append(filename, self.max_scale_image(), self.header)
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
7297,
198,
11748,
18931,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
6468,
... | 2.231087 | 2,908 |
import numpy as np
import tensorflow as tf
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628
] | 3.384615 | 13 |
####################################
# Driftwood 2D Game Dev. Suite #
# widgettree.py #
# Copyright 2014-2017 #
# Michael D. Reiley & Paul Merrill #
####################################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
import collections
from ctypes import byref, c_int
from sdl2.sdlttf import *
class WidgetTree:
"""This class reads and builds Widget Trees.
"""
def __init__(self, manager, filename, template_vars):
"""Wrapped from WidgetManager.load().
"""
self.manager = manager
self.driftwood = self.manager.driftwood
widget_list = []
self.__success = True
tree = self.driftwood.resource.request_template(filename, template_vars)
if not tree:
self.__success = False
self.driftwood.log.msg("WARNING", "Widget", "load", "Failed to read widget tree", filename)
if self.__success:
for branch in tree:
# Read the widget tree, one base branch at a time.
res = self.__read_branch(None, branch, template_vars)
if res is None:
self.__success = False
self.driftwood.log.msg("WARNING", "Widget", "load", "Failed to read widget tree branch")
break
widget_list.append(res)
if not self.__success:
self.widgets = None
else:
# Collect a list of IDs of all widgets that were constructed.
self.widgets = list(self.__flatten(widget_list))
def __read_branch(self, parent, branch, template_vars={}):
"""Recursively read and process widget tree branches."""
widget_list = []
if "include" in branch:
# Replace this branch with an included tree.
if "vars" in branch:
# We are overlaying variables.
branch = self.__include(branch["include"], {**template_vars, **branch["vars"]})
else:
branch = self.__include(branch["include"], template_vars)
if not branch:
return None
if type(branch) == list:
# This is a list of branches.
for b in branch:
res = self.__read_branch(parent, b, template_vars)
if res is None:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch", "Failed to read widget tree branch")
return None
widget_list.append(res)
return widget_list
if branch["type"] in ["container", "menu"]:
# Insert a container or menu.
c = self.manager.insert_container(
imagefile=self.__gp(branch, "image", None),
x=self.__gp(branch, "x", None),
y=self.__gp(branch, "y", None),
width=self.__gp(branch, "width", 0),
height=self.__gp(branch, "height", 0),
parent=parent,
active=True
)
if branch["type"] == "container":
# It's a regular container.
if c is None:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"failed to prepare container widget")
return None
if not self.__postprocess_container(c, branch):
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"Failed to post-process container widget", c)
return None
widget_list.append(c)
if "members" in branch:
# There are more branches. Recurse them.
for b in branch["members"]:
res = self.__read_branch(c, b, template_vars)
if res is None:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"failed to read widget tree branch")
return None
widget_list.append(res)
else:
# It's a menu.
if c is None:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"failed to prepare menu widget")
return None
if not self.__postprocess_container(c, branch):
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"Failed to post-process menu widget", c)
return None
slotmap = self.__build_menu(c, branch)
if not slotmap:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"Failed to build menu widget", c)
return None
widget_list.append(c)
widget_list.append(slotmap)
return widget_list
elif branch["type"] == "text":
# Process and insert a text widget.
res = self.__process_text(parent, branch)
if res is None:
self.driftwood.log.msg("WARNING", "Widget", "__read_branch",
"Failed to process text widgets")
return None
widget_list.append(res)
return widget_list
return None
def __process_text(self, parent, branch):
"""Process Text
Process and insert a text widget, accounting for multiple lines and line-spacing.
"""
# There are several ways this value could be input.
if type(branch["contents"]) is str:
contents = [branch["contents"]]
elif type(branch["contents"]) is list:
contents = branch["contents"]
else:
self.driftwood.log.msg("WARNING", "Widget", "__process_text", "text contents must be string or list")
return None
font = self.driftwood.resource.request_font(branch["font"], branch["size"]).font
tw, th = c_int(), c_int()
# Wrap text.
if "wrap" in branch:
wrapped_contents = []
for n in range(len(contents)):
# Go through each existing line separately.
current = contents[n]
while current:
for p in reversed(range(len(current))):
# Slice back through the line in reverse until it's small enough.
if p:
TTF_SizeUTF8(font, current[:p].encode(), byref(tw), byref(th))
if tw.value > branch["wrap"]:
# Segment is not small enough yet.
continue
if p:
# Append the small-enough line segment to the new list of lines and cut it off of what's
# left to be processed.
wrapped_contents.append(current[:p].strip())
current = current[p:]
else:
# We are done.
current = ""
break
# Replace contents with wrapped contents.
contents = wrapped_contents
# Find text proportions.
TTF_SizeUTF8(font, contents[0].encode(), byref(tw), byref(th))
textheight = th.value
totalheight = th.value * len(contents)
# Calculate positions.
if branch["y"] is None:
branch["y"] = (self.manager[parent].height - totalheight) // 2
# Place lines of text.
t = []
for n in range(len(contents)):
# Check if wrapping put us in an impossible position.
tmpy = int((branch["y"] + n * textheight * self.__gp(branch, "line-height", 1.0)
+ n * self.__gp(branch, "line-spacing", 0)))
if tmpy < 0:
tmpy = 0
self.driftwood.log.msg("WARNING", "Widget", "__process_text", "text wrapped to negative position")
# Insert a textbox.
t.append(self.manager.insert_text(
contents=contents[n],
fontfile=branch["font"],
ptsize=branch["size"],
x=self.__gp(branch, "x", None),
y=tmpy,
width=self.__gp(branch, "width", None),
height=self.__gp(branch, "height", None),
color=self.__gp(branch, "color", "000000FF"),
parent=parent,
active=True
))
if t[-1] is None:
self.driftwood.log.msg("WARNING", "Widget", "__process_text", "failed to prepare text widget")
return None
if not self.__postprocess_text(t, branch):
self.driftwood.log.msg("WARNING", "Widget", "__process_text", "failed to postprocess text widgets",
t)
return None
return t
def __build_menu(self, widget, branch):
"""Build Menu
Build a menu widget out of its component controls, and return a map of control placements."""
slotmap = []
lookups = {}
for segment in branch["slots"]:
if type(segment[0]) == object:
# 1-Dimensional Menu
slotmap.append(self.__process_control(widget, segment))
lookups[slotmap[-1]] = segment
if slotmap[-1] is None:
self.driftwood.log.msg("WARNING", "Widget", "__build_menu",
"failed to prepare menu control widget")
return None
if not self.__postprocess_container(widget, segment):
self.driftwood.log.msg("WARNING", "Widget", "__build_menu",
"Failed to post-process menu control widget", widget)
return None
else:
# 2-Dimensional Menu
slotmap.append([])
for control in segment:
slotmap[-1].append(self.__process_control(widget, control))
lookups[slotmap[-1][-1]] = control
if slotmap[-1][-1] is None:
self.driftwood.log.msg("WARNING", "Widget", "__build_menu",
"failed to prepare menu control widget")
return None
if not self.__postprocess_container(widget, control):
self.driftwood.log.msg("WARNING", "Widget", "__build_menu",
"Failed to post-process menu control widget", widget)
return None
self.__setup_menu(branch, slotmap, lookups)
return slotmap
def __process_control(self, parent, branch):
"""Process a menu control."""
c = self.manager.insert_container(
imagefile=self.__gp(branch["images"], "deselected", None),
x=self.__gp(branch, "x", None),
y=self.__gp(branch, "y", None),
width=self.__gp(branch, "width", 0),
height=self.__gp(branch, "height", 0),
parent=parent,
active=True
)
return c
def __setup_menu(self, branch, slotmap, lookups):
"""Set up menu."""
if "default" in branch:
# Select default control.
self.__select_menu_control(branch, slotmap, lookups, branch["default"])
else:
# Select first control.
if type(slotmap[0]) is int:
# 1-Dimensional menu.
self.__select_menu_control(branch, slotmap, lookups, 0)
else:
# 2-Dimensional menu.
self.__select_menu_control(branch, slotmap, lookups, [0, 0])
oldcontext = self.__register_menu_callbacks(branch["keybinds"])
def __select_menu_control(self, branch, slotmap, lookups, position):
"""Select a control."""
if type(slotmap[0]) is int:
# 1-Dimensional menu.
w = slotmap[position]
control = branch["slots"][position]
else:
# 2-Dimensional menu.
w = slotmap[position[0]][position[1]]
control = branch["slots"][position[0]][position[1]]
if self.manager.selected is not None:
# Deselect previously selected control.
dw = self.manager.selected
self.manager[dw].image = self.driftwood.resource.request_image(lookups[dw]["images"]["deselected"])
self.driftwood.area.changed = True
if "select" in lookups[dw]["triggers"]:
self.driftwood.script.call(*lookups[dw]["triggers"]["select"])
# Select new control.
self.manager.select(w)
self.manager[w].image = self.driftwood.resource.request_image(control["images"]["selected"])
self.driftwood.area.changed = True
if "select" in control["triggers"]:
self.driftwood.script.call(*control["triggers"]["select"])
def __gp(self, branch, prop, fallback):
"""Get Property
Helper function to get a property from a branch, or return the fallback value if it doesn't exist."""
return branch[prop] if prop in branch else fallback
def __include(self, filename, template_vars={}):
"""Include branches from another file.
"""
tree = self.driftwood.resource.request_template(filename, template_vars)
if not tree:
self.driftwood.log.msg("WARNING", "Widget", "__include", "failed to read widget include", filename)
return None
return tree
def __flatten(self, l):
"""https://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists/2158532#2158532"""
for el in l:
if isinstance(el, collections.Iterable) and not isinstance(el, (str, bytes)):
yield from self.__flatten(el)
else:
yield el
| [
29113,
4242,
198,
2,
39819,
3822,
362,
35,
3776,
6245,
13,
26264,
220,
220,
220,
220,
1303,
198,
2,
26295,
21048,
13,
9078,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
198,
... | 2.02853 | 7,571 |
"""Downloading the images from Firefox based on the search term specified
This scripts scrapes Firefox and downloads Google images of different images
Typical usage example from command line:
python -m cli --nums 10 --search "oyster mushrooms" "crimini mushrooms" "amanita mushrooms" --opp "search"
"""
# built-in libraries
import os
import argparse
from glob import glob
# function from downloader.py module
from downloader import download_google_images
dataset_path = "/persistent/dataset"
if __name__ == "__main__":
# Generate the inputs arguments parser
# if you type into the terminal 'python3 cli.py --help', it will provide the description
parser = argparse.ArgumentParser(
description='obtaining or verifying images for Jordan sneakers downloaded from Google')
# two different command options for inputting the three optional commands
parser.add_argument("-o", "--opp", type=str, default="search",
help="whether or not to download the Google images")
parser.add_argument("-n", "--nums", type=int, default=1,
help="number of images to download")
parser.add_argument("-s", "--search", nargs="+", default="default",
help="the Google search term(s)since there can be multiple")
args = parser.parse_args()
main(args.nums, args.search, args.opp) | [
37811,
10002,
278,
262,
4263,
422,
16802,
1912,
319,
262,
2989,
3381,
7368,
628,
220,
220,
220,
770,
14750,
15881,
274,
16802,
290,
21333,
3012,
4263,
286,
1180,
4263,
628,
220,
220,
220,
48752,
8748,
1672,
422,
3141,
1627,
25,
198,
2... | 3.079823 | 451 |
import os
import ssl
import collections
from typing import AsyncIterator, NamedTuple, Dict, List
import aiomysql
from dffml.base import BaseConfig
from dffml.repo import Repo
from dffml.source.source import BaseSourceContext, BaseSource
from dffml.util.cli.arg import Arg
from dffml.util.entrypoint import entrypoint
@entrypoint("mysql")
| [
11748,
28686,
198,
11748,
264,
6649,
198,
11748,
17268,
198,
6738,
19720,
1330,
1081,
13361,
37787,
11,
34441,
51,
29291,
11,
360,
713,
11,
7343,
198,
198,
11748,
257,
29005,
893,
13976,
198,
198,
6738,
288,
487,
4029,
13,
8692,
1330,
... | 3.214953 | 107 |
import abc
import typing
from karp.domain.models.resource import Resource
from typing import Optional, Callable, TypeVar, List, Dict, Tuple
import logging
import attr
import pydantic
from karp.domain import errors, model
from karp.domain.errors import ConfigurationError
from karp.domain.models.query import Query
from karp.domain.models.entry import Entry
logger = logging.getLogger("karp")
@attr.s(auto_attribs=True)
| [
11748,
450,
66,
198,
11748,
19720,
198,
6738,
479,
5117,
13,
27830,
13,
27530,
13,
31092,
1330,
20857,
198,
6738,
19720,
1330,
32233,
11,
4889,
540,
11,
5994,
19852,
11,
7343,
11,
360,
713,
11,
309,
29291,
198,
11748,
18931,
198,
198,... | 3.34375 | 128 |
"""
This file contains commands related to user configuration
and management.
"""
from discord.ext import commands, tasks
import discord
import errors
import turnipCalculator
import datetime
| [
37811,
198,
1212,
2393,
4909,
9729,
3519,
284,
2836,
8398,
198,
392,
4542,
13,
198,
37811,
198,
6738,
36446,
13,
2302,
1330,
9729,
11,
8861,
198,
11748,
36446,
198,
11748,
8563,
198,
11748,
1210,
541,
9771,
3129,
1352,
198,
11748,
4818,... | 4.386364 | 44 |
import sys
import logging
import argparse
import os
from matchreporter.analysis.analyser import analyse
from matchreporter.collect import scxmlformatter, gaamatchformatter
from matchreporter.db.analysis_db import write_report_data
from matchreporter.helpers.filehelper import load_data_from_file
SPORTS_CODE = 'SportsCode'
GAA_MATCH = 'GAAMatch'
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
if __name__ == "__main__":
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
main(sys.argv[1:])
| [
11748,
25064,
198,
11748,
18931,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
198,
6738,
2872,
260,
26634,
13,
20930,
13,
272,
26266,
263,
1330,
39552,
198,
6738,
2872,
260,
26634,
13,
33327,
1330,
629,
19875,
687,
1436,
11,
31986,
... | 2.734694 | 245 |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
import pytest
@dace.program
@dace.program
@dace.program
if __name__ == "__main__":
test_linalg_inv()
test_linalg_solve()
test_linalg_cholesky()
| [
2,
15069,
13130,
12,
1238,
2481,
35920,
43412,
290,
262,
9637,
34,
68,
7035,
13,
1439,
2489,
10395,
13,
198,
11748,
288,
558,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
628,
628,
198,
198,
31,
67,
558,
13,
23065,
6... | 2.495495 | 111 |
"""
This file tests retrieval of values from the user list.
The user list replaces one cell in the waveform editor for episodic files.
Instead of using a value and a delta, the user can provide a list of values to apply by sweep.
"""
import sys
import pytest
import numpy as np
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except:
raise ImportError("couldn't import local pyABF")
@pytest.mark.parametrize("abfPath, listValues", [
("data/abfs/171117_HFMixFRET.abf",
[-100.0, 180.0, 160.0, 140.0, 120.0, 100.0, 80.0, 60.0, 40.0, 20.0, 0.0, -20.0, -60.0]),
("data/abfs/19212027.abf",
[-50.0, -55.0, -60.0, -65.0, -70.0, -75.0, -80.0, -85.0, -90.0, -95.0, -100.0, -105.0, -110.0, -115.0, -120.0]),
("data/abfs/user-list-durations.abf",
[4000, 6000, 6000, 10000, 20000, 30000, 30000, 30000, 30000]),
("data/abfs/2020_03_02_0000.abf",
[-200.0, -150.0, -100.0, -50.0, 0.0, 25.0, 50.0, 100.0, 150.0, 200.0, 250.0, 300.0, 350.0, 400.0, 500.0, 600.0]),
])
@pytest.mark.parametrize("abfPath, firstType", [
("data/abfs/171117_HFMixFRET.abf", 22),
("data/abfs/19212027.abf", 24),
("data/abfs/user-list-durations.abf", 35),
("data/abfs/2020_03_02_0000.abf", 62),
])
| [
37811,
198,
1212,
2393,
5254,
45069,
286,
3815,
422,
262,
2836,
1351,
13,
198,
198,
464,
2836,
1351,
24020,
530,
2685,
287,
262,
6769,
687,
5464,
329,
48177,
29512,
3696,
13,
198,
13193,
286,
1262,
257,
1988,
290,
257,
25979,
11,
262,... | 2.170034 | 594 |
usa_elections([('m', 13), ('o', 76), ('m', 54), ('m', 7)])
usa_elections2([('m', 13), ('o', 76), ('m', 54), ('m', 7)])
usa_elections3([('m', 13), ('o', 76), ('m', 54), ('m', 7)])
| [
628,
198,
198,
22064,
62,
9509,
507,
26933,
10786,
76,
3256,
1511,
828,
19203,
78,
3256,
8684,
828,
19203,
76,
3256,
7175,
828,
19203,
76,
3256,
767,
8,
12962,
198,
22064,
62,
9509,
507,
17,
26933,
10786,
76,
3256,
1511,
828,
19203,
... | 2.127907 | 86 |
"""
Current Limitations:
- A member cannot transition from a monthly to a punchcard without losing the remainder of their time
"""
import sys
import pprint
import hashlib
from datetime import datetime, date, time, timedelta
import time
from tinydb import *
from tinydb.operations import decrement
import re
import config
class LoginDatabase:
""" add_member: Adds a new member to the database. All parameters but link are required.
Expiration date is automatically set using the member date and current time. Current time
is recorded. A 16 digit numerical ID is generated and assigned based off the date and member's name
and checked against the database. If the ID has been already been used, it waits .1 second and
tries again until an unused ID has been generated. Also protects against IDs that are less than 16
digits long, as sometimes they were generated that were too short.
Returns: Dictionary of the data entered into the database """
""" retrieve_member: Upon being passed a member ID, checks if the member exists (and not deleted) and then
returns the member's data. Raises LookupError if the member does not exits.
Returns: Document type containing data of the selected member in the members database"""
""" update_member: Updates a member in the database. With the exception of expiration_punches, expiration_date
and link, all of the member data must be passed in. Currently does not support passing
in just the parameters you want to update. In the future, might consider using **kwargs
as a more flexible update. Raises a LookupError if the associated member cannot be found.
Returns: List of documents containing data of the selected member in the members database"""
""" log_member: Logs in member by adding their member_id to the log database, along with:
* Log Date
* First/Last name
* Member Type
* Expiration Date/Punches Remaining
* Link (if Applicable)
If a member is a punchcard member, one punch is removed from their account.
If the member has already logged in during the past day (since midnight), and the debug feature
"config.allow_multiple_scans_a_day" is False, a LookupError will be raised, indicating that the
member has already logged in today.
Returns: List type of the log entry """
""" query_member: Performs a regex search on the database, by member's first name or current day. Ignores members
marked as deleted. The parameter "log_date" can be passed True to use the current day, or a
date object to specify a certain day. If no members match the name, or nobody has logged in
today, a LookupError is raised.
Returns: List of documents containing data of all the members matching the search in the
members database """
""" get_member_sign_offs: When given a member ID. this function retrieves the member's sign ins from the member
database. If the member does not have any sign offs, generates an empty dict containing
the sign-offs listed in config.sign_off_list. The dict should only contain booleans.
If the member ID does not exist in the database, a LookupError is raised.
Returns: Dictionary containing the skills a member has been signed off on. If the member
does not have any recorded sign-offs, generates a dict of all the sign-offs with
the value False """
""" set_member_sign_offs: Updates the sign-offs for a given member.
Returns: Dictionary containing the skills a member has been signed off on. If the member
does not have any recorded sign-offs, generates a dict of all the sign-offs with
the value False """
| [
37811,
198,
11297,
7576,
20597,
25,
198,
220,
220,
220,
532,
317,
2888,
2314,
6801,
422,
257,
9651,
284,
257,
10862,
9517,
1231,
6078,
262,
17675,
286,
511,
640,
628,
198,
37811,
628,
198,
198,
11748,
25064,
198,
11748,
279,
4798,
198... | 2.511136 | 1,796 |
import pandas as pd
import numpy as np
from mimic3benchmark.readers import InHospitalMortalityReader
HEADERS = [
"Index", "Hours", "Capillary refill rate", "Diastolic blood pressure",
"Fraction inspired oxygen", "Glascow coma scale eye opening",
"Glascow coma scale motor response", "Glascow coma scale total",
"Glascow coma scale verbal response", "Glucose", "Heart Rate",
"Height", "Mean blood pressure", "Oxygen saturation", "Respiratory rate",
"Systolic blood pressure", "Temperature", "Weight", "pH", "Label"
]
COMA_SCALE_EYE_OPENING_REPLACEMENTS = {
"1 No Response": 1,
"None": 1,
"2 To pain": 2,
"To Pain": 2,
"3 To speech": 3,
"To Speech": 3,
"4 Spontaneously": 4,
"Spontaneously": 4,
}
COMA_SCALE_MOTOR_REPLACEMENTS = {
"1 No Response": 1,
"No response": 1,
"2 Abnorm extensn": 2,
"Abnormal extension": 2,
"3 Abnorm flexion": 3,
"Abnormal Flexion": 3,
"4 Flex-withdraws": 4,
"Flex-withdraws": 4,
"5 Localizes Pain": 5,
"Localizes Pain": 5,
"6 Obeys Commands": 6,
"Obeys Commands": 6
}
COMA_SCALE_VERBAL_REPLACEMENTS = {
"No Response-ETT": 0,
"1.0 ET/Trach": 0,
"1 No Response": 1,
"No Response": 1,
"2 Incomp sounds": 2,
"Incomprehensible sounds": 2,
"3 Inapprop words": 3,
"Inappropriate Words": 3,
"4 Confused": 4,
"Confused": 4,
"5 Oriented": 5,
"Oriented": 5,
}
if __name__ == "__main__":
df = preprocess(
train_dir="data/in-hospital-mortality/train",
test_dir="data/in-hospital-mortality/test",
split=False,
)
df.to_csv("mortality.csv")
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
26332,
18,
26968,
4102,
13,
961,
364,
1330,
554,
39,
3531,
44,
28337,
33634,
198,
198,
37682,
4877,
796,
685,
198,
220,
220,
220,
366,
15732,
1600,
... | 2.316456 | 711 |
"""User model."""
# Django
from django.db import models
from django.contrib.auth.models import AbstractUser
# Utils
from weight.utils.models import WeightModel
class User(WeightModel, AbstractUser):
"""User model.
Extend from Django abstract user, change the username field to email
and add some extra info.
"""
email = models.EmailField(
'email address',
unique=True,
error_messages={
'unique': 'A user with that email already exist',
}
)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
class Meta:
"""Meta class."""
verbose_name = 'Usuario'
verbose_name_plural = 'Usuarios'
def __str__(self):
"""Return username."""
return self.username
def get_short_name(self):
"""Return username."""
return self.username | [
37811,
12982,
2746,
526,
15931,
198,
198,
2,
37770,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
198,
198,
2,
7273,
4487,
198,
6738,
3463,
13,
26791,
... | 2.543909 | 353 |
from enum import Enum
from typing import Type
from fastapi.param_functions import Depends, Path, Query
from fastapi.routing import APIRouter, HTTPException
from kanon.units.radices import BasedReal, IllegalBaseValueError
from kanon_api.core.calculations.parser import parse
from kanon_api.utils import safe_radix
router = APIRouter(prefix="/calculations", tags=["calculations"])
@router.get("/{radix}/from_float/")
@router.get("/{radix}/to_float/")
@router.get("/{radix}/compute/")
op_to_token = {
Operation.add: "+",
Operation.sub: "-",
Operation.mul: "*",
Operation.div: "/",
}
@router.get("/{radix}/{operation}/{a}/{b}/")
| [
6738,
33829,
1330,
2039,
388,
198,
6738,
19720,
1330,
5994,
198,
198,
6738,
3049,
15042,
13,
17143,
62,
12543,
2733,
1330,
2129,
2412,
11,
10644,
11,
43301,
198,
6738,
3049,
15042,
13,
81,
13660,
1330,
3486,
4663,
39605,
11,
14626,
1692... | 2.624 | 250 |
n = int(input())
coelhos = 0
ratos = 0
sapos = 0
for i in range(n):
quantia, tipo = input().split()
quantia = int(quantia)
if tipo == 'C':
coelhos += quantia
elif tipo == 'R':
ratos += quantia
else:
sapos += quantia
total = coelhos + ratos + sapos
print(f"Total: {total} cobaias")
print(f"Total de coelhos: {coelhos}")
print(f"Total de ratos: {ratos}")
print(f"Total de sapos: {sapos}")
print(f"Percentual de coelhos: {coelhos * 100 / total:.2f} %")
print(f"Percentual de ratos: {ratos * 100 / total:.2f} %")
print(f"Percentual de sapos: {sapos * 100 / total:.2f} %") | [
77,
220,
796,
493,
7,
15414,
28955,
198,
198,
1073,
417,
71,
418,
796,
657,
198,
10366,
418,
796,
657,
198,
82,
499,
418,
796,
657,
198,
198,
1640,
1312,
287,
2837,
7,
77,
2599,
198,
220,
220,
220,
5554,
544,
11,
8171,
78,
796,
... | 2.123711 | 291 |
import textwrap
from async_asgi_testclient import TestClient
from a2wsgi import WSGIMiddleware
import pytest
import unittest.mock as mock
##__________________________________________________________________||
@pytest.fixture(autouse=True)
##__________________________________________________________________||
@pytest.mark.asyncio
##__________________________________________________________________||
| [
11748,
2420,
37150,
198,
6738,
30351,
62,
292,
12397,
62,
9288,
16366,
1330,
6208,
11792,
198,
198,
6738,
257,
17,
18504,
12397,
1330,
25290,
38,
3955,
2509,
1574,
198,
198,
11748,
12972,
9288,
198,
11748,
555,
715,
395,
13,
76,
735,
... | 4.823529 | 85 |
from typing import Union, Tuple
import torch
from torch import Tensor, device as Device
from utils import embeddings_concat
from backbones import ResNet18,ResNet50, WideResNet50, EfficientNetB5
from utils.visualizer import Visualizer
class BaseModel:
"""The embedding backbone shared by PaDiM and PaDiMSVDD
"""
| [
6738,
19720,
1330,
4479,
11,
309,
29291,
198,
198,
11748,
28034,
198,
6738,
28034,
1330,
309,
22854,
11,
3335,
355,
16232,
198,
198,
6738,
3384,
4487,
1330,
11525,
67,
654,
62,
1102,
9246,
198,
6738,
736,
35095,
1330,
1874,
7934,
1507,
... | 2.939655 | 116 |
# process local states
local = range(24)
# L states
L = {"x0" : [0, 12], "x1" : [1, 13], "q0" : [14], "q1" : [15], "qb" : [20]}
# receive variables
rcv_vars = ["nr0", "nr1", "nrq0", "nrq1"]
# initial states
initial = [0, 1, 12, 13, 18, 19]
# rules
rules = []
rules.append({'idx': 0, 'from': 0, 'to': 2, 'guard': "(and (<= nr1 (* 2 t)) (< nr0 (- n t)))"})
rules.append({'idx': 1, 'from': 0, 'to': 3, 'guard': "(and (<= nr1 (* 2 t)) (>= nr0 (- n t)))"})
rules.append({'idx': 2, 'from': 0, 'to': 4, 'guard': "(and (> nr1 (* 2 t)) (< nr1 (- n t)))"})
rules.append({'idx': 3, 'from': 0, 'to': 5, 'guard': "(and (> nr1 (* 2 t)) (>= nr1 (- n t)))"})
rules.append({'idx': 4, 'from': 1, 'to': 2, 'guard': "(and (<= nr1 (* 2 t)) (< nr0 (- n t)))"})
rules.append({'idx': 5, 'from': 1, 'to': 3, 'guard': "(and (<= nr1 (* 2 t)) (>= nr0 (- n t)))"})
rules.append({'idx': 6, 'from': 1, 'to': 4, 'guard': "(and (> nr1 (* 2 t)) (< nr1 (- n t)))"})
rules.append({'idx': 7, 'from': 1, 'to': 5, 'guard': "(and (> nr1 (* 2 t)) (>= nr1 (- n t)))"})
rules.append({'idx': 8, 'from': 2, 'to': 6, 'guard': "(> nrq0 0)"})
rules.append({'idx': 9, 'from': 2, 'to': 7, 'guard': "(> nrq1 0)"})
rules.append({'idx': 10, 'from': 3, 'to': 8, 'guard': "true"})
rules.append({'idx': 11, 'from': 4, 'to': 9, 'guard': "(> nrq0 0)"})
rules.append({'idx': 12, 'from': 4, 'to': 10, 'guard': "(> nrq1 0)"})
rules.append({'idx': 13, 'from': 5, 'to': 11, 'guard': "true"})
# going back to the beginning of the round
rules.append({'idx': 14, 'from': 6, 'to': 0, 'guard': "true"})
rules.append({'idx': 15, 'from': 8, 'to': 0, 'guard': "true"})
rules.append({'idx': 16, 'from': 9, 'to': 0, 'guard': "true"})
rules.append({'idx': 17, 'from': 7, 'to': 1, 'guard': "true"})
rules.append({'idx': 18, 'from': 10, 'to': 1, 'guard': "true"})
rules.append({'idx': 19, 'from': 11, 'to': 1, 'guard': "true"})
rules.append({'idx': 20, 'from': 6, 'to': 12, 'guard': "true"})
rules.append({'idx': 21, 'from': 8, 'to': 12, 'guard': "true"})
rules.append({'idx': 22, 'from': 9, 'to': 12, 'guard': "true"})
rules.append({'idx': 23, 'from': 7, 'to': 13, 'guard': "true"})
rules.append({'idx': 24, 'from': 10, 'to': 13, 'guard': "true"})
rules.append({'idx': 25, 'from': 11, 'to': 13, 'guard': "true"})
# queen transitions
rules.append({'idx': 26, 'from': 12, 'to': 14, 'guard': "(<= nr1 (* 2 t))"})
rules.append({'idx': 27, 'from': 12, 'to': 15, 'guard': "(> nr1 (* 2 t))"})
rules.append({'idx': 28, 'from': 13, 'to': 14, 'guard': "(<= nr1 (* 2 t))"})
rules.append({'idx': 29, 'from': 13, 'to': 15, 'guard': "(> nr1 (* 2 t))"})
rules.append({'idx': 30, 'from': 14, 'to': 16, 'guard': "true"})
rules.append({'idx': 31, 'from': 15, 'to': 17, 'guard': "true"})
# going back to the beginning of the round
rules.append({'idx': 32, 'from': 16, 'to': 0, 'guard': "true"})
rules.append({'idx': 33, 'from': 17, 'to': 1, 'guard': "true"})
# faulty queen transitions
rules.append({'idx': 34, 'from': 18, 'to': 20, 'guard': "true"})
rules.append({'idx': 35, 'from': 20, 'to': 22, 'guard': "true"})
rules.append({'idx': 36, 'from': 22, 'to': 18, 'guard': "true"})
rules.append({'idx': 37, 'from': 22, 'to': 19, 'guard': "true"})
rules.append({'idx': 38, 'from': 19, 'to': 21, 'guard': "true"})
rules.append({'idx': 39, 'from': 21, 'to': 23, 'guard': "true"})
rules.append({'idx': 40, 'from': 23, 'to': 18, 'guard': "true"})
rules.append({'idx': 41, 'from': 23, 'to': 19, 'guard': "true"})
# parameters, resilience condition
params = ["n", "t", "f"]
active = "(- n (- f 1))"
rc = ["(> n 0)", "(> t 0)", "(> f 0)", "(>= t f)", "(> n (* 4 t))"]
# faults
faults = "byzantine"
faulty = [18, 19, 20, 21, 22, 23]
max_faulty = "1"
queen = [12, 13, 14, 15, 16, 17, 18, 20, 22]
faulty_queen = [18, 20, 22]
phase = 3
# configuration/transition constraints
constraints = []
constraints.append({'type': 'configuration', 'sum': 'eq', 'object': local, 'result': active})
constraints.append({'type': 'configuration', 'sum': 'eq', 'object': faulty, 'result': max_faulty})
constraints.append({'type': 'configuration', 'sum': 'eq', 'object': queen, 'result': "1"})
constraints.append({'type': 'transition', 'sum': 'eq', 'object': range(len(rules)), 'result': active})
constraints.append({'type': 'round_config', 'sum': 'eq', 'object': faulty_queen, 'result': 0})
# receive environment constraints
environment = []
environment.append('(>= nr0 x0)')
environment.append('(<= nr0 (+ x0 f))')
environment.append('(>= nr1 x1)')
environment.append('(<= nr1 (+ x1 f))')
environment.append('(>= nrq0 q0)')
environment.append('(<= nrq0 (+ q0 qb))')
environment.append('(>= nrq1 q1)')
environment.append('(<= nrq1 (+ q1 qb))')
# properties
properties = []
properties.append({'name':'validity0', 'spec':'safety', 'initial':'(= x0 (- n f))', 'qf':'some', 'reachable':'(not (= x1 0))'})
properties.append({'name':'validity1', 'spec':'safety', 'initial':'(= x1 (- n f))', 'qf':'some', 'reachable':'(not (= x0 0))'})
properties.append({'name':'agreement', 'spec':'safety', 'initial':'true', 'qf':'last', 'reachable':'(and (not (= x0 0)) (not (= x1 0)))'})
| [
2,
1429,
1957,
2585,
198,
12001,
796,
2837,
7,
1731,
8,
198,
2,
406,
2585,
198,
43,
796,
19779,
87,
15,
1,
1058,
685,
15,
11,
1105,
4357,
366,
87,
16,
1,
1058,
685,
16,
11,
1511,
4357,
366,
80,
15,
1,
1058,
685,
1415,
4357,
... | 2.341339 | 2,165 |
$NetBSD: patch-Lib_ctypes_macholib_dyld.py,v 1.1 2021/06/23 18:30:24 schmonz Exp $
darwin20 support, via MacPorts.
--- Lib/ctypes/macholib/dyld.py.orig 2021-06-22 19:20:28.000000000 +0000
+++ Lib/ctypes/macholib/dyld.py
@@ -7,6 +7,12 @@ from framework import framework_info
from dylib import dylib_info
from itertools import *
+try:
+ from _ctypes import _dyld_shared_cache_contains_path
+except ImportError:
+ def _dyld_shared_cache_contains_path(*args):
+ raise NotImplementedError
+
__all__ = [
'dyld_find', 'framework_find',
'framework_info', 'dylib_info',
@@ -132,6 +138,12 @@ def dyld_find(name, executable_path=None
), env):
if os.path.isfile(path):
return path
+ try:
+ if _dyld_shared_cache_contains_path(path):
+ return path
+ except NotImplementedError:
+ pass
+
raise ValueError("dylib %s could not be found" % (name,))
| [
3,
7934,
21800,
25,
8529,
12,
25835,
62,
310,
9497,
62,
76,
620,
349,
571,
62,
9892,
335,
13,
9078,
11,
85,
352,
13,
16,
33448,
14,
3312,
14,
1954,
1248,
25,
1270,
25,
1731,
5513,
2144,
89,
5518,
720,
198,
198,
27455,
5404,
1238... | 2.18578 | 436 |
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2021-present VincentRPS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
"""
Handles so-called 'dependency injection' for commands.
"""
from typing import Any, Callable
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
3075,
400,
261,
25,
3303,
62,
5715,
28,
18,
198,
2,
15069,
357,
66,
8,
33448,
12,
25579,
18653,
49,
3705,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
2... | 3.735736 | 333 |
from Algorithms import *
| [
6738,
978,
7727,
907,
1330,
1635,
198
] | 3.571429 | 7 |
# flask_app.py - a minimal flsk application
# import flask module
from flask import Flask, render_template
# initiate application
app = Flask(__name__)
# define route
@app.route("/")
# attach a function to that route using render_template
# from the flask module
| [
2,
42903,
62,
1324,
13,
9078,
532,
257,
10926,
781,
8135,
3586,
198,
198,
2,
1330,
42903,
8265,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
198,
198,
2,
22118,
3586,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
628,
198,
2... | 3.657534 | 73 |
import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
''' Uma classe que representa um único alienígena da frota. '''
def __init__(self, ai_settings, screen):
''' Inicializa o alienígina e define sua posição inicial. '''
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
# Carrega a imagem do alienígena e define seu atributo rect.
self.image = pygame.image.load('images/nave-alien.png')
self.rect = self.image.get_rect()
# Inicia cada novo alienígena próximo à parte superior esquerdo da tela.
self.rect.x = self.rect.width # Coloca um espaço à esquerda que seja igual à largura do alienígena.
self.rect.y = self.rect.height # Coloca um espaço acima dele correspondente à sua altura.
# Armazena a posição exata do alienígena.
self.x = float(self.rect.x)
def blitme(self):
''' Desenha o alienigina em sua posição atual. '''
self.screen.blit(self.image, self.rect)
def check_edges(self):
''' Devolve True se o alienígena estiver na borda da tela. '''
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
return True
def update(self):
''' Move o alienígena para a direita ou para a esquerda. '''
self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)
''' Permite o movimento para esquerda ou para a direita multiplicando o fator de velocidade
do alienígena pelo valor de fleet_direction. '''
self.rect.x = self.x # Atualiza a posição do rect do alienígena com o valor de self.x.
| [
11748,
12972,
6057,
198,
6738,
12972,
6057,
13,
34975,
578,
1330,
33132,
198,
198,
4871,
17610,
7,
38454,
578,
2599,
198,
220,
220,
220,
705,
7061,
471,
2611,
537,
21612,
8358,
2380,
64,
23781,
6184,
118,
77,
3713,
8756,
8836,
5235,
6... | 2.294423 | 771 |
import gnuradio
from gnuradio import gr
from gnuradio import blocks as grblocks
import sys
if __name__ == '__main__':
duration = float(sys.argv[1])
tb = gr.top_block()
src = gr.null_source(8)
b0 = gr.copy(8)
b1 = grblocks.sub_cc()
b2 = gr.copy(8)
b3 = grblocks.divide_cc()
b4 = gr.copy(8)
sink = gr.null_sink(8)
tb.connect(src, b0, b1, b2, b3, b4, sink)
import time
tb.start()
time.sleep(duration)
print '##RESULT##', sink.nitems_read(0)/duration
import sys; sys.stdout.flush()
tb.stop()
tb.wait()
| [
11748,
19967,
333,
324,
952,
198,
6738,
19967,
333,
324,
952,
1330,
1036,
198,
6738,
19967,
333,
324,
952,
1330,
7021,
355,
1036,
27372,
198,
11748,
25064,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
... | 2.107011 | 271 |
import rand, crypto, SSL, tsafe
from version import __version__ | [
11748,
43720,
11,
21473,
11,
25952,
11,
256,
21230,
198,
6738,
2196,
1330,
11593,
9641,
834
] | 3.9375 | 16 |
# Generated by Django 3.1.13 on 2021-11-05 12:31
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
1485,
319,
33448,
12,
1157,
12,
2713,
1105,
25,
3132,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
import os
import sys
import time
from datetime import datetime
import requests
ICONS = {
"01d": "", # clear sky
"02d": "", # few clouds
"03d": "", # scattered clouds
"04d": "", # broken clouds
"09d": "", # shower rain
"10d": "", # rain
"11d": "", # thunderstorm
"13d": "", # snow
"50d": "", # mist
"01n": "", # clear sky
"02n": "", # few clouds
"03n": "", # scattered clouds
"04n": "", # broken clouds
"09n": "", # shower rain
"10n": "", # rain
"11n": "", # thunderstorm
"13n": "", # snow
"50n": "", # mist
"sunrise": "",
"sunset": "",
"degrees": "°C",
}
ICON_SUNSET = ""
ICON_SUNRISE = ""
ICON_DEGREES = "°C"
API_KEY = os.environ.get("API_KEY_OPEN_WEATHER_MAP")
if not API_KEY:
print("API_KEY_OPEN_WEATHER_MAP environment variable is missing!")
sys.exit(-1)
LOCATION = "Budapest,hu"
URL = "https://api.openweathermap.org/data/2.5/weather"
PARAMETERS = "?q={}&appid={}&units=metric"
PARAMETERS = PARAMETERS.format(LOCATION, API_KEY)
URL += PARAMETERS
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
7007,
198,
198,
2149,
19213,
796,
1391,
198,
220,
220,
220,
366,
486,
67,
1298,
366,
170,
234,
235,
1600,
220,
1303,
1598,
676... | 1.966957 | 575 |
import tensorflow as tf
import os
# os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
#----以下是答案部分 begin----#
# 定义placeholder 开始
keeProb = tf.placeholder(tf.float32, shape=())
batchImgInput = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
labels = tf.placeholder(tf.float32, shape=(None, 4))
# 第一层卷积+归一化+池化 开始
conv1 = tf.layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid', activation=tf.nn.relu)(
batchImgInput)
lrn1 = tf.nn.local_response_normalization(conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pool1 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(conv1)
# 第一层卷积+归一化+池化 结束
# 第二层卷积+归一化+池化 开始
conv2 = tf.layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(1, 1), padding='same', activation=tf.nn.relu)(
pool1)
lrn2 = tf.nn.local_response_normalization(conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pool2 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(conv2)
# 第二层卷积+归一化+池化 结束
# 定义三层直接连接的卷积 开始
conv3 = tf.layers.Conv2D(filters=192, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)(
pool2)
conv4 = tf.layers.Conv2D(filters=192, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)(
conv3)
conv5 = tf.layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=tf.nn.relu)(
conv4)
# 定义三层直接连接的卷积 结束
# 池化后变为一维 开始
pool3 = tf.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(conv5)
flatten = tf.layers.Flatten()(pool3)
# 池化后变为一维 结束
# 第一层全连接+随机失活 开始
dense1 = tf.layers.Dense(units=512, activation=tf.nn.relu)(flatten)
dropout1 = tf.nn.dropout(dense1, keeProb)
# 第一层全连接+随机失活 结束
# 第二层全连接+随机失活 开始
dense2 = tf.layers.Dense(units=512, activation=tf.nn.relu)(dropout1)
dropout2 = tf.nn.dropout(dense2, keeProb)
# 第二层全连接+随机失活 结束
# 第三层全连接+随机失活 开始
dense3 = tf.layers.Dense(units=256, activation=tf.nn.relu)(dropout2)
dropout3 = tf.nn.dropout(dense3, keeProb)
# 第三层全连接+随机失活 结束
# 额外加了一层全连接层 输出为类别数量 开始
outPuts = tf.layers.Dense(units=4, activation=None)(dropout3)
# 额外加了一层全连接层 输出为类别数量 结束
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=outPuts, labels=labels))
train = tf.train.AdamOptimizer().minimize(loss)
#----以上是答案部分 end----#
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf.train.export_meta_graph(filename="step4/modelInfo/AlexNet",
graph=tf.get_default_graph())
tf.reset_default_graph() | [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
28686,
198,
2,
28686,
13,
268,
2268,
14692,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
8973,
11639,
18,
6,
198,
2,
650,
20015,
98,
10310,
233,
42468,
163,
18433,
162,
... | 1.674015 | 1,497 |