content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
__author__ = 'ali-pc'
from django.shortcuts import render, redirect
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .forms import LoginForm, RegisterForm, ProfileForm
@login_required()
| [
171,
119,
123,
834,
9800,
834,
796,
705,
7344,
12,
14751,
6,
201,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6284,
201,
198,
6738,
42625,
14208,
13,
3642,
822,
... | 2.930693 | 101 |
from ethereum import utils
print(checksum_encode(bytes.fromhex('2f6bc758319982417b01b6ff09a9860a79c14cbb'))) | [
6738,
304,
17733,
1330,
3384,
4487,
198,
198,
4798,
7,
42116,
388,
62,
268,
8189,
7,
33661,
13,
6738,
33095,
10786,
17,
69,
21,
15630,
2425,
5999,
21113,
1731,
1558,
65,
486,
65,
21,
487,
2931,
64,
4089,
1899,
64,
3720,
66,
1415,
... | 2.319149 | 47 |
from torch import load, save
from os.path import dirname, isfile
from os import listdir, remove
from ast import literal_eval
from cnn.statistics import Statistics
from cnn.trainRegime.regime import TrainRegime
bopsKey = 'bops'
baselineKey = 'Baseline'
folderName = 'FF-4'
plotPath = '/home/vista/Desktop/F-BANNAS_depracated/6.12/{}/plots.data'.format(folderName)
folderPath = '/home/vista/Desktop/F-BANNAS_depracated/6.12/{}'.format(folderName)
# plotFromFile(plotPath)
plotPartitionsFromFolder(folderPath, plotPath)
# addBops(folderPath, '20181113-212540')
# folderPath = '/home/vista/Desktop/Architecture_Search/{}'.format(folderName)
# plotPartitionsFromFolder(folderPath, plotPath)
# generateCSV(folderPath)
# plotAverages(folderPath)
# # ======= L1 distance =================
# from torch import load, save
# from ast import literal_eval
# from cnn.statistics import Statistics
#
# bopsKey = 'bops'
# folderPath = '/home/vista/Desktop/Architecture_Search/FF'
# plotsDataPath = '/home/vista/Desktop/Architecture_Search/FF/plots.data.avg'
# plotsData = load(plotsDataPath)
# bopsPlotData = plotsData[bopsKey]
#
# pivot = [8, 2, 6, 0]
#
# for k, results in bopsPlotData.items():
# # extract 1st layer partition
# if isinstance(k, str):
# partition = literal_eval(k[:k.find('-')])
# else:
# if k == (3, 3):
# partition = [0, 16, 0, 0]
# elif k == (4, 4):
# partition = [0, 0, 16, 0]
# else:
# continue
#
# # calc L1 partition distance from pivot
# partitionDist = sum([abs(x - y) for x, y in zip(partition, pivot)])
#
# newResults = []
# for title, bops, r in results:
# newResults.append((title, partitionDist, r))
#
# bopsPlotData[k] = newResults
#
# Statistics.plotBops(plotsData, bopsKey, 'Baseline', folderPath)
| [
6738,
28034,
1330,
3440,
11,
3613,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
318,
7753,
198,
6738,
28686,
1330,
1351,
15908,
11,
4781,
198,
6738,
6468,
1330,
18875,
62,
18206,
198,
198,
6738,
269,
20471,
13,
14269,
3969,
1330,
... | 2.510929 | 732 |
"""
tests.test_overlay
~~~~~~~~~~~~~~~~~~
Test OverlayFS and OverlayFSManager
"""
import fudge
from nose.tools import raises
from testkit import ContextUser
from overlayUtils.overlay import *
from .utils import verify, reset
class TestOverlayMoreGeneral(GenericOverlaySetup):
"""This test class assumes less with each test. Add tests here that
need more mock control
"""
@verify
@raises(AlreadyMounted)
@verify
@fudge.test
def test_overlayfs_from_entry():
"""Test load OverlayFS.from_entry"""
fake_options = (
('rw',),
('lowerdir', 'lower'),
('upperdir', 'upper'),
)
fake_mount_entry = fudge.Fake()
fake_mount_entry.has_attr(device='somedevice', fs_type='overlayfs',
options=fake_options, mount_point='somemount')
overlay = OverlayFS.from_entry(fake_mount_entry)
assert isinstance(overlay, OverlayFS) == True
assert overlay.lower_dir == 'lower'
assert overlay.upper_dir == 'upper'
assert overlay.mount_point == 'somemount'
@raises(InvalidOverlayFS)
@fudge.test
def test_overlayfs_from_entry_fails():
"""Test load OverlayFS.from_entry"""
fake_options = (
('rw',),
)
fake_mount_entry = fudge.Fake()
fake_mount_entry.has_attr(device='somedevice', fs_type='overlayfs',
options=fake_options, mount_point='somemount')
OverlayFS.from_entry(fake_mount_entry)
@fudge.patch('overlayUtils.overlay.MountTable', 'overlayUtils.overlay.OverlayFS')
@raises(OverlayFSDoesNotExist)
| [
37811,
198,
41989,
13,
9288,
62,
2502,
10724,
198,
27156,
4907,
198,
198,
14402,
3827,
10724,
10652,
290,
3827,
10724,
10652,
13511,
198,
37811,
198,
11748,
277,
12587,
198,
6738,
9686,
13,
31391,
1330,
12073,
198,
6738,
1332,
15813,
1330... | 2.604096 | 586 |
from django.urls import path
from apps.carts.views import CartsView, CartsSelectAllView
urlpatterns = [
path('carts/', CartsView.as_view()),
path('carts/selection/', CartsSelectAllView.as_view()),
] | [
628,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
6725,
13,
66,
5889,
13,
33571,
1330,
327,
5889,
7680,
11,
327,
5889,
17563,
3237,
7680,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
66... | 2.6375 | 80 |
import os, sys, time, json, csv, hashlib
from pprint import pprint
#Imports configurations and add these modules to python enviroment
from config import config, system_config, output_files
from cogs.cognitor import cogs
# Sets-up the class and creates a directory for all the temp files.
cogs = cogs(output_files['cogs'], output_files['cogs_csv']);
cogs.threads = config['threads'];
# query sequence you want to check against
cogs.setQuery(output_files['blast_fasta']);
# sets and/or creates the database of sequences to run your analysis against.
cogs.setCogs(system_config['cogs_fasta'], system_config['cogs_p2o'], system_config['cogs_genomes_cogs'], config['create_cogs_db']);
# starts analysis
cogs.cognitor(); | [
11748,
28686,
11,
25064,
11,
640,
11,
33918,
11,
269,
21370,
11,
12234,
8019,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
2,
3546,
3742,
25412,
290,
751,
777,
13103,
284,
21015,
17365,
343,
296,
298,
220,
198,
6738,
4566,
1330,
... | 3.080508 | 236 |
import os, importlib
import sys as sss
global x
h = os.path.join(".")
x = os.environ.get("lkwem")
x = "Hello"
y = x[0]
glob = 1
| [
11748,
28686,
11,
1330,
8019,
198,
11748,
25064,
355,
264,
824,
198,
198,
20541,
2124,
198,
198,
71,
796,
28686,
13,
6978,
13,
22179,
7203,
19570,
198,
87,
796,
28686,
13,
268,
2268,
13,
1136,
7203,
75,
46265,
368,
4943,
198,
198,
8... | 2.177419 | 62 |
dict_ = {"a": {"b": 1}}
return_data_form = dict_.get([k for k, v in dict_.items()][0])
print(return_data_form) | [
11600,
62,
796,
19779,
64,
1298,
19779,
65,
1298,
352,
11709,
198,
7783,
62,
7890,
62,
687,
796,
8633,
44807,
1136,
26933,
74,
329,
479,
11,
410,
287,
8633,
44807,
23814,
3419,
7131,
15,
12962,
198,
4798,
7,
7783,
62,
7890,
62,
687,... | 2.5 | 44 |
import pandas as pd
from _datetime import datetime
from curami.commons import file_utils, mongo_connector
if __name__ == '__main__':
generate_file()
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
4808,
19608,
8079,
1330,
4818,
8079,
198,
198,
6738,
1090,
6277,
13,
9503,
684,
1330,
2393,
62,
26791,
11,
285,
25162,
62,
8443,
273,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
... | 2.925926 | 54 |
from typing import List, Tuple
from utils import ConceptNet, removeprefix
def render_path_verbose(path: List[int], graph: ConceptNet):
"""this function gives a verbose textual representation for the given path, including all edges between the given nodes, node and label indices and label weights.
Example
-------
> find_word_path("airport", "baggage", conceptnet, max_path_len=3, renderer=render_path_verbose)
['airport (35496)',
'/r/AtLocation (idx 1, weight 3.464, reversed),/r/AtLocation (idx 1, weight 2.828, reversed)',
'baggage (121612)']
"""
if not path:
return []
rendered = [f"{graph.nodes_idx2name[path[0]]} ({path[0]})"]
for path_idx, node_idx in enumerate(path[1:], start=1):
prev_idx = path[
path_idx - 1
] # index of the previous node in path for edge lookup
if (prev_idx, node_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(prev_idx, node_idx)]
reverse_edge = False
elif (node_idx, prev_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(node_idx, prev_idx)]
reverse_edge = True
else:
raise ValueError(
f"Illegal State: edge descriptors missing for edge present in graph ({node_idx}, {prev_idx})"
)
str_edge = ",".join(
f"{graph.labels_idx2name[e.label_idx]} (idx {e.label_idx}, weight {e.weight}{', reversed' if reverse_edge else ''})"
for e in edges
)
rendered.append(str_edge)
rendered.append(f"{graph.nodes_idx2name[node_idx]} ({node_idx})")
return rendered
def render_path_brief(path: List[int], graph: ConceptNet):
"""this function gives a brief textual representation for the given path.
Example
-------
> find_word_path("airport", "baggage", conceptnet, max_path_len=3, renderer=render_path_verbose)
airport <--AtLocation-- baggage
"""
if not path:
return []
rendered = [graph.nodes_idx2name[path[0]]]
for path_idx, node_idx in enumerate(path[1:], start=1):
prev_idx = path[
path_idx - 1
] # index of the previous node in path for edge lookup
if (prev_idx, node_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(prev_idx, node_idx)]
reverse_edge = False
elif (node_idx, prev_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(node_idx, prev_idx)]
reverse_edge = True
else:
raise ValueError(
f"Illegal State: edge descriptors missing for edge present in graph ({node_idx}, {prev_idx})"
)
best_edge = max(edges, key=lambda x: x.weight)
best_edge_str = removeprefix(graph.labels_idx2name[best_edge.label_idx], "/r/")
if not reverse_edge:
best_edge_str = f"--{best_edge_str}-->"
else:
best_edge_str = f"<--{best_edge_str}--"
rendered.append(best_edge_str)
rendered.append(graph.nodes_idx2name[node_idx])
return " ".join(rendered)
# from https://github.com/JoshFeldman95/Extracting-CK-from-Large-LM/blob/master/templates/relation_map.json
RELATION_MAP = {
"RelatedTo": "{0} is like {1}",
"ExternalURL": "{0} is described at the following URL {1}",
"FormOf": "{0} is a form of the word {1}",
"IsA": "{0} is {1}",
"PartOf": "{1} has {0}",
"HasA": "{0} has {1}",
"UsedFor": "{0} is used for {1}",
"CapableOf": "{0} can {1}",
"AtLocation": "You are likely to find {0} in {1}",
"Causes": "Sometimes {0} causes {1}",
"HasSubevent": "Something you might do while {0} is {1}",
"HasFirstSubevent": "the first thing you do when you {0} is {1}",
"HasLastSubevent": "the last thing you do when you {0} is {1}",
"HasPrerequisite": "something you need to do before you {0} is {1}",
"HasProperty": "{0} is {1}",
"MotivatedByGoal": "You would {0} because you want to {1}",
"ObstructedBy": "{0} can be prevented by {1}",
"Desires": "{0} wants {1}",
"CreatedBy": "{1} is created by {0}",
"Synonyms": "{0} and {1} have similar meanings",
"Synonym": "{0} and {1} have similar meanings",
"Antonym": "{0} is the opposite of {1}",
"DistinctFrom": "it cannot be both {0} and {1}",
"DerivedFrom": "the word {0} is derived from the word {1}",
"SymbolOf": "{0} is a symbol of {1}",
"DefinedAs": "{0} is defined as {1}",
"Entails": "if {0} is happening, {1} is also happening",
"MannerOf": "{0} is a specific way of doing {1}",
"LocatedNear": "{0} is located near {1}",
"dbpedia": "{0} is conceptually related to {1}",
"SimlarTo": "{0} is similar to {1}",
"EtymologicallyRelatedTo": "the word {0} and the word {1} have the same origin",
"EtymologicallyDerivedFrom": "the word {0} comes from the word {1}",
"CausesDesire": "{0} makes people want {1}",
"MadeOf": "{0} is made of {1}",
"ReceivesAction": "{0} can be {1} ",
"InstanceOf": "{0} is an example of {1}",
"NotDesires": "{0} does not want {1}",
"NotUsedFor": "{0} is not used for {1}",
"NotCapableOf": "{0} is not capable of {1}",
"NotHasProperty": "{0} does not have the property of {1}",
"NotMadeOf": "{0} is not made of {1}",
"NotIsA": "{0} is not {1}",
# New entries added based on ConceptNet
"HasContext": "{0} is in the context of {1}",
"SimilarTo": "{0} is similar to {1}",
'dbpedia/capital': "{1} is the capital of {0}", # sic
'dbpedia/field': "{0} is in the field of {1}",
'dbpedia/genre': "the works of {0} are mainly {1}",
'dbpedia/genus': "{1} is the genus of {0}",
'dbpedia/influencedBy': "{0} was influenced by {1}",
'dbpedia/knownFor': "{0} is known for {1}",
'dbpedia/language': "{1} is the language of {0}",
'dbpedia/leader': "{1} is the leader of {0}",
'dbpedia/occupation': "{0}'s occupation is {1}",
'dbpedia/product': "{0} produces {1}",
}
def render_path_natural(path: List[int], graph: ConceptNet) -> Tuple[str, List[float]]:
"""This function gives a natural language representation of the path using RELATION_MAP. It also returns the weight of all edges involved.
Assignment 2.
Example
-------
> find_word_path("airport", "baggage", conceptnet, max_path_len=3, renderer=render_path_verbose)
airport <--AtLocation-- baggage
"""
if not path:
return "", []
rendered = []
weights = []
for path_idx, node_idx in enumerate(path[1:], start=1):
prev_idx = path[
path_idx - 1
] # index of the previous node in path for edge lookup
if (prev_idx, node_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(prev_idx, node_idx)]
reverse_edge = False
elif (node_idx, prev_idx) in graph.edge_descriptors:
edges = graph.edge_descriptors[(node_idx, prev_idx)]
reverse_edge = True
else:
raise ValueError(
f"Illegal State: edge descriptors missing for edge present in graph ({node_idx}, {prev_idx})"
)
best_edge = max(edges, key=lambda x: x.weight)
best_edge_str = removeprefix(graph.labels_idx2name[best_edge.label_idx], "/r/")
if best_edge_str not in RELATION_MAP:
raise ValueError(f"edge type {best_edge_str} not in RELATION_MAP")
if not reverse_edge:
sentence = RELATION_MAP[best_edge_str].format(
graph.nodes_idx2name[prev_idx], graph.nodes_idx2name[node_idx]
)
else:
sentence = RELATION_MAP[best_edge_str].format(
graph.nodes_idx2name[node_idx], graph.nodes_idx2name[prev_idx]
)
sentence = sentence + "."
rendered.append(sentence)
weights.append(best_edge.weight)
return " ".join(rendered), weights
| [
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
6738,
3384,
4487,
1330,
26097,
7934,
11,
4781,
40290,
628,
198,
4299,
8543,
62,
6978,
62,
19011,
577,
7,
6978,
25,
7343,
58,
600,
4357,
4823,
25,
26097,
7934,
2599,
198,
220,
220,
220,
3... | 2.295527 | 3,465 |
import numpy as np
import pandas as pd
from tabulate import tabulate
from .util import *
def key_adder(w2v_model, wordpairs):
"""Account for as many cases of british vs. american spelling as possible
in W2V vocabulary
Args:
w2v_model : standard Word2Vec 'KeyedVectors' data structure
wordps : dictionary of strings containing the british spelling of words spelt using american english
"""
for i in range(len(wordpairs)):
try:
vec = w2v_model[wordpairs[i][1]]
w2v_model.add(wordpairs[i][0], vec)
except KeyError:
# print(wordpairs[i][0])
continue
return w2v_model
def clue_vectorizer(w2v_model, clue_words, pooling):
"""Finds vector representation of clue, by mean/sum pooling and keep track of any clue words that
are not in the W2V model
Args:
w2v_model : standard Word2Vec 'KeyedVectors' data structure
clue_words : list of tokens representing clue
pooling : sum or mean
"""
# Vector representation of clue
clue_vec = np.zeros((300))
clue_errors = []
for word in clue_words:
# Must account for words not in W2V's vocabulary (e.g. 'to')
try:
clue_vec += w2v_model[word].reshape((300))
except KeyError:
clue_errors.append(word)
continue
# Sum or mean pool
if pooling == 'mean':
clue_vec = clue_vec/(len(clue_words))
elif pooling == 'sum':
pass
return clue_vec, clue_errors
def sol_tracker(w2v_model, solution):
"""Keep a record of those solution words that do not appear in the W2V vocabulary
Args :
w2v_model : standard Word2Vec 'KeyedVectors' data structure
solution : List of tokenised words representing solution
"""
sol_errors = []
for word in solution:
# Must account for words not in W2V's vocabulary (e.g. 'to')
try:
_ = w2v_model[word]
except KeyError:
sol_errors.append(word)
continue
return sol_errors
def multi_synonym(w2v_model, multi_syns, n, pooling):
"""Given list of synonyms that represents crossword clue, return aggregate ranking
Args:
w2v_model : standard Word2Vec 'KeyedVectors' data structure
multi_syns : nested list containing lists of each synonym present in a clue
n : number of words to retreive from W2V model for each synonym
pooling : sum or mean
"""
# Construct seperate vector representaion for each synoynm
clue_vecs = []
for synonym in multi_syns:
cv, _ = clue_vectorizer(w2v_model, synonym, pooling=pooling)
clue_vecs.append(cv)
# Find words and corresponding scores of top n most likely answer candidates for each synonym
words = []
scores = []
for i in range(len(multi_syns)):
top_n = w2v_model.similar_by_vector(
clue_vecs[i], topn=n, restrict_vocab=None)
top_list = [top_n[j][0].lower() for j in range(len(top_n))]
score_list = [top_n[k][1] for k in range(len(top_n))]
words.append(top_list)
scores.append(score_list)
# Retreive words common to all rankings
int1 = words[0]
for word_list in words[1:]:
int1 = np.intersect1d(int1, word_list)
# Contingency for no common words between the two retreived rankings
if len(int1) == 0:
return []
# Retrieve scores corresponding to each intersection word in each of the original rankings
scores_list = np.zeros(len(int1))
for i in range(len(words)):
indices = np.intersect1d(words[i], int1, return_indices=True)
int_scores = np.take(scores[i], indices[1])
scores_list += int_scores
# Dictionary for intersection words and summed scores
rank_dict = {}
for A, B in zip(int1, scores_list):
rank_dict[A] = B
# Sort based on score in descending order and return word ranking
sorted_x = dict(
sorted(rank_dict.items(), key=lambda kv: kv[1], reverse=True))
top_list = list(sorted_x.keys())
return top_list
def master_base(w2v_model, data, pairs, pooling, version, topn, verbose,
enhancements={'length': True,
'clue_word': True,
'anagrams': True,
'multi_synonym': True,
'multiword': True}):
"""Finds vector representations of clues and retreives 'topn' answer candidates from within W2V vocabulary
based on cosine similarity score. These answer candidates can then be filtered further using various
combinations of the boolean flags in the 'enhancements' argument, in order to return more accurate answer
candidates for each clue.
Args :
w2v_model : standard Word2Vec 'KeyedVectors' data structure
data : dict containing full dataset
pairs : List of keys to access in clue data structure
pooling : Mean or sum pooling
version : 1 - Baseline,
2 - Access to Enhancements,
topn : Retreive 'topn' answer candidates
verbose : 1 - see clue,answer,rank of correct answer
2 - see the top 10 w2v answers also
enhancements : Dictionary of constraints to consider. Set to True to activate.
Output :
1) Metrics = [
Number of Correct answers in top 10,
Number of Correct answers in top 100,
Ranks of correct answers in top 10,
Rank of correct asnwers in top 100
Number of correct answers at Rank 1,
Number of Correct answers in top 1000
]
2) Errors = [
Clue words not in W2V vocab,
Solution words not in W2V vocab,
Keys of Clue for which Top 10 could not be retreived
Keys of Clue for which Top 100 could not be retreived
Keys of Clue for no answer candidates could be retreived
Key of Clue for which intersection of retreived answer candidates could not be found
in the case of multi-syms constraint set to True
]
3) Pairs = Total number of Clues considered by model from current run
"""
# Retreive keys for current crossword
keys = pairs
count_10 = 0
count_100 = 0
count_1000 = 0
rank_list100 = []
rank_list1000 = []
clue_errors = []
sol_errors = []
clue_track100 = []
clue_track1000 = []
rank1 = 0
clue_track1 = []
multi_clue_track = []
pairs = 0
# For all clues
for key in keys:
# Retreive clue and solution from dataframe
clue, solution = data[key]['all_synonyms'], data[key]['tokenized_solution']
if clue == None:
continue
'----------------------------- Vector Representation of Clue --------------------------------- '
# Clue vector representation
clue_vec, c_errors = clue_vectorizer(w2v_model, clue, pooling=pooling)
clue_errors.append(c_errors)
# Solution words not in vocab
s_errors = sol_tracker(w2v_model, solution)
sol_errors.append(s_errors)
'----------------------------- Retreive Answer Candidates from W2V and Apply Filters --------------------------------- '
# Retreive topn answer candidates
top_100 = w2v_model.similar_by_vector(
clue_vec, topn=topn, restrict_vocab=None)
top_list = [top_100[i][0].lower() for i in range(len(top_100))]
# Version 1
if version == 1:
pass
# Version 2 - allow access to enhancements
if version == 2:
# Return aggregate of rankings for each synonym
if len(data[key]['synonyms']) > 1 and enhancements['multi_synonym'] == True:
multi_list = multi_synonym(
w2v_model, data[key]['synonyms'], n=100000, pooling=pooling)
if len(multi_list) == 0:
#print("Sorry could not find any intersection between candidates returned for each synonym for clue :",key)
multi_clue_track.append(key)
else:
top_list = multi_list
# Filter out words in clue
if enhancements['clue_word'] == True:
top_l = word_remover(top_list, clue)
else:
top_l = top_list
# Filter out words of incorrect length
if enhancements['length'] == True:
if enhancements['multiword'] != True:
top_list = len_filterer(
top_l, len(data[key]['pretty_solution']))
elif enhancements['multiword'] == True:
# Filter by total length including spaces
top_l = pretty_len_filterer(
top_l, len(data[key]['pretty_solution']))
# Filter by individual word length
top_list = len_filterer_multi(
top_l, data[key]['token_lengths'])
# Filter by anagram
if enhancements['anagrams'] == True and data[key]['anagram'] != None:
top_list = anagram_filterer(
top_list, data[key]['anagram'].lower())
# Remove duplicates
top_list = list(dict.fromkeys(top_list))
if len(top_list) == 0:
clue_track1.append(key)
'----------------------------- Record Filtered, Ranked Answer Candidates ---------------------------------'
# Store word and score in dictionary
top_words = {}
top_100word = {}
top_1000word = {}
top_full = {}
# Consider top 10 answer candidates
for i in range(10):
try:
top_words.update({i+1: top_list[i]})
except IndexError:
# clue_track10.append(key)
break
# Consider top 100 answer candidates
for i in range(100):
try:
top_100word.update({i+1: top_list[i]})
except IndexError:
clue_track100.append(key)
break
# Consider top 1000 answer candidates
for i in range(1000):
try:
top_1000word.update({i+1: top_list[i]})
except IndexError:
clue_track1000.append(key)
break
for i in range(len(top_list)):
top_full.update({i+1: top_list[i]})
# Convert to dataframes
top_df = pd.DataFrame.from_dict(
top_words, orient='index', columns=['Word'])
top_100df = pd.DataFrame.from_dict(
top_100word, orient='index', columns=['Word'])
top_1000df = pd.DataFrame.from_dict(
top_1000word, orient='index', columns=['Word'])
top_fulldf = pd.DataFrame.from_dict(
top_full, orient='index', columns=['Word'])
'----------------------------- Compute and Update Model Metrics --------------------------------- '
# Split words by _ when multiword constraint is set to True
if enhancements['multiword'] == True:
# Check if solution contained in top 10/100/1000 or at all
int_10 = [word for word in top_df['Word']
if re.split(r"[_]", word) == solution]
int_100 = [word for word in top_100df['Word']
if re.split(r"[_]", word) == solution]
int_1000 = [word for word in top_1000df['Word']
if re.split(r"[_]", word) == solution]
int_full = [word for word in top_fulldf['Word']
if re.split(r"[_]", word) == solution]
else:
# Check if solution contained in top 10/100/1000 or at all
int_10 = [word for word in top_df['Word'] if [word] == solution]
int_100 = [word for word in top_100df['Word']
if [word] == solution]
int_1000 = [word for word in top_1000df['Word']
if [word] == solution]
int_full = [word for word in top_fulldf['Word']
if [word] == solution]
# Update model modetrics
if len(int_10) > 0:
count_10 += 1
sol_rank10 = top_df.index[top_df['Word'] == int_10[0]].tolist()[0]
if sol_rank10 == 1:
rank1 += 1
if len(int_100) > 0:
count_100 += 1
sol_rank = top_100df.index[top_100df['Word'] == int_100[0]].tolist()[
0]
rank_list100.append(sol_rank)
if len(int_1000) > 0:
count_1000 += 1
sol_rank1k = top_1000df.index[top_1000df['Word'] == int_1000[0]].tolist()[
0]
rank_list1000.append(sol_rank1k)
ans_rank = sol_rank1k
elif len(int_full) > 0:
ans_rank = top_fulldf.index[top_fulldf['Word'] == int_full[0]].tolist()[
0]
else:
ans_rank = str("could not find correct answer")
pairs += 1
# Print model output
if verbose == 2:
print(key)
print("Clue :", data[key]['synonyms'])
print("Answer: {}".format(solution))
print("Rank of Correct Answer :", ans_rank)
print("Top 10 W2V predictions :")
print(tabulate(top_df, headers='keys', tablefmt='psql'))
print("--------------------------------------------------------------------")
elif verbose == 1:
print(key)
print("Clue :", data[key]['synonyms'])
print("Answer: {}".format(solution))
print("Rank of Correct Answer :", ans_rank)
print("--------------------------------------------------------------------")
metrics = [count_10, count_100, rank_list100,
rank_list1000, rank1, count_1000]
errors = [clue_errors, sol_errors, clue_track100,
clue_track1000, clue_track1, multi_clue_track]
return metrics, errors, pairs
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
7400,
5039,
1330,
7400,
5039,
198,
198,
6738,
764,
22602,
1330,
1635,
628,
198,
4299,
1994,
62,
26676,
7,
86,
17,
85,
62,
19849,
11,
1573,
79,
3468,
259... | 2.197561 | 6,479 |
"""
The Koch curve is a fractal that looks something like Figure 5.2. To draw a Koch
curve with length x, all you have to do is:
1. Draw a Koch curve with length x/3.
2. Turn left 60 degrees.
3. Draw a Koch curve with length x/3.
4. Turn right 120 degrees.
5. Draw a Koch curve with length x/3.
6. Turn left 60 degrees.
7. Draw a Koch curve with length x/3.
The exception is if x is less than 3: in that case, you can just draw a straight
line with length x.
1. Write a function called koch that takes a turtle and a length as parameters,
and that uses the turtle to draw a Koch curve with the given length.
2. Write a function called snowflake that draws three Koch curves to make the
outline of a snowflake.
3. The Koch curve can be generalized in several ways. See http://en.wikipedia.org/wiki/Koch_snowflake for examples
and implement your favorite.
"""
from swampy.TurtleWorld import *
world = TurtleWorld()
turtoise = Turtle()
turtoise.delay = 0.01
angle= 60
length = 240
def koch(turtle, length):
"Draws a koch curve"
if length < 3:
turtle.fd(length)
return
go = length/3
koch(turtle, go)
turtle.lt(angle)
koch(turtle, go)
turtle.rt(angle*2)
koch(turtle, go)
turtle.lt(angle)
koch(turtle, go)
def snowflake(turtle, length):
"Draws a snowflake (a triangle with a Koch curve for each side)."
for _ in range(3):
koch(turtle, length)
turtle.rt(angle*2)
koch(turtoise, length)
turtoise.pu()
turtoise.fd(50)
turtoise.pd()
snowflake(turtoise, length)
wait_for_user() | [
37811,
198,
464,
17009,
12133,
318,
257,
12999,
282,
326,
3073,
1223,
588,
11291,
642,
13,
17,
13,
1675,
3197,
257,
17009,
198,
22019,
303,
351,
4129,
2124,
11,
477,
345,
423,
284,
466,
318,
25,
198,
16,
13,
220,
15315,
257,
17009,
... | 2.749129 | 574 |
"""Get relevant data from TMX/XML and save as tab-separated file. Meant to be run on TMX files and extracting
linguistic features for both French (src) and Dutch (tgt). It was used to extract linguistic features from a
pre-parsed XML/TMX corpus.
"""
from os import PathLike
from pathlib import Path
from typing import Generator, Optional, Union
import lxml
from lxml import etree
from lxml.etree import ElementTree, Element
from pandas import DataFrame
def get_feats(token_xml: Element) -> str:
"""Get lemma, pos, xpos, ufeats of a given element
:param token_xml: token XML element
:return: the properties, joined as a string
"""
all_feats = [token_xml.find(el).text for el in ("lemma", "upos", "xpos", "ufeats")]
all_feats = [f if f else "_" for f in all_feats]
return "-".join(all_feats)
def process_file(pfin: Union[str, PathLike], text_id: int) -> Optional[Generator]:
"""For a given XML/TMX file, yield for each segment the file name without .prep, text ID, segment ID,
src and tgt texts and the source and target linguistic features
:param pfin: input file
:param text_id: this text's ID
:return: a generator yielding the required information, or None when the file is not valid XML
"""
nsmap = {"xml": "http://www.w3.org/XML/1998/namespace"}
try:
tree: ElementTree = etree.parse(str(pfin))
# Only select those TUs that have a prop-element. First TU seems like noise - exclude it this way
tus = tree.findall("//tu[prop]")
for tu_id, tu in enumerate(tus, 1):
src = tu.find("./tuv[@xml:lang='fr']/seg", namespaces=nsmap).text
tgt = tu.find("./tuv[@xml:lang='nl']/seg", namespaces=nsmap).text
src_feats = " ".join([get_feats(token_xml)
for token_xml in tu.findall("./tuv[@xml:lang='fr']/stanza//token",
namespaces=nsmap)])
tgt_feats = " ".join([get_feats(token_xml)
for token_xml in tu.findall("./tuv[@xml:lang='nl']/stanza//token",
namespaces=nsmap)])
yield pfin.stem.replace(".prep", ""), text_id, tu_id, src, tgt, src_feats, tgt_feats
except lxml.etree.XMLSyntaxError:
# Occurs when error parsing
return None
def process(indir: Union[str, PathLike], outfile: Union[str, PathLike, None] = None, ext: str = ".tmx"):
"""Process all files with a given extension in a given input directory and write the results as a tab-separated
file to an outputfile
:param indir: the input dir
:param outfile: the output file to write results to. Writes to "data/lingfeats_output.txt" if not given
:param ext: only process files with this extension
"""
if outfile is not None and not outfile.endswith(".txt"):
raise ValueError("'outfile' must end with .txt")
pdin = Path(indir).resolve()
pfout = Path(outfile).resolve() if outfile else Path("data/lingfeats_output.txt")
pfout.mkdir(parents=True, exist_ok=True)
files = list(pdin.glob(f"*{ext}"))
data = [tpl for file_id, pfin in enumerate(files, 1) for tpl in process_file(pfin, file_id) if tpl]
df = DataFrame(data, columns=["file_name", "text_id", "seg_id", "src", "tgt", "src_feats", "tgt_feats"])
df.to_csv(pfout, encoding="utf-8", sep="\t", index=False)
if __name__ == "__main__":
import argparse
cparser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cparser.add_argument("indir", help="Path to input directory with XML/TMX files.")
cparser.add_argument("-o", "--outfile",
help="Path of the output file, must end with .txt. If not given, writes to lingfeats_output.txt.")
cparser.add_argument("-e", "--ext", default=".tmx",
help="Only process files with this extension.")
cargs = cparser.parse_args()
process(cargs.indir, cargs.outfile, cargs.ext)
| [
37811,
3855,
5981,
1366,
422,
21232,
55,
14,
55,
5805,
290,
3613,
355,
7400,
12,
25512,
515,
2393,
13,
2185,
415,
284,
307,
1057,
319,
21232,
55,
3696,
290,
37895,
198,
1359,
84,
2569,
3033,
329,
1111,
4141,
357,
10677,
8,
290,
1091... | 2.422024 | 1,680 |
# -*- coding: utf-8 -*-
from setuptools import setup
# version info
NAME = "pypalette"
VERSION = "0.1.6"
DESC = "Extract prominent colors from images! Python wrapper for `https://github.com/dayvonjersen/vibrant`."
# requirements
install_requires = []
with open('requirements.txt', "r") as fp:
for line in fp:
if len(line.strip()) > 2:
install_requires.append(line.strip())
# setup config
setup(
name=NAME,
version=VERSION,
description=DESC,
long_description=DESC,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
install_requires=install_requires,
author="BiLiangDevelop && frkhit",
url="https://github.com/BiLiangDevelop/py-go-palette",
author_email="frkhit@gmail.com",
license="MIT",
packages=["pypalette", ],
package_data={
"": ["LICENSE", "README.md", "MANIFEST.in"],
"pypalette": ["go-palette-*.so", ]
},
include_package_data=True,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2,
2196,
7508,
198,
20608,
796,
366,
79,
4464,
282,
5857,
1,
198,
43717,
796,
366,
15,
13,
16,
13,
21,
1,
198,
30910,
... | 2.518519 | 594 |
# Copyright (c) 2013 Barnstormer Softworks, Ltd.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import geni.rspec.pgmanifest as PGM
m = PGM.Manifest("vts-stage-manifest.xml")
for link in m.links:
d = {"geni_sharelan_token" : "vts-segment-%s" % (link.client_id[4:]),
"geni_sharelan_lanname" : link.client_id}
f = open("%s.json" % (link.client_id), "w+")
json.dump(d, f)
f.close()
| [
2,
15069,
357,
66,
8,
2211,
220,
11842,
12135,
263,
8297,
5225,
11,
12052,
13,
198,
198,
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
2... | 2.504425 | 226 |
# Generated by Django 3.0.7 on 2021-01-18 13:40
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
33448,
12,
486,
12,
1507,
1511,
25,
1821,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:55:31 2016
@author: medialab
"""
import cv | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
30030,
5267,
1315,
1105,
25,
2816,
25,
3132,
1584,
198,
198,
31,
9800,
25,
48174,
397,
198,
37811,
198,
11748,
269,
85
] | 2.4 | 40 |
import mido
#from mido.ports import open_input
from mido import Message, MidiFile, MidiTrack
import numpy as np
import time
from src.LRUDict import LRUDict
# import sys
# sys.path.append('..')
| [
11748,
3095,
78,
198,
2,
6738,
3095,
78,
13,
3742,
1330,
1280,
62,
15414,
198,
6738,
3095,
78,
1330,
16000,
11,
7215,
72,
8979,
11,
7215,
72,
24802,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
6738,
12351,
13,
35972,
8... | 3.063492 | 63 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 arijit <arijit@linux-inspiron-5567>
#
# Distributed under terms of the MIT license.
from selenium import webdriver
import time
import subprocess
repo_name = input("[+] Provide the github repository name :")
descript = input("[+] Provide a short description of your project :")
driver = webdriver.Firefox()
driver.get("https://www.github.com/")
time.sleep(4.0)
driver.find_element_by_xpath("/html/body/div[1]/header/div/div[2]/div[2]/div[2]/a").click() # sign-in clicked
time.sleep(4.0)
email = "email-address-to-login" # change these with your email address & password to log into github
passwd = "github-account-password-here"
driver.find_element_by_id("login_field").send_keys(email) # pasted the email to login field
driver.find_element_by_id("password").send_keys(passwd) # pasted your password
driver.find_element_by_xpath("/html/body/div[3]/main/div/div[4]/form/div/input[12]").click() # sign-in button for final submission
time.sleep(4.0)
driver.find_element_by_css_selector(".octicon-plus").click() # clicked drop-down menus
driver.find_element_by_css_selector("a.dropdown-item:nth-child(1)").click() # clicked new repository option
time.sleep(4.0)
driver.find_element_by_id("repository_name").send_keys(repo_name) # pasted given repository name
driver.find_element_by_id("repository_description").send_keys(descript) # pasted description provided by user
time.sleep(4.0)
driver.find_element_by_xpath("/html/body/div[4]/main/div/form/div[4]/button").click() # final submission & creating the repository
time.sleep(4.0)
repository_url = driver.current_url
print(f"[+] Successfully created a new github repository, {repository_url}") # getting the repository url from selenium
driver.quit()
print("[+] connecting local project directory with github repository....") # connecting to remote repo-
subprocess.run(["git", "remote", "add", "origin", repository_url], shell=False)
try:
print("[+] pushing local files to your github repository....") # trying to push local files to remote repo-
subprocess.run(["git", "push", "-u", "origin", "master"], shell=False)
print("[+] project setup successful !!")
except: # exception shows the error to push files
print("[+] Not able to push local files to remote repository.\n[+] Check if you are logged into your github account correctly from your machine(refer readme for more.)")
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
69,
12685,
28,
40477,
12,
23,
198,
2,
198,
2,
15069,
10673,
33448,
257,
380,
45051,
1279,
39010,
27... | 3.058971 | 797 |
from pynput.keyboard import Listener
keyboardListener()
| [
6738,
279,
2047,
1996,
13,
2539,
3526,
1330,
7343,
877,
628,
198,
2539,
3526,
33252,
3419,
198
] | 3.411765 | 17 |
#!/usr/bin/env python
# coding=utf-8
import os
import tempfile
HTML_BODY = """\
<html>
<head>
<title>Qutescript: {prefix}</title>
</head>
<body>
<pre>{script_path}</pre>
{content}
</body>
"""
HTML_MESSAGE_TEMPLATE = """\
<pre>
{}
</pre>
"""
log_file_path = './qutescript.log'
def log_to_browser(*messages, prefix: str = None, console=True, script_path=None):
"""
Write messages to logs and a temporary file,
Attempt to open the file through FIFO in the browser.
"""
[write_log(msg, console=console) for msg in messages]
html_messages = '\n'.join([HTML_MESSAGE_TEMPLATE.format(m) for m in messages])
html_body = HTML_BODY.format(
content=html_messages,
prefix=prefix,
script_path=script_path,
)
send_html(html_body, prefix=prefix, script_path=script_path)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
198,
28656,
62,
33,
33076,
796,
37227,
59,
198,
27,
6494,
29,
198,
27,
2256,
29,
198,
27,
7839,
29,
48... | 2.41349 | 341 |
#!/usr/bin/env python3
import re
import sys
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
if len(sys.argv) < 2:
usage()
input_file = sys.argv[1]
with beam.Pipeline(options=PipelineOptions()) as p:
(p | beam.io.ReadFromText(input_file)
| "Tokenize" >> beam.FlatMap(lambda line: re.findall(r'[A-Za-z\']+', line))
| "CountWords" >> beam.combiners.Count.PerElement()
| "PrintOutput" >> beam.ParDo(lambda c: print(c)))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
2471,
4891,
62,
40045,
355,
15584,
198,
6738,
2471,
4891,
62,
40045,
13,
25811,
13,
79,
541,
4470,
62,
25811,
1330,
37709,
29046,
19... | 2.526316 | 190 |
import pandas as pd
from tkinter import *
import tkinter as tk
import numpy as np
from tkinter import messagebox
from tkinter import ttk
import datetime
from datetime import timedelta, date
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import re
import ast
def clear_data(self):
''' Clearing data from treeview'''
self.display.delete(*self.display.get_children())
try:
self.bar1.get_tk_widget().place_forget()
except AttributeError:
pass
return None
def Load_excel_data(self):
'''Load choosen data to process into statistics based on variable that is passed'''
file_path = self.filepath
# ----- Driver Log ----- #
if self.variable.get() == "Driver Log":
df = filepath_check(self, file_path)
clear_data(self)
self.display["column"] = list(df.columns)
self.display["show"] = "headings"
for column in self.display["columns"]:
self.display.heading(column, text=column) # column heading = column name
df_rows = df.replace(np.nan,'', regex=True).to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
self.display.insert("", "end", values=row) # inserts each list to the treeview
return None
# ----- Driver Productivity ----- #
elif self.variable.get() == "Driver Productivity":
clear_data(self)
df = filepath_check(self, file_path)
tempvar=[]
name=[]
count=[]
date=[]
df2 = df.sort_values(by=['Name','Date','Route','Vehicle'])
df3 = df2.groupby(['Name','Date']).count()
for index, row in df2.iterrows():
if [row[0],row[3]] != tempvar: # adding all unique combination to the list
name.append(row[0])
date.append(row[3])
tempvar = [row[0],row[3]]
for number in df3['Route'].values:
count.append(number) # count how many activity
data3 = zip(name,date,count) # combining the data for easier use
mergedData = list(data3)
# Your common display insertion (Driver activity)
self.display["columns"] = ["No", "Name", "Date", "Activity"]
self.display.heading('#0,', text='No')
self.display.column('#0', anchor="center", width=0)
self.display.heading('#1', text='Name')
self.display.column('#1', anchor="center", width=200)
self.display.heading('#2', text='Date')
self.display.column('#2', anchor="center", width=260)
self.display.heading('#3', text='Activity')
self.display.column('#3', anchor="center", width=250)
index = iid = 0
for row in mergedData:
self.display.insert("", index, iid, values=row)
index = iid = index + 1
# ----- Production Graph ----- #
elif self.variable.get() == "Production Graph":
clear_data(self)
df = filepath_check(self, file_path)
if self.targetE.get().isdigit() == True:
tk.messagebox.showerror("Error", f"Target field is empty")
else:
df2 = df.sort_values(by=['Name','Date','Route','Vehicle'])
df3 = df2.groupby(['Date'], as_index=False).count()
df3.columns = ['Date', 'Total Activity','',''] # Only use date and total activity, obsolete data were left blank
# print(df3)
# Display
figure1 = plt.Figure(figsize=(8,4), dpi=88)
ax1 = figure1.add_subplot(111)
self.bar1 = FigureCanvasTkAgg(figure1, self.frame2)
self.bar1.get_tk_widget().place(relx=0.001, rely=0.09)
if self.targetE.get().isdigit() == True:
ax1.axhline(y=int(self.targetE.get()), color='r', linestyle='dashed') # Straight line for target line
df3.plot(x='Date', y='Total Activity', kind='line', ax=ax1)
# ----- Summary graphs ----- #
elif self.variable.get() == "Summary":
clear_data(self)
df = filepath_check(self, file_path)
if self.targetE.get().isdigit() == True:
tk.messagebox.showerror("Error", f"Target field is empty")
else:
df2 = df.sort_values(by=['Name','Date','Route','Vehicle'])
df3 = df2.groupby(['Date'], as_index=False).count()
df3.columns = ['Date', 'Total Activity','',''] # Only use date and total activity, obsolete data were left blank
# print(df3)
# ----- Misc Load ----- #
def misc_load(self):
'''Loading data from excel '''
file_path = self.filepath
df = filepath_check(self, file_path)
clear_data(self)
self.display["column"] = list(df.columns)
# print(self.display["column"])
self.display["show"] = "headings"
for column in self.display["columns"]:
self.display.heading(column, text=column) # column heading = column name
df_rows = df.replace(np.nan,'', regex=True).to_numpy().tolist() # turns the dataframe into a list of lists
for row in df_rows:
self.display.insert("", "end", values=row) # inserts each list to the treeview
self.display.column('#0', anchor="center", width=50)
self.display.column('#1', anchor="center", width=170)
self.display.column('#2', anchor="center", width=120)
self.display.column('#3', anchor="center", width=120)
self.display.column('#4', anchor="center", width=400)
return None
# ----- Post for adding data ----- #
def post(self,name,route,vehicle):
''' Posting data from draft treeview into final treeview'''
if not name.strip(): #Raise error if recieved empty string or space only input
tk.messagebox.showerror("Empty Entry","Name Entry is empty")
return None
if not vehicle.strip():
tk.messagebox.showerror("Empty Entry","vehicle Entry is empty")
return None
if not route.strip():
tk.messagebox.showerror("Empty Entry","Route Entry is empty")
return None
# Your common display insertion (Draft from add data display)
self.displayDraft['show'] = 'headings'
self.displayDraft.heading('#1', text='Name')
self.displayDraft.heading('#2', text='Route')
self.displayDraft.heading('#3', text='Vehicle')
self.displayDraft.heading('#4', text='Date')
self.displayDraft.column('#1', anchor="center", width=85)
self.displayDraft.column('#2', anchor="center", width=85)
self.displayDraft.column('#3', anchor="center", width=85)
self.displayDraft.column('#4', anchor="center", width=143)
self.displaycontent = self.displayDraft
self.displaycontent.insert("",index="end",text=f" ",value=(name,route,vehicle,datetime.datetime.now().strftime("%x")))
def varPostV(self,name,value):
""" Moving all the data from entry to treeview """
regex = re.search("-[@_!#$%^&*()<>?/\|}{~: ]", name) #Prevent user from giving special character and space character
print(regex)
if not regex == None:
tk.messagebox.showerror("Forbidden Entry","The variable name for vehicle must not contain special character or space character")
return None
if not name.strip():
tk.messagebox.showerror("Empty entry","The variable name for vehicle is empty")
return None
if not value.strip():
tk.messagebox.showerror("Empty entry","The variable value for vechicle is empty")
return None
if not value.isdigit():
tk.messagebox.showerror("Empty entry","The variable value for vechicle must be number")
return None
self.varVContent = self.varDispV
self.varVContent.insert("",index="end",text=name,value=float(value))
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
256,
74,
3849,
1330,
3275,
3524,
198,
6738,
256,
74,
3849,
1330,
256,
304... | 2.471461 | 3,101 |
from __future__ import with_statement
import os
from setuptools import setup, find_packages
from django_type_of_works import __version__ as version#fix
install_requires = ["django >= 1.7, < 1.9",]
try:
setup(
name="django-type-of-works",
version=version,
author="Victor De la Luz",
author_email="itztli@gmail.com",
description="Type of works.",
long_description=open("README.rst").read(),
license="BSD",
#url="http://mezzanine.jupo.org/",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=install_requires,
entry_points="""
[console_scripts]
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: "
"Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],)
except:
pass
| [
6738,
11593,
37443,
834,
1330,
351,
62,
26090,
198,
11748,
28686,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
6738,
42625,
14208,
62,
4906,
62,
1659,
62,
5225,
1330,
11593,
9641,
834,
355,
2196,
2,
13... | 2.245399 | 652 |
"""
RiscEmu (c) 2021 Anton Lydike
SPDX-License-Identifier: MIT
"""
import re
from enum import IntEnum
from typing import List
from .Exceptions import ParseException
PSEUDO_OPS = [
'.asciiz',
'.double',
'.extern',
'.global',
'.align',
'.float',
'.kdata',
'.ktext',
'.space',
'.ascii',
'.byte',
'.data',
'.half',
'.text',
'.word',
'.set',
]
COMMENT_START = ["#", ";"]
REG_VALID_SYMBOL_LABEL = re.compile(r'^([A-z_.][A-z_0-9.]*[A-z_0-9]|[A-z_]):')
REG_WHITESPACE_UNTIL_NEWLINE = re.compile(r'^(\s*)\n')
REG_WHITESPACE = re.compile(r'^\s*')
REG_NONWHITESPACE = re.compile(r'^[^\s]*')
REG_UNTIL_NEWLINE = re.compile(r'^[^\n]*')
REG_WHITESPACE_NO_LINEBREAK = re.compile(r'^[ \t]*')
REG_VALID_ARGUMENT = re.compile(
r'^([+-]?(0x[0-9A-f]+|[0-9]+)|[A-z_.][A-z0-9_.]*[A-z_0-9]|[A-z_])(\(([A-z_.][A-z_0-9.]*[A-z_0-9]|[A-z_])\))?'
)
REG_ARG_SPLIT = re.compile(r'^,[ \t]*')
class RiscVInput:
"""
Represents an Assembly file
"""
@staticmethod
def context(self, size: int = 5):
"""
returns a context string:
<local input before pos>|<local input after pos>
"""
start = max(self.pos - size, 0)
end = min(self.pos + size, self.len - 1)
return self.content[start:self.pos] + '|' + self.content[self.pos:end]
class RiscVTokenizer:
"""
A tokenizer for the RISC-V syntax of a given CPU
"""
| [
37811,
198,
49,
2304,
10161,
84,
357,
66,
8,
33448,
9261,
406,
5173,
522,
198,
198,
4303,
36227,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
37811,
198,
198,
11748,
302,
198,
6738,
33829,
1330,
2558,
4834,
388,
198,
6738,
19720,
1330,... | 1.929521 | 752 |
import re
from jenkins_tools.common import Jenkins
import logging
from lxml import etree
import xml.etree.ElementTree as ElementTree
logger = logging.getLogger()
# User pytest - https://docs.pytest.org/en/latest/
| [
11748,
302,
198,
198,
6738,
474,
268,
5331,
62,
31391,
13,
11321,
1330,
21835,
198,
11748,
18931,
198,
6738,
300,
19875,
1330,
2123,
631,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
11703,
27660,
198,
198,
6404,
1362,
796,
... | 3.208955 | 67 |
import pytest
import torch
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from energizer.inference.utils import (
AlphaDropout,
Dropout,
Dropout2d,
Dropout3d,
FeatureAlphaDropout,
local_seed,
patch_dropout_layers,
)
from tests.utils import NUM_FEATURES
def test_patch_exceptions(boring_model):
"""Test if errors are raised on inputs."""
model = boring_model(torch.nn.Linear(NUM_FEATURES, NUM_FEATURES))
with pytest.raises(MisconfigurationException):
patch_dropout_layers(model)
for i in (-0.01, 1.01):
with pytest.raises(ValueError):
patch_dropout_layers(model, prob=i)
@pytest.mark.parametrize("inplace", (True, False))
def test_patch_inplace(boring_model, inplace):
"""Test if inplace works and patch replaces all dropout layers."""
model = boring_model(torch.nn.Sequential(torch.nn.Linear(NUM_FEATURES, NUM_FEATURES), torch.nn.Dropout()))
patched_module = patch_dropout_layers(model, inplace=inplace)
if inplace:
assert patched_module is model
else:
assert patched_module is not model
assert not any(isinstance(module, torch.nn.Dropout) for module in patched_module.modules())
assert any(isinstance(module, Dropout) for module in patched_module.modules())
@pytest.mark.parametrize(
"dropout_cls",
[
AlphaDropout,
Dropout,
Dropout2d,
Dropout3d,
FeatureAlphaDropout,
],
)
def test_consistent_dropout(dropout_cls):
"""Test that the consistent mechanism works."""
t = torch.ones((1, 10, 10))
NUM_INFERENCE_ITERS = 10
dropout_layer = dropout_cls(prob=0.5, consistent=True, num_inference_iters=3)
dropout_layer.eval() # put layer in eval so that it uses the consistent mechanism
for NUM_INFERENCE_ITERS in (10, 20):
dropout_layer.reset_mask(NUM_INFERENCE_ITERS) # reset with different number of iters
a_seeds = [next(dropout_layer.seeds) for _ in range(NUM_INFERENCE_ITERS)]
a = torch.cat([dropout_layer(t) for _ in range(NUM_INFERENCE_ITERS)])
b_seeds = [next(dropout_layer.seeds) for _ in range(NUM_INFERENCE_ITERS)]
b = torch.cat([dropout_layer(t) for _ in range(NUM_INFERENCE_ITERS)])
dropout_layer.reset_mask() # reset with same number of iters
c_seeds = [next(dropout_layer.seeds) for _ in range(NUM_INFERENCE_ITERS)]
c = torch.cat([dropout_layer(t) for _ in range(NUM_INFERENCE_ITERS)])
assert a_seeds == b_seeds
assert a_seeds != c_seeds
assert torch.all(a == b)
assert torch.any(a != c)
@pytest.mark.parametrize(
"dropout_cls",
[
AlphaDropout,
Dropout,
Dropout2d,
Dropout3d,
FeatureAlphaDropout,
],
)
| [
11748,
12972,
9288,
198,
11748,
28034,
198,
6738,
12972,
13165,
354,
62,
2971,
768,
13,
315,
2410,
13,
1069,
11755,
1330,
14136,
11250,
3924,
16922,
198,
198,
6738,
19647,
7509,
13,
259,
4288,
13,
26791,
1330,
357,
198,
220,
220,
220,
... | 2.385141 | 1,171 |
optimizer = dict(type='Adam', lr=1e-4, weight_decay=0, eps=1e-8, training_encoder_lr_multiply=1)
optimizer_config = dict(grad_clip=dict(
clip_norm_mode='all',
max_grad_l2_norm=0.25,
use_scale=True,
))
lr_config = dict(
use_warmup=True,
lr_steps=[14000, 19000],
lr_ratio=0.1,
warmup_factor=0.2,
warmup_iterations=1000,
policy='PythiaScheduler')
total_epochs = 22
| [
40085,
7509,
796,
8633,
7,
4906,
11639,
23159,
3256,
300,
81,
28,
16,
68,
12,
19,
11,
3463,
62,
12501,
323,
28,
15,
11,
304,
862,
28,
16,
68,
12,
23,
11,
3047,
62,
12685,
12342,
62,
14050,
62,
16680,
541,
306,
28,
16,
8,
198,
... | 2.040816 | 196 |
# Tests
assert get_hbal_tree([1, 2, 3, 4, 5]).val == 3
assert get_hbal_tree([1, 2, 3, 4, 5, 6]).val == 4
| [
628,
198,
2,
30307,
198,
30493,
651,
62,
71,
6893,
62,
21048,
26933,
16,
11,
362,
11,
513,
11,
604,
11,
642,
35944,
2100,
6624,
513,
198,
30493,
651,
62,
71,
6893,
62,
21048,
26933,
16,
11,
362,
11,
513,
11,
604,
11,
642,
11,
... | 2.117647 | 51 |
"""
Brainstem Acroname Hub
See documentation for brainstem here:
https://acroname.com/reference/python/index.html
"""
from rspy import log
if __name__ == '__main__':
import os, sys, getopt
try:
opts,args = getopt.getopt( sys.argv[1:], '',
longopts = [ 'help', 'recycle' ])
except getopt.GetoptError as err:
print( '-F-', err ) # something like "option -a not recognized"
usage()
if args or not opts:
usage()
# See the end of the file for all the option handling
try:
import brainstem
except ModuleNotFoundError:
log.w( 'No acroname library is available!' )
raise
hub = None
class NoneFoundError( RuntimeError ):
"""
"""
def discover():
"""
Return all Acroname module specs in a list. Raise NoneFoundError if one is not found!
"""
log.d( 'discovering Acroname modules ...' )
# see https://acroname.com/reference/_modules/brainstem/module.html#Module.discoverAndConnect
try:
log.debug_indent()
specs = brainstem.discover.findAllModules( brainstem.link.Spec.USB )
if not specs:
raise NoneFoundError()
for spec in specs:
log.d( '...', spec )
finally:
log.debug_unindent()
return specs
def connect( spec = None ):
"""
Connect to the hub. Raises RuntimeError on failure
"""
global hub
if not hub:
hub = brainstem.stem.USBHub3p()
if spec:
specs = [spec]
else:
specs = discover()
spec = specs[0]
result = hub.connectFromSpec( spec )
if result != brainstem.result.Result.NO_ERROR:
raise RuntimeError( "failed to connect to acroname (result={})".format( result ))
elif len(specs) > 1:
log.d( 'connected to', spec )
def ports():
"""
:return: a list of all ports currently occupied (and enabled)
"""
occupied_ports = []
for port in range(8):
if port_power( port ) > 0.0:
occupied_ports.append( port )
return occupied_ports
def enable_ports( ports = None, disable_other_ports = False, sleep_on_change = 0 ):
"""
Set enable state to provided ports
:param ports: List of port numbers; if not provided, enable all ports
:param disable_other_ports: if True, the ports not in the list will be disabled
:param sleep_on_change: Number of seconds to sleep if any change is made
:return: True if no errors found, False otherwise
"""
global hub
result = True
changed = False
for port in range(0, 8):
#
if ports is None or port in ports:
if not is_port_enabled( port ):
action_result = hub.usb.setPortEnable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
else:
changed = True
#
elif disable_other_ports:
if is_port_enabled( port ):
action_result = hub.usb.setPortDisable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
else:
changed = True
#
if changed and sleep_on_change:
import time
time.sleep( sleep_on_change )
#
return result
def disable_ports( ports ):
"""
:param ports: List of port numbers
:return: True if no errors found, False otherwise
"""
global hub
result = True
for port in ports:
#
action_result = hub.usb.setPortDisable( port )
if action_result != brainstem.result.Result.NO_ERROR:
result = False
#
return result
def recycle_ports( portlist = None, timeout = 2 ):
"""
Disable and enable a port
:param timeout: how long to wait before re-enabling
:return: True if everything OK, False otherwise
"""
if portlist is None:
portlist = ports()
#
result = disable_ports( portlist )
#
import time
time.sleep( timeout )
#
result = enable_ports( portlist ) and result
#
return result
def set_ports_usb2( portlist = None, timeout = 100e-3 ):
"""
Set USB ports to USB2
"""
if portlist is None:
portlist = ports()
#
recycle_ports( portlist, timeout = timeout )
#
global hub
for port in portlist:
hub.usb.setSuperSpeedDataEnable( port )
hub.usb.setHiSpeedDataEnable( port )
hub.usb.setSuperSpeedDataDisable( port )
def set_ports_usb3( portlist = None, timeout = 100e-3 ):
"""
Set USB ports to support USB3
"""
if portlist is None:
portlist = ports()
#
recycle_ports( portlist, timeout = timeout )
#
global hub
for port in portlist:
hub.usb.setSuperSpeedDataEnable( port )
hub.usb.setHiSpeedDataEnable( port )
hub.usb.setHiSpeedDataDisable( port )
def port_power( port ):
"""
"""
if port < 0 or port > 7:
raise ValueError("port number can be only within 0 and 7 (inclusive)")
#
global hub
micro_volt = hub.usb.getPortVoltage( port )
micro_curr = hub.usb.getPortCurrent( port )
volt = float(micro_volt.value) / 10.0 ** 6
amps = float(micro_curr.value) / 10.0 ** 6
#
return volt * amps
def get_port_from_usb( first_usb_index, second_usb_index ):
"""
Based on last two USB location index, provide the port number
"""
acroname_port_usb_map = {(4, 4): 0,
(4, 3): 1,
(4, 2): 2,
(4, 1): 3,
(3, 4): 4,
(3, 3): 5,
(3, 2): 6,
(3, 1): 7,
}
return acroname_port_usb_map[(first_usb_index, second_usb_index)]
if __name__ == '__main__':
for opt,arg in opts:
if opt in ('--recycle'):
connect()
enable_ports() # so ports() will return all
recycle_ports()
| [
37811,
198,
44687,
927,
4013,
1313,
480,
14699,
198,
198,
6214,
10314,
329,
3632,
927,
994,
25,
198,
5450,
1378,
330,
1313,
480,
13,
785,
14,
35790,
14,
29412,
14,
9630,
13,
6494,
198,
37811,
198,
198,
6738,
374,
2777,
88,
1330,
260... | 2.271808 | 2,671 |
# Generated by Django 3.1.4 on 2021-09-03 13:33
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import users.models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
33448,
12,
2931,
12,
3070,
1511,
25,
2091,
198,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
198,
11748,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12102,
2024,
... | 3.024691 | 81 |
from django.contrib import admin
from .models import SummaryTask
admin.site.register(SummaryTask)
# Register your models here.
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
201,
198,
6738,
764,
27530,
1330,
21293,
25714,
201,
198,
201,
198,
28482,
13,
15654,
13,
30238,
7,
22093,
25714,
8,
201,
198,
201,
198,
2,
17296,
534,
4981,
994,
13,
201,
198
] | 3.292683 | 41 |
from functools import wraps
import inspect
def counter(fn):
"""Bu fonksiyon bir 'decorator' olup çıkışa "inner" adında bir 'closure' iletir. İçerdeki inner fonksiyonu,
kullanılacak asıl fonksiyonları modifiye eder. Burada modifiye olayı kullanılan fonksiyonun kaçıncı defa
çağırıldığını ekrana basma'dır. """
count = 0
# fonksiyon aynı ada sahip olsa da artık inner closure döndürüleceğinden debugging sorunu var. fn fonksiyonu yerine closure'ın
# 'metadata,docstring,signature vs'si döndürülecek. Çözüm:
@wraps(fn)
return inner
@counter
@counter
print(inspect.signature(mult)) # çıktı: (a, b)
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
11748,
10104,
628,
198,
4299,
3753,
7,
22184,
2599,
198,
220,
220,
220,
37227,
38374,
277,
261,
591,
7745,
261,
35122,
705,
12501,
273,
1352,
6,
25776,
929,
6184,
100,
30102,
74,
30102,
46481,
... | 2.163265 | 294 |
""" env util """
import errno
import functools
import os
import signal
from functools import wraps
from types import LambdaType
from typing import Any, Callable, Dict, Optional, Sequence, Type, Union
import gym
import numpy as np
import rospy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv
def lmap(v, x, y) -> float:
"""Linear map of value v with range x to desired range y."""
return y[0] + (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
def with_retry(
retries_limit: int = 3,
allowed_exceptions: Optional[Sequence[Exception]] = None,
):
"""a decorator retry operation a few times and allow parameterize
Args:
retries_limit (int, optional):
[maximum retry number].Defaults to 3.
allowed_exceptions (Optional[Sequence[Exception]], optional):
[if specified, retry the operation]. Defaults to None.
"""
def retry(operation):
"""a decorator retry operation a few times"""
@wraps(operation)
return wrapped
return retry
def update_dict(dic: Dict, key: str, new_v: int) -> Dict:
"""update nested dictionary key value with new_value
Args:
dic ([dict]): a nested dictionary with variant length
key ([str]): key at any depth
new_v ([int]): new value for same key of dic
Returns:
[dict]: dictionary with new value
"""
for k, val in dic.items():
if isinstance(val, dict):
update_dict(val, key, new_v)
if k == key:
dic[k] = new_v
return dic
def my_vec_env(
env_id: Union[str, Type[gym.Env]],
n_envs: int = 1,
seed: Optional[int] = None,
start_index: int = 0,
monitor_dir: Optional[str] = None,
wrapper_class: Optional[Callable[[gym.Env], gym.Env]] = None,
env_kwargs: Optional[Dict[str, Any]] = None,
vec_env_cls: Optional[Type[Union[DummyVecEnv, SubprocVecEnv]]] = None,
vec_env_kwargs: Optional[Dict[str, Any]] = None,
monitor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs, # pylint: disable=unused-argument
) -> VecEnv:
"""
Create a wrapped, monitored ``VecEnv``.
By default it uses a ``DummyVecEnv`` which is usually faster
than a ``SubprocVecEnv``.
:param env_id: the environment ID or the environment class
:param n_envs: the number of environments you wish to have in parallel
:param seed: the initial seed for the random number generator
:param start_index: start rank index
:param monitor_dir: Path to a folder where the monitor files will be saved.
If None, no file will be written, however, the env will still be wrapped
in a Monitor wrapper to provide additional information about training.
:param wrapper_class: Additional wrapper to use on the environment.
This can also be a function with single argument that wraps the environment in many things.
:param env_kwargs: Optional keyword argument to pass to the env constructor
:param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.
:param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.
:param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.
:return: The wrapped environment
"""
env_kwargs = {} if env_kwargs is None else env_kwargs
vec_env_kwargs = {} if vec_env_kwargs is None else vec_env_kwargs
monitor_kwargs = {} if monitor_kwargs is None else monitor_kwargs
# No custom VecEnv is passed
if vec_env_cls is None:
# Default: use a DummyVecEnv
vec_env_cls = DummyVecEnv
# return vec_env_cls(
# [make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs
# )
return vec_env_cls([make_env(i + start_index) for i in range(n_envs)])
def parallel_vec_env(
env_id: Union[str, Type[gym.Env]],
n_envs: int = 1,
seed: Optional[int] = None,
start_index: int = 0,
monitor_dir: Optional[str] = None,
wrapper_class: Optional[Callable[[gym.Env], gym.Env]] = None,
env_kwargs: Optional[Dict[str, Any]] = None,
vec_env_cls: Optional[Type[Union[DummyVecEnv, SubprocVecEnv]]] = None,
vec_env_kwargs: Optional[Dict[str, Any]] = None,
monitor_kwargs: Optional[Dict[str, Any]] = None,
**kwargs, # pylint: disable=unused-argument
) -> VecEnv:
"""
Create a wrapped, monitored ``VecEnv``.
By default it uses a ``DummyVecEnv`` which is usually faster
than a ``SubprocVecEnv``.
:param env_id: the environment ID or the environment class
:param n_envs: the number of environments you wish to have in parallel
:param seed: the initial seed for the random number generator
:param start_index: start rank index
:param monitor_dir: Path to a folder where the monitor files will be saved.
If None, no file will be written, however, the env will still be wrapped
in a Monitor wrapper to provide additional information about training.
:param wrapper_class: Additional wrapper to use on the environment.
This can also be a function with single argument that wraps the environment in many things.
:param env_kwargs: Optional keyword argument to pass to the env constructor
:param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.
:param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.
:param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.
:return: The wrapped environment
"""
env_kwargs = {} if env_kwargs is None else env_kwargs
vec_env_kwargs = {} if vec_env_kwargs is None else vec_env_kwargs
monitor_kwargs = {} if monitor_kwargs is None else monitor_kwargs
# No custom VecEnv is passed
if vec_env_cls is None:
# Default: use a DummyVecEnv
vec_env_cls = DummyVecEnv
# return vec_env_cls(
# [make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs
# )
return vec_env_cls([make_env(i + start_index) for i in range(n_envs)])
| [
37811,
17365,
7736,
37227,
198,
11748,
11454,
3919,
198,
11748,
1257,
310,
10141,
198,
11748,
28686,
198,
11748,
6737,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
3858,
1330,
21114,
6814,
6030,
198,
6738,
19720,
1330,
4377,
11,
4... | 2.698588 | 2,266 |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is a class for SegMapTransform."""
import mmcv
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class SegMapTransform(object):
"""Semantic segmentation maps transform, which contains.
1. rescale the segmentation map to expected size
2. flip the image (if needed)
3. pad the image (if needed)
:param size_divisor: defaults to None
:type size_divisor: tuple
"""
def __init__(self, size_divisor=None):
"""Construct the SegMapTransform class."""
self.size_divisor = size_divisor
def __call__(self, img, scale, flip=False, keep_ratio=True):
"""Call function of SegMapTransform."""
if keep_ratio:
img = mmcv.imrescale(img, scale, interpolation='nearest')
else:
img = mmcv.imresize(img, scale, interpolation='nearest')
if flip:
img = mmcv.imflip(img)
if self.size_divisor is not None:
img = mmcv.impad_to_multiple(img, self.size_divisor)
return img
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
357,
34,
8,
12131,
13,
43208,
21852,
1766,
1539,
12052,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
... | 2.815308 | 601 |
import unittest
from app.bot.downloader import *
| [
11748,
555,
715,
395,
198,
6738,
598,
13,
13645,
13,
15002,
263,
1330,
1635,
198
] | 3.266667 | 15 |
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="webhelpers2_grid",
version="0.9",
description=""" HTML Grid renderer that helps generating HTML tables (or other structures)
for data presentation, supports ordering, sorting columns, and is very customizable
""",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Framework :: Pyramid",
"Framework :: Django",
"Framework :: Flask",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
],
author="Marcin Lulek",
author_email="info@webreactor.eu",
license="BSD",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
zip_safe=True,
include_package_data=True,
package_data={
"": ["*.txt", "*.rst", "*.ini", "*.css"],
"webhelpers2_grid": ["stylesheets/*.css"],
},
extras_require={
"dev": ["coverage", "pytest", "tox", "mock", "jinja2"],
"lint": ["black"],
},
test_suite="webhelpers2_grid.tests",
install_requires=["webhelpers2"],
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
6738,
33245,
1330,
1280,
198,
198,
1456,
796,
3108,
13,
397,
2777,
776,
7,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
4480,
1280,
... | 2.543478 | 598 |
import torch
import pandas as pd
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from walker import load_a_HIN_from_pandas_neighbors, getNeighbors
from merge1.model_merge_1 import NSTrainSet, HIN2vec, train
# set method parameters
window = 5
walk = 10
walk_length = 55
embed_size = 100
neg = 5
num_sample_nei = 3
sigmoid_reg = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f'device = {device}')
demo_edge = pd.read_csv('./train_tcm.csv', index_col=0)
edges = [demo_edge]
print('finish loading edges')
# init HIN
hin, edge_data_by_type, edge_types = load_a_HIN_from_pandas_neighbors(edges)
hin.window = window
dataset = NSTrainSet(hin.sample(walk_length, walk), hin.node_size, neg=neg)
neighbors = getNeighbors(
edge_type_count=len(edge_types),
num_nodes=hin.node_size,
edge_data_by_type=edge_data_by_type,
edge_types=edge_types,
num_neighbor_samples=num_sample_nei)
hin2vec = HIN2vec(
edge_types,
neighbors,
hin.node_size,
num_sample_nei,
hin.path_size,
embed_size,
sigmoid_reg)
# load model
# hin2vec.load_state_dict(torch.load('hin2vec.pt'))
# set training parameters
n_epoch = 1
batch_size = 64
log_interval = 200
if torch.cuda.is_available():
print('Use ', device)
hin2vec = hin2vec.to(device)
else:
print('Use CPU')
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
optimizer = optim.AdamW(hin2vec.parameters()) # 原作者使用的是SGD? 这里使用AdamW
loss_function = nn.BCELoss()
for epoch in range(n_epoch):
train(neighbors, log_interval, hin2vec, device, data_loader, optimizer, loss_function, epoch)
torch.save(hin2vec, 'hin2vec.pt')
# set output parameters [the output file is a bit different from the original code.]
node_vec_fname = 'node_vec_merge_1_' + \
str(window) + '_' +\
str(walk) + '_' +\
str(walk_length) + '_' +\
str(embed_size) + '_' +\
str(neg) + '_' +\
str(num_sample_nei) + '_' +\
str(sigmoid_reg) + '_' +\
str(n_epoch) + '_' +\
str(batch_size) + '_' +\
str(log_interval) +\
'.txt'
# path_vec_fname = 'meta_path_vec.txt'
path_vec_fname = None
print(f'saving node embedding vectors to {node_vec_fname}...')
node_embeds = pd.DataFrame(hin2vec.embeds_start.weight.data.numpy())
node_embeds.rename(hin.id2node).to_csv(node_vec_fname, sep=' ')
if path_vec_fname:
print(f'saving meta path embedding vectors to {path_vec_fname}...')
path_embeds = pd.DataFrame(hin2vec.embeds_path.weight.data.numpy())
path_embeds.rename(hin.id2path).to_csv(path_vec_fname, sep=' ')
# save model
# torch.save(hin2vec.state_dict(), 'hin2vec.pt') | [
11748,
28034,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
40085,
355,
6436,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
201,
198,
201,
198,... | 2.100218 | 1,377 |
import re
import time
import os.path
import bencode
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from pyquery.pyquery import PyQuery
from bibliotik.settings import BIBLIOTIK_GET_TORRENT_URL
from home.models import TransTorrentBase
from what_transcode.utils import get_info_hash_from_data
EBOOK_FORMATS = ['EPUB', 'PDF', 'MOBI', 'AZW3', 'DJVU', 'CBR', 'CHM', 'TXT']
LANGUAGES = ['English', 'Irish', 'German', 'French', 'Spanish', 'Italian', 'Latin', 'Japanese',
'Danish', 'Swedish', 'Norwegian', 'Dutch', 'Russian', 'Polish', 'Portuguese', 'Greek',
'Turkish', 'Hungarian', 'Korean', 'Chinese', 'Thai', 'Indonesian', 'Arabic']
| [
11748,
302,
198,
11748,
640,
198,
11748,
28686,
13,
6978,
198,
198,
11748,
1888,
8189,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
26791,
13,
45124,
1... | 2.775665 | 263 |
import pytest
from pytest_cases import parametrize_with_cases
@pytest.mark.parametrize('dummy_amount', [1, 0, -1])
@parametrize_with_cases('dummy_amount', cases=".", prefix="case_amount")
| [
11748,
12972,
9288,
198,
198,
6738,
12972,
9288,
62,
33964,
1330,
5772,
316,
380,
2736,
62,
4480,
62,
33964,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
67,
13513,
62,
17287,
3256,
685,
16,
11,
657,
11,
532... | 2.694444 | 72 |
# -*- coding: utf-8 -*-
import subprocess
from workflow import Workflow, PasswordNotFound
__author__ = 'zhouqi'
if __name__ == '__main__':
wf = Workflow()
wf.cache_data('scripts', get_all_scripts())
# wf.cache_data('brew_all_formulas', get_all_packages())
# wf.cache_data('brew_installed_formulas', get_installed_packages())
# wf.cache_data('brew_info', get_info())
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
850,
14681,
198,
6738,
30798,
1330,
5521,
11125,
11,
30275,
3673,
21077,
198,
198,
834,
9800,
834,
796,
705,
38536,
40603,
6,
628,
628,
198,
361,
11593,
3672,
834... | 2.606667 | 150 |
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from mock import patch, call
from ovh import APIError, ResourceNotFoundError, InvalidCredential
from octodns.provider.ovh import OvhProvider
from octodns.record import Record
from octodns.zone import Zone
| [
2,
198,
2,
198,
2,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
3467,
198,
220,
220,
220,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738... | 3.316832 | 101 |
# capture a sequence of images with constant exposure
# import necessary packages
import os
import datetime
import time
import picamera
# calibrate camera
camera = picamera.PiCamera()
camera.resolution = (3280, 2464)
camera.meter_mode = 'spot'
camera.image_denoise = False
# sets constant exposure for the camera based on a given ISO
# prints current camera settings to the console
# inital exposure
cameraCalibration()
printCameraSettings()
# confirm exposure settings
while True:
print("Enter '0' to use the current settings.")
exposureCheck = input("New ISO: ")
if exposureCheck == "0":
break
else:
cameraCalibration(exposureCheck)
printCameraSettings()
# query user
fileName = input("File Name: ")
imgFormat = input("Image Format [jpeg/png/bmp/yuv/rgb/bgr]: ")
originalCount = input("Sequence Lenth (in images): ")
delay = input("Image Delay (in seconds): ")
# create workspace
os.mkdir(fileName, 0o777)
directory = fileName + "/"
imgName = directory + fileName + "_{counter:03d}." + imgFormat
originalCount = int(originalCount)
# save settings reference file
settingsFile = open(directory + fileName + "_settings.txt", "w+")
settingsFile.write(str(datetime.datetime.now()))
settingsFile.write("\n" + "Exposure Speed: ")
settingsFile.write(str(camera.exposure_speed * 0.000001))
settingsFile.write("\n" + "Digital Gain: " + str(camera.digital_gain))
settingsFile.write("\n" + "Analog Gain: " + str(camera.analog_gain))
settingsFile.write("\n" + "White Balance Gain: " + str(camera.awb_gains))
settingsFile.write("\n" + "ISO: " + str(camera.iso))
settingsFile.close()
# initialize time tracking
startTime = time.time()
imgTime = time.time()
timeFile = open(directory + fileName + "_times.txt", "w+")
timeFile.write("Specified Delay: " + str(delay) + " seconds.")
# capture sequence
for i in enumerate(camera.capture_continuous(imgName, format=imgFormat)):
# timing and counting
elapsedTime = time.time() - imgTime
truePhoto = i[0] + 1
# print and record status
print("Image " + str(truePhoto) + " of " + str(originalCount) + \
" captured in " + time.strftime("%S seconds.", time.gmtime(elapsedTime)))
timeFile.write("\n Image " + str(truePhoto) + " captured in " \
+ time.strftime("%S seconds.", time.gmtime(elapsedTime)))
# delay implementation
if elapsedTime < int(delay):
effectiveDelay = int(delay) - elapsedTime
print("Holding for Delay: " + \
time.strftime("%S seconds.", time.gmtime(effectiveDelay)))
time.sleep(effectiveDelay)
else:
print("Warning: Encoder Time Exceeded Delay")
imgTime = time.time() # reset time for next image
# allow sequence length to be expanded
if truePhoto == originalCount:
print("All " + str(originalCount) + " images saved.")
newCount = input("Additional Images? ")
originalCount += int(newCount)
if truePhoto == int(originalCount):
break
else:
print("Capturing Image " + str(truePhoto + 1) + "...")
# close out and free resources
finalTime = time.time() - startTime
timeFile.write("\n" + "Total Time Elapsed: " + \
time.strftime("%M:%S.", time.gmtime(finalTime)))
timeFile.close()
camera.close()
print(str(fileName) + " capture has completed with " + str(originalCount) \
+ " total images in " + time.strftime("%M:%S.", time.gmtime(finalTime)))
| [
2,
8006,
257,
8379,
286,
4263,
351,
6937,
7111,
198,
198,
2,
1330,
3306,
10392,
198,
11748,
28686,
198,
11748,
4818,
8079,
198,
11748,
640,
198,
11748,
8301,
18144,
198,
198,
2,
33801,
378,
4676,
198,
25695,
796,
8301,
18144,
13,
3872... | 2.889171 | 1,182 |
"""
Longest common subsequence implementation starter
"""
from binary_search_tree.binary_search_tree import BinarySearchTree, Node
if __name__ == '__main__':
tree = BinarySearchTree()
nodes_to_add = [Node(8), Node(4), Node(2), Node(3), Node(10), Node(16),
Node(13), Node(12),Node(11)]
for node in nodes_to_add:
tree.add(node)
print(tree.get_size())
print(tree.traverse_breadth_tree)
tree.remove(Node(12))
print(tree.get_size())
tree.get_max_height()
RESULT = tree.find(Node(13)).root
assert RESULT == 13, 'Root not found'
| [
37811,
198,
14617,
395,
2219,
6399,
594,
7822,
14217,
198,
37811,
198,
6738,
13934,
62,
12947,
62,
21048,
13,
39491,
62,
12947,
62,
21048,
1330,
45755,
18243,
27660,
11,
19081,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834... | 2.450413 | 242 |
print("Hello world")
import math
print(math.sqrt(81))
# List example
days_of_the_week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
for i in days_of_the_week:
print(i)
| [
4798,
7203,
15496,
995,
4943,
201,
198,
11748,
10688,
201,
198,
4798,
7,
11018,
13,
31166,
17034,
7,
6659,
4008,
201,
198,
2,
7343,
1672,
201,
198,
12545,
62,
1659,
62,
1169,
62,
10464,
796,
14631,
21934,
2430,
23810,
2430,
26133,
243... | 2.810811 | 74 |
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import networkx as nx
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import cPickle as cp
import time
sys.path.append('%s/../../pytorch_structure2vec/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from pytorch_util import weights_init
sys.path.append('%s/../common' % os.path.dirname(os.path.realpath(__file__)))
from cmd_args import cmd_args, save_args
from gcn_modules import GCNModule, S2VNodeClassifier
from node_utils import load_binary_data, load_txt_data, run_test, StaticGraph
if __name__ == '__main__':
random.seed(cmd_args.seed)
np.random.seed(cmd_args.seed)
torch.manual_seed(cmd_args.seed)
# features, labels, idx_train, idx_val, idx_test = load_binary_data(cmd_args.data_folder + '/' + cmd_args.dataset, cmd_args.dataset)
features, labels, idx_train, idx_val, idx_test = load_txt_data(cmd_args.data_folder + '/' + cmd_args.dataset, cmd_args.dataset)
features = Variable( features )
labels = Variable( torch.LongTensor( np.argmax(labels, axis=1) ) )
if cmd_args.gm == 'mean_field':
mod = S2VNodeClassifier
elif cmd_args.gm == 'gcn':
mod = GCNModule
if cmd_args.saved_model is not None:
print('loading')
with open('%s-args.pkl' % cmd_args.saved_model, 'rb') as f:
base_args = cp.load(f)
gcn = mod(**vars(base_args))
gcn.load_state_dict(torch.load(cmd_args.saved_model+ '.model'))
else:
gcn = mod(**vars(cmd_args))
orig_adj = Variable( gcn.norm_tool.normed_adj )
if cmd_args.ctx == 'gpu':
gcn = gcn.cuda()
labels = labels.cuda()
if cmd_args.phase == 'test':
run_test(gcn, features, orig_adj, idx_test, labels)
sys.exit()
optimizer = optim.Adam(gcn.parameters(), lr=cmd_args.learning_rate, weight_decay=cmd_args.weight_decay)
best_val = None
gen = adj_generator()
for epoch in range(cmd_args.num_epochs):
t = time.time()
gcn.train()
optimizer.zero_grad()
cur_adj = next(gen)
_, loss_train, acc_train = gcn(features, cur_adj, idx_train, labels)
acc_train = acc_train.sum() / float(len(idx_train))
loss_train.backward()
optimizer.step()
gcn.eval()
_, loss_val, acc_val = gcn(features, orig_adj, idx_val, labels)
acc_val = acc_val.sum() / float(len(idx_val))
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.data[0]),
'acc_train: {:.4f}'.format(acc_train),
'loss_val: {:.4f}'.format(loss_val.data[0]),
'acc_val: {:.4f}'.format(acc_val),
'time: {:.4f}s'.format(time.time() - t))
if best_val is None or acc_val > best_val:
best_val = acc_val
print('----saving to best model since this is the best valid loss so far.----')
torch.save(gcn.state_dict(), cmd_args.save_dir + '/model-%s-epoch-best-%.2f.model' % (cmd_args.gm, cmd_args.del_rate))
save_args(cmd_args.save_dir + '/model-%s-epoch-best-%.2f-args.pkl' % (cmd_args.gm, cmd_args.del_rate), cmd_args)
run_test(gcn, features, orig_adj, idx_test, labels)
# pred = gcn(features, adh)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
4738,
198,
6738,
28034,
13,
2306,
519,
6335... | 2.212179 | 1,560 |
"""
Exceptions raised by django-chunk-upload.
"""
class ChunkUploadError(Exception):
"""
Exception raised if errors in the request/process.
"""
| [
37811,
198,
3109,
11755,
4376,
416,
42625,
14208,
12,
354,
2954,
12,
25850,
13,
198,
37811,
628,
198,
4871,
609,
2954,
41592,
12331,
7,
16922,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
35528,
4376,
611,
8563,
287,
262,
2581... | 3.098039 | 51 |
import transitfeed
schedule = transitfeed.Schedule()
schedule.AddAgency("Europe Agency", "http://iflyagency.com", "Europe/Madrid", "Europe_Agency")
service_period = schedule.GetDefaultServicePeriod()
service_period.SetStartDate("20070101")
service_period.SetEndDate("20190101")
service_period.SetWeekdayService(True)
service_period.SetDateHasService('20070704', False)
lleida = schedule.AddStop(lng=0.6199348, lat=41.6183423, name="Lleida", stop_id="Lleida")
barcelona = schedule.AddStop(lng=2.0785566, lat=41.3947687, name="Barcelona", stop_id="Barcelona")
paris = schedule.AddStop(lng=2.2071319, lat=48.8587737, name="Paris", stop_id="Paris")
brussels = schedule.AddStop(lng=4.305179, lat=50.854954, name="Brussels", stop_id="Brussels")
amsterdam = schedule.AddStop(lng=4.7581896, lat=52.3745287, name="Amsterdam", stop_id="Amsterdam")
berlin = schedule.AddStop(lng=13.1452858, lat=52.5072095, name="Berlin", stop_id="Berlin")
viena = schedule.AddStop(lng=16.2396348, lat=48.2205994, name="Viena", stop_id="Viena")
praga = schedule.AddStop(lng=14.325199, lat=50.0595849, name="Praga", stop_id="Praga")
milan = schedule.AddStop(lng=9.1075211, lat=45.4627123, name="Milan", stop_id="Milan")
roma = schedule.AddStop(lng=12.395572, lat=41.9099856, name="Roma", stop_id="Roma")
florence = schedule.AddStop(lng=11.1707567, lat=43.7799367, name="Florence", stop_id="Florence")
route_lleida = schedule.AddRoute(short_name="1", long_name="Route Lleida", route_type="Bus", route_id="route_lleida")
route_bcn = schedule.AddRoute(short_name="2", long_name="Route Barcelona", route_type="Bus", route_id="route_bcn")
route_paris = schedule.AddRoute(short_name="3", long_name="Route Paris", route_type="Bus", route_id="route_paris")
route_brussels = schedule.AddRoute(short_name="4", long_name="Route Brusels",route_type="Bus", route_id="route_brussels")
route_amsterdam = schedule.AddRoute(short_name="5", long_name="Route Amsterdam",route_type="Bus", route_id="route_amsterdam")
route_berlin = schedule.AddRoute(short_name="6", long_name="Route Berlin",route_type="Bus", route_id="route_berlin")
route_viena = schedule.AddRoute(short_name="7", long_name="Route Viena",route_type="Bus", route_id="route_viena")
route_praga = schedule.AddRoute(short_name="8", long_name="Route Praga",route_type="Bus", route_id="route_praga")
route_milan = schedule.AddRoute(short_name="9", long_name="Route Milan",route_type="Bus", route_id="route_milan")
route_roma = schedule.AddRoute(short_name="10", long_name="Route Roma",route_type="Bus", route_id="route_roma")
route_florence = schedule.AddRoute(short_name="11", long_name="Route Florence",route_type="Bus", route_id="route_florence")
#From Lleida
triplleidaBcn = route_lleida.AddTrip(schedule, headsign="From Lleida To Barcelona")
triplleidaBcn.AddStopTime(lleida, stop_time='09:00:00')
triplleidaBcn.AddStopTime(barcelona, stop_time='10:30:00')
triplleidaParis = route_lleida.AddTrip(schedule, headsign="From Lleida To Paris")
triplleidaParis.AddStopTime(lleida, stop_time='09:00:00')
triplleidaParis.AddStopTime(paris, stop_time='18:45:00')
triplleidaBrusels = route_lleida.AddTrip(schedule, headsign="From Lleida To Brusels")
triplleidaBrusels.AddStopTime(lleida, stop_time='09:00:00')
triplleidaBrusels.AddStopTime(brussels, stop_time='18:45:00')
triplleidaAms = route_lleida.AddTrip(schedule, headsign="From Lleida To Amsterdam")
triplleidaAms.AddStopTime(lleida, stop_time='09:00:00')
triplleidaAms.AddStopTime(amsterdam, stop_time='18:45:00')
triplleidaBerlin = route_lleida.AddTrip(schedule, headsign="From Lleida To Berlin")
triplleidaBerlin.AddStopTime(lleida, stop_time='09:00:00')
triplleidaBerlin.AddStopTime(berlin, stop_time='18:45:00')
triplleidaViena = route_lleida.AddTrip(schedule, headsign="From Lleida To Viena")
triplleidaViena.AddStopTime(lleida, stop_time='09:00:00')
triplleidaViena.AddStopTime(viena, stop_time='18:45:00')
triplleidaPraga = route_lleida.AddTrip(schedule, headsign="From Lleida To Praga")
triplleidaPraga.AddStopTime(lleida, stop_time='09:00:00')
triplleidaPraga.AddStopTime(praga, stop_time='18:45:00')
triplleidaMilan = route_lleida.AddTrip(schedule, headsign="From Lleida To Milan")
triplleidaMilan.AddStopTime(lleida, stop_time='09:00:00')
triplleidaMilan.AddStopTime(milan, stop_time='18:45:00')
triplleidaRoma = route_lleida.AddTrip(schedule, headsign="From Lleida To Roma")
triplleidaRoma.AddStopTime(lleida, stop_time='09:00:00')
triplleidaRoma.AddStopTime(roma, stop_time='18:45:00')
triplleidaFlorence = route_lleida.AddTrip(schedule, headsign="From Lleida To Florence")
triplleidaFlorence.AddStopTime(lleida, stop_time='09:00:00')
triplleidaFlorence.AddStopTime(florence, stop_time='18:45:00')
#From Barcelona
tripBarcelonaParis = route_bcn.AddTrip(schedule, headsign="From Barcelona To Paris")
tripBarcelonaParis.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaParis.AddStopTime(paris, stop_time='18:45:00')
tripBarcelonaBrussels = route_bcn.AddTrip(schedule, headsign="From Barcelona To Brusels")
tripBarcelonaBrussels.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaBrussels.AddStopTime(brussels, stop_time='18:45:00')
tripBarcelonaAms = route_bcn.AddTrip(schedule, headsign="From Barcelona To Amsterdam")
tripBarcelonaAms.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaAms.AddStopTime(amsterdam, stop_time='18:45:00')
tripBarcelonaBerlin = route_bcn.AddTrip(schedule, headsign="From Barcelona To Berlin")
tripBarcelonaBerlin.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaBerlin.AddStopTime(berlin, stop_time='18:45:00')
tripBarcelonaViena = route_bcn.AddTrip(schedule, headsign="From Barcelona To Viena")
tripBarcelonaViena.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaViena.AddStopTime(viena, stop_time='18:45:00')
tripBarcelonaPraga = route_bcn.AddTrip(schedule, headsign="From Barcelona To Praga")
tripBarcelonaPraga.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaPraga.AddStopTime(praga, stop_time='18:45:00')
tripBarcelonaMilan = route_bcn.AddTrip(schedule, headsign="From Barcelona To Milan")
tripBarcelonaMilan.AddStopTime(barcelona, stop_time='09:00:00')
tripBarcelonaMilan.AddStopTime(milan, stop_time='18:45:00')
tripbarcelonaRoma = route_bcn.AddTrip(schedule, headsign="From Barcelona To Roma")
tripbarcelonaRoma.AddStopTime(barcelona, stop_time='09:00:00')
tripbarcelonaRoma.AddStopTime(roma, stop_time='18:45:00')
tripbarcelonaFlorence = route_bcn.AddTrip(schedule, headsign="From Barcelona To Florence")
tripbarcelonaFlorence.AddStopTime(barcelona, stop_time='09:00:00')
tripbarcelonaFlorence.AddStopTime(florence, stop_time='18:45:00')
#From Paris
tripParis1 = route_paris.AddTrip(schedule, headsign="From Paris To Brusels")
tripParis1.AddStopTime(paris, stop_time='09:00:00')
tripParis1.AddStopTime(brussels, stop_time='18:45:00')
tripParis2 = route_paris.AddTrip(schedule, headsign="From Paris To Amsterdam")
tripParis2.AddStopTime(paris, stop_time='09:00:00')
tripParis2.AddStopTime(amsterdam, stop_time='18:45:00')
tripParis3 = route_paris.AddTrip(schedule, headsign="From Paris To Berlin")
tripParis3.AddStopTime(paris, stop_time='09:00:00')
tripParis3.AddStopTime(berlin, stop_time='18:45:00')
tripParis4 = route_paris.AddTrip(schedule, headsign="From Paris To Viena")
tripParis4.AddStopTime(paris, stop_time='09:00:00')
tripParis4.AddStopTime(viena, stop_time='18:45:00')
tripParis5 = route_paris.AddTrip(schedule, headsign="From Paris To Praga")
tripParis5.AddStopTime(paris, stop_time='09:00:00')
tripParis5.AddStopTime(praga, stop_time='18:45:00')
tripParis6 = route_paris.AddTrip(schedule, headsign="From Paris To Milan")
tripParis6.AddStopTime(paris, stop_time='09:00:00')
tripParis6.AddStopTime(milan, stop_time='18:45:00')
tripParis7 = route_paris.AddTrip(schedule, headsign="From Paris To Roma")
tripParis7.AddStopTime(paris, stop_time='09:00:00')
tripParis7.AddStopTime(roma, stop_time='18:45:00')
tripParis8 = route_paris.AddTrip(schedule, headsign="From Paris To Florence")
tripParis8.AddStopTime(paris, stop_time='09:00:00')
tripParis8.AddStopTime(florence, stop_time='18:45:00')
#From Brusels
tripBrussels1 = route_brussels.AddTrip(schedule, headsign="From Brusels To Amsterdam")
tripBrussels1.AddStopTime(brussels, stop_time='09:00:00')
tripBrussels1.AddStopTime(amsterdam, stop_time='18:45:00')
tripBrussels2 = route_brussels.AddTrip(schedule, headsign="From Brusels To Berlin")
tripBrussels2.AddStopTime(brussels, stop_time='09:00:00')
tripBrussels2.AddStopTime(berlin, stop_time='18:45:00')
tripBrussels3 = route_brussels.AddTrip(schedule, headsign="From Brusels To Viena")
tripBrussels3.AddStopTime(brussels, stop_time='09:00:00')
tripBrussels3.AddStopTime(viena, stop_time='18:45:00')
tripBrussels4 = route_brussels.AddTrip(schedule, headsign="From Brusels To Praga")
tripBrussels4.AddStopTime(brussels, stop_time='09:00:00')
tripBrussels4.AddStopTime(praga, stop_time='18:45:00')
tripBrussels4 = route_brussels.AddTrip(schedule, headsign="From Brusels To Milan")
tripBrussels4.AddStopTime(brussels, stop_time='09:00:00')
tripBrussels4.AddStopTime(milan, stop_time='18:45:00')
tripBrussel5 = route_brussels.AddTrip(schedule, headsign="From Brusels To Roma")
tripBrussel5.AddStopTime(brussels, stop_time='09:00:00')
tripBrussel5.AddStopTime(roma, stop_time='18:45:00')
tripBrussel6 = route_brussels.AddTrip(schedule, headsign="From Brusels To Florence")
tripBrussel6.AddStopTime(brussels, stop_time='09:00:00')
tripBrussel6.AddStopTime(florence, stop_time='18:45:00')
#From Amsterdam
tripAmsterdam1 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Berlin")
tripAmsterdam1.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam1.AddStopTime(berlin, stop_time='18:45:00')
tripAmsterdam2 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Viena")
tripAmsterdam2.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam2.AddStopTime(viena, stop_time='18:45:00')
tripAmsterdam3 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Praga")
tripAmsterdam3.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam3.AddStopTime(praga, stop_time='18:45:00')
tripAmsterdam4 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Milan")
tripAmsterdam4.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam4.AddStopTime(milan, stop_time='18:45:00')
tripAmsterdam5 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Roma")
tripAmsterdam5.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam5.AddStopTime(roma, stop_time='18:45:00')
tripAmsterdam6 = route_amsterdam.AddTrip(schedule, headsign="From Amsterdam To Florence")
tripAmsterdam6.AddStopTime(amsterdam, stop_time='09:00:00')
tripAmsterdam6.AddStopTime(florence, stop_time='18:45:00')
#From Berlin
tripBerlinViena = route_berlin.AddTrip(schedule, headsign="From Berlin To Viena")
tripBerlinViena.AddStopTime(berlin, stop_time='09:00:00')
tripBerlinViena.AddStopTime(viena, stop_time='18:45:00')
tripBerlinPraga = route_berlin.AddTrip(schedule, headsign="From Berlin To Praga")
tripBerlinPraga.AddStopTime(berlin, stop_time='09:00:00')
tripBerlinPraga.AddStopTime(praga, stop_time='18:45:00')
tripBerlinMilan = route_berlin.AddTrip(schedule, headsign="From Berlin To Milan")
tripBerlinMilan.AddStopTime(berlin, stop_time='09:00:00')
tripBerlinMilan.AddStopTime(milan, stop_time='18:45:00')
tripBerlinRoma = route_berlin.AddTrip(schedule, headsign="From Berlin To Roma")
tripBerlinRoma.AddStopTime(berlin, stop_time='09:00:00')
tripBerlinRoma.AddStopTime(roma, stop_time='18:45:00')
tripberlinFlorence = route_berlin.AddTrip(schedule, headsign="From Berlin To Florence")
tripberlinFlorence.AddStopTime(berlin, stop_time='09:00:00')
tripberlinFlorence.AddStopTime(florence, stop_time='18:45:00')
#From Viena
tripVienaPraga = route_viena.AddTrip(schedule, headsign="From Viena To Praga")
tripVienaPraga.AddStopTime(viena, stop_time='09:00:00')
tripVienaPraga.AddStopTime(praga, stop_time='18:45:00')
tripVienaMilan = route_viena.AddTrip(schedule, headsign="From Viena To Milan")
tripVienaMilan.AddStopTime(viena, stop_time='09:00:00')
tripVienaMilan.AddStopTime(milan, stop_time='18:45:00')
tripVienaRoma = route_viena.AddTrip(schedule, headsign="From Viena To Roma")
tripVienaRoma.AddStopTime(viena, stop_time='09:00:00')
tripVienaRoma.AddStopTime(roma, stop_time='18:45:00')
tripVienaFlorencia = route_viena.AddTrip(schedule, headsign="From Viena To Florence")
tripVienaFlorencia.AddStopTime(viena, stop_time='09:00:00')
tripVienaFlorencia.AddStopTime(florence, stop_time='18:45:00')
#From Praga
tripPragaMilan = route_praga.AddTrip(schedule, headsign="From Praga To Milan")
tripPragaMilan.AddStopTime(praga, stop_time='09:00:00')
tripPragaMilan.AddStopTime(milan, stop_time='18:45:00')
tripPragaRoma = route_praga.AddTrip(schedule, headsign="From Praga To Roma")
tripPragaRoma.AddStopTime(praga, stop_time='09:00:00')
tripPragaRoma.AddStopTime(roma, stop_time='18:45:00')
tripPragaFlorence = route_praga.AddTrip(schedule, headsign="From Praga To Florence")
tripPragaFlorence.AddStopTime(praga, stop_time='09:00:00')
tripPragaFlorence.AddStopTime(florence, stop_time='18:45:00')
#From Milan
tripMilanRoma = route_milan.AddTrip(schedule, headsign="From Milan To Roma")
tripMilanRoma.AddStopTime(milan, stop_time='09:00:00')
tripMilanRoma.AddStopTime(roma, stop_time='18:45:00')
tripMilanFlorence = route_milan.AddTrip(schedule, headsign="From Milan To Florence")
tripMilanFlorence.AddStopTime(milan, stop_time='09:00:00')
tripMilanFlorence.AddStopTime(florence, stop_time='18:45:00')
#From Roma
tripRomaFlorence = route_roma.AddTrip(schedule, headsign="From Roma To Florence")
tripRomaFlorence.AddStopTime(roma, stop_time='09:00:00')
tripRomaFlorence.AddStopTime(florence, stop_time='18:45:00')
schedule.Validate()
schedule.WriteGoogleTransitFeed('google_transit_europe.zip') | [
11748,
11168,
12363,
628,
198,
15952,
5950,
796,
11168,
12363,
13,
27054,
5950,
3419,
198,
15952,
5950,
13,
4550,
32,
4949,
7203,
16112,
7732,
1600,
366,
4023,
1378,
361,
306,
40955,
13,
785,
1600,
366,
16112,
14,
18454,
6058,
1600,
366... | 2.599398 | 5,312 |
import network
if __name__ == '__main__':
from machine import SPI
from Maix import GPIO
import socket, time
from fpioa_manager import fm
################ config ################
local_ip = "192.168.0.117"
local_netmask = "255.255.255.0"
local_gateway = "255.255.255.0"
local_dns_server = "8.8.8.8"
server_ip = "192.168.0.141"
server_port = 8000
addr = (server_ip, server_port)
#######################################
network_wiznet5k()
if lan.isconnected():
is_dhcp = False
if is_dhcp:
# #dhcp: Dynamic IP acquisition, It's not necessary
while True:
if(lan.nic.dhclient()):
print("DHCP IP:", lan.ifconfig())
break
else:
lan.ifconfig()
############################## UDP Test ##############################
# # The server must first know the client's IP and port number through the message sent by the client before it send the message to the client
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
while True:
sock.sendto("Client send: hello UDP\n".encode(), addr)
try:
data, addr1 = sock.recvfrom(10)
print("Recv from server: ", data)
except Exception as e:
pass
time.sleep_ms(500)
sock.close()
############################ UDP Test end ############################
############################## TCP Test ##############################
# The TCP server needs to be pre-started
# sock = socket.socket()
# sock.connect(addr)
# while 1:
# sock.send("Client send: Hello TCP\n")
# try:
# data = sock.recv(10)
# print("Recv from Server: ", data)
# except Exception as e:
# print(e)
# time.sleep(500)
# sock.close()
############################ TCP Test end ############################
| [
11748,
3127,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
422,
4572,
1330,
49091,
198,
220,
422,
6669,
844,
1330,
50143,
198,
220,
1330,
17802,
11,
640,
198,
220,
422,
277,
79,
952,
64,
62,
37153,
13... | 2.615708 | 713 |
from .base import Base
import subprocess
| [
6738,
764,
8692,
1330,
7308,
198,
11748,
850,
14681,
628
] | 4.2 | 10 |
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce import Agent, Environment
if __name__ == '__main__':
main()
| [
2,
15069,
12131,
309,
22854,
3174,
4816,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 4.063492 | 189 |
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
file_name=''
new_name=''
main()
| [
11748,
28686,
201,
198,
11748,
15095,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
201,
198,
7753,
62,
3672,
28,
7061,
201,
198,
3605,
62,
3672,
28,
7061,
201,
198,
2... | 2.346154 | 52 |
#!/usr/bin/env python
# encoding: utf-8
import os
import re
from setuptools import setup, find_packages
setup(
name='status',
version=version_read(),
description='HTTP status code reporting for GET and POST requests',
long_description=(docs_read('README.rst')),
url='https://github.com/chrissimpkins/status',
license='MIT license',
author='Christopher Simpkins',
author_email='git.simpkins@gmail.com',
platforms=['any'],
entry_points = {
'console_scripts': [
'status = status.app:main'
],
},
packages=find_packages("lib"),
package_dir={'': 'lib'},
install_requires=['Naked'],
keywords='HTTP, request, POST, GET, status code, status, website, URL, internet, network, networking',
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking',
'Topic :: System',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
'License :: OSI Approved :: MIT License'
],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
11748,
302,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
198,
40406,
7,
198,
220,
220,
220,
... | 2.665663 | 664 |
from unittest import skipUnless, TestCase
import os
from app_common.apptools.testing_utils import temp_bringup_ui_for
try:
from pybleau.app.api import FilterExpression, FilterExpressionManager
except ImportError:
pass
if os.environ.get("ETS_TOOLKIT", "qt4") == "null":
ui_available = False
else:
ui_available = True
@skipUnless(ui_available, "NO UI BACKEND AVAILABLE")
| [
6738,
555,
715,
395,
1330,
14267,
28042,
11,
6208,
20448,
198,
11748,
28686,
198,
198,
6738,
598,
62,
11321,
13,
1324,
31391,
13,
33407,
62,
26791,
1330,
20218,
62,
48580,
929,
62,
9019,
62,
1640,
198,
28311,
25,
198,
220,
220,
220,
... | 2.818841 | 138 |
###########
# Translations between more friendly describtions of OGR-drivers and short codes.
# adresses: TRUI issue #2 https://bitbucket.org/KMS/trui/issue/2/ogr-driver-naming
# more or less taken directly from http://www.gdal.org/ogr/ogr_formats.html
# simlk, march 2013
##############
# FORMAT: Long name: (short_name,dco,lco)
OGR_LONG_TO_SHORT = {
"Aeronav FAA files": ["AeronavFAA", "", ""],
"ESRI ArcObjects": ["ArcObjects", "", ""],
"Arc/Info Binary Coverage": ["AVCBin", "", ""],
"Arc/Info .E00 (ASCII) Coverage": ["AVCE00", "", ""],
"Arc/Info Generate": ["ARCGEN", "", ""],
"Atlas BNA": ["BNA", "", ""],
"AutoCAD DWG": ["DWG", "", ""],
"AutoCAD DXF": ["DXF", "", ""],
"Comma Separated Value (.csv)": ["CSV", "", ""],
"CouchDB/GeoCouch": ["CouchDB", "", ""],
"DODS/OPeNDAP": ["DODS", "", ""],
"EDIGEO": ["EDIGEO", "", ""],
"ElasticSearch": ["ElasticSearch", "", ""],
"ESRI FileGDB": ["FileGDB", "", ""],
"ESRI Personal GeoDatabase": ["PGeo", "", ""],
"ESRI ArcSDE": ["SDE", "", ""],
"ESRI Shapefile": ["ESRI Shapefile", "", ""],
"FMEObjects Gateway": ["FMEObjects Gateway", "", ""],
"GeoJSON": ["GeoJSON", "", ""],
"Geoconcept Export": ["Geoconcept", "", ""],
"Geomedia .mdb": ["Geomedia", "", ""],
"GeoRSS": ["GeoRSS", "", ""],
"Google Fusion Tables": ["GFT", "", ""],
"GML": ["GML", "", ""],
"GMT": ["GMT", "", ""],
"GPSBabel": ["GPSBabel", "", ""],
"GPX": ["GPX", "", ""],
"GRASS": ["GRASS", "", ""],
"GPSTrackMaker (.gtm, .gtz)": ["GPSTrackMaker", "", ""],
"Hydrographic Transfer Format": ["HTF", "", ""],
"Idrisi Vector (.VCT)": ["Idrisi", "", ""],
"Informix DataBlade": ["IDB", "", ""],
"INTERLIS1": ["Interlis 1", "", ""],
"INTERLIS2": ["Interlis 2", "", ""],
"INGRES": ["INGRES", "", ""],
"KML": ["KML", "", ""],
"LIBKML": ["LIBKML", "", ""],
"Mapinfo File": ["MapInfo File", "", ""],
"Microstation dgn (v.7)": ["DGN", "", ""],
"Access MDB": ["MDB", "", ""],
"Memory": ["Memory", "", ""],
"MySQL": ["MySQL", "", ""],
"NAS - ALKIS": ["NAS", "", ""],
"Oracle Spatial": ["OCI", "", ""],
"ODBC": ["ODBC", "", ""],
"MS SQL Spatial server": ["MSSQLSpatial", "", ""],
"Open Document Spreadsheet": ["ODS", "", ""],
"OGDI Vectors": ["OGDI", "", ""],
"OpenAir": ["OpenAir", "", ""],
"OpenStreetMap": ["OSM", "", ""],
"PCI Geomatics DbFile": ["PCIDSK", "", ""],
"Geospatial PDF": ["PDF", "", ""],
"PDS": ["PDS", "", ""],
"PostgreSQL data dump": ["PGDump", "", ""],
"PostgreSQL/Postgis": ["PostgreSQL", "", ""],
"EPIInfo .REC": ["REC", "", ""],
"S-57 (ENC)": ["S57", "", ""],
"SDTS": ["SDTS", "", ""],
"SEG-P1/UKOOA P1/90": ["SEGUKOOA", "", ""],
"SEG-Y": ["SEGY", "", ""],
"Norwegian SOSI Std.": ["SOSI", "", ""],
"SQLite": ["SQLite", "", ""],
"SpatiaLite": ["SQLite", "SPATIALITE=YES", ""],
"SUA": ["SUA", "", ""],
"SVG": ["SVG", "", ""],
"UK .NTF": ["UK. NTF", "", ""],
"U.S. Census TIGER/Line": ["TIGER", "", ""],
"VFK data": ["VFK", "", ""],
"VRT-Virtual Datasource": ["VRT", "", ""],
"OGC WFS": ["WFS", "", ""],
"MS Excel format": ["XLS", "", ""],
"MS Open XML spreadsheet": ["XLSX", "", ""],
"X-Plane/Flightgear data": ["XPLANE", "", ""]}
# Define or pickle this to speed up start up time
OGR_SHORT_TO_LONG = dict() # A one to many mapping
for key, val in OGR_LONG_TO_SHORT.items():
if val[0] in OGR_SHORT_TO_LONG:
OGR_SHORT_TO_LONG[val[0]].append(key)
else:
OGR_SHORT_TO_LONG[val[0]] = [key]
| [
7804,
21017,
201,
198,
2,
3602,
49905,
1022,
517,
8030,
2225,
45240,
286,
440,
10761,
12,
36702,
290,
1790,
12416,
13,
201,
198,
2,
512,
16746,
25,
7579,
10080,
2071,
1303,
17,
3740,
1378,
2545,
27041,
316,
13,
2398,
14,
42,
5653,
1... | 2.105622 | 1,761 |
#! /usr/bin/env python3
import ctypes as ct
import os
from bcc import BPF
from bcc.table import BPF_MAP_TYPE_HASH_OF_MAPS
from bcc.libbcc import lib
lib.bpf_create_map_in_map.argtypes = [ct.c_int, ct.c_char_p, ct.c_int, ct.c_int, ct.c_int, ct.c_int]
# pin the inner map
setup = BPF(text="""
BPF_HASH(inner_map, u64, u64);
""")
map_in_map_fd = lib.bpf_create_map_in_map(BPF_MAP_TYPE_HASH_OF_MAPS, ct.c_char_p(0),
ct.sizeof(ct.c_longlong), setup["inner_map"].map_fd, 10240, 0)
lib.bpf_obj_pin(map_in_map_fd, b'/sys/fs/bpf/map_in_map')
# load the pinned map
with open("map_in_map.c", "r") as f:
txt = f.read()
print(txt)
bpf = BPF(text=txt)
if __name__ == "__main__":
bpf["testificate"].open_perf_buffer(print_event)
while 1:
try:
bpf.perf_buffer_poll()
pass
except KeyboardInterrupt:
exit()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
269,
19199,
355,
269,
83,
198,
11748,
28686,
198,
198,
6738,
275,
535,
1330,
20997,
37,
198,
6738,
275,
535,
13,
11487,
1330,
20997,
37,
62,
33767,
62,
25216,
62,
... | 1.94843 | 446 |
# Python code to illustrate
# regression using data set
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
import pandas as pd
# Load CSV and columns
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
df = pd.read_csv("https://raw.githubusercontent.com/Cibah/machine-learning/supervised-learning/datasets/csv-files/Housing.csv")
Y = df['price']
X = df['lotsize']
X=X.values.reshape(len(X),1)
Y=Y.values.reshape(len(Y),1)
# Split the data into training/testing sets
X_train = X[:-250]
X_test = X[-250:]
# Split the targets into training/testing sets
Y_train = Y[:-250]
Y_test = Y[-250:]
# Plot outputs
plt.scatter(X_test, Y_test, color='black')
plt.title('Test Data')
plt.xlabel('Size')
plt.ylabel('Price')
#plt.xticks(())
#plt.yticks(())
#1. Linear Regression
#2. Logistic Regression
#3. Ridge Regression
#4. Lasso Regression
#5. Polynomial Regression
#6. Bayesian Linear Regression
#7. Stepwise Regression
#8. ElasticNet Regression
# Create linear regression object
#regr = linear_model.LinearRegression()
# Create regression object
#regr = linear_model.Lasso()
#regr = linear_model.Perceptron()
#regr = linear_model.ARDRegression()
#regr = linear_model.BayesianRidge()
regr = linear_model.ElasticNet()
#regr = linear_model.LogisticRegression()
#regr = linear_model.Ridge()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Plot outputs
plt.plot(X_test, regr.predict(X_test), color='red',linewidth=3)
plt.show() | [
2,
11361,
2438,
284,
19418,
198,
2,
20683,
1262,
1366,
900,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
40522,
11,
14174,
62,
19849,
198,
11748,
19798,
... | 2.758559 | 555 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from distributions import Categorical, Categorical2D
from utils import init, init_normc_
import math
import numpy as np
from densenet_pytorch.densenet import DenseNet
# from coord_conv_pytorch.coord_conv import nn.Conv2d, nn.Conv2dTranspose
#from nn.Conv2d_pytorch.nn.Conv2d import nn.Conv2d, nn.Conv2dTranspose
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
24570,
1330,
327,
2397,
12409,
11,
327,
2397,
12409,
17,
35,
198,
6738,
3384,
4487,
1330,
2315,
11,
2315,
62,... | 2.515528 | 161 |
import pytest
from lispyc import nodes
from lispyc.exceptions import SpecialFormSyntaxError
from lispyc.nodes import Branch, ComposedForm, Cond, Constant, Program, Variable
from lispyc.parser import parse
from .data import FORM_PROGRAMS, FORMS
VALID_1_BRANCH_PROGRAMS = [
"($name$ $value$ ((x 7 y) (a b)) $default$)",
"($name$ $value$ ((cond (a b) 1.7) ()) $default$)",
]
VALID_PROGRAMS = [
"($name$ $value$ (a 7.9) (1 true) $default$)",
"($name$ $value$ ((x 7 y) (a b)) ((cond (a b) false) ()) $default$)",
]
VALID_NODES = [
(Branch(Variable("a"), Constant(7.9)), Branch(Constant(1), Constant(True))),
(
Branch(
ComposedForm(Variable("x"), (Constant(7), Variable("y"))),
ComposedForm(Variable("a"), (Variable("b"),)),
),
Branch(Cond((Branch(Variable("a"), Variable("b")),), Constant(False)), nodes.List(())),
),
]
VALID = list(zip(VALID_PROGRAMS, VALID_NODES))
@pytest.mark.parametrize(["default_program", "default"], FORMS)
@pytest.mark.parametrize(["program", "branches"], VALID)
@pytest.mark.parametrize(["default_program", "default"], FORMS)
@pytest.mark.parametrize(["value_program", "value"], FORMS)
@pytest.mark.parametrize(["program", "branches"], VALID)
@pytest.mark.parametrize("default", FORM_PROGRAMS)
@pytest.mark.parametrize("value", FORM_PROGRAMS)
@pytest.mark.parametrize("program", VALID_PROGRAMS)
@pytest.mark.parametrize("program", VALID_1_BRANCH_PROGRAMS)
@pytest.mark.parametrize("invalid_branch", FORM_PROGRAMS)
@pytest.mark.parametrize("program", VALID_PROGRAMS)
@pytest.mark.parametrize("default", FORM_PROGRAMS)
@pytest.mark.parametrize("value", FORM_PROGRAMS)
@pytest.mark.parametrize("program", VALID_1_BRANCH_PROGRAMS)
@pytest.mark.parametrize("value", FORM_PROGRAMS)
@pytest.mark.parametrize("program", VALID_1_BRANCH_PROGRAMS)
@pytest.mark.parametrize("invalid_branch", FORM_PROGRAMS)
@pytest.mark.parametrize("program", VALID_PROGRAMS)
| [
11748,
12972,
9288,
198,
198,
6738,
300,
8802,
88,
66,
1330,
13760,
198,
6738,
300,
8802,
88,
66,
13,
1069,
11755,
1330,
6093,
8479,
13940,
41641,
12331,
198,
6738,
300,
8802,
88,
66,
13,
77,
4147,
1330,
20551,
11,
3082,
1335,
8479,
... | 2.36899 | 832 |
import json
from modules.const import Const
from modules.lib.agent_utils import run_on_bash
| [
11748,
33918,
198,
6738,
13103,
13,
9979,
1330,
4757,
198,
6738,
13103,
13,
8019,
13,
25781,
62,
26791,
1330,
1057,
62,
261,
62,
41757,
628
] | 3.72 | 25 |
import redis
from django.conf import settings
from .models import Product
# Connect to redis
conn = redis.Redis(host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
db=settings.REDIS_DB) | [
11748,
2266,
271,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
764,
27530,
1330,
8721,
198,
198,
2,
8113,
284,
2266,
271,
198,
37043,
796,
2266,
271,
13,
7738,
271,
7,
4774,
28,
33692,
13,
22083,
1797,
62,
39,
10892,
1... | 2.237624 | 101 |
import pandas as pd
import numpy as np
import ast
import pickle
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
6468,
198,
11748,
2298,
293,
628,
628
] | 3.190476 | 21 |
# Defining a class called heapnode that has stores the symbol,frequency of the symbol, pointer to each of its children
# and a flag variable called "bits" which denotes if its the left child or the right child
# Function that creates our huffman tree by taking our priority queue as input and outputs the root node
# We first sort the queue by frequency and define the parent of these leafnode, with frequency as the sum of
# the frequncies of the first two nodes of our sorted priority queue. We then pop off those two nodes
# and push our newnode into our queue and sort it again. We repeat this process till the size of our priority queue is 1.
# A function that creates a dictionary that stores our symbols as our key and its codeword as its keyvalue | [
2,
2896,
3191,
257,
1398,
1444,
24575,
17440,
326,
468,
7000,
262,
6194,
11,
35324,
286,
262,
6194,
11,
17562,
284,
1123,
286,
663,
1751,
198,
2,
290,
257,
6056,
7885,
1444,
366,
9895,
1,
543,
43397,
611,
663,
262,
1364,
1200,
393,
... | 4.387283 | 173 |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import pytest
import asyncio
import sample_content
from horton_logging import logger
# Amount of time to wait after updating desired properties.
wait_time_for_desired_property_updates = 5
async def wait_for_reported_properties_update(*, properties_sent, client, registry):
"""
Helper function which uses the registry to wait for reported properties
to update to the expected value
"""
while True:
if getattr(client, "module_id", None):
twin_received = await registry.get_module_twin(
client.device_id, client.module_id
)
else:
twin_received = await registry.get_device_twin(client.device_id)
reported_properties_received = twin_received["reported"]
if "$version" in reported_properties_received:
del reported_properties_received["$version"]
if "$metadata" in reported_properties_received:
del reported_properties_received["$metadata"]
logger("expected:" + str(properties_sent["reported"]))
logger("received:" + str(reported_properties_received))
if properties_sent["reported"] == reported_properties_received:
# test passed
return
else:
logger("Twin does not match. Sleeping for 2 seconds and retrying.")
await asyncio.sleep(2)
| [
2,
15069,
357,
66,
8,
5413,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
4091,
38559,
24290,
2393,
287,
262,
1628,
6808,
329,
198,
2,
1336,
5964,
1321,
13,
198,
198,
11748,
12972,
9288,
198,
11748,
30351,
952... | 2.787037 | 540 |
"""
Entrada de dados
"""
num_1 = int(input('Digite um número: '))
num_2 = int(input('Digite outro: '))
print(num_1 ** num_2) | [
37811,
198,
14539,
81,
4763,
390,
9955,
418,
198,
37811,
198,
198,
22510,
62,
16,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
25,
705,
4008,
198,
22510,
62,
17,
796,
493,
7,
15414,
10786,
19511,
578,
503,
305,... | 2.172414 | 58 |
# Generated by Django 3.1.4 on 2020-12-05 11:59
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
19,
319,
12131,
12,
1065,
12,
2713,
1367,
25,
3270,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from .aarch64_disasm_processor import AArch64DisassemblyProcessor
from .amd64_disasm_processor import AMD64DisassemblyProcessor
from typing import List
import angr
| [
6738,
764,
64,
998,
2414,
62,
6381,
8597,
62,
41341,
1330,
317,
19895,
2414,
7279,
41873,
18709,
273,
198,
6738,
764,
28745,
2414,
62,
6381,
8597,
62,
41341,
1330,
10324,
2414,
7279,
41873,
18709,
273,
198,
198,
6738,
19720,
1330,
7343,... | 3.553191 | 47 |
from zeit.cms.content.util import objectify_soup_fromstring
import lxml.etree
import lxml.objectify
import zope.interface
import zope.location.location
import zope.proxy
import zope.schema
import zope.schema.interfaces
import zope.security._proxy
import zope.security.checker
import zope.security.proxy
DEFAULT_MARKER = object()
class IXMLTree(zope.schema.interfaces.IField):
"""A field containing an lxml.objectified tree."""
# This is here to avoid circular imports
@zope.interface.implementer(zope.schema.interfaces.IFromUnicode)
@zope.interface.implementer(IXMLTree)
EMPTY = object()
def apply_default_values(context, interface, set_none=False):
"""Apply default values from ``interface`` to ``context``."""
for name, field in zope.schema.getFields(interface).items():
if field.readonly:
continue
__traceback_info__ = (name,)
default = getattr(field, 'default')
# don't set None values (#9406)
if default is None and not set_none:
continue
current = getattr(context, name, EMPTY)
# don't cause a field to be written unnecessarily
if current == default:
continue
# if a value exists, don't overwrite it if it's valid (#10362)
if current is not EMPTY and current is not field.missing_value:
field = field.bind(context)
try:
field.validate(current)
except zope.schema.ValidationError:
pass
else:
continue
# now we have both an attribute without a meaningful value and a
# meaningful value to set it to
setattr(context, name, default)
| [
6738,
41271,
270,
13,
46406,
13,
11299,
13,
22602,
1330,
2134,
1958,
62,
82,
10486,
62,
6738,
8841,
198,
11748,
300,
19875,
13,
316,
631,
198,
11748,
300,
19875,
13,
15252,
1958,
198,
11748,
1976,
3008,
13,
39994,
198,
11748,
1976,
30... | 2.527489 | 673 |
from collections import defaultdict, Counter
from typing import List
from scorer.product_of_experts import Expert
from scorer.scorer import get_relations
| [
6738,
17268,
1330,
4277,
11600,
11,
15034,
198,
6738,
19720,
1330,
7343,
198,
198,
6738,
30664,
13,
11167,
62,
1659,
62,
23100,
912,
1330,
25516,
198,
6738,
30664,
13,
1416,
11934,
1330,
651,
62,
39468,
628
] | 4.333333 | 36 |
# web_app/routes/home_routes.py
from flask import Blueprint, render_template
home_routes = Blueprint("home_routes", __name__)
@home_routes.route("/")
@home_routes.route("/hello")
@home_routes.route("/about")
| [
2,
3992,
62,
1324,
14,
81,
448,
274,
14,
11195,
62,
81,
448,
274,
13,
9078,
201,
198,
201,
198,
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
201,
198,
201,
198,
11195,
62,
81,
448,
274,
796,
39932,
7203,
11195,
62,
81,
448,
27... | 2.27551 | 98 |
import numpy as np
from runner.action.set.variable import Variable
class Continuous(Variable):
"""Continuous variable
low <= variable < high
Args:
low (float): low boundary
high (float): high boundary
route (str): route to value (see Action)
"""
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
17490,
13,
2673,
13,
2617,
13,
45286,
1330,
35748,
628,
198,
4871,
45012,
7,
43015,
2599,
198,
220,
220,
220,
37227,
17875,
5623,
7885,
628,
220,
220,
220,
220,
220,
220,
220,
1877,
19841... | 2.570248 | 121 |
"""empty message
Revision ID: 64169b01290d
Revises: 53b58719e03b
Create Date: 2019-04-17 17:29:09.098849
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '64169b01290d'
down_revision = '53b58719e03b'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
5598,
22172,
65,
486,
24369,
67,
198,
18009,
2696,
25,
7192,
65,
44617,
1129,
68,
3070,
65,
198,
16447,
7536,
25,
13130,
12,
3023,
12,
1558,
1596,
25,
1959,
25,
2931,
13,
2931,
34... | 2.555556 | 117 |
# -*- coding:utf-8 -*-
from app.libs.redprint import Redprint
api = Redprint('user')
@api.route('/login', methods=['POST'])
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
6738,
598,
13,
8019,
82,
13,
445,
4798,
1330,
2297,
4798,
198,
198,
15042,
796,
2297,
4798,
10786,
7220,
11537,
628,
198,
31,
15042,
13,
38629,
10786,
14,
38235,
3256,
5050... | 2.54 | 50 |
#!/usr/bin/env python
__author__ = 'cmantas'
import sqlite3
import matplotlib.pyplot as plt
import argparse
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import numpy as np
conn = sqlite3.connect('../results.db')
c = conn.cursor()
# print plt.style.available
plt.style.use('fivethirtyeight')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
834,
9800,
834,
796,
705,
11215,
415,
292,
6,
628,
198,
11748,
44161,
578,
18,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
1822,
29572,
198,
6738,
285,
... | 2.89375 | 160 |
import torch
def matrix_symmetry(matrix):
"""Checks the symmetry of the input matrix.
Parameters
----------
matrix: torch.FloatTensor
The matrix to check.
"""
if torch.eq(matrix, matrix.t()).all().item() == 0:
raise ValueError("The input matrix is not symmetrical.")
def pu_labels(labels):
"""Checks that the input labels correspond to a PU learning setting.
Parameters
----------
labels: torch.LongTensor
The label vector to check.
"""
if not labels.dim() == 1:
raise ValueError("Input tensor must have a dimension of 1.")
if not ((labels == 0) | (labels == 1)).nonzero().size(0) == labels.size(0):
raise ValueError("Input tensor contains value(s) different from 0 and 1.")
def adj_matrix_labels_dim(adj_matrix, labels):
"""Checks that the dimensions of the adjacency matrix and the labels vector
are compatible.
Parameters
----------
adj_matrix: torch.FloatTensor
The adjacency matrix to check.
labels: torch.LongTensor
The label vector to check.
"""
if not adj_matrix.size() == torch.Size([labels.size(0), labels.size(0)]):
raise ValueError(
"The dimensions of the adjacency matrix and the label vector are incompatible."
)
def features_labels_dim(features, labels):
"""Checks that the dimensions of the features matrix and the labels vector
are compatible.
Parameters
----------
features: torch.FloatTensor
The feature matrix to check.
labels: torch.LongTensor
The label vector to check.
"""
if not features.size() == torch.Size([labels.size(0), labels.size(0)]):
raise ValueError(
"The dimensions of the adjacency matrix and the label vector are incompatible."
)
| [
11748,
28034,
628,
198,
4299,
17593,
62,
1837,
3020,
11973,
7,
6759,
8609,
2599,
198,
220,
220,
220,
37227,
7376,
4657,
262,
40686,
286,
262,
5128,
17593,
13,
628,
220,
220,
220,
40117,
198,
220,
220,
220,
24200,
438,
198,
220,
220,
... | 2.772382 | 659 |
import datetime
import dateutil.parser
if __name__ == "__main__":
tests = (
"January 3, 2003", # a string
(5, "Oct", 55), # a tuple
"Thursday, November 18", # longer string without year
"7/24/04", # a string with slashes
"24-7-2004", # European-format string
{'date':"5-10-1955", "dayfirst":True}, # a dict including the kwarg
"5-10-1955", # dayfirst, no kwarg
19950317, # not a string
"11AM on the 11th day of 11th month, in the year of our Lord 1945",
)
for test in tests: # testing date formats
tryparse(test) # try to parse
| [
11748,
4818,
8079,
198,
11748,
3128,
22602,
13,
48610,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5254,
796,
357,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
21339,
513,
11... | 1.67803 | 528 |
from model.simulator import RunSingleTactic
from model.config_classes import SingleConfig
from plotting.paper_figs import CombinedModelPlot
# config_sing = SingleConfig(10, 10**(-3), 10**(-6), 1, 1, 0.5, 0.5)
config_sing = SingleConfig(10, None, None, 1, 1, 0.5, 0.5)
config_sing.load_saved = False
RR, RS, SR = (10**(-8), 10**(-3), 10**(-6))
config_sing.primary_inoculum = dict(
RR=RR,
RS=RS,
SR=SR,
SS=1 - RR - RS - SR
)
config_sing.add_string()
run_s = RunSingleTactic()
run_s.yield_stopper = 0
output = run_s.run(config_sing)
CombinedModelPlot(output, config_sing.config_string_img)
| [
6738,
2746,
13,
14323,
8927,
1330,
5660,
28008,
51,
12009,
198,
6738,
2746,
13,
11250,
62,
37724,
1330,
14206,
16934,
198,
6738,
29353,
13,
20189,
62,
5647,
82,
1330,
32028,
17633,
43328,
628,
198,
2,
4566,
62,
12215,
796,
14206,
16934,... | 2.47561 | 246 |
#!/usr/bin/env python
# coding: utf-8
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Finding Lane Lines on the Road**
# ***
# In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
#
# Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
#
# In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
#
# ---
# Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
#
# **Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
#
# ---
# **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
#
# ---
#
# <figure>
# <img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
# </figcaption>
# </figure>
# <p></p>
# <figure>
# <img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
# </figcaption>
# </figure>
# **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
# ## Import Packages
# In[1]:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Read in an Image
# In[2]:
#reading in an image
image = mpimg.imread('test_images/solidWhiteCurve.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
# ## Ideas for Lane Detection Pipeline
# **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
#
# `cv2.inRange()` for color selection
# `cv2.fillPoly()` for regions selection
# `cv2.line()` to draw lines on an image given endpoints
# `cv2.addWeighted()` to coadd / overlay two images
# `cv2.cvtColor()` to grayscale or change color
# `cv2.imwrite()` to output images to file
# `cv2.bitwise_and()` to apply a mask to an image
#
# **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
# ## Helper Functions
# Below are some helper functions to help get you started. They should look familiar from the lesson!
# In[24]:
import math
# Python 3 has support for cool math symbols.
# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**
# In[4]:
import os
os.listdir("test_images/")
# ## Build a Lane Finding Pipeline
#
#
# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# In[25]:
# then save them to the test_images_output directory.
## 1: Convert to grayscale
# Collapse the RGB frame to one intensity channel so edge detection works
# on brightness gradients only. `grayscale` (and the other helpers below)
# come from the notebook's helper-functions cell — TODO confirm signatures.
gray=grayscale(image)
plt.imshow(gray,cmap='gray')
## 2: Smooth
# Gaussian blur (kernel size 3) suppresses pixel noise before Canny,
# reducing spurious edge responses.
gray_smooth=gaussian_blur(gray,3)
plt.imshow(gray_smooth,cmap='gray')
## 3: Canny Edge Detection
#Variables: low/high hysteresis thresholds for Canny (1:2 ratio).
lowth=100
highth=200
#Apply function
edges=canny(gray_smooth,lowth,highth)
plt.imshow(edges, cmap='gray')
## 4: Create mask
#Vertices of mask: a trapezoid in (x, y) pixel coordinates sized for a
# 960x540 frame — bottom corners near the image base, top edge at y=320.
leftlow=[30,540]
rightlow=[930,540]
leftup=[460,320]
rightup=[510,320]
#Create and apply mask: keep only edges inside the trapezoidal region.
mask=np.array([[leftlow],[rightlow],[rightup],[leftup]])
area=region_of_interest(edges, [mask])
plt.imshow(area,cmap='gray')
## 5: Hough transformation
#Variables: rho/theta are the accumulator resolutions (pixels, radians);
# threshold is the minimum number of votes; min_line_len / max_line_gap
# control which segments are kept and joined.
rho=6
theta=np.pi/60
threshold=5
min_line_len=45
max_line_gap=60
#Apply function
HoughLines=hough_lines(area, rho, theta, threshold, min_line_len, max_line_gap)
plt.imshow(HoughLines)
#Final picture: overlay the detected lines on the original image
# (weights: 0.8 * original + 1.0 * lines + 0).
Final=weighted_img(HoughLines, image, α=0.8, β=1., γ=0.)
plt.imshow(Final)
# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# In[6]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# In[7]:
# Let's try the one with the solid white lane on the right first ...
# In[8]:
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
get_ipython().run_line_magic('time', 'white_clip.write_videofile(white_output, audio=False)')
# Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
# In[9]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# ## Improve the draw_lines() function
#
# **At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
#
# **Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
# Now for the one with the solid yellow lane on the left. This one's more tricky!
# In[10]:
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
get_ipython().run_line_magic('time', 'yellow_clip.write_videofile(yellow_output, audio=False)')
# In[11]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
# ## Writeup and Submission
#
# If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
#
# ## Optional Challenge
#
# Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
# In[12]:
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
get_ipython().run_line_magic('time', 'challenge_clip.write_videofile(challenge_output, audio=False)')
# In[13]:
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
12189,
12,
20564,
1075,
1879,
23164,
18008,
1098,
70,
631,
198,
2,
220,
198,
2,
220,
198,
2,
22492,
4935,
25,
12429,
36276,
15016... | 3.387691 | 3,477 |
#!/usr/bin/env python
"""
Convert from interval file to summary tree file. Default input file format is BED (0-based, half-open intervals).
usage: %prog <options> in_file out_file
-c, --chr-col: chromosome column, default=1
-s, --start-col: start column, default=2
-e, --end-col: end column, default=3
-t, --strand-col: strand column, default=6
-G, --gff: input is GFF format, meaning start and end coordinates are 1-based, closed interval
"""
from __future__ import division
import sys, fileinput, optparse
from galaxy import eggs
import pkg_resources; pkg_resources.require( "bx-python" )
from galaxy.visualization.tracks.summary import *
from bx.intervals.io import *
from galaxy.datatypes.util.gff_util import *
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
3103,
1851,
422,
16654,
2393,
284,
10638,
5509,
2393,
13,
15161,
5128,
2393,
5794,
318,
347,
1961,
357,
15,
12,
3106,
11,
2063,
12,
9654,
20016,
737,
198,
198,
26060,
... | 2.973282 | 262 |
import requests
import json
import pandas as pd
from configparser import ConfigParser
from .log import logger
from .exceptions import QualitubeException
config = ConfigParser()
config.read("config.ini")
API_KEY = config['credentials']['api_key']
class Videos:
"""
Wrapper Class to the YouTube Data API v3's `Videos` endpoint with
extra functionalities.
"""
| [
11748,
7007,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
4566,
48610,
1330,
17056,
46677,
198,
6738,
764,
6404,
1330,
49706,
198,
6738,
764,
1069,
11755,
1330,
9537,
270,
3266,
16922,
628,
198,
11250,
796,
17056,
... | 3.336283 | 113 |
import os
import pandas as pd, numpy as np
from astrobase.lcmath import find_lc_timegroups
from numpy import array as nparr
from astropy import units as u
from timmy.paths import DATADIR, RESULTSDIR
datestr = '20200624'
postpath = os.path.join(RESULTSDIR, 'radvel_fitting',
f'{datestr}_simple_planet',
'TOI837_derived.csv.tar.bz2')
df = pd.read_csv(postpath)
cutoff = 99.7 # 3-sigma
val = np.percentile(nparr(df.mpsini1), cutoff)*u.Mearth
print(f'Mpsini 3sigma (99.7th percentile): {val}')
print(f'Mpsini 3sigma (99.7th percentile): {val.to(u.Mjup)}')
postpath = os.path.join(RESULTSDIR, 'radvel_fitting',
f'{datestr}_simple_planet',
'TOI837_chains.csv.tar.bz2')
df = pd.read_csv(postpath)
val = np.percentile(nparr(df.logk1), cutoff)
print(f'logk1 3sigma (99.7th percentile): {val:.8f}')
| [
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
11,
299,
32152,
355,
45941,
198,
6738,
6468,
305,
8692,
13,
44601,
11018,
1330,
1064,
62,
44601,
62,
2435,
24432,
198,
6738,
299,
32152,
1330,
7177,
355,
299,
1845,
81,
198,
6738,
64... | 2.059908 | 434 |
#!/usr/bin/env python
from __future__ import division, unicode_literals
import os
import argparse
import math
import codecs
import torch
from itertools import count
import onmt.io
import onmt.translate
import onmt
import onmt.ModelConstructor
import onmt.modules
import opts
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.translate_opts(parser)
opt = parser.parse_args()
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
28000,
1098,
62,
17201,
874,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
10688,
198,
11748,
40481,
82,
198,
11748,
28034,
198,
1... | 2.955801 | 181 |
import numpy as np
from utils.util import sign
from utils.metrics.np_v import metric
| [
11748,
299,
32152,
355,
45941,
198,
6738,
3384,
4487,
13,
22602,
1330,
1051,
198,
6738,
3384,
4487,
13,
4164,
10466,
13,
37659,
62,
85,
1330,
18663,
628,
198
] | 3.107143 | 28 |
import sys, logging, os, random, math, open_color, arcade
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 30
SCREEN_TITLE = "Bullet exercise"
NUM_ENEMIES = 5
STARTING_LOCATION = (400,100)
BULLET_DAMAGE = 10
ENEMY_HP = 100
HIT_SCORE = 10
KILL_SCORE = 100
if __name__ == "__main__":
main() | [
11748,
25064,
11,
18931,
11,
28686,
11,
4738,
11,
10688,
11,
1280,
62,
8043,
11,
27210,
198,
198,
2,
9122,
284,
787,
1654,
356,
389,
2491,
262,
826,
2196,
286,
11361,
198,
9641,
796,
357,
18,
11,
22,
8,
198,
30493,
25064,
13,
9641... | 2.655303 | 264 |
from ..linalg import lu, svd, qr, eig
from numpy import random as _random, sqrt
from numpy.linalg import norm
from ..utils import _float, _svdCond, traceXTX, eig_flip, svd_flip
from ..random import uniform
# __all__ = ['randomizedSVD', 'randomizedEig']
def randomized_projection(X, k, solver = 'lu', max_iter = 4):
    """
    Project X onto k random vectors, then run a few rounds of a
    normalized subspace (power) iteration to obtain an orthonormal basis
    approximately spanning the range of X.

    Parameters
    ----------
    X : (n, p) matrix to sketch.
    k : number of basis vectors requested.
    solver : 'lu' (default, cheaper), 'qr' (more stable) or None
        (no re-normalization between iterations).
    max_iter : int or 'auto'. 'auto' picks 5 iterations when k is small
        relative to min(n, p), else 4.

    Returns
    -------
    Q : matrix with orthonormal columns (from a final QR of X @ Q).
    """
    n, p = X.shape
    if max_iter == 'auto':
        # From Modern Big Data Algorithms --> seems like <= 4 is enough.
        _min = n if n <= p else p
        max_iter = 5 if k < 0.1 * _min else 4

    # Random starting block: p x k uniform entries in [-5, 5), same dtype as X.
    Q = uniform(-5, 5, p, int(k), X.dtype)
    XT = X.T

    # Normalization applied between power iterations to keep the block
    # well-conditioned.
    if solver == 'qr':
        _solver = lambda x: qr(x, Q_only = True)
    elif solver is None:  # FIX: compare to None with `is`, not `==` (PEP 8)
        _solver = lambda x: x
    else:
        # Default ('lu' or anything else): cheap LU-based normalization,
        # matching the original behavior.
        _solver = lambda x: lu(x, L_only = True)

    for __ in range(max_iter):
        Q = _solver(XT @ _solver(X @ Q))
    return qr(X @ Q, Q_only = True)
def randomizedSVD(X, n_components = 2, max_iter = 'auto', solver = 'lu', n_oversamples = 10):
    """
    Approximate truncated SVD via random projection.

    Sketches X with `randomized_projection`, takes the exact SVD of the
    small projected matrix B = Q^T X, and lifts the left singular vectors
    back with Q. Oversampling (default 10 extra components, following
    Sklearn's convention) improves the accuracy of the leading triplets.

    Returns (U, S, VT) truncated to `n_components`.

    References
    ----------
    * Halko et al., 2009, "Finding structure with randomness",
      http://arxiv.org/abs/arXiv:0909.4061
    * Sklearn's RandomizedSVD.
    """
    rows, cols = X.shape
    flipped = rows < cols
    # Work with the tall orientation; undo the flip at the end.
    A = X.T if flipped else X
    A = _float(A)

    # Orthonormal basis approximating the range of A.
    Q = randomized_projection(A, n_components + n_oversamples, solver, max_iter)

    # Exact SVD of the small projected matrix.
    U, S, VT = svd(Q.T @ A, U_decision = flipped, transpose = True)
    U = Q @ U  # lift back to the original space
    if flipped:
        U, VT = VT.T, U.T
    return U[:, :n_components], S[:n_components], VT[:n_components, :]
def randomizedEig(X, n_components = 2, max_iter = 'auto', solver = 'lu', n_oversamples = 10):
    """
    Approximate truncated eigendecomposition built on the randomized
    range finder. Avoids computing U when it is not needed: for a wide
    input the eigenvalues are recovered as squared singular values of
    the projected matrix; otherwise a direct eigendecomposition is used.

    Returns (eigenvalues, eigenvectors) truncated to `n_components`.

    References
    ----------
    * Halko et al., 2009, "Finding structure with randomness",
      http://arxiv.org/abs/arXiv:0909.4061
    * Sklearn's RandomizedSVD.
    """
    rows, cols = X.shape
    flipped = rows < cols
    # Reorient to the tall case and promote to float once.
    A = _float(X.T if flipped else X)

    Q = randomized_projection(A, n_components + n_oversamples, solver, max_iter)
    small = Q.T @ A
    if flipped:
        # SVD route: eigenvalues are the squared singular values.
        V, S2, __ = svd(small, U_decision = flipped, transpose = True)
        V = Q @ V
        S2 **= 2
    else:
        S2, V = eig(small, U_decision = flipped)
    return S2[:n_components], V[:, :n_components]
def randomizedPinv(X, n_components = None, alpha = None):
    """
    Fast randomized pseudoinverse with optional regularization; can be
    used as an approximation to the matrix inverse.

    Parameters
    ----------
    X : matrix to (pseudo-)invert.
    n_components : rank of the low-rank approximation. Defaults to a
        heuristic of approx sqrt(p) - 1 components (not guaranteed to
        be optimal), clamped to at least 1.
    alpha : non-negative regularization strength; None means 0.

    Returns
    -------
    The approximate pseudoinverse VT.T * S @ U.T from the conditioned
    randomized SVD.
    """
    if alpha is not None:  # FIX: `is not None`, never `!= None` (PEP 8)
        assert alpha >= 0
    else:
        alpha = 0
    if n_components is None:  # FIX: identity comparison with None
        # Heuristic: approx sqrt(p) - 1 components; clamp to at least 1.
        k = int(sqrt(X.shape[1])) - 1
        if k <= 0: k = 1
    else:
        k = int(n_components) if n_components > 0 else 1
    X = _float(X)
    # BUG FIX: the original passed `n_components` (possibly None) straight
    # to randomizedSVD, which both ignored the computed heuristic `k` and
    # raised TypeError (None + n_oversamples) when n_components was None.
    U, S, VT = randomizedSVD(X, k)
    U, S, VT = _svdCond(U, S, VT, alpha)
    return VT.T * S @ U.T
| [
198,
6738,
11485,
75,
1292,
70,
1330,
300,
84,
11,
264,
20306,
11,
10662,
81,
11,
304,
328,
198,
6738,
299,
32152,
1330,
4738,
355,
4808,
25120,
11,
19862,
17034,
198,
6738,
299,
32152,
13,
75,
1292,
70,
1330,
2593,
198,
6738,
11485... | 2.733125 | 1,600 |
#This code attempts to do a proof of principle calculation that our idea works in high dimensions
#To gain some intuition as to what is going on, we will do things in two dimensions first so that we can plot everything
#Import standard python packages:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
#import plotting packages
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
plt.clf()
#declare simulation paramaters:
#basal proliferation rate
rho = 1.0
#death rate
b = 1.0
#the number of Tregs
#set to 2 for this code, do not change
Num_treg = 2
#the number of Tcells
Num_tcell = 100
#the number of sites
Num_sites = 250
#the Treg-antigen site binding strength
c = 1.0
#the variance in Treg-antigen binding around zero
sigma_c = 0.0001
#the variance in Treg-antigen binding around c
sigma_cp = 0.0001
pval_cell = 0.1
pval_reg = 0.1
#pval_reg * Num_treg > 1
print(pval_reg * Num_sites )
#generate a Treg-Tcell graph
#######################################
#the vx densities
max_v = 1.0
vx = max_v * np.ones(Num_sites) + np.random.uniform(-max_v/2,max_v/2,Num_sites)
#layer 1 is the conectivity of the Tcells and antigen sequences
layer1 = np.zeros([Num_tcell,Num_sites])
layer1 = np.random.choice( [0,1] , size= (Num_tcell,Num_sites), replace = True , p= [1- pval_cell,pval_cell])
#each site needs at least one t cell to bind to it
for i in range(Num_sites):
if ( np.sum(layer1[:,i]) <= 0 ):
num = np.random.randint(0,Num_tcell)
layer1[num,i] = 1
#each tcell should bind to at least site
for i in range(Num_tcell):
if ( np.sum( layer1[i,:] <= 0 ) ):
num = np.random.randint(0,Num_sites)
layer1[ i , num ] = 1
#layer 2 is the connectivity of the binding of the antigen sites to the number of Tregs
layer2 = np.zeros([Num_sites,Num_treg])
layer2 = (c + np.random.normal(0,sigma_cp,(Num_sites,Num_treg) ) )* np.random.choice( [0,1] , size = (Num_sites , Num_treg), replace = True , p=[1-pval_reg, pval_reg])
#make sure that all of the elements have probability that is less than 1
for i in range(Num_sites):
for j in range(Num_treg):
if ( layer2[i,j] > 1):
layer2[i,j] = 1
#add some noise to each site, still make sure each element is less than 1
for i in range(Num_sites):
for j in range(Num_treg):
if ( layer2[i,j] == 0 ):
val = np.abs( np.random.normal(0,sigma_c ) )
layer2[i,j] = np.random.uniform(0,1)
if (val < 1):
layer2[i,j] = val
#add some noise around c to each site
for i in range(Num_sites):
if ( np.sum(layer2[i,:]) <= c - 0.01 ):
num = np.random.randint(0,Num_treg)
layer2[i,num] = c + np.random.normal(0,sigma_cp )
#compute the mean binding, this will be used when we look at how well our approximation works
mean_reg = np.zeros([Num_treg])
for i in range(Num_treg):
mean_reg[i] = np.sum( layer1[i,:] * layer2[:,i] )
# Compute the purely graphical overlap between each T cell and each Treg:
# the number of antigen sites that both bind, where a binding entry is
# treated as "present" when it exceeds c/10.
connectivity_count = np.zeros([Num_tcell , Num_treg])
for i in range(Num_tcell):
    for j in range(Num_treg):
        val = 0
        for k in range(Num_sites):
            if ( layer1[i,k] > c/10 and layer2[k,j] > c/10 ):
                val = val + 1
        # BUG FIX: the original assigned the constant 1 here, discarding
        # the computed count; store the actual number of shared sites.
        connectivity_count[i,j] = val
#now compute the r_{i} and \phi quantities
######################################################################
#compute the matrix overlaps
phi_reg_reg = np.zeros([Num_treg,Num_treg])
for i in range(Num_treg):
for j in range(Num_treg):
phi_reg_reg[i,j] = np.sum( vx[:]*layer2[:,i]*layer2[:,j] )
#compute the matrix overlaps
phi_cell_reg = np.zeros([Num_tcell,Num_treg])
for i in range(Num_tcell):
for j in range(Num_treg):
phi_cell_reg[i,j] = np.sum( vx[:]*layer1[i,:]*layer2[:,j] )
rvals = np.zeros([Num_tcell])
for i in range(Num_tcell):
rvals[i] = np.sum( vx[:]*layer1[i,:] )
####################################################################################
#compute the Treg steady state and active set of constraints
#QP is done with CVXOPT packages
from cvxopt import matrix, solvers
import numpy as np
solvers.options['show_progress'] = False
#Set up the quadratic part of QP matrix
Qmat = np.zeros([ Num_treg , Num_treg ])
for i in range(Num_treg):
for j in range(Num_treg):
Qmat[i,j] = phi_reg_reg[i,j]
#Convert to CVXOPT format
Q = matrix(Qmat)
p = np.zeros(Num_treg)
p = matrix(p)
G = np.zeros([Num_tcell + Num_treg , Num_treg ])
for i in range(Num_tcell):
for j in range(Num_treg):
G[i,j] = -1.0* phi_cell_reg[i,j] * (rvals[i]**(-1.0) )
#enforce positivity
for i in range(Num_tcell, Num_tcell + Num_treg):
G[i, i - Num_tcell - Num_treg ] = -1.0
G = matrix(G)
# Right-hand side of the inequality constraints G x <= h:
# the first Num_tcell entries carry the proliferation bound (-rho); the
# remaining Num_treg entries (positivity constraints) stay at 0.
h = np.zeros([Num_tcell + Num_treg])
for i in range(Num_tcell):
    h[i] = -1.0 * (rho)
# BUG FIX: the original loop used range(Num_tcell, Num_treg), which is
# empty whenever Num_treg <= Num_tcell, so the positivity rows were never
# visited. Behavior is unchanged because h is zero-initialized, but the
# range now matches the intent.
for i in range(Num_tcell, Num_tcell + Num_treg):
    h[i] = 0.0
h = matrix(h)
sol = solvers.qp(Q, p, G, h)
#the QP solution, this is Treg part
vals = np.array( sol['x'] )
#the dual varibles
dual = np.array( sol['z'] )
dual = dual[0:Num_tcell]
Treg = vals[:,0]
#compute the distance to the specialist point
distance = np.sqrt( np.sum( (Treg - (rho/c)*np.ones(Num_treg) )**2 ) )
print('distance')
print(distance)
#now plot what the constrains look like
##########################################################################################################################################
import matplotlib.pyplot as plt
#import ploting packages
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
plt.clf()
fontsize1 = 22
#plt.xlabel("$ c' / c $", fontsize = fontsize1 + 10)
#plt.ylabel("$ \\textrm{ Fraction } \\lambda_{i} \\textrm{ with } \\frac{ d \\lambda_{i} }{ dt } > 0 $" ,fontsize=fontsize1 + 10)
graph_space = np.linspace(0, 5*rho/c, 200)
for i in range(Num_tcell):
if( phi_cell_reg[i,0] != 0 ):
plt.plot( graph_space, (rvals[i]*rho - phi_cell_reg[i,1] * graph_space)/(phi_cell_reg[i,0]), 'b' )
if( phi_cell_reg[i,0] == 0 ):
plt.axvline(x=rho/c , color = 'b')
plt.grid()
plt.plot(Treg[1],Treg[0],'*',markersize = 25,markeredgecolor='k', markerfacecolor='g' )
plt.ylim(0,5)
plt.xlim(0,5)
plt.tight_layout()
plt.show()
plt.clf()
##########################################################################################################################################
#Now, consider the addition of new antigen sites
#the number of new antigen sites introduced, ie number of 'testing' samples
N_trials = 200
#the space of Treg- antigen site binding
cspace = np.linspace( 0 , 1 , 5 )
#count the accuracy, number of tcells binding
count_pos_test = np.zeros([ len(cspace) , N_trials ])
for cpoint in range(len(cspace)):
#the testing val
cnew = c*cspace[cpoint]
for run in range(N_trials):
#the value of the new site density,
#in this case density is very low compared to prexisting sites
vx_mag = 1/4.0
#######################################################################################################################################
#add new site
vx_new = np.zeros(Num_sites + 1)
vx_new[0:Num_sites] = vx
vx_new[Num_sites] = vx_mag
#the new Tcell- antigen site binding
new_layer1 = np.zeros([Num_tcell,Num_sites + 1])
new_layer1[:,0:Num_sites] = layer1
new_layer1[:,Num_sites] = np.random.choice( [0,1] , size = (Num_tcell) , replace = True , p = [(1 - pval_cell ), pval_cell ])
cnew = 1
#the new Treg-antigen site binding
new_layer2 = np.zeros( [ Num_sites + 1 , Num_treg ] )
new_layer2[0:Num_sites,:] = layer2
new_layer2[Num_sites, :] = cnew * np.random.choice( [0,1] , size = (Num_treg) , replace = True , p = [( 1 - pval_reg ), pval_reg ])
print(layer2)
print(new_layer2)
print( np.shape( new_layer2 ) )
print( new_layer2[Num_sites, :] )
quit()
#now compute the new r_{i} and matrix elements
################################################################
for i in range(Num_treg):
for j in range(Num_treg):
phi_reg_reg[i,j] = np.sum( vx_new[:]*new_layer2[:,i]*new_layer2[:,j] )
#compute the matrix overlaps
phi_cell_reg = np.zeros([Num_tcell,Num_treg])
for i in range(Num_tcell):
for j in range(Num_treg):
phi_cell_reg[i,j] = np.sum( vx_new[:]*new_layer1[i,:]*new_layer2[:,j] )
rvals = np.zeros([Num_tcell])
for i in range(Num_tcell):
rvals[i] = np.sum( vx_new[:]*new_layer1[i,:] )
#compute which have positive growth rates
count = 0
for i in range(Num_tcell):
val = ( rvals[i]*rho - np.sum( phi_cell_reg[i,:].dot( Treg[:] ) ) )
if(val > 0):
count = count + 1
count_pos_test[cpoint,run] = count
#the mean and error of number of tcells with positve growth rate
avg_test_low = np.zeros([len(cspace)])
test_low_err = np.zeros( [len(cspace)] )
for i in range(len(cspace)):
avg_test_low[i] = np.sum(count_pos_test[i,:])/N_trials
test_low_err[i] = np.sum( count_pos_test[i,:]**2 )/N_trials - ( np.sum( count_pos_test[i,:] ) / N_trials )**2
test_low_err = np.sqrt(test_low_err)
#start again, now for a new v_{x} that is the same as mean of the prexisting graph
#########################################################################################################
count_pos_test = np.zeros([ len(cspace) , N_trials ])
for cpoint in range(len(cspace)):
#the testing val
cnew = c*cspace[cpoint]
for run in range(N_trials):
vx_mag = 1.0
#######################################################################################################################################
#add new site
vx_new = np.zeros(Num_sites + 1)
vx_new[0:Num_sites] = vx
vx_new[Num_sites] = vx_mag
new_layer1 = np.zeros([Num_tcell,Num_sites + 1])
new_layer1[:,0:Num_sites] = layer1
new_layer1[:,Num_sites] = np.random.choice( [0,1] , size = (Num_tcell) , replace = True , p = [(1-pval_cell ), pval_cell ])
new_layer2 = np.zeros([Num_sites+1,Num_treg])
new_layer2[0:Num_sites,:] = layer2
new_layer2[Num_sites, :] = cnew * np.random.choice( [0,1] , size = (Num_treg) , replace = True , p = [(1 - pval_reg ), pval_reg ])
for i in range(Num_treg):
for j in range(Num_treg):
phi_reg_reg[i,j] = np.sum( vx_new[:]*new_layer2[:,i]*new_layer2[:,j] )
#compute the matrix overlaps
phi_cell_reg = np.zeros([Num_tcell,Num_treg])
for i in range(Num_tcell):
for j in range(Num_treg):
phi_cell_reg[i,j] = np.sum( vx_new[:]*new_layer1[i,:]*new_layer2[:,j] )
rvals = np.zeros([Num_tcell])
for i in range(Num_tcell):
rvals[i] = np.sum( vx_new[:]*new_layer1[i,:] )
#compute which have positive growth rates
count = 0
for i in range(Num_tcell):
val = ( rvals[i]*rho - np.sum( phi_cell_reg[i,:].dot( Treg[:] ) ) )
if(val > 0):
count = count + 1
count_pos_test[cpoint,run] = count
avg_test_med = np.zeros([len(cspace)])
test_med_err = np.zeros( [len(cspace)] )
for i in range(len(cspace)):
avg_test_med[i] = np.sum(count_pos_test[i,:])/N_trials
test_med_err[i] = np.sum( count_pos_test[i,:]**2 )/N_trials - ( np.sum( count_pos_test[i,:] ) / N_trials )**2
test_med_err = np.sqrt( test_med_err )
#start again
#########################################################################################################
count_pos_train = np.zeros([ len(cspace) , N_trials ])
count_pos_test = np.zeros([ len(cspace) , N_trials ])
for cpoint in range(len(cspace)):
#the testing val
cnew = c*cspace[cpoint]
for run in range(N_trials):
vx_mag = 4.0
#######################################################################################################################################
#add new site
vx_new = np.zeros(Num_sites + 1)
vx_new[0:Num_sites] = vx
vx_new[Num_sites] = vx_mag
new_layer1 = np.zeros([Num_tcell,Num_sites + 1])
new_layer1[:,0:Num_sites] = layer1
new_layer1[:,Num_sites] = np.random.choice( [0,1] , size = (Num_tcell) , replace = True , p = [(1-pval_cell ), pval_cell ])
new_layer2 = np.zeros([Num_sites+1,Num_treg])
new_layer2[0:Num_sites,:] = layer2
new_layer2[Num_sites, :] = cnew * np.random.choice( [0,1] , size = (Num_treg) , replace = True , p = [(1- pval_reg ), pval_reg ])
for i in range(Num_treg):
for j in range(Num_treg):
phi_reg_reg[i,j] = np.sum( vx_new[:]*new_layer2[:,i]*new_layer2[:,j] )
#compute the matrix overlaps
phi_cell_reg = np.zeros([Num_tcell,Num_treg])
for i in range(Num_tcell):
for j in range(Num_treg):
phi_cell_reg[i,j] = np.sum( vx_new[:]*new_layer1[i,:]*new_layer2[:,j] )
rvals = np.zeros([Num_tcell])
for i in range(Num_tcell):
rvals[i] = np.sum( vx_new[:]*new_layer1[i,:] )
#compute which have positive growth rates
count = 0
for i in range(Num_tcell):
val = ( rvals[i]*rho - np.sum( phi_cell_reg[i,:].dot( Treg[:] ) ) )
if(val > 0):
count = count + 1
count_pos_test[cpoint,run] = count
#print(count)
avg_test_high = np.zeros([len(cspace)])
test_high_err = np.zeros( [len(cspace)] )
for i in range(len(cspace)):
avg_test_high[i] = np.sum(count_pos_test[i,:])/N_trials
test_high_err[i] = np.sum( count_pos_test[i,:]**2 )/N_trials - ( np.sum( count_pos_test[i,:] ) / N_trials )**2
test_high_err = np.sqrt( test_high_err )
# plt.plot( cspace, avg_test_high / Num_tcell , '-.', label="$ v_{x} ="+str(3.0)+"$", color = 'r')
# plt.plot( cspace, avg_test_med / Num_tcell , '-.', label="$ v_{x} ="+str(1.0)+"$" , color = 'g')
# plt.plot( cspace, avg_test_low / Num_tcell , '-.', label="$ v_{x} ="+str(0.3)+"$", color = 'b')
plt.errorbar( cspace , avg_test_high , yerr = test_high_err, label="$ v_{x} ="+str(4.0)+"$", color = 'r' ,fmt = 'o' ,linestyle='dashed')
plt.errorbar( cspace , avg_test_med , yerr = test_med_err, label="$ v_{x} ="+str(1.0)+"$", color = 'g' , fmt = 'o', linestyle='dashed')
plt.errorbar( cspace , avg_test_low , yerr = test_low_err, label="$ v_{x} ="+str(0.25)+"$", color = 'b' , fmt = 'o', linestyle='dashed')
plt.grid()
#plt.ylim(0,.14)
plt.legend( prop={'size': 21})
plt.tight_layout()
plt.savefig("Principle")
plt.show()
plt.clf()
##########################################################################################################################################
import matplotlib.pyplot as plt
#import ploting packages
import os
os.environ["PATH"] += ':/usr/local/texlive/2015/bin/x86_64-darwin'
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.tick_params(labelsize=16)
plt.clf()
fontsize1 = 22
plt.xlabel("$ c' / c $", fontsize = fontsize1 + 10)
plt.ylabel("$ \\textrm{ Fraction } \\lambda_{i} \\textrm{ with } \\frac{ d \\lambda_{i} }{ dt } > 0 $" ,fontsize=fontsize1 + 10)
plt.grid()
graph_space = np.linspace(0, 5*rho/c, 200)
for i in range(Num_tcell):
if( phi_cell_reg[i,0] != 0 ):
plt.plot( graph_space, (rvals[i]*rho - phi_cell_reg[i,1] * graph_space)/(phi_cell_reg[i,0]), 'b' )
if( phi_cell_reg[i,0] == 0 ):
plt.axvline(x=rho/c , color = 'b')
plt.plot(Treg[1],Treg[0],'*',markersize = 25,markeredgecolor='k', markerfacecolor='g' )
#plt.legend()
plt.ylim(0,5)
plt.xlim(0,5)
plt.grid()
plt.tight_layout()
plt.show()
plt.clf() | [
2,
1212,
2438,
6370,
284,
466,
257,
6617,
286,
7989,
2386,
1229,
341,
326,
674,
2126,
2499,
287,
1029,
15225,
198,
2,
2514,
4461,
617,
493,
653,
355,
284,
644,
318,
1016,
319,
11,
356,
481,
466,
1243,
287,
734,
15225,
717,
523,
32... | 2.367681 | 6,473 |
from typing import List
from tortoise.contrib.pydantic import pydantic_model_creator
from api.database.schemas import Pokemon
# Base Pokemon Pydantic Model created from the Tortoise-ORM Model.
PokemonBase = pydantic_model_creator(Pokemon, name="PokemonIn", exclude_readonly=True)
# Outgoing Pokemon Pydantic Model created from the Tortoise-ORM Model.
PokemonOut = pydantic_model_creator(Pokemon, name="Pokemon")
class PokemonIn(PokemonBase):
"""Incoming Pokemon Pydantic Model."""
types_ids: List[int]
moves_ids: List[int]
class PokemonUpdate(PokemonBase):
"""Update Pokemon Pydantic Model."""
types_to_add: List[int]
moves_to_add: List[int]
types_to_remove: List[int]
moves_to_remove: List[int]
| [
6738,
19720,
1330,
7343,
198,
198,
6738,
7619,
25678,
13,
3642,
822,
13,
79,
5173,
5109,
1330,
279,
5173,
5109,
62,
19849,
62,
45382,
198,
198,
6738,
40391,
13,
48806,
13,
1416,
4411,
292,
1330,
14878,
628,
198,
2,
7308,
14878,
350,
... | 3.02459 | 244 |
from Util import Analyzer, Plot, APlot
from QueueBase import Queue
from multiprocesser import Process
import random
import numpy
if __name__ == '__main__':
p = Process(8)
fin = p.Exec(main, [0.01 * i + 0.3 for i in range(170)])
# print(fin)
plt = APlot(fin)
plt.show()
# main(0.625)
| [
6738,
7273,
346,
1330,
16213,
9107,
11,
28114,
11,
3486,
26487,
198,
6738,
4670,
518,
14881,
1330,
4670,
518,
198,
6738,
18540,
305,
919,
263,
1330,
10854,
198,
11748,
4738,
198,
11748,
299,
32152,
628,
628,
198,
198,
361,
11593,
3672,
... | 2.4 | 130 |
# Copyright 2021 Seb Seager
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import qiskit
from qiskit.quantum_info import Statevector
import streamlit as st
# change article.text to whatever the API defines
# encoding step
@st.cache()
if __name__ == "__main__":
main()
| [
2,
15069,
33448,
20639,
1001,
3536,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
... | 3.622407 | 241 |
from rest_framework import routers
from .views import SourceModelViewset
app_name = "geosource"
router = routers.SimpleRouter()
router.register(r"", SourceModelViewset, basename="geosource")
urlpatterns = router.urls
| [
6738,
1334,
62,
30604,
1330,
41144,
198,
198,
6738,
764,
33571,
1330,
8090,
17633,
7680,
2617,
198,
198,
1324,
62,
3672,
796,
366,
469,
418,
1668,
1,
198,
198,
472,
353,
796,
41144,
13,
26437,
49,
39605,
3419,
198,
198,
472,
353,
13... | 3.126761 | 71 |
from typing import Callable
import concurrent.futures as futures
import enum
import threading
import pyservices.generated.eventstore.service_pb2 as es
import pyservices.generated.eventstore.event_pb2 as esdata
import pyservices.generated.eventstore.service_pb2_grpc as esgrpc
import pyservices.generated.consumerstore.service_pb2_grpc as consumergrpc
import pyservices.generated.consumerstore.service_pb2 as consumerdata
import grpc
ConsumerCB = Callable[[esdata.AppLogEntry], None]
| [
6738,
19720,
1330,
4889,
540,
198,
11748,
24580,
13,
69,
315,
942,
355,
25650,
198,
11748,
33829,
198,
11748,
4704,
278,
198,
11748,
279,
893,
712,
1063,
13,
27568,
13,
15596,
8095,
13,
15271,
62,
40842,
17,
355,
1658,
198,
11748,
279... | 3.335616 | 146 |
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.edit import CreateView
from django.urls import reverse
from django.shortcuts import render
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from .signupForm import SignupForm
from ...models.user import CustomUser
from ...tokens import account_activation_token
| [
6738,
42625,
14208,
13,
3642,
822,
13,
37348,
1095,
13,
33571,
1330,
16282,
12837,
35608,
259,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
13,
19312,
1330,
13610,
7680,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42... | 3.576577 | 111 |
import matplotlib as mpl
font = {'family' : 'serif',
'size' : 16}
mpl.rc('font', **font)
import numpy as np
radii = np.fromfile('radii.dat', dtype=float, count=-1, sep='')
forces = np.fromfile('forces.dat', dtype=float, count=-1, sep='')
import matplotlib.pyplot as plt
plt.figure()
r = np.arange(min(radii), 10, 1)
plt.scatter(radii, forces, label='$a_\mathrm{data}(r)$')
plt.plot(r, f(r), label='$a(r) = 2/r$')
plt.axvline(1.0/256, label='$L/N_\mathrm{grid}$', color = 'r')
plt.xlabel("$r$",fontsize=20)
plt.ylabel("$a(r)$",fontsize=20)
plt.yscale('log')
plt.xscale('log')
plt.xlim(min(radii), 1.1 * max(radii))
plt.legend(loc='upper right')
plt.savefig('force-law.pdf', bbox_inches='tight', transparent=True)
| [
11748,
2603,
29487,
8019,
355,
285,
489,
198,
10331,
796,
1391,
6,
17989,
6,
1058,
705,
2655,
361,
3256,
198,
220,
220,
220,
220,
220,
220,
220,
705,
7857,
6,
220,
220,
1058,
1467,
92,
198,
76,
489,
13,
6015,
10786,
10331,
3256,
1... | 2.128655 | 342 |
import random
import torch
from torchsegmentor.operator.base_operator import BaseOperator
from torchsegmentor.model.pspnet import make_pspnet
| [
11748,
4738,
198,
11748,
28034,
198,
6738,
28034,
325,
5154,
273,
13,
46616,
13,
8692,
62,
46616,
1330,
7308,
18843,
1352,
198,
198,
6738,
28034,
325,
5154,
273,
13,
19849,
13,
862,
79,
3262,
1330,
787,
62,
862,
79,
3262,
628
] | 3.512195 | 41 |
from abc import ABC
| [
6738,
450,
66,
1330,
9738,
628
] | 3.5 | 6 |
from .exceptions import NotSignedInError
from .endpoint import Sites, Views, Users, Groups, Workbooks, Datasources, Projects, Auth, Schedules, ServerInfo
import requests
| [
6738,
764,
1069,
11755,
1330,
1892,
50,
3916,
818,
12331,
198,
6738,
764,
437,
4122,
1330,
37034,
11,
29978,
11,
18987,
11,
27441,
11,
5521,
12106,
11,
16092,
292,
2203,
11,
29898,
11,
26828,
11,
27774,
5028,
11,
9652,
12360,
198,
198... | 3.822222 | 45 |
# coding: utf-8
import numpy as np
import pandas as pd
import scipy.misc as spm
import os, sys
import posterior_predictive_inferences as ppi
import encode_decode as edcode
if __name__=='__main__':
ngram_path = sys.argv[1]
n = int(ngram_path.split('gram')[0][-1])
result_dir = os.path.split(ngram_path)[0]
hdf5_path = os.path.join(result_dir, 'variational_parameters.h5')
df_ngram = pd.read_csv(ngram_path)
df_stick = pd.read_hdf(hdf5_path, key='/sublex/stick')
log_assignment_probs = ppi.get_log_assignment_probs(df_stick)
df_code = pd.read_csv(os.path.join(result_dir, 'symbol_coding.csv'), encoding='utf-8')
encoder,decoder = edcode.df2coder(df_code)
data_path = sys.argv[2]
df_data = pd.read_csv(data_path, encoding='utf-8', sep='\t').fillna('')
# prefixes = edcode.encode_data(df_data.prefix, encoder, add_end_symbol = False)
# targets = edcode.encode_data(df_data.target_c + ',' + df_data.target_v, encoder, add_end_symbol = False)
# suffixes = [(encoder['END'],)]*df_data.shape[0]
# inventory = [code for value,code in encoder.iteritems() if not value in ['END', 'START']]
# start_code = encoder['START']
words = edcode.encode_data(df_data.word, encoder)
start_code = encoder['START']
# log_probs = ppi.get_log_posterior_predict_prob_of_target(prefixes, targets, suffixes, df_ngram, log_assignment_probs, n, start_code, inventory)
unnormalized_log_probs = ppi.get_unnormalized_log_posterior_predict_prob_of_target(words, df_ngram, log_assignment_probs, n, start_code)
# df_data['log_prob_target'] = log_probs
df_data['unnormalized_log_prob_target'] = unnormalized_log_probs
normalize_over_prefix(df_data, 'unnormalized_log_prob_target')
# classification_probs = ppi.posterior_predict_classification(words, df_ngram, log_assignment_probs, n, start_code)
# for sublex_id, class_probs in enumerate(classification_probs.T):
# df_data.loc[:,'sublex_%i' % sublex_id] = class_probs
datafile_root = os.path.splitext(os.path.split(data_path)[1])[0]
ratio_filename = datafile_root+'_log-posterior-predictive-prob_target.tsv'
df_data.to_csv(os.path.join(result_dir, ratio_filename), index=False, sep='\t', encoding='utf-8') | [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
629,
541,
88,
13,
44374,
355,
599,
76,
198,
11748,
28686,
11,
25064,
198,
11748,
34319,
62,
79,
17407,
425,
... | 2.430976 | 891 |
from import1b import *
print(var)
| [
6738,
1330,
16,
65,
1330,
1635,
198,
4798,
7,
7785,
8,
198
] | 2.833333 | 12 |
from django.urls import path
from .views.registroUserView import registroUserView
from .views.detalleUserView import detalleUserView
from .views.ListaInmueblesView import ListaInmueblesView
from .views.CrearInmuebleView import CrearInmuebleView
from .views.EliminarInmuebleView import EliminarInmuebleView
from .views.ListarInmublesHostView import ListarInmueblesHostView
from .views.DetalleInmuebleView import DetalleInmueble
from .views.ActualizarInmuebleView import ActualizarInmuebleView
urlpatterns = [
path('usuario/registro/', registroUserView.as_view()),
path('usuario/detalle-usuario/', detalleUserView.as_view()),
path('lista-inmuebles/', ListaInmueblesView.as_view()),
path('crear-inmueble/', CrearInmuebleView.as_view()),
path('eliminar-inmueble/<int:pk>/', EliminarInmuebleView.as_view()),
path('lista-inmuebles-host/', ListarInmueblesHostView.as_view()),
path('inmueble/<slug:url_id>/', DetalleInmueble.as_view()),
path('lista-inmuebles-host/modificar/<int:inmueble_id>/', ActualizarInmuebleView.as_view())
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
13,
2301,
396,
305,
12982,
7680,
1330,
4214,
305,
12982,
7680,
198,
6738,
764,
33571,
13,
15255,
6765,
12982,
7680,
1330,
1062,
6765,
12982,
7680,
198,
6738,
764,
3357... | 2.475524 | 429 |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
| [
198,
37811,
198,
220,
220,
220,
770,
2438,
373,
7560,
416,
6127,
27624,
13,
220,
220,
220,
220,
220,
628,
220,
220,
220,
19179,
284,
428,
2393,
743,
2728,
11491,
4069,
290,
481,
307,
2626,
611,
198,
220,
220,
220,
262,
2438,
318,
... | 2.951613 | 124 |