content stringlengths 1 1.05M | input_ids listlengths 1 883k | ratio_char_token float64 1 22.9 | token_count int64 1 883k |
|---|---|---|---|
#
# Copyright (C) 2009 Mendix. All rights reserved.
#
SUCCESS = 0
# Starting the Mendix Runtime can fail in both a temporary or permanent way.
# Some of the errors can be fixed with some help of the user.
#
# The default m2ee cli program will only handle a few of these cases, by
# providing additional hints or interactive choices to fix the situation and
# will default to echoing back the error message received from the runtime.
# Database to be used does not exist
start_NO_EXISTING_DB = 2
# Database structure is out of sync with the application domain model, DDL
# commands need to be run to synchronize the database.
start_INVALID_DB_STRUCTURE = 3
# Constant definitions used in the application model are missing from the
# configuration.
start_MISSING_MF_CONSTANT = 4
# In the application database, a user account was detected which has the
# administrative role (as specified in the modeler) and has password '1'.
start_ADMIN_1 = 5
# ...
start_INVALID_STATE = 6
start_MISSING_DTAP = 7
start_MISSING_BASEPATH = 8
start_MISSING_RUNTIMEPATH = 9
start_INVALID_LICENSE = 10
start_SECURITY_DISABLED = 11
start_STARTUP_ACTION_FAILED = 12
start_NO_MOBILE_IN_LICENSE = 13
check_health_INVALID_STATE = 2
| [
2,
198,
2,
15069,
357,
34,
8,
3717,
337,
19573,
13,
1439,
2489,
10395,
13,
198,
2,
198,
198,
12564,
4093,
7597,
796,
657,
198,
198,
2,
17962,
262,
337,
19573,
43160,
460,
2038,
287,
1111,
257,
8584,
393,
7748,
835,
13,
198,
2,
2... | 3.284553 | 369 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.utils.data
import torchvision
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
6923,
33876,
198,
11748,
28034,
13,
26791,
13,
7890,
198,
11748,
28034,
10178,
628,
628
] | 4.034483 | 29 |
# dkhomeleague.py
import json
import logging
import os
from string import ascii_uppercase
import pandas as pd
from requests_html import HTMLSession
import browser_cookie3
import pdsheet
def contest_leaderboard(self, contest_id):
"""Gets contest leaderboard"""
url = self.api_url + f'scores/v1/megacontests/{contest_id}/leaderboard'
params = self._embed_params('leaderboard')
return self.get_json(url, params=params)
def contest_lineup(self, draftgroup_id, entry_key):
"""Gets contest lineup
Args:
draftgroup_id (int): the draftgroupId
entry_key (int): the id for the user's entry into the contest
can find entryKey in the leaderboard resource
Returns:
dict
"""
url = self.api_url + f'scores/v2/entries/{draftgroup_id}/{entry_key}'
params = self._embed_params('roster')
return self.get_json(url, params=params)
def get_json(self, url, params, headers=None, response_object=False):
"""Gets json resource"""
headers = headers if headers else {}
r = self.s.get(url, params=params, headers=headers, cookies=self.cj)
if response_object:
return r
try:
return r.json()
except:
return r.content()
def historical_contests(self, limit=50, offset=0):
"""Gets historical contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}/historical'
extra_params = {'limit': limit, 'offset': offset}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def historical_contests_user(self):
"""Gets user historical results"""
url = self.api_url + f'scores/v1/entries/user/{self.username}/historical'
extra_params = {'contestSetKey': self.league_key, 'contestSetType': 'league'}
params = dict(**self.base_params, **extra_params)
return self.get_json(url, params=params)
def league_metadata(self):
"""Gets league metadata"""
url = self.api_url + f'leagues/v2/leagues/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
def upcoming_contests(self):
"""Gets upcoming contests"""
url = self.api_url + f'contests/v1/contestsets/league/{self.league_key}'
params = self.base_params
return self.get_json(url, params=params)
class Parser:
"""Parses league results"""
def __init__(self, league_key=None, username=None):
"""Creates instance
Args:
league_key (str): id for home league
username (str): your username
Returns:
Parser
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
self.league_key = league_key if league_key else os.getenv('DK_LEAGUE_KEY')
self.username = username if username else os.getenv('DK_USERNAME')
def _to_dataframe(self, container):
"""Converts container to dataframe"""
return pd.DataFrame(container)
def _to_obj(self, pth):
"""Reads json text in pth and creates python object"""
if isinstance(pth, str):
pth = Path(pth)
return json.loads(pth.read_text())
def contest_entry(self, data):
"""Parses contest entry
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'lineupId', 'userName',
'userKey', 'timeRemaining', 'rank', 'fantasyPoints']
player_wanted = ['displayName', 'rosterPosition', 'percentDrafted', 'draftableId', 'score',
'statsDescription', 'timeRemaining']
entry = data['entries'][0]
d = {k: entry[k] for k in wanted}
d['players'] = []
for player in entry['roster']['scorecards']:
d['players'].append({k: player[k] for k in player_wanted})
return d
def contest_leaderboard(self, data):
"""Parses contest leaderboard
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['userName', 'userKey', 'draftGroupId', 'contestKey', 'entryKey', 'rank', 'fantasyPoints']
return [{k: item.get(k) for k in wanted} for item in data['leaderBoard']]
def historical_contests(self, data):
"""Parses historical league contests
Args:
data (dict): parsed JSON
Returns:
list: of contest dict
"""
vals = []
wanted = ['contestStartTime', 'gameSetKey', 'contestKey', 'name', 'draftGroupId',
'entries', 'maximumEntries', 'maximumEntriesPerUser', 'entryFee', 'contestState']
for contest in data['contests']:
d = {k: contest[k] for k in wanted}
attrs = contest['attributes']
if attrs.get('Root Recurring Contest ID'):
d['recurringContestId'] = attrs.get('Root Recurring Contest ID')
vals.append(d)
return vals
def historical_contests_user(self, data):
"""Parses historical contests for user in league
Args:
data (dict): parsed JSON
Returns:
list: of dict
"""
wanted = ['draftGroupId', 'contestKey', 'entryKey', 'userName', 'userKey', 'rank', 'fantasyPoints',
'fantasyPointsOpponent', 'userNameOpponent']
return [{k: item[k] for k in wanted} for item in data['entries']]
def league_members(self, data):
"""Gets league members
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
list: of str
"""
return [item['username'] for item in data['league']['members']]
def league_metadata(self, data):
"""Gets league metadata
Example URL: https://api.draftkings.com/leagues/v2/leagues/67ymkfy8
Args:
data (dict): parsed JSON
Returns:
dict: with user details
"""
d = {}
league = data['league']
d['league_name'] = league['name']
d['league_key'] = league['key']
d['league_commissioner'] = league['creatorUsername']
d['members'] = {item['username']: item['userKey'] for item in league['members']}
return d
if __name__ == '__main__':
pass
| [
2,
288,
74,
11195,
19316,
13,
9078,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
4731,
1330,
355,
979,
72,
62,
7211,
2798,
589,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
7007,
62,
6494,
1330,
11532,
... | 2.234818 | 2,964 |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2.5 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('Sine Wave')
plt.grid(True)
plt.show() | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
83,
796,
45941,
13,
283,
858,
7,
15,
13,
15,
11,
513,
13,
15,
11,
657,
13... | 1.937008 | 127 |
#Escreva um programa que leia um valor em metros e o exiba convertido em centimetros e milimetros.
n = float(input('\033[32mDigite o numero:\033[m'))
print('O nmero digitado \033[33m{0:.0f}m\033[m.\n'
'Ele apresentado em centimetros fica \033[33m{0:.2f}cm\033[m.\n'
'Apresentado em milmetros fica \033[33m{0:.3f}mm\033[m'
.format(n))
#print('O nmero em metros {0}.\n
# O nmero em convertido para centimetros {1}.\n
# O nmero convertido para milimetros {2}'
# .format(n, n/100, n/1000))
| [
2,
47051,
260,
6862,
23781,
1430,
64,
8358,
443,
544,
23781,
1188,
273,
795,
1138,
4951,
304,
267,
409,
23718,
10385,
17305,
795,
1247,
38813,
4951,
304,
1465,
38813,
4951,
13,
198,
77,
796,
12178,
7,
15414,
10786,
59,
44427,
58,
2624... | 2.14346 | 237 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 21 08:47:08 2019
@author: dordoloy
"""
import os
import pika
import config
import getpass
publish_fanout() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2556,
2310,
8487,
25,
2857,
25,
2919,
13130,
198,
198,
31,
9800,
25,
288,
585,
349,
726,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
... | 2.57377 | 61 |
<<<<<<< HEAD
# Lint as: python3
=======
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
# Copyright 2020 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some layered modules/functions to help users writing custom training loop."""
import abc
import contextlib
import functools
import inspect
<<<<<<< HEAD
=======
import os
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
import numpy as np
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a multiple steps function driven by the python while loop.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
A callable defined as the `loop_fn` defination below.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. If `num_steps==-1`, will
iterate until exausting the iterator.
state: An optional initial state before running the loop.
reduce_fn: a callable defined as `def reduce_fn(state, value)`, where
`value` is the outputs from `step_fn`.
Returns:
The updated state.
"""
try:
step = 0
# To make sure the OutOfRangeError exception can be handled well with
# async remote eager, we need to wrap the loop body in a `async_scope`.
with tf.experimental.async_scope():
while (num_steps == -1 or step < num_steps):
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
tf.experimental.async_clear_error()
return state
return loop_fn
def create_tf_while_loop_fn(step_fn):
"""Create a multiple steps function driven by tf.while_loop on the host.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
A callable defined as the `loop_fn` defination below.
"""
def loop_fn(iterator, num_steps):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Must be a tf.Tensor.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
"may cause retracing.")
for _ in tf.range(num_steps):
step_fn(iterator)
return loop_fn
<<<<<<< HEAD
=======
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A helper function to create distributed dataset.
Args:
strategy: An instance of `tf.distribute.Strategy`.
dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an
`tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If
it is a function, it could optionally have an argument named
`input_context` which is `tf.distribute.InputContext` argument type.
*args: The list of arguments to be passed to dataset_or_fn.
**kwargs: Any keyword arguments to be passed.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`")
def dataset_fn(ctx):
"""Wrapped dataset function for creating distributed dataset.."""
# If `dataset_or_fn` is a function and has `input_context` as argument
# names, pass `ctx` as the value of `input_context` when calling
# `dataset_or_fn`. Otherwise `ctx` will not be used when calling
# `dataset_or_fn`.
argspec = inspect.getfullargspec(dataset_or_fn)
args_names = argspec.args
if "input_context" in args_names:
kwargs["input_context"] = ctx
ds = dataset_or_fn(*args, **kwargs)
return ds
return strategy.experimental_distribute_datasets_from_function(dataset_fn)
<<<<<<< HEAD
self._summary_writer = None
=======
self._summary_writers = {}
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
<<<<<<< HEAD
def flush(self):
"""Flush the underlying summary writer."""
if self._enabled:
tf.summary.flush(self.summary_writer)
def write_summaries(self, items):
"""Write a bulk of summaries.
Args:
items: a dictionary of `Tensors` for writing summaries.
"""
# TODO(rxsang): Support writing summaries with nested structure, so users
# can split the summaries into different directories for nicer visualization
# in Tensorboard, like train and eval metrics.
if not self._enabled:
return
with self.summary_writer.as_default():
for name, tensor in items.items():
self._summary_fn(name, tensor, step=self._global_step)
=======
def summary_writer(self, relative_path=""):
"""Returns the underlying summary writer.
Args:
relative_path: The current path in which to write summaries, relative to
the summary directory. By default it is empty, which specifies the root
directory.
"""
if self._summary_writers and relative_path in self._summary_writers:
return self._summary_writers[relative_path]
if self._enabled:
self._summary_writers[relative_path] = tf.summary.create_file_writer(
os.path.join(self._summary_dir, relative_path))
else:
self._summary_writers[relative_path] = tf.summary.create_noop_writer()
return self._summary_writers[relative_path]
def flush(self):
"""Flush the underlying summary writers."""
if self._enabled:
tf.nest.map_structure(tf.summary.flush, self._summary_writers)
def write_summaries(self, summary_dict):
"""Write summaries for the given values.
This recursively creates subdirectories for any nested dictionaries
provided in `summary_dict`, yielding a hierarchy of directories which will
then be reflected in the TensorBoard UI as different colored curves.
E.g. users may evaluate on muliple datasets and return `summary_dict` as a
nested dictionary.
```
{
"dataset": {
"loss": loss,
"accuracy": accuracy
},
"dataset2": {
"loss": loss2,
"accuracy": accuracy2
},
}
```
This will create two subdirectories "dataset" and "dataset2" inside the
summary root directory. Each directory will contain event files including
both "loss" and "accuracy" summaries.
Args:
summary_dict: A dictionary of values. If any value in `summary_dict` is
itself a dictionary, then the function will recursively create
subdirectories with names given by the keys in the dictionary. The
Tensor values are summarized using the summary writer instance specific
to the parent relative path.
"""
if not self._enabled:
return
self._write_summaries(summary_dict)
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
def train_function_with_summaries(*args, **kwargs):
"""Utility function to support TPU summaries via multiple `tf.function`s.
This permits interleaving summaries inside TPU-compatible code, but without
any performance impact on steps that do not write summaries.
Usage is as a decorator, similar to `tf.function`, and any `tf.function`
arguments will be passed through if supplied:
@trainer.train_function_with_summaries
def train(self, num_steps):
...
The decorated function is assumed to be a loop method accepting a `num_steps`
parameter, as for instance would be called within the `Controller`'s outer
train loop. The implementation here assumes that `summary_frequency` is
divisible by `steps_per_loop`. The decorated method should accept two
arguments, `self` and `num_steps`.
Two `tf.function` versions of `train_fn` are created: one inside a summary
writer scope with soft device placement enabled (used on steps that require
summary writing), and one with no summary writer present and soft device
placement disabled (used on all other steps).
Args:
*args: Arguments to pass through to `tf.function`.
**kwargs: Keyword arguments to pass through to `tf.function`.
Returns:
If the first argument is a callable, returns the decorated callable.
Otherwise, returns a decorator.
"""
if args and callable(args[0]):
train_fn, args = args[0], args[1:]
return decorator(train_fn)
return decorator
def get_value(x) -> np.ndarray:
"""Returns the value of a variable/tensor.
Args:
x: input variable.
Returns:
<<<<<<< HEAD
A Numpy array.
=======
A Numpy array or number.
>>>>>>> a811a3b7e640722318ad868c99feddf3f3063e36
"""
if not tf.is_tensor(x):
return x
return x.numpy()
| [
16791,
16791,
16791,
27,
39837,
198,
2,
406,
600,
355,
25,
21015,
18,
198,
1421,
18604,
198,
16471,
33409,
257,
23,
1157,
64,
18,
65,
22,
68,
21,
30120,
22047,
1507,
324,
23,
3104,
66,
2079,
69,
6048,
69,
18,
69,
1270,
5066,
68,
... | 2.910295 | 3,701 |
from dataclasses import dataclass
from apischema import deserialize, deserializer
from apischema.json_schema import deserialization_schema
# Could be shorten into deserializer(Expression), because class is callable too
assert deserialization_schema(Expression) == {
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"type": ["string", "integer"],
}
assert deserialize(Expression, 0) == deserialize(Expression, "1 - 1") == Expression(0)
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
2471,
46097,
2611,
1330,
748,
48499,
1096,
11,
748,
48499,
7509,
198,
6738,
2471,
46097,
2611,
13,
17752,
62,
15952,
2611,
1330,
748,
48499,
1634,
62,
15952,
2611,
628,
628... | 3.108108 | 148 |
# -*- coding: utf-8 -*-
'''
Created on 02/12/2011
@author: chra
'''
import csv
from operator import itemgetter
# ----- Funcin media de la notas de los alumnos ----------
# ----------------------------------------------------------
fin = open('alumnos.csv')
lector = csv.DictReader(fin, delimiter=",") # si no se pone delimiter, coge la coma por defecto // devuelve diccionario
# lector = csv.reader(fin, delimiter=",") <-- Devuelve lista
alumnos = []
for linea in lector:
alumnos.append((linea['Alumno'], media(linea)))
# -------- Ordenar por nombre de alumno -----------
alumnos.sort()
print 'Orden por nombre de alumno'
for al in alumnos:
print "%-10s %6.2f" % al #10 espacios entre cadena (nombre - nota media) y permite 6 digitos, 2 de ellos decimales.
# --------------------------------------------------
# --------- Ordenar por nota -----------------------
print '\nOrden por nota'
alumnos.sort(key=itemgetter(1),reverse=True)
for al in alumnos:
print "%-10s %6.2f" % al
#---------------------------------------------------
# Crea un fichero 'lista_ordenada_notas.csv' y escribe la lista ordenada por notas
fw = open('lista_ordenada_notas.csv', 'w')
csvwriter = csv.writer(fw)
for al in alumnos:
csvwriter.writerow(al)
fw.close() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
41972,
319,
7816,
14,
1065,
14,
9804,
198,
198,
31,
9800,
25,
442,
430,
198,
7061,
6,
198,
198,
11748,
269,
21370,
198,
6738,
10088,
1330,
2378,
1136,
... | 2.790749 | 454 |
from src.interpreter.functions.math.add import add
from src.interpreter.functions.math.div import div
from src.interpreter.functions.math.mod import mod
from src.interpreter.functions.math.mul import mul
from src.interpreter.functions.math.pow import pow_func
from src.interpreter.functions.math.sub import sub
| [
6738,
12351,
13,
3849,
3866,
353,
13,
12543,
2733,
13,
11018,
13,
2860,
1330,
751,
198,
6738,
12351,
13,
3849,
3866,
353,
13,
12543,
2733,
13,
11018,
13,
7146,
1330,
2659,
198,
6738,
12351,
13,
3849,
3866,
353,
13,
12543,
2733,
13,
... | 3.12 | 100 |
from setuptools import setup
setup(name='pygazetteer',
version='0.1.0',
description='Location extractor by looking up gazetteer',
url='https://github.com/monkey2000/pygazetteer',
license='MIT',
packages=['pygazetteer'],
install_requires=[
'pyahocorasick'
],
zip_safe=False,
include_package_data=True)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
9078,
70,
1031,
5857,
263,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
13,
15,
3256,
198,
220,
220,
220,
220,
220,
6764,
11639,
14749,
7925,
273,... | 2.279503 | 161 |
ID = {"Worldwide":0,
"AF": 1,
"AL": 2,
"DZ": 3,
"AD": 5,
"AO": 6,
"AI": 7,
"AG": 9,
"AR": 10,
"AM": 11,
"AW": 12,
"AT": 14,
"AZ": 15,
"BS": 16,
"BH": 17,
"BD": 18,
"BB": 19,
"BY": 20,
"BZ": 22,
"BJ": 23,
"BM": 24,
"BO": 26,
"BA": 27,
"BW": 28,
"BV": 29,
"BR": 30,
"BN": 31,
"BG": 32,
"BF": 33,
"BI": 34,
"KH": 35,
"CM": 36,
"CV": 38,
"KY": 39,
"TD": 41,
"CL": 42,
"CN": 43,
"CC": 45,
"CO": 46,
"KM": 47,
"CG": 48,
"CK": 49,
"CR": 50,
"CI": 51,
"HR": 52,
"CU": 53,
"CY": 54,
"CZ": 55,
"DK": 56,
"DJ": 57,
"DM": 58,
"DO": 59,
"TL": 60,
"EC": 61,
"EG": 62,
"SV": 63,
"EE": 66,
"ET": 67,
"FO": 69,
"FJ": 70,
"FI": 71,
"FR": 72,
"GF": 73,
"PF": 74,
"GA": 75,
"GM": 76,
"GE": 77,
"DE": 78,
"GH": 79,
"GR": 81,
"GD": 83,
"GP": 84,
"GT": 86,
"GN": 87,
"GY": 88,
"HT": 89,
"HN": 90,
"HK": 91,
"HU": 92,
"IS": 93,
"ID": 94,
"IQ": 95,
"IE": 96,
"IT": 97,
"JM": 98,
"JO": 100,
"KZ": 101,
"KE": 102,
"KI": 103,
"KW": 104,
"KG": 105,
"LA": 106,
"LV": 107,
"LB": 108,
"LS": 109,
"LR": 110,
"LY": 111,
"LT": 113,
"LU": 114,
"MO": 115,
"MK": 116,
"MG": 117,
"MW": 118,
"MY": 119,
"MV": 120,
"ML": 121,
"MT": 122,
"MQ": 124,
"MR": 125,
"MU": 126,
"MX": 128,
"FM": 129,
"MD": 130,
"MC": 131,
"MN": 132,
"MA": 134,
"MZ": 135,
"MM": 136,
"NA": 137,
"NP": 139,
"NL": 140,
"AN": 141,
"NC": 142,
"NZ": 143,
"NI": 144,
"NE": 145,
"NG": 146,
"NO": 149,
"OM": 150,
"PK": 151,
"PW": 152,
"PA": 153,
"PG": 154,
"PY": 155,
"PE": 156,
"PH": 157,
"PL": 159,
"PT": 160,
"QA": 162,
"RE": 163,
"RO": 164,
"RW": 166,
"KN": 167,
"LC": 168,
"SA": 171,
"SN": 172,
"SC": 173,
"SG": 175,
"SK": 176,
"SI": 177,
"SO": 179,
"ZA": 180,
"KR": 181,
"ES": 182,
"LK": 183,
"SH": 184,
"SR": 186,
"SZ": 187,
"SE": 188,
"CH": 189,
"TW": 191,
"TJ": 192,
"TZ": 193,
"TH": 194,
"TG": 195,
"TT": 198,
"TN": 199,
"TR": 200,
"TM": 201,
"UG": 203,
"UA": 204,
"AE": 205,
"GB": 206,
"UY": 207,
"UZ": 208,
"VE": 211,
"VN": 212,
"VG": 213,
"YE": 216,
"ZM": 218,
"ZW": 219,
"RS": 220,
"ME": 221,
"IN": 225,
"TC": 234,
"CD": 235,
"GG": 236,
"IM": 237,
"JE": 239,
"CW": 246, }
| [
2389,
796,
19779,
10603,
4421,
1298,
15,
11,
198,
220,
220,
220,
220,
220,
366,
8579,
1298,
352,
11,
198,
220,
220,
220,
220,
220,
366,
1847,
1298,
362,
11,
198,
220,
220,
220,
220,
220,
366,
35,
57,
1298,
513,
11,
198,
220,
220... | 1.482092 | 2,122 |
"""
These classes are a collection of the needed tools to read external data.
The External type objects created by these classes are initialized before
the Stateful objects by functions.Model.initialize.
"""
import re
import os
import warnings
import pandas as pd # TODO move to openpyxl
import numpy as np
import xarray as xr
from openpyxl import load_workbook
from . import utils
def _series_selector(self, x_row_or_col, cell):
"""
Selects if a series data (DATA/LOOKUPS), should be read by columns,
rows or cellrange name.
Based on the input format of x_row_or_col and cell.
The format of the 2 variables must be consistent.
Parameters
----------
x_row_or_col: str
String of a number if series is given in a row, letter if series is
given in a column or name if the series is given by cellrange name.
cell: str
Cell identificator, such as "A1", or name if the data is given
by cellrange name.
Returns
-------
series_across: str
"row" if series is given in a row
"column" if series is given in a column
"name" if series and data are given by range name
"""
try:
# if x_row_or_col is numeric the series must be a row
int(x_row_or_col)
return "row"
except ValueError:
if self._split_excel_cell(cell):
# if the cell can be splitted means that the format is
# "A1" like then the series must be a column
return "column"
else:
return "name"
class ExtData(External):
"""
Class for Vensim GET XLS DATA/GET DIRECT DATA
"""
def add(self, file_name, sheet, time_row_or_col, cell,
interp, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.time_row_or_cols.append(time_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if not interp:
interp = "interpolate"
if interp != self.interp:
raise ValueError(self.py_name + "\n"
+ "Error matching interpolation method with "
+ "previously defined one")
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.time_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("data"))
self.data = utils.xrmerge(data)
class ExtLookup(External):
"""
Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS
"""
def add(self, file_name, sheet, x_row_or_col, cell, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.x_row_or_cols.append(x_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.x_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("lookup"))
self.data = utils.xrmerge(data)
class ExtConstant(External):
"""
Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS
"""
def add(self, file_name, sheet, cell, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.transposes.append(cell[-1] == '*')
self.cells.append(cell.strip('*'))
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.transposes,
self.cells, self.coordss)
for (self.file, self.sheet, self.transpose,
self.cell, self.coords) in zipped:
data.append(self._initialize())
self.data = utils.xrmerge(data)
def _initialize(self):
"""
Initialize one element
"""
self._resolve_file(root=self.root)
split = self._split_excel_cell(self.cell)
if split:
data_across = "cell"
cell = split
else:
data_across = "name"
cell = self.cell
shape = utils.compute_shape(self.coords, reshape_len=2,
py_name=self.py_name)
if self.transpose:
shape.reverse()
data = self._get_constant_data(data_across, cell, shape)
if self.transpose:
data = data.transpose()
if np.any(np.isnan(data)):
# nan values in data
if data_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# Create only an xarray if the data is not 0 dimensional
if len(self.coords) > 0:
reshape_dims = tuple(utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
data = xr.DataArray(
data=data, coords=self.coords, dims=list(self.coords)
)
return data
def _get_constant_data(self, data_across, cell, shape):
"""
Function thar reads data from excel file for CONSTANT
Parameters
----------
data_across: "cell" or "name"
The way to read data file.
cell: int or str
If data_across is "cell" the lefttop split cell value where
the data is.
If data_across is "name" the cell range name where the data is.
shape:
The shape of the data in 2D.
Returns
-------
data: float/ndarray(1D/2D)
The values of the data.
"""
if data_across == "cell":
# read data from topleft cell name using pandas
start_row, start_col = cell
return self._get_data_from_file(
rows=[start_row, start_row + shape[0]],
cols=[start_col, start_col + shape[1]])
else:
# read data from cell range name using OpenPyXL
data = self._get_data_from_file_opyxl(cell)
try:
# Remove length=1 axis
data_shape = data.shape
if data_shape[1] == 1:
data = data[:, 0]
if data_shape[0] == 1:
data = data[0]
except AttributeError:
# Data is a float, nothing to do
pass
# Check data dims
try:
if shape[0] == 1 and shape[1] != 1:
assert shape[1] == len(data)
elif shape[0] != 1 and shape[1] == 1:
assert shape[0] == len(data)
elif shape[0] == 1 and shape[1] == 1:
assert isinstance(data, float)
else:
assert tuple(shape) == data.shape
except AssertionError:
raise ValueError(self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
+ " has not the same shape as the"
+ " given coordinates")
return data
class ExtSubscript(External):
"""
Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT
"""
| [
37811,
198,
4711,
6097,
389,
257,
4947,
286,
262,
2622,
4899,
284,
1100,
7097,
1366,
13,
198,
464,
34579,
2099,
5563,
2727,
416,
777,
6097,
389,
23224,
878,
198,
1169,
1812,
913,
5563,
416,
5499,
13,
17633,
13,
36733,
1096,
13,
198,
... | 1.995981 | 4,728 |
# File: C (Python 2.4)
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import *
from direct.fsm.FSM import FSM
from direct.showbase.PythonUtil import Functor
from pandac.PandaModules import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesgui.TabBar import TopTab, TabBar
| [
2,
9220,
25,
327,
357,
37906,
362,
13,
19,
8,
198,
198,
6738,
1277,
13,
48317,
13,
13470,
8205,
72,
1330,
1635,
198,
6738,
1277,
13,
3849,
2100,
13,
9492,
2100,
22289,
1330,
1635,
198,
6738,
1277,
13,
69,
5796,
13,
10652,
44,
1330... | 2.965986 | 147 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from logging import getLogger as getSysLogger
from logging import *
# Some of the build slave environments don't see the following when doing
# 'from logging import *'
# see https://bugzilla.mozilla.org/show_bug.cgi?id=700415#c35
from logging import getLoggerClass, addLevelName, setLoggerClass, shutdown, debug, info, basicConfig
import json
_default_level = INFO
_LoggerClass = getLoggerClass()
# Define mozlog specific log levels
START = _default_level + 1
END = _default_level + 2
PASS = _default_level + 3
KNOWN_FAIL = _default_level + 4
FAIL = _default_level + 5
CRASH = _default_level + 6
# Define associated text of log levels
addLevelName(START, 'TEST-START')
addLevelName(END, 'TEST-END')
addLevelName(PASS, 'TEST-PASS')
addLevelName(KNOWN_FAIL, 'TEST-KNOWN-FAIL')
addLevelName(FAIL, 'TEST-UNEXPECTED-FAIL')
addLevelName(CRASH, 'PROCESS-CRASH')
def getLogger(name, handler=None):
    """Return the logger registered under *name*, creating it on first use.

    If a logger by this name already exists it is returned unchanged;
    passing *handler* together with an existing name raises ValueError.
    For a new logger, *handler* (or a default StreamHandler using
    MozFormatter) is attached, the module default level is applied, and
    propagation to ancestor loggers is disabled.

    :param name: name of the logger to fetch or create
    :param handler: optional handler to attach to a newly created logger;
        to add a handler to an existing logger, call that logger's
        addHandler method instead
    """
    setLoggerClass(MozLogger)

    existing = Logger.manager.loggerDict.get(name)
    if existing is not None:
        if handler:
            raise ValueError('The handler parameter requires '
                             'that a logger by this name does '
                             'not already exist')
        return existing

    new_logger = getSysLogger(name)
    new_logger.setLevel(_default_level)
    if handler is None:
        # No handler supplied: log to standard output with the Moz format.
        default_handler = StreamHandler()
        default_handler.setFormatter(MozFormatter())
        handler = default_handler
    new_logger.addHandler(handler)
    new_logger.propagate = False
    return new_logger
| [
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,
351,
428,
2393,
11,
198,
2,
921,
460,
7330,
530,
379,
... | 2.694774 | 842 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.misc.fluxes Contains the ObservedImageMaker class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import astronomical modules
from astropy.units import Unit
from astropy import constants
# Import the relevant PTS classes and modules
from ..tools.logging import log
from ..tools import filesystem as fs
from ..basics.filter import Filter
from ...magic.core.image import Image
from ...magic.core.frame import Frame
from ...magic.basics.coordinatesystem import CoordinateSystem
from ..tools.special import remote_filter_convolution, remote_convolution_frame
# -----------------------------------------------------------------
# The speed of light
speed_of_light = constants.c
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def instrument_name(datacube_path, prefix):
    """Derive the instrument name encoded in a datacube file name.

    The file is expected to be named "<prefix>_<instrument>_total.fits";
    the portion between the prefix and the "_total.fits" suffix is returned.

    :param datacube_path: path to the datacube file
    :param prefix: file name prefix preceding the instrument name
    :return: the instrument name as a string
    """
    filename = fs.name(datacube_path)
    without_suffix = filename.split("_total.fits")[0]
    return without_suffix.split(prefix + "_")[1]
# -----------------------------------------------------------------
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
23,
532,
9,
12,
198,
2,
41906,
17174,
9,
198,
2,
12429,
220,
220,
220,
220,
220,
220,
20907,
1377,
11361,
16984,
15813,
329,
1762,
351,
14277,
... | 4.148148 | 378 |
# RiveScript-Python
#
# This code is released under the MIT License.
# See the "LICENSE" file for more information.
#
# https://www.rivescript.com/
def get_topic_triggers(rs, topic, thats, depth=0, inheritance=0, inherited=False):
    """Recursively scan a topic and return a list of all triggers.

    Arguments:
        rs (RiveScript): A reference to the parent RiveScript instance.
        topic (str): The original topic name.
        thats (bool): Are we getting triggers for 'previous' replies?
        depth (int): Recursion step counter.
        inheritance (int): The inheritance level counter, for topics that
            inherit other topics.
        inherited (bool): Whether the current topic is inherited by others.

    Returns:
        []str: List of all triggers found.
    """

    # Break if we're in too deep.
    if depth > rs._depth:
        rs._warn("Deep recursion while scanning topic inheritance")
        # FIX: previously this only warned and fell through, so the runaway
        # recursion kept going; stop it here, exactly as get_topic_tree()
        # does when it hits the same limit.
        return []

    # Keep in mind here that there is a difference between 'includes' and
    # 'inherits' -- topics that inherit other topics are able to OVERRIDE
    # triggers that appear in the inherited topic. This means that if the top
    # topic has a trigger of simply '*', then NO triggers are capable of
    # matching in ANY inherited topic, because even though * has the lowest
    # priority, it has an automatic priority over all inherited topics.
    #
    # The getTopicTriggers method takes this into account. All topics that
    # inherit other topics will have their triggers prefixed with a fictional
    # {inherits} tag, which would start at {inherits=0} and increment if this
    # topic has other inheriting topics. So we can use this tag to make sure
    # topics that inherit things will have their triggers always be on top of
    # the stack, from inherits=0 to inherits=n.

    # Important info about the depth vs inheritance params to this function:
    # depth increments by 1 each time this function recursively calls itself.
    # inheritance increments by 1 only when this topic inherits another
    # topic.
    #
    # This way, '> topic alpha includes beta inherits gamma' will have this
    # effect:
    #  alpha and beta's triggers are combined together into one matching
    #  pool, and then those triggers have higher matching priority than
    #  gamma's.
    #
    # The inherited option is True if this is a recursive call, from a topic
    # that inherits other topics. This forces the {inherits} tag to be added
    # to the triggers. This only applies when the top topic 'includes'
    # another topic.
    rs._say("\tCollecting trigger list for topic " + topic + "(depth="
            + str(depth) + "; inheritance=" + str(inheritance) + "; "
            + "inherited=" + str(inherited) + ")")

    # topic: the name of the topic
    # depth: starts at 0 and ++'s with each recursion

    # Topic doesn't exist?
    if topic not in rs._topics:
        rs._warn("Inherited or included topic {} doesn't exist or has no triggers".format(
            topic
        ))
        return []

    # Collect an array of triggers to return.
    triggers = []

    # Get those that exist in this topic directly.
    inThisTopic = []
    if not thats:
        # The non-that structure is {topic}->[array of triggers]
        if topic in rs._topics:
            for trigger in rs._topics[topic]:
                inThisTopic.append([ trigger["trigger"], trigger ])
    else:
        # The 'that' structure is: {topic}->{cur trig}->{prev trig}->{trig info}
        if topic in rs._thats.keys():
            for curtrig in rs._thats[topic].keys():
                for previous, pointer in rs._thats[topic][curtrig].items():
                    inThisTopic.append([ pointer["trigger"], pointer ])

    # Does this topic include others?
    if topic in rs._includes:
        # Check every included topic.
        for includes in rs._includes[topic]:
            rs._say("\t\tTopic " + topic + " includes " + includes)
            triggers.extend(get_topic_triggers(rs, includes, thats, (depth + 1), inheritance, True))

    # Does this topic inherit others?
    if topic in rs._lineage:
        # Check every inherited topic.
        for inherits in rs._lineage[topic]:
            rs._say("\t\tTopic " + topic + " inherits " + inherits)
            triggers.extend(get_topic_triggers(rs, inherits, thats, (depth + 1), (inheritance + 1), False))

    # Collect the triggers for *this* topic. If this topic inherits any
    # other topics, it means that this topic's triggers have higher
    # priority than those in any inherited topics. Enforce this with an
    # {inherits} tag.
    if topic in rs._lineage or inherited:
        for trigger in inThisTopic:
            rs._say("\t\tPrefixing trigger with {inherits=" + str(inheritance) + "}" + trigger[0])
            triggers.append(["{inherits=" + str(inheritance) + "}" + trigger[0], trigger[1]])
    else:
        triggers.extend(inThisTopic)

    return triggers
def get_topic_tree(rs, topic, depth=0):
    """Given one topic, get the list of all included/inherited topics.

    :param str topic: The topic to start the search at.
    :param int depth: The recursion depth counter.

    :return []str: Array of topics.
    """
    # Stop if the recursion limit has been exceeded.
    if depth > rs._depth:
        rs._warn("Deep recursion while scanning topic trees!")
        return []

    # Start with this topic itself, then append everything it pulls in.
    found = [topic]

    # Topics included by this one, in sorted order.
    for included in sorted(rs._includes.get(topic, [])):
        found += get_topic_tree(rs, included, depth + 1)

    # Topics inherited by this one, in sorted order.
    for parent in sorted(rs._lineage.get(topic, [])):
        found += get_topic_tree(rs, parent, depth + 1)

    return found
| [
2,
371,
425,
7391,
12,
37906,
198,
2,
198,
2,
770,
2438,
318,
2716,
739,
262,
17168,
13789,
13,
198,
2,
4091,
262,
366,
43,
2149,
24290,
1,
2393,
329,
517,
1321,
13,
198,
2,
198,
2,
3740,
1378,
2503,
13,
380,
1158,
6519,
13,
7... | 2.835907 | 2,072 |
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Created Aug 4, 2009
connection pool abstraction over previous Connection.py which is now SingleConnection.py
sets up module scope connection pool, currently with no size limit
pool for both connections with dictionary cursors and regular cursors
reconnects to db every x hours depending on config file
@author: Andrew
'''
from utaka.src.dataAccess.SingleConnection import Connection as SingleConnection
import utaka.src.Config as Config
import MySQLdb
import datetime
# Module-scope connection pools (no size limit): dcp holds connections that
# use dictionary cursors, rcp holds connections with regular cursors.
dcp = [SingleConnection(True)]
rcp = [SingleConnection(False)]
# Record when the pool was created so connections can be refreshed once the
# configured timeout elapses.
dbTimer = datetime.datetime.today()
# Reconnection interval, read from the config file's [database] section.
dbTimeout = datetime.timedelta(hours = int(Config.get('database', 'connection_timeout_in_hours')))
| [
2,
15269,
3717,
5524,
8353,
4037,
6168,
4912,
198,
2,
198,
2,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
5832,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,... | 3.857143 | 329 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
# Absolute path of the directory containing this setup.py, so files
# referenced below resolve regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# Reuse the README as the long description shown on PyPI.
with open(path.join(here, 'README.md')) as fin:
    long_description = fin.read()
setup(
    name='pylint-pytest',
    version='1.0.3',
    author='Reverb Chu',
    author_email='pylint-pytest@reverbc.tw',
    maintainer='Reverb Chu',
    maintainer_email='pylint-pytest@reverbc.tw',
    license='MIT',
    url='https://github.com/reverbc/pylint-pytest',
    description='A Pylint plugin to suppress pytest-related false positives.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Ship only the plugin package; tests and the sandbox are excluded.
    packages=find_packages(exclude=['tests', 'sandbox']),
    install_requires=[
        'pylint',
        'pytest>=4.6',
    ],
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Quality Assurance',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
    ],
    tests_require=['pytest', 'pylint'],
    keywords=['pylint', 'pytest', 'plugin'],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
28686,
1330,
3108,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
628,
198,
1456,
796,
3108,... | 2.574841 | 628 |
# Reverse TCP Shell in Python For Offensive Security/Penetration Testing Assignments
# Connect on LinkedIn https://www.linkedin.com/in/lismore or Twitter @patricklismore
#=========================================================================================================================================
# Python TCP Client
import socket
import subprocess
#Start client function
#Main function
#Program entry point
main()
| [
2,
31849,
23633,
17537,
287,
11361,
1114,
26855,
4765,
14,
25553,
316,
1358,
23983,
2195,
570,
902,
220,
198,
2,
8113,
319,
27133,
220,
3740,
1378,
2503,
13,
25614,
259,
13,
785,
14,
259,
14,
75,
1042,
382,
393,
3009,
2488,
29615,
7... | 4.824176 | 91 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
static const long Cryptography_HAS_ENGINE_CRYPTODEV;
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... ECDH_METHOD;
typedef ... ECDSA_METHOD;
typedef ... DH_METHOD;
typedef struct {
void (*seed)(const void *, int);
int (*bytes)(unsigned char *, int);
void (*cleanup)();
void (*add)(const void *, int, double);
int (*pseudorand)(unsigned char *, int);
int (*status)();
} RAND_METHOD;
typedef ... STORE_METHOD;
typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_ECDH;
static const unsigned int ENGINE_METHOD_ECDSA;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_STORE;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
static const int ENGINE_R_CONFLICTING_ENGINE_ID;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_load_builtin_engines(void);
void ENGINE_cleanup(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_ECDH(void);
ENGINE *ENGINE_get_default_ECDSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_ECDH(ENGINE *);
int ENGINE_set_default_ECDSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_ECDH(ENGINE *);
void ENGINE_unregister_ECDH(ENGINE *);
void ENGINE_register_all_ECDH(void);
int ENGINE_register_ECDSA(ENGINE *);
void ENGINE_unregister_ECDSA(ENGINE *);
void ENGINE_register_all_ECDSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_STORE(ENGINE *);
void ENGINE_unregister_STORE(ENGINE *);
void ENGINE_register_all_STORE(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);
int ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);
const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const STORE_METHOD *ENGINE_get_STORE(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
void ENGINE_load_cryptodev(void);
"""
CUSTOMIZATIONS = """
#if defined(LIBRESSL_VERSION_NUMBER)
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 0;
void (*ENGINE_load_cryptodev)(void) = NULL;
#else
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 1;
#endif
"""
| [
2,
770,
2393,
318,
10668,
11971,
739,
262,
2846,
286,
262,
24843,
13789,
11,
10628,
198,
2,
362,
13,
15,
11,
290,
262,
347,
10305,
13789,
13,
4091,
262,
38559,
24290,
2393,
287,
262,
6808,
286,
428,
16099,
198,
2,
329,
1844,
3307,
... | 2.600651 | 2,459 |
import pytest
from httpx import AsyncClient
from conf_test_db import app
from tests.shared.info import category_info, product_info
| [
11748,
12972,
9288,
198,
6738,
2638,
87,
1330,
1081,
13361,
11792,
198,
198,
6738,
1013,
62,
9288,
62,
9945,
1330,
598,
198,
6738,
5254,
13,
28710,
13,
10951,
1330,
6536,
62,
10951,
11,
1720,
62,
10951,
628,
198
] | 3.526316 | 38 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
_viso2 = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
_viso2 = swig_import_helper()
del swig_import_helper
else:
import _viso2
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
try:
_object = object
_newclass = 1
except __builtin__.Exception:
_newclass = 0
SwigPyIterator_swigregister = _viso2.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
VisualOdometry_swigregister = _viso2.VisualOdometry_swigregister
VisualOdometry_swigregister(VisualOdometry)
calibration_swigregister = _viso2.calibration_swigregister
calibration_swigregister(calibration)
bucketing_swigregister = _viso2.bucketing_swigregister
bucketing_swigregister(bucketing)
VO_parameters_swigregister = _viso2.VO_parameters_swigregister
VO_parameters_swigregister(VO_parameters)
VisualOdometryMono_swigregister = _viso2.VisualOdometryMono_swigregister
VisualOdometryMono_swigregister(VisualOdometryMono)
Mono_parameters_swigregister = _viso2.Mono_parameters_swigregister
Mono_parameters_swigregister(Mono_parameters)
VisualOdometryStereo_swigregister = _viso2.VisualOdometryStereo_swigregister
VisualOdometryStereo_swigregister(VisualOdometryStereo)
Stereo_parameters_swigregister = _viso2.Stereo_parameters_swigregister
Stereo_parameters_swigregister(Stereo_parameters)
Matrix_swigregister = _viso2.Matrix_swigregister
Matrix_swigregister(Matrix)
# SWIG-generated module-level wrappers for the static methods of the native
# Matrix class in the _viso2 extension. Per the header of this file, do not
# hand-edit: regenerate from the SWIG interface file instead.
def Matrix_eye(m):
    """Matrix_eye(m) -> Matrix"""
    return _viso2.Matrix_eye(m)
def Matrix_diag(M):
    """Matrix_diag(M) -> Matrix"""
    return _viso2.Matrix_diag(M)
def Matrix_reshape(M, m, n):
    """Matrix_reshape(M, m, n) -> Matrix"""
    return _viso2.Matrix_reshape(M, m, n)
def Matrix_rotMatX(angle):
    """Matrix_rotMatX(angle) -> Matrix"""
    return _viso2.Matrix_rotMatX(angle)
def Matrix_rotMatY(angle):
    """Matrix_rotMatY(angle) -> Matrix"""
    return _viso2.Matrix_rotMatY(angle)
def Matrix_rotMatZ(angle):
    """Matrix_rotMatZ(angle) -> Matrix"""
    return _viso2.Matrix_rotMatZ(angle)
def Matrix_cross(a, b):
    """Matrix_cross(a, b) -> Matrix"""
    return _viso2.Matrix_cross(a, b)
def Matrix_inv(M):
    """Matrix_inv(M) -> Matrix"""
    return _viso2.Matrix_inv(M)
Matcher_swigregister = _viso2.Matcher_swigregister
Matcher_swigregister(Matcher)
Matcher_parameters_swigregister = _viso2.Matcher_parameters_swigregister
Matcher_parameters_swigregister(Matcher_parameters)
p_match_swigregister = _viso2.p_match_swigregister
p_match_swigregister(p_match)
Reconstruction_swigregister = _viso2.Reconstruction_swigregister
Reconstruction_swigregister(Reconstruction)
point3d_swigregister = _viso2.point3d_swigregister
point3d_swigregister(point3d)
point2d_swigregister = _viso2.point2d_swigregister
point2d_swigregister(point2d)
track_swigregister = _viso2.track_swigregister
track_swigregister(track)
MatchVector_swigregister = _viso2.MatchVector_swigregister
MatchVector_swigregister(MatchVector)
Point3dVector_swigregister = _viso2.Point3dVector_swigregister
Point3dVector_swigregister(Point3dVector)
TrackVector_swigregister = _viso2.TrackVector_swigregister
TrackVector_swigregister(TrackVector)
# This file is compatible with both classic and new-style classes.
| [
2,
770,
2393,
373,
6338,
7560,
416,
12672,
3528,
357,
4023,
1378,
2503,
13,
2032,
328,
13,
2398,
737,
198,
2,
10628,
513,
13,
15,
13,
1065,
198,
2,
198,
2,
2141,
407,
787,
2458,
284,
428,
2393,
4556,
345,
760,
644,
345,
389,
180... | 2.657574 | 1,393 |
# -*- coding: utf-8 -*-
import asyncio
import datetime
import json
import logging
import sys
from typing import Optional
import aiohttp
from aiohttp import ClientSession
from . import __version__
from .errors import (
BadGateway,
BadRequest,
Forbidden,
HTTPException,
InternalServerError,
NotFound,
RateLimited
)
__log__ = logging.getLogger(__name__)
__all__ = (
'Route',
'HTTPClient'
)
def __to_json(self, obj):
    """Serialize *obj* to its most compact JSON form.

    Uses separators with no surrounding whitespace and escapes non-ASCII
    characters so the resulting payload is plain ASCII.
    """
    compact_separators = (',', ':')
    return json.dumps(obj, separators=compact_separators, ensure_ascii=True)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
30351,
952,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
25064,
198,
6738,
19720,
1330,
32233,
198,
198,
11748,
257,
952,
4023,
198,
67... | 2.638614 | 202 |
'''
What is a Mother Vertex?
A mother vertex in a graph G = (V,E) is a vertex v such that all other vertices in G can be reached by a path from v.
How to find mother vertex?
Case 1:- Undirected Connected Graph : In this case, all the vertices are mother vertices as we can reach to all the other nodes in the graph.
Case 2:- Undirected/Directed Disconnected Graph : In this case, there is no mother vertices as we cannot reach to all the other nodes in the graph.
Case 3:- Directed Connected Graph : In this case, we have to find a vertex -v in the graph such that we can reach to all the other nodes in the graph through a directed path.
SOLUTION:
If there exists a mother vertex (or vertices), then one of the mother vertices is the last-finished vertex in DFS. (Equivalently, a mother vertex has the maximum finish time in a DFS traversal.)
A vertex is said to be finished in DFS if a recursive call for its DFS is over, i.e., all descendants of the vertex have been visited.
Algorithm :
Do DFS traversal of the given graph. While doing traversal keep track of last finished vertex v. This step takes O(V+E) time.
If there exists a mother vertex (or vertices), then v must be one (or one of them). Check whether v is a mother vertex by doing a DFS/BFS from v. This step also takes O(V+E) time.
Note that there is no need to literally store the finish time for each vertex.
We can just do:
...
...
if node not in visited:
dfs(node)
latest = node
...
...
# Check if latest is indeed a mother vertex.
'''
| [
7061,
6,
198,
2061,
318,
257,
10584,
4643,
16886,
30,
198,
32,
2802,
37423,
287,
257,
4823,
402,
796,
357,
53,
11,
36,
8,
318,
257,
37423,
410,
884,
326,
477,
584,
9421,
1063,
287,
402,
460,
307,
4251,
416,
257,
3108,
422,
410,
... | 3.509259 | 432 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
... | 2.177778 | 45 |
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
import re
import yaml
from html.parser import HTMLParser
from mistune import Renderer
from mechanical_markdown.step import Step
start_token = 'STEP'
end_token = 'END_STEP'
ignore_links_token = 'IGNORE_LINKS'
end_ignore_links_token = 'END_IGNORE'
| [
198,
37811,
198,
15269,
357,
66,
8,
5413,
10501,
13,
198,
26656,
15385,
739,
262,
17168,
13789,
13,
198,
37811,
198,
198,
11748,
302,
198,
11748,
331,
43695,
198,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
198,
6738,
4020,
1726,
... | 3.122642 | 106 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
from compliance_checker.runner import ComplianceChecker, CheckSuite
from compliance_checker.cf.util import download_cf_standard_name_table
from compliance_checker import __version__
if __name__ == "__main__":
sys.exit(main())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
1822,
29572,
198,
11748,
25064,
198,
6738,
11846,
62,
9122,
263,
13,
16737,
1330,
40536,
9787,
263,
11,
6822,
5606,
578,
... | 3.378947 | 95 |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import gettext as _
apphook_pool.register(CategoriesAppHook)
| [
6738,
269,
907,
13,
1324,
62,
8692,
1330,
16477,
4090,
381,
198,
6738,
269,
907,
13,
1324,
25480,
62,
7742,
1330,
598,
25480,
62,
7742,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
355,
4808,
628,
198,
198,
1324,
... | 3.054545 | 55 |
from django.db.models.signals import pre_save
from django.dispatch import receiver
from blacklist import models
from hashlib import sha256
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
662,
62,
21928,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
9733,
198,
6738,
38810,
1330,
4981,
198,
6738,
12234,
8019,
1330,
427,
64,
11645,
628,
198
] | 3.710526 | 38 |
# Generated by Django 3.1 on 2020-08-22 17:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
319,
12131,
12,
2919,
12,
1828,
1596,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.966667 | 30 |
import re
# regex for a user or channel mention at the beginning of a message
# example matches: " <@UJQ07L30Q> ", "<#C010P8N1ABB|interns>"
# interactive playground: https://regex101.com/r/2Z7eun/2
MENTION_PATTERN = r"(?:^\s?<@(.*?)>\s?)|(?:^\s?<#(.*?)\|.*?>\s?)"
def get_set_element(_set):
    """Return an arbitrary element of *_set* (the first one its iterator
    yields), or None when the set is empty.
    """
    return next(iter(_set), None)
def get_person_from_match(user_id, match):
    """Given a Match, return the Person whose user ID equals *user_id*.

    Raises an Exception when neither person in the match carries that ID.
    """
    for person in (match.person_1, match.person_2):
        if person.user_id == user_id:
            return person
    raise Exception(f"Person with user ID \"{user_id}\" is not part of "
                    f"the passed match ({match}).")
def get_other_person_from_match(user_id, match):
    """Given a Match, return the Person whose user ID is NOT *user_id*
    (i.e. the other participant).

    Raises an Exception when neither person in the match carries that ID.
    """
    if match.person_1.user_id == user_id:
        return match.person_2
    if match.person_2.user_id == user_id:
        return match.person_1
    raise Exception(f"Person with user ID \"{user_id}\" is not part of "
                    f"the passed match ({match}).")
def blockquote(message):
    """Return *message* with every line prefixed by "> " (markdown
    blockquote formatting), or None when *message* is falsy.
    """
    if not message:
        return None
    # MULTILINE makes ^ match at the start of every line, not just the
    # start of the string.
    return re.sub(r"^", "> ", message, flags=re.MULTILINE)
def get_mention(message):
    """Return the user or channel ID mentioned at the beginning of
    *message*, or None when the message does not start with a mention.
    """
    found = re.search(MENTION_PATTERN, message)
    if not found:
        return None
    # Exactly one of the two capture groups matched (group 1 for a user
    # mention, group 2 for a channel mention); return whichever it was.
    # https://stackoverflow.com/a/18533669
    return next(group for group in found.group(1, 2) if group is not None)
def remove_mention(message):
    """Remove the user or channel mention from the beginning of a message,
    if any, and return the remaining text.
    """
    # count=1: strip at most the single leading mention (the pattern itself
    # is anchored to the start of the string).
    return re.sub(MENTION_PATTERN, "", message, count=1)
| [
11748,
302,
628,
198,
2,
40364,
329,
257,
2836,
393,
6518,
3068,
379,
262,
3726,
286,
257,
3275,
198,
2,
1672,
7466,
25,
366,
32406,
52,
41,
48,
2998,
43,
1270,
48,
29,
33172,
33490,
2,
34,
20943,
47,
23,
45,
16,
6242,
33,
91,
... | 2.559859 | 852 |
# In Search for the Lost Memory [Explorer Thief] (3526)
# To be replaced with GMS's exact dialogue.
# Following dialogue has been edited from DeepL on JMS's dialogue transcript (no KMS footage anywhere):
# https://kaengouraiu2.blog.fc2.com/blog-entry-46.html
# Quest ID of the follow-up "recovered memory" record quest.
recoveredMemory = 7081
# NPC ID of the Dark Lord, the speaker of this dialogue.
darkLord = 1052001
# NOTE(review): 'sm' (script manager) and 'parentID' are presumably injected
# by the quest-script runtime before this script executes -- confirm.
sm.setSpeakerID(darkLord)
sm.sendNext("The way you moved without a trace...you must have exceptional talent. "
            "Long time no see, #h #.")
sm.sendSay("Since when did you grow up to this point? You're no less inferior to any Dark Lord. "
           "You were just a greenhorn that couldn't even hide their presence...Hmph, well, it's been a while since then. "
           "Still, it feels weird to see you become so strong. I guess this is how it feels to be proud.")
sm.sendSay("But don't let your guard down. Know that there's still more progress to be made. "
           "As the one who has made you into a thief, I know you that you can be even stronger...!")
# Mark the parent quest started and completed, then begin the follow-up
# quest and set its progress flag ("1").
sm.startQuest(parentID)
sm.completeQuest(parentID)
sm.startQuest(recoveredMemory)
sm.setQRValue(recoveredMemory, "1", False)
2,
554,
11140,
329,
262,
9164,
14059,
685,
18438,
11934,
23471,
60,
357,
2327,
2075,
8,
198,
2,
1675,
307,
6928,
351,
402,
5653,
338,
2748,
10721,
13,
198,
2,
14207,
10721,
468,
587,
13012,
422,
10766,
43,
319,
449,
5653,
338,
10721... | 3.35873 | 315 |
from django.contrib.auth.views import LoginView
from django.urls import path
from student import views

# Routes for the student-facing part of the exam app.  All custom views live
# in ``student.views``; login reuses Django's built-in ``LoginView`` with a
# student-specific template.
urlpatterns = [
    # Landing and authentication.
    path('studentclick', views.studentclick_view, name='student-click'),
    path('studentlogin', LoginView.as_view(
        template_name='student/studentlogin.html'), name='studentlogin'),
    path('studentsignup', views.student_signup_view, name='studentsignup'),
    # Dashboard and exam flow (pk identifies the exam/course record).
    path('student-dashboard', views.student_dashboard_view,
         name='student-dashboard'),
    path('student-check', views.student_check_view, name='student-check'),
    path('student-exam', views.student_exam_view, name='student-exam'),
    path('take-exam/<int:pk>', views.take_exam_view, name='take-exam'),
    path('start-exam/<int:pk>', views.start_exam_view, name='start-exam'),
    # Marks and results.
    path('calculate-marks', views.calculate_marks_view, name='calculate-marks'),
    path('view-result', views.view_result_view, name='view-result'),
    path('check-marks/<int:pk>', views.check_marks_view, name='check-marks'),
    path('student-marks', views.student_marks_view, name='student-marks'),
    path('expel/<int:pk>', views.student_expel_view, name='expel'),
    # Camera streaming endpoints — presumably live/training/verification
    # webcam feeds for proctoring; confirm against the view implementations.
    path('video_feed', views.video_feed, name='video-feed'),
    path('train_feed', views.train_feed, name='train-feed'),
    path('check_feed', views.check_feed, name='check-feed'),
    path('logout', views.student_logout_view, name='student-logout'),
]
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
33571,
1330,
23093,
7680,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
3710,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
501... | 2.742188 | 512 |
# -*- coding: utf-8 -*-
from aiida.common import NotExistent
from aiida.orm import Dict
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso.parsers.parse_raw import convert_qe_to_aiida_structure
from aiida_quantumespresso.parsers.parse_raw.neb import parse_raw_output_neb
from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout as parse_pw_stdout
from aiida_quantumespresso.parsers.parse_raw.pw import reduce_symmetries
from aiida_quantumespresso.parsers.parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError
from aiida_quantumespresso.parsers.parse_xml.pw.parse import parse_xml as parse_pw_xml
from aiida_quantumespresso.parsers.pw import PwParser
from .base import Parser
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
257,
72,
3755,
13,
11321,
1330,
1892,
3109,
7609,
198,
6738,
257,
72,
3755,
13,
579,
1330,
360,
713,
198,
198,
6738,
257,
72,
3755,
62,
40972,
8139,
18302,
568,
... | 2.698997 | 299 |
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
plugin_pool.register_plugin(SubmenuPlugin)
| [
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
198,
6738,
269,
907,
13,
33803,
62,
8692,
1330,
40773,
37233,
14881,
198,
6738,
269,
907,
13,
33803,
62,
7742,
1330,
13877,
62,
7742,
198,... | 3.367647 | 68 |
import collections
import datetime
import json
import multiprocessing
import os
import subprocess
import sys
import time
_SSHD_BINARY_PATH = "/usr/sbin/sshd"
EnvironmentConfig = collections.namedtuple(
"EnvironmentConfig",
["hosts", "port", "is_chief", "pools", "job_id"])
def _get_available_gpus():
  """Returns the number of GPUs on the machine.

  The probe runs inside a one-worker process pool so that any GPU/driver
  state touched while counting devices stays confined to a short-lived
  child process.
  """
  worker_pool = multiprocessing.Pool(1)
  (gpu_count,) = worker_pool.map(_sub_process_num_gpus, [None])
  worker_pool.close()
  worker_pool.join()
  return gpu_count
def parse_environment_config(env_config_str, job_id):
  """Parses environment config and returns a list of hosts as well as the role.

  Args:
    env_config_str: TF_CONFIG-style JSON string describing the cluster, or a
      falsy value when running on a single machine.
    job_id: Opaque job identifier, passed through into the result.

  Returns:
    An EnvironmentConfig.
  """
  if env_config_str:
    ssh_port = -1
    env_config_json = json.loads(env_config_str)
    cluster = env_config_json.get("cluster")
    if not cluster:
      # NOTE(review): this exit returns a plain (None, True) tuple while every
      # other path returns an EnvironmentConfig — confirm callers expect this.
      return None, True
    hosts = []
    pools = collections.defaultdict(list)
    for pool_type, tasks_per_type in cluster.items():
      if pool_type == "master":
        # Fold the legacy "master" pool name into Horovod's "chief".
        pool_type = "chief"
      for host_and_port in tasks_per_type:
        host, port = host_and_port.split(":")
        if host == "127.0.0.1":
          host = "localhost"
        port = int(port)
        # Every task must expose the same ssh port.
        if ssh_port == -1:
          ssh_port = port
        elif ssh_port != port:
          raise ValueError("Inconsistent ssh ports across tasks %d != %d." %
                           (ssh_port, port))
        hosts.append(host)
        pools[pool_type].append(host)
    is_chief = False
    has_chief = "chief" in pools
    if (env_config_json["task"]["type"] == "master" or
        env_config_json["task"]["type"] == "chief"):
      is_chief = True
      if int(env_config_json["task"]["index"]) != 0:
        raise ValueError("Only one master node is expected.")
    elif ((not has_chief) and
          (env_config_json["task"]["type"] == "worker") and
          int(env_config_json["task"]["index"]) == 0):
      # No explicit chief in the cluster: promote worker 0 to chief.
      is_chief = True
      pools["chief"].append(pools["worker"].pop(0))
    elif env_config_json["task"]["type"] != "worker":
      raise ValueError("Unexpected task type for Horovod training: %s." %
                       env_config_json["task"]["type"])
    # `port` is the last task's port, which equals ssh_port because
    # consistency is enforced above.
    return EnvironmentConfig(hosts=hosts, port=port, is_chief=is_chief,
                             pools=pools, job_id=job_id)
  else:
    # Single-machine fallback configuration.
    return EnvironmentConfig(hosts=["localhost"], port=2222, is_chief=True,
                             pools={"chief": ["localhost"]}, job_id=job_id)
if __name__ == "__main__":
main()
| [
11748,
17268,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
640,
198,
198,
62,
5432,
10227,
62,
33,
1268,
13153,
62,
34219,
796,
12... | 2.339869 | 1,071 |
import datetime
from pydantic import Field
from typing import (
ClassVar,
List,
Dict,
Optional,
)
from smaregipy.base_api import (
BaseServiceRecordApi,
BaseServiceCollectionApi,
)
from smaregipy.utils import NoData, DictUtil
| [
11748,
4818,
8079,
198,
6738,
279,
5173,
5109,
1330,
7663,
198,
6738,
19720,
1330,
357,
198,
220,
220,
220,
5016,
19852,
11,
198,
220,
220,
220,
7343,
11,
198,
220,
220,
220,
360,
713,
11,
198,
220,
220,
220,
32233,
11,
198,
8,
19... | 2.591837 | 98 |
#!/usr/bin/env python3
"""
Base-Client Class
This is the parent-class of all client-classes and holds properties and functions they all depend on.
Author: Jason Cabezuela
"""
import src.util.debugger as Debugger
import src.util.configmaker as configmaker
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
14881,
12,
11792,
5016,
198,
1212,
318,
262,
2560,
12,
4871,
286,
477,
5456,
12,
37724,
290,
6622,
6608,
290,
5499,
484,
477,
4745,
319,
13,
198,
198,
13838,
25,
8982... | 3.569444 | 72 |
# -*- coding: utf-8 -*-
import sys
from cryptomon.common import Colors
if sys.version_info >= (3, 0):
import io
else:
import StringIO as io
# Banner shown at startup.  A raw string is required here: the art is full of
# backslash sequences (e.g. "\_", "\ ") that are invalid escape sequences in a
# normal string literal and raise W605 DeprecationWarnings on modern Python.
ascii_title = r"""
  /$$$$$$                                  /$$               /$$      /$$
 /$$__  $$                                | $$              | $$$    /$$$
| $$  \__/  /$$$$$$  /$$   /$$  /$$$$$$  /$$$$$$    /$$$$$$ | $$$$  /$$$$  /$$$$$$  /$$$$$$$
| $$       /$$__  $$| $$  | $$ /$$__  $$|_  $$_/   /$$__  $$| $$ $$/$$ $$ /$$__  $$| $$__  $$
| $$      | $$  \ $$| $$  | $$| $$  \ $$  | $$    | $$  \ $$| $$  $$$| $$| $$  \ $$| $$  \ $$
| $$    $$| $$  | $$| $$  | $$| $$  | $$  | $$ /$$| $$  | $$| $$\  $ | $$| $$  | $$| $$  | $$
|  $$$$$$/|  $$$$$$$| $$$$$$$/|  $$$$$$$/   |  $$$$/|  $$$$$$/| $$ \/  | $$|  $$$$$$/| $$  | $$
 \______/  \____  $$| $$____/  \___/  \______/ |__/     |__/ \______/ |__/  |__/
                /$$  | $$| $$
                |  $$$$$$/| $$
                 \______/ |__/
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
25064,
198,
6738,
8194,
16698,
13,
11321,
1330,
29792,
198,
361,
25064,
13,
9641,
62,
10951,
18189,
357,
18,
11,
657,
2599,
198,
220,
220,
220,
1330,
33245,
... | 1.503106 | 805 |
import cv2

# Input clip and pre-trained Haar-cascade classifiers.
video = cv2.VideoCapture(r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.mp4')
car_tracker_file = (r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\car.xml')
pedestrian_tracker_file = (r'C:\Users\ISHITA\Desktop\ML project\UEM_PROJECT_COM\pedestrian.xml')

# Create the car and pedestrian classifiers.
car_tracker = cv2.CascadeClassifier(car_tracker_file)
pedestrian_tracker = cv2.CascadeClassifier(pedestrian_tracker_file)

# Tell the user how to quit *before* the display loop starts (the original
# printed this only after the loop had already exited, when it was useless).
print('Press "s" to stop')

# Run until the clip ends or the user presses "s"/"S".
while True:
    (read_successful, frame) = video.read()
    if not read_successful:
        # End of stream or unreadable frame; the original crashed here because
        # it passed an empty frame to cvtColor.
        break
    gr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect cars and pedestrians on the grayscale frame.
    cars = car_tracker.detectMultiScale(gr_frame)
    pedestrians = pedestrian_tracker.detectMultiScale(gr_frame)

    # Draw rectangles around cars (blue inner box plus red outline).
    for (x, y, w, h) in cars:
        cv2.rectangle(frame, (x+1, y+2), (x+w, y+h), (255, 0, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)

    # Draw yellow rectangles around pedestrians.
    for (x, y, w, h) in pedestrians:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)

    cv2.imshow('car n pedestrians', frame)
    key = cv2.waitKey(1)
    # Stopping condition: 83 == "S", 115 == "s".
    if key == 83 or key == 115:
        break

# Release the VideoCapture object and close the display window.
video.release()
cv2.destroyAllWindows()
print('Hey!')
| [
11748,
269,
85,
17,
198,
198,
15588,
28,
33967,
17,
13,
10798,
49630,
7,
81,
6,
34,
7479,
14490,
59,
18422,
2043,
32,
59,
36881,
59,
5805,
1628,
59,
52,
3620,
62,
31190,
23680,
62,
9858,
59,
9124,
395,
4484,
13,
3149,
19,
11537,
... | 2.246206 | 593 |
# site settings rest api serializers
from rest_framework import serializers
from saleor.wing.models import Wing as Table
| [
2,
2524,
6460,
1334,
40391,
11389,
11341,
198,
198,
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
5466,
273,
13,
5469,
13,
27530,
1330,
13405,
355,
8655,
628,
628
] | 4.166667 | 30 |
from . import FishBase
from . import FishGlobals
| [
6738,
764,
1330,
13388,
14881,
198,
6738,
764,
1330,
13388,
9861,
672,
874,
198
] | 3.5 | 14 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
#
# Distributed under terms of the MIT license.
"""
Strategy base class
"""
from abc import ABCMeta, abstractmethod
from tinydb import TinyDB, Query
from node import Node
import json
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
43907,
25,
69,
12685,
28,
40477,
12,
23,
198,
2,
198,
2,
198,
2,
4307,
6169,
739,
2846,
286,
262,
17168,
596... | 2.896552 | 87 |
'''
Problem Statement:
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3,5,6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below N.
'''
from __future__ import print_function

try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3


def sum_of_multiples(limit):
    """Return the sum of all natural numbers below `limit` divisible by 3 or 5.

    Returns 0 for limit <= 0 (there are no such numbers).
    """
    return sum(value for value in range(limit) if value % 3 == 0 or value % 5 == 0)


if __name__ == "__main__":
    # Read N via the raw_input shim: the original called input() directly,
    # which on Python 2 eval()s the line (so int(input().strip()) failed) and
    # made the compatibility shim above pointless.
    n = int(raw_input().strip())
    print(sum_of_multiples(n))
| [
7061,
6,
198,
40781,
21983,
25,
198,
1532,
356,
1351,
477,
262,
3288,
3146,
2174,
838,
326,
389,
5021,
2374,
286,
513,
393,
642,
11,
198,
732,
651,
513,
11,
20,
11,
21,
290,
860,
13,
383,
2160,
286,
777,
5021,
2374,
318,
2242,
1... | 2.464789 | 213 |
import copy
from django.db import NotSupportedError
from django.db.models import Expression
from .fields import mysql_compile_json_path, postgres_compile_json_path, FallbackJSONField
| [
11748,
4866,
198,
6738,
42625,
14208,
13,
9945,
1330,
1892,
48181,
12331,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
41986,
198,
198,
6738,
764,
25747,
1330,
48761,
62,
5589,
576,
62,
17752,
62,
6978,
11,
1281,
34239,
62,
5589,... | 3.557692 | 52 |
import unittest
import tempfile
import pathlib
import datetime
import warnings
from IPython.testing.globalipapp import start_ipython, get_ipython
import pandas.util.testing as tm
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import read_excel
import pytest
# Grab the running IPython shell and load the ``excelify`` extension so its
# magics are available to the tests below.
ip = get_ipython()
ip.magic('load_ext excelify')
| [
11748,
555,
715,
395,
198,
11748,
20218,
7753,
198,
11748,
3108,
8019,
198,
11748,
4818,
8079,
198,
11748,
14601,
198,
198,
6738,
6101,
7535,
13,
33407,
13,
20541,
541,
1324,
1330,
923,
62,
541,
7535,
11,
651,
62,
541,
7535,
198,
198,... | 3.275229 | 109 |
# https://www.reddit.com/r/dailyprogrammer/comments/3ltee2/20150921_challenge_233_easy_the_house_that_ascii/
import random
import sys
if __name__ == "__main__":
main()
| [
2,
3740,
1378,
2503,
13,
10748,
13,
785,
14,
81,
14,
29468,
23065,
647,
14,
15944,
14,
18,
75,
660,
68,
17,
14,
1264,
29022,
2481,
62,
36747,
3540,
62,
25429,
62,
38171,
62,
1169,
62,
4803,
62,
5562,
62,
292,
979,
72,
14,
198,
... | 2.5 | 70 |
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division
from decimal import Decimal
import subprocess
import threading
import urllib2
import urllib
import httplib
import json
import re
import hashlib
import base64
# import zlib
from lib.command.runtime import UserInput
from lib.helper.CameraHelper import CameraHelper
from lib.sound import Sound
from util import Util
from util.Res import Res
from util.log import *
from lib.model import Callback
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
628,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
32465,
1330,
4280,
4402,
220,
220,
198,
11748,
850,
14681,
198,
11748,
4704,
278,
198,
11748,
... | 3.477941 | 136 |
#############################
# Collaborators: (enter people or resources who/that helped you)
# If none, write none
#
#
#############################
def triangle_area(base, height):
    """Return the area of a triangle with the given base and height."""
    return 0.5 * base * height


if __name__ == "__main__":
    # The original template had an unterminated string literal on this line
    # ('Enter the base: ") and left `height` and `area` incomplete.
    base = float(input("Enter the base: "))
    height = float(input("Enter the height: "))
    area = triangle_area(base, height)  # Calculate the area of the triangle
    # Interpolate the computed value (the original printed "(area)" literally).
    print("The area of the triangle is {}.".format(area))
14468,
7804,
4242,
2,
198,
2,
37322,
2024,
25,
357,
9255,
661,
393,
4133,
508,
14,
5562,
4193,
345,
8,
198,
2,
1002,
4844,
11,
3551,
4844,
198,
2,
198,
2,
198,
14468,
7804,
4242,
2,
198,
198,
8692,
796,
5128,
10786,
17469,
262,
... | 3.620253 | 79 |
"""
an adaptation of pyopencl's reduction kernel for weighted avarages
like sum(a*b)
mweigert@mpi-cbg.de
"""
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import zip
import pyopencl as cl
from pyopencl.tools import (
context_dependent_memoize,
dtype_to_ctype, KernelTemplateBase,
_process_code_for_macro)
import numpy as np
from gputools import get_device
import sys
# {{{ kernel source
KERNEL = r"""//CL//
<%
inds = range(len(map_exprs))
%>
#define GROUP_SIZE ${group_size}
% for i,m in enumerate(map_exprs):
#define READ_AND_MAP_${i}(i) (${m})
% endfor
#define REDUCE(a, b) (${reduce_expr})
% if double_support:
#if __OPENCL_C_VERSION__ < 120
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
#define PYOPENCL_DEFINE_CDOUBLE
% endif
#include <pyopencl-complex.h>
${preamble}
typedef ${out_type} out_type;
__kernel void ${name}(
% for i in inds:
__global out_type *out__base_${i},
% endfor
long out__offset, ${arguments},
unsigned int seq_count, unsigned int n)
{
% for i in inds:
__global out_type *out_${i} = (__global out_type *) (
(__global char *) out__base_${i} + out__offset);
% endfor
${arg_prep}
% for i in inds:
__local out_type ldata_${i}[GROUP_SIZE];
out_type acc_${i} = ${neutral};
% endfor
unsigned int lid = get_local_id(0);
unsigned int i = get_group_id(0)*GROUP_SIZE*seq_count + lid;
//printf("seq: %d\tlid = %d\ti=%d\n",seq_count,lid,i);
for (unsigned s = 0; s < seq_count; ++s)
{
if (i >= n)
break;
% for i in inds:
acc_${i} = REDUCE(acc_${i}, READ_AND_MAP_${i}(i));
% endfor
i += GROUP_SIZE;
}
% for i in inds:
ldata_${i}[lid] = acc_${i};
% endfor
<%
cur_size = group_size
%>
% while cur_size > 1:
barrier(CLK_LOCAL_MEM_FENCE);
<%
new_size = cur_size // 2
assert new_size * 2 == cur_size
%>
if (lid < ${new_size})
{
% for i in inds:
ldata_${i}[lid] = REDUCE(
ldata_${i}[lid],
ldata_${i}[lid + ${new_size}]);
% endfor
}
<% cur_size = new_size %>
% endwhile
if (lid == 0) {
% for i in inds:
out_${i}[get_group_id(0)] = ldata_${i}[0];
% endfor
//printf("result: %.4f\n",out_0[get_group_id(0)] );
}
}
"""
# }}}
# {{{ main reduction kernel
# Ad-hoc benchmark comparing two single-output OCLReductionKernel passes
# against one pass of the multi-output kernel defined in this module.
# NOTE(review): `OCLMultiReductionKernel` is defined elsewhere in this file,
# and running this requires a working OpenCL device via gputools.
if __name__=='__main__':
    from gputools import OCLArray, OCLReductionKernel

    # Baseline: classic single-expression sum reduction.
    k1 = OCLReductionKernel(np.float32,
                            neutral="0", reduce_expr="a+b",
                            map_expr="x[i]",
                            arguments="__global float *x")
    # Multi-output kernel: weighted sum and plain sum in one pass.
    k2 = OCLMultiReductionKernel(np.float32,
                                 neutral="0", reduce_expr="a+b",
                                 map_exprs=["y[i]*x[i]","x[i]"],
                                 arguments="__global float *x, __global float *y")

    N = 512
    a = OCLArray.from_array(np.ones((N,N),np.float32))
    b = OCLArray.from_array(2.*np.ones((N,N),np.float32))
    o1 = OCLArray.empty((),np.float32)
    o2 = OCLArray.empty((),np.float32)

    from time import time
    # Time 400 iterations of the two separate single reductions.
    t = time()
    for _ in range(400):
        k1(a)
        k1(b)
    k1(a).get()
    k1(b).get()
    print(time()-t)
    # Time 400 iterations of the fused multi-output reduction.
    t = time()
    #print k2(a,b, outs = [o1,o2])
    for _ in range(400):
        k2(a[0],b[0], outs = [o1,o2])
    o1.get()
    print(time()-t)
| [
37811,
198,
272,
16711,
286,
12972,
9654,
565,
338,
7741,
9720,
329,
26356,
1196,
283,
1095,
198,
198,
2339,
2160,
7,
64,
9,
65,
8,
198,
198,
76,
732,
328,
861,
31,
3149,
72,
12,
21101,
70,
13,
2934,
198,
198,
37811,
198,
6738,
... | 1.838975 | 2,186 |
#! /usr/bin/env python
# Python 2 script (uses print statements) for the Minecraft Pi API.
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
mc = minecraft.Minecraft.create()
# ----------------------------------------------------------------------
# S E T U P
# ----------------------------------------------------------------------
# Where Am I?
pos = mc.player.getTilePos()
print "Game center point is %d, %d, %d" % (pos.x, pos.y, pos.z)
# NOTE(review): `limit` is never used below — confirm it can be removed.
limit=256
# Clear a 10x10 column of air from the player position 256 blocks downward...
mc.setBlocks(pos.x, pos.y, pos.z, pos.x+10, pos.y-256, pos.z+10, block.AIR.id)
# ...and fill a 10x10 column of diamond ore 256 blocks upward.
mc.setBlocks(pos.x, pos.y, pos.z, pos.x-10, pos.y+256, pos.z-10, block.DIAMOND_ORE.id)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
285,
13155,
72,
13,
17761,
355,
6164,
3323,
198,
11748,
285,
13155,
72,
13,
9967,
355,
2512,
198,
11748,
4738,
198,
11748,
640,
198,
198,
23209,
796,
6164,
3323,
13,
39... | 2.865385 | 208 |
#-*- encoding:utf-8 -*-
from __future__ import print_function
import sys
try:
    # Python 2 only: force the default string encoding to utf-8 so the sample
    # text round-trips; this silently no-ops on Python 3 (reload is gone).
    reload(sys)
    sys.setdefaultencoding('utf-8')
except:
    pass
import codecs
from textrank4zh import TextRank4Keyword, TextRank4Sentence
# Load the sample document as utf-8 text.
text = codecs.open('../test/doc/01.txt', 'r', 'utf-8').read()
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True, window=2)   # On py2 `text` may be a utf-8 str or unicode; on py3 utf-8 bytes or str.
print( '' )
# Top-20 keywords (minimum word length 1) with their TextRank weights.
for item in tr4w.get_keywords(20, word_min_len=1):
    print(item.word, item.weight)
print()
print( '' )
# Key phrases built from keywords that co-occur at least twice.
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
    print(phrase)
tr4s = TextRank4Sentence()
tr4s.analyze(text=text, lower=True, source = 'all_filters')
print()
print( '' )
# The three highest-ranked summary sentences.
for item in tr4s.get_key_sentences(num=3):
    print(item.weight, item.sentence)
2,
12,
9,
12,
21004,
25,
40477,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
28311,
25,
198,
220,
220,
220,
18126,
7,
17597,
8,
198,
220,
220,
220,
25064,
13,
2617,
12286,
... | 2.412121 | 330 |
import random
import uuid
import sys
import json
from faker import Factory
from faker.providers.person.fi_FI import Provider as PersonProvider
# Finnish-locale faker used to generate replacement personal data.
fake = Factory.create('fi_FI')
# Caches so a given user keeps the same fake identity across references.
email_by_user = {}
users_by_id = {}
# Filter: read a JSON dump from stdin, anonymize it in place, pretty-print it
# to stdout.  NOTE(review): `anonymize_users` and `remove_secrets` are defined
# elsewhere in this file.
data = json.load(sys.stdin)
anonymize_users(data)
remove_secrets(data)
json.dump(data, sys.stdout, indent=4)
| [
11748,
4738,
198,
11748,
334,
27112,
198,
11748,
25064,
198,
11748,
33918,
198,
6738,
277,
3110,
1330,
19239,
198,
6738,
277,
3110,
13,
15234,
4157,
13,
6259,
13,
12463,
62,
11674,
1330,
32549,
355,
7755,
29495,
198,
198,
30706,
796,
19... | 2.875 | 112 |
import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
                      num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
                      num_neg_samples: Optional[int] = None,
                      method: str = "sparse",
                      force_undirected: bool = False) -> Tensor:
    r"""Samples random negative edges of a graph given by :attr:`edge_index`.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int or Tuple[int, int], optional): The number of nodes,
            *i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
            If given as a tuple, then :obj:`edge_index` is interpreted as a
            bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
            (default: :obj:`None`)
        num_neg_samples (int, optional): The (approximate) number of negative
            samples to return.
            If set to :obj:`None`, will try to return a negative edge for every
            positive edge. (default: :obj:`None`)
        method (string, optional): The method to use for negative sampling,
            *i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
            This is a memory/runtime trade-off.
            :obj:`"sparse"` will work on any graph of any size, while
            :obj:`"dense"` can perform faster true-negative checks.
            (default: :obj:`"sparse"`)
        force_undirected (bool, optional): If set to :obj:`True`, sampled
            negative edges will be undirected. (default: :obj:`False`)

    :rtype: LongTensor
    """
    assert method in ['sparse', 'dense']

    # A (src, dst) tuple marks a bipartite graph; undirected sampling only
    # makes sense for the homogeneous (square) case.
    size = num_nodes
    bipartite = isinstance(size, (tuple, list))
    size = maybe_num_nodes(edge_index) if size is None else size
    size = (size, size) if not bipartite else size
    force_undirected = False if bipartite else force_undirected

    # Map every existing edge to a scalar id in [0, population).
    idx, population = edge_index_to_vector(edge_index, size, bipartite,
                                           force_undirected)

    if idx.numel() >= population:
        # The graph is complete: no negative edge exists to sample.
        return edge_index.new_empty((2, 0))

    if num_neg_samples is None:
        num_neg_samples = edge_index.size(1)
    if force_undirected:
        # Each undirected sample yields two directed edges.
        num_neg_samples = num_neg_samples // 2

    prob = 1. - idx.numel() / population  # Probability to sample a negative.
    sample_size = int(1.1 * num_neg_samples / prob)  # (Over)-sample size.

    neg_idx = None
    if method == 'dense':
        # The dense version creates a mask of shape `population` to check for
        # invalid samples.
        mask = idx.new_ones(population, dtype=torch.bool)
        mask[idx] = False
        for _ in range(3):  # Number of tries to sample negative indices.
            rnd = sample(population, sample_size, idx.device)
            rnd = rnd[mask[rnd]]  # Filter true negatives.
            neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
            if neg_idx.numel() >= num_neg_samples:
                neg_idx = neg_idx[:num_neg_samples]
                break
            mask[neg_idx] = False

    else:  # 'sparse'
        # The sparse version checks for invalid samples via `np.isin`.
        idx = idx.to('cpu')
        for _ in range(3):  # Number of tries to sample negative indices.
            rnd = sample(population, sample_size, device='cpu')
            mask = np.isin(rnd, idx)
            if neg_idx is not None:
                # Also reject candidates already drawn in a previous round.
                mask |= np.isin(rnd, neg_idx.to('cpu'))
            mask = torch.from_numpy(mask).to(torch.bool)
            rnd = rnd[~mask].to(edge_index.device)
            neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
            if neg_idx.numel() >= num_neg_samples:
                neg_idx = neg_idx[:num_neg_samples]
                break

    # Decode the scalar ids back into a (2, num_samples) edge index.
    return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
def batched_negative_sampling(
    edge_index: Tensor,
    batch: Union[Tensor, Tuple[Tensor, Tensor]],
    num_neg_samples: Optional[int] = None,
    method: str = "sparse",
    force_undirected: bool = False,
) -> Tensor:
    r"""Samples random negative edges of multiple graphs given by
    :attr:`edge_index` and :attr:`batch`.

    Args:
        edge_index (LongTensor): The edge indices.
        batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example.
            If given as a tuple, then :obj:`edge_index` is interpreted as a
            bipartite graph connecting two different node types.
        num_neg_samples (int, optional): The number of negative samples to
            return. If set to :obj:`None`, will try to return a negative edge
            for every positive edge. (default: :obj:`None`)
        method (string, optional): The method to use for negative sampling,
            *i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
            This is a memory/runtime trade-off.
            :obj:`"sparse"` will work on any graph of any size, while
            :obj:`"dense"` can perform faster true-negative checks.
            (default: :obj:`"sparse"`)
        force_undirected (bool, optional): If set to :obj:`True`, sampled
            negative edges will be undirected. (default: :obj:`False`)

    :rtype: LongTensor
    """
    if isinstance(batch, Tensor):
        src_batch, dst_batch = batch, batch
    else:
        src_batch, dst_batch = batch[0], batch[1]

    # Split the edge index into one chunk per example in the batch (edges are
    # grouped by the batch id of their source node).
    split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
    edge_indices = torch.split(edge_index, split, dim=1)

    # Cumulative node counts translate between global and per-graph node ids.
    num_src = degree(src_batch, dtype=torch.long)
    cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])

    if isinstance(batch, Tensor):
        num_nodes = num_src.tolist()
        cumsum = cum_src
    else:
        # Bipartite case: track separate offsets for source and target nodes.
        num_dst = degree(dst_batch, dtype=torch.long)
        cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])

        num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
        cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)

    neg_edge_indices = []
    for i, edge_index in enumerate(edge_indices):
        # Shift to per-graph local ids, sample negatives, then shift back.
        edge_index = edge_index - cumsum[i]
        neg_edge_index = negative_sampling(edge_index, num_nodes[i],
                                           num_neg_samples, method,
                                           force_undirected)
        neg_edge_index += cumsum[i]
        neg_edge_indices.append(neg_edge_index)

    return torch.cat(neg_edge_indices, dim=1)
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
                                 contains_neg_self_loops: bool = True):
    r"""Samples a negative edge :obj:`(i,k)` for every positive edge
    :obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
    tuple of the form :obj:`(i,j,k)`.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
        contains_neg_self_loops (bool, optional): If set to
            :obj:`False`, sampled negative edges will not contain self loops.
            (default: :obj:`True`)

    :rtype: (LongTensor, LongTensor, LongTensor)
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)

    # Encode every positive edge (i, j) as the scalar i * num_nodes + j.
    row, col = edge_index.cpu()
    pos_idx = row * num_nodes + col
    if not contains_neg_self_loops:
        # Mark the diagonal (self loops) as positive so it is never sampled.
        loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
        pos_idx = torch.cat([pos_idx, loop_idx], dim=0)

    # Draw one candidate k per edge, then rejection-sample any collisions with
    # positive edges until every sample is a true negative.
    rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
    neg_idx = row * num_nodes + rand

    mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
    rest = mask.nonzero(as_tuple=False).view(-1)
    while rest.numel() > 0:  # pragma: no cover
        tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
        rand[rest] = tmp
        neg_idx = row[rest] * num_nodes + tmp

        mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
        rest = rest[mask]

    return edge_index[0], edge_index[1], rand.to(edge_index.device)
def structured_negative_sampling_feasible(
        edge_index: Tensor, num_nodes: Optional[int] = None,
        contains_neg_self_loops: bool = True) -> bool:
    r"""Returns :obj:`True` if
    :meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
    on the graph given by :obj:`edge_index`.
    :obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if atleast one node is connected to all other nodes.

    Args:
        edge_index (LongTensor): The edge indices.
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
        contains_neg_self_loops (bool, optional): If set to
            :obj:`False`, sampled negative edges will not contain self loops.
            (default: :obj:`True`)

    :rtype: bool
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    max_num_neighbors = num_nodes

    # Deduplicate edges so each neighbor is counted once per node.
    edge_index = coalesce(edge_index, num_nodes=num_nodes)

    if not contains_neg_self_loops:
        edge_index, _ = remove_self_loops(edge_index)
        max_num_neighbors -= 1  # Reduce number of valid neighbors

    deg = degree(edge_index[0], num_nodes)
    # True if there exists no node that is connected to all other nodes.
    return bool(torch.all(deg < max_num_neighbors))
###############################################################################
| [
11748,
4738,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
11,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
309,
22854,
198,
198,
6738,
28034,
62,
469,
16996,
13,
26791,
1330,
46064,
344,
11... | 2.229029 | 4,375 |
import sys
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401,I0011
from urllib.request import build_opener
else:
from urllib2 import build_opener
from . import __version__
urls = {
'gdata': "https://www.googleapis.com/youtube/v3/",
'watchv': "http://www.youtube.com/watch?v=%s",
'playlist': ('http://www.youtube.com/list_ajax?'
'style=json&action_get_list=1&list=%s'),
'thumb': "http://i.ytimg.com/vi/%s/default.jpg",
'bigthumb': "http://i.ytimg.com/vi/%s/mqdefault.jpg",
'bigthumbhd': "http://i.ytimg.com/vi/%s/hqdefault.jpg",
# For internal backend
'vidinfo': ('https://www.youtube.com/get_video_info?video_id=%s&'
'eurl=https://youtube.googleapis.com/v/%s&sts=%s'),
'embed': "https://youtube.com/embed/%s"
}
# NOTE(review): hard-coded Google API key checked into source — consider
# moving it to configuration and rotating the key.
api_key = "AIzaSyCIM4EzNqi1in22f4Z3Ru3iYvLaY8tc3bo"
user_agent = "pafy " + __version__
lifespan = 60 * 60 * 5  # 5 hours
# Shared urllib opener that tags every request with pafy's User-Agent.
opener = build_opener()
opener.addheaders = [('User-Agent', user_agent)]
# In-memory cache of fetched video metadata.
cache = {}
# Default options handed to youtube-dl when it serves as the backend.
def_ydl_opts = {'quiet': True, 'prefer_insecure': False, 'no_warnings': True}
# The following are specific to the internal backend
UEFSM = 'url_encoded_fmt_stream_map'
AF = 'adaptive_fmts'
# Regex locating the ytplayer JSON config blob inside a watch page.
jsplayer = r';ytplayer\.config\s*=\s*({.*?});'
itags = {
'5': ('320x240', 'flv', "normal", ''),
'17': ('176x144', '3gp', "normal", ''),
'18': ('640x360', 'mp4', "normal", ''),
'22': ('1280x720', 'mp4', "normal", ''),
'34': ('640x360', 'flv', "normal", ''),
'35': ('854x480', 'flv', "normal", ''),
'36': ('320x240', '3gp', "normal", ''),
'37': ('1920x1080', 'mp4', "normal", ''),
'38': ('4096x3072', 'mp4', "normal", '4:3 hi-res'),
'43': ('640x360', 'webm', "normal", ''),
'44': ('854x480', 'webm', "normal", ''),
'45': ('1280x720', 'webm', "normal", ''),
'46': ('1920x1080', 'webm', "normal", ''),
'82': ('640x360-3D', 'mp4', "normal", ''),
'83': ('640x480-3D', 'mp4', 'normal', ''),
'84': ('1280x720-3D', 'mp4', "normal", ''),
'100': ('640x360-3D', 'webm', "normal", ''),
'102': ('1280x720-3D', 'webm', "normal", ''),
'133': ('426x240', 'm4v', 'video', ''),
'134': ('640x360', 'm4v', 'video', ''),
'135': ('854x480', 'm4v', 'video', ''),
'136': ('1280x720', 'm4v', 'video', ''),
'137': ('1920x1080', 'm4v', 'video', ''),
'138': ('4096x3072', 'm4v', 'video', ''),
'139': ('48k', 'm4a', 'audio', ''),
'140': ('128k', 'm4a', 'audio', ''),
'141': ('256k', 'm4a', 'audio', ''),
'160': ('256x144', 'm4v', 'video', ''),
'167': ('640x480', 'webm', 'video', ''),
'168': ('854x480', 'webm', 'video', ''),
'169': ('1280x720', 'webm', 'video', ''),
'170': ('1920x1080', 'webm', 'video', ''),
'171': ('128k', 'ogg', 'audio', ''),
'172': ('192k', 'ogg', 'audio', ''),
'218': ('854x480', 'webm', 'video', 'VP8'),
'219': ('854x480', 'webm', 'video', 'VP8'),
'242': ('360x240', 'webm', 'video', 'VP9'),
'243': ('480x360', 'webm', 'video', 'VP9'),
'244': ('640x480', 'webm', 'video', 'VP9 low'),
'245': ('640x480', 'webm', 'video', 'VP9 med'),
'246': ('640x480', 'webm', 'video', 'VP9 high'),
'247': ('720x480', 'webm', 'video', 'VP9'),
'248': ('1920x1080', 'webm', 'video', 'VP9'),
'249': ('48k', 'opus', 'audio', 'Opus'),
'250': ('56k', 'opus', 'audio', 'Opus'),
'251': ('128k', 'opus', 'audio', 'Opus'),
'256': ('192k', 'm4a', 'audio', '6-channel'),
'258': ('320k', 'm4a', 'audio', '6-channel'),
'264': ('2560x1440', 'm4v', 'video', ''),
'266': ('3840x2160', 'm4v', 'video', 'AVC'),
'271': ('1920x1280', 'webm', 'video', 'VP9'),
'272': ('3414x1080', 'webm', 'video', 'VP9'),
'278': ('256x144', 'webm', 'video', 'VP9'),
'298': ('1280x720', 'm4v', 'video', '60fps'),
'299': ('1920x1080', 'm4v', 'video', '60fps'),
'302': ('1280x720', 'webm', 'video', 'VP9'),
'303': ('1920x1080', 'webm', 'video', 'VP9'),
}
| [
11748,
25064,
198,
361,
25064,
13,
9641,
62,
10951,
58,
25,
17,
60,
18189,
357,
18,
11,
657,
2599,
198,
220,
220,
220,
1303,
279,
2645,
600,
25,
15560,
28,
36,
3312,
1157,
11,
37,
3023,
486,
11,
40,
405,
1157,
198,
220,
220,
220... | 2.101768 | 1,867 |
# Copyright (C) 2015, 2016 GoSecure Inc.
"""
Telnet Transport and Authentication for the Honeypot
@author: Olivier Bilodeau <obilodeau@gosecure.ca>
"""
from __future__ import annotations
import struct
from twisted.conch.telnet import (
ECHO,
LINEMODE,
NAWS,
SGA,
AuthenticatingTelnetProtocol,
ITelnetProtocol,
)
from twisted.python import log
from cowrie.core.config import CowrieConfig
from cowrie.core.credentials import UsernamePasswordIP
| [
2,
15069,
357,
34,
8,
1853,
11,
1584,
1514,
49793,
3457,
13,
198,
37811,
198,
33317,
3262,
19940,
290,
48191,
329,
262,
21788,
13059,
198,
198,
31,
9800,
25,
45674,
24207,
1098,
559,
1279,
25898,
1098,
559,
31,
70,
577,
66,
495,
13,... | 3.006369 | 157 |
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import obj_data as od
import saphires as saph
from astropy.time import Time
from astropy.visualization import ZScaleInterval, SqrtStretch, ImageNormalize
from matplotlib.backends.backend_pdf import PdfPages
ra = od.ra
dec = od.dec
pmra = od.pmra
pmdec = od.pmdec
plx = od.plx
epoch = od.epoch
matplotlib.rcParams.update({'font.size': 12})
def write_fits(fn, data, im_headers, wcs_header):
    '''
    Writes a new fits file including the image data and
    an updated header for the new image.

    The first header in ``im_headers`` is used as the template for the
    output header: WCS keywords are merged into it, the mean airmass of
    the stack is recorded, and the barycentric Julian date of the
    exposure-weighted mid-point is computed and stored under 'BJD'.

    Parameters
    ----------
    fn: string
        The desired file name of the new fits file
    data: array-like
        Contains all the image data
    im_headers: list of Header
        Headers of every image that went into ``data``; the first one is
        modified in place and written out.
    wcs_header: Header
        Header holding the WCS solution; its keywords (except HISTORY and
        COMMENT) are copied into the output header.

    Returns
    -------
    avg_airmass: float
        the amount of atmosphere obscuring the target, found in image
        header. Here the airmass for all images is averaged
    bjd: array-like
        Barycentric Julian Date returned by saphires' brvc; its first
        element is stored in the output header.
    header: Header
        The updated primary header that was written to disk.
    '''
    # Merge the WCS solution into the template header; HISTORY/COMMENT are
    # free-text cards that cannot be assigned like normal keywords.
    for keys in wcs_header:
        if keys not in ['HISTORY', 'COMMENT']:
            im_headers[0][keys] = wcs_header[keys]
    # Average the airmass over all frames in the stack.
    airmass = []
    for i in im_headers:
        airmass.append(i['AIRMASS'])
    avg_airmass = np.mean(airmass)
    im_headers[0]['AIRMASS'] = avg_airmass
    # Mid-exposure Julian date of each frame: start time plus half the
    # exposure time (EXPTIME is in seconds, hence /3600/24 to get days).
    jd_middle = np.zeros(len(im_headers))
    for i in range(len(im_headers)):
        jd_middle[i] = Time(im_headers[i]['DATE-OBS'], format='isot').jd
        exptime = im_headers[i]['EXPTIME']
        jd_middle[i] = jd_middle[i] + (exptime/2.0)/3600.0/24.0
    # Mean mid-point of the stack, converted back to ISOT for brvc.
    isot_date_obs = Time(np.mean(jd_middle), format='jd').isot
    tele = im_headers[0]['SITEID']
    # Barycentric correction via saphires; ra/dec/pmra/pmdec/plx/epoch are
    # module-level values taken from obj_data (see top of file).
    brv,bjd,bvcorr = saph.utils.brvc(isot_date_obs,0.0,tele,ra=ra,dec=dec,epoch=epoch, pmra=pmra, pmdec=pmdec, px=plx)
    im_headers[0]['BJD'] = bjd[0]
    header = im_headers[0]
    # Write a single-HDU FITS file with the stacked data and final header.
    hdu_p = fits.PrimaryHDU(data=data, header=header)
    hdu = fits.HDUList([hdu_p])
    hdu.writeto(fn)
    return avg_airmass, bjd, header
def write_txt(name, sources, stars_tbl, fwhm, results=None, t0=None, t1=None, t2=None, t3=None, t4=None, t5=None):
    '''
    Short text file with diagnostic info about each image set, specifically
    for a successful run of the image set.

    Parameters
    ----------
    name: string
        name of the saved file
    sources: Table
        tabulated info about all the stars found on the image
        (must provide a 'peak' column)
    stars_tbl: Table
        tabulated info about all the stars used to form a psf
    fwhm: float
        full width at half maximum measured for the image set
    results: Table, optional
        tabulated info about all the stars found with the photometry
        routine (must provide a 'flux_fit' column)
    t0...t5: float, optional
        wall-clock timestamps of the pipeline stages; when t5 is supplied,
        per-stage durations are appended to the file
    '''
    # np.str was a deprecated alias of the builtin str and was removed in
    # NumPy 1.24, so plain str() is used instead.  The file is opened via a
    # context manager so it is closed even if a write fails.
    with open(name, 'w') as f:
        f.write('Number of stars in sources: '+str(len(sources))+'\nNumber of stars in stars_tbl: '+str(len(stars_tbl))
                +'\nNumbers of stars in results: '+str(len(results))+'\nMin, Max, Median peaks in sources: '
                +str(np.min(sources['peak']))+', '+str(np.max(sources['peak']))+', '+str(np.median(sources['peak']))
                +'\nMin, Max, Median fluxes in results: '+str(np.min(results['flux_fit']))+', '+str(np.max(results['flux_fit']))+', '
                +str(np.median(results['flux_fit']))+'\nFWHM: '+str(fwhm)+'\n')
        # Compare against None explicitly: a timestamp of 0.0 is falsy but
        # is still a valid value for t5.
        if t5 is not None:
            t_1 = t1-t0
            t_2 = t2-t1
            t_3 = t3-t2
            t_4 = t4-t3
            t_5 = t5-t4
            t_f = t5-t0
            f.write('Time to combine images: '+str(t_1)+'\nTime to find stars: '+str(t_2)+'\nTime to build psf: '
                    +str(t_3)+'\nTime to run photometry: '+str(t_4)+'\nTime to get wcs: '+str(t_5)+'\nTotal time: '
                    +str(t_f)+'\n')
| [
11748,
6468,
28338,
13,
952,
13,
21013,
355,
11414,
198,
11748,
2603,
29487,
8019,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
26181,
62,
7890,
355,
16298,
198,
11748,
47... | 2.134994 | 1,726 |
"""Tests for tools and arithmetics for monomials of distributed polynomials. """
from sympy.polys.monomialtools import (
monomials, monomial_count,
monomial_key, lex, grlex, grevlex,
monomial_mul, monomial_div,
monomial_gcd, monomial_lcm,
monomial_max, monomial_min,
monomial_divides,
Monomial,
InverseOrder, ProductOrder
)
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.abc import a, b, c, x, y, z
from sympy.utilities.pytest import raises
| [
37811,
51,
3558,
329,
4899,
290,
610,
342,
27757,
329,
937,
296,
8231,
286,
9387,
745,
6213,
296,
8231,
13,
37227,
198,
198,
6738,
10558,
88,
13,
35428,
82,
13,
2144,
49070,
31391,
1330,
357,
198,
220,
220,
220,
937,
296,
8231,
11,
... | 2.591623 | 191 |
import re
import base64
import hmac
import hashlib
import logging
import requests
from datetime import datetime
| [
11748,
302,
198,
11748,
2779,
2414,
198,
11748,
289,
20285,
198,
11748,
12234,
8019,
198,
11748,
18931,
198,
11748,
7007,
198,
6738,
4818,
8079,
1330,
4818,
8079,
628
] | 4.035714 | 28 |
from tkinter import *
from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui as pg
import DirectInputRoutines as DIR
from LogKey import key_check
# Timestamp of the previous frame (used for timing between captures).
last_time = time.time()
# One-hot template for the six tracked keys.
one_hot = [0, 0, 0, 0, 0, 0]
# Maps each tracked key to its index in the one-hot vector.
hash_dict = {'w':0, 's':1, 'a':2, 'd':3, 'c':4, 'v':5}
# Collected training data: screen captures (X) and key labels (y).
X = []
y = []
root = Tk()
frame = Frame(root, width=100, height=100)
# NOTE(review): keydown/keyup are not defined in this part of the file --
# presumably the key-event handlers defined alongside this script; confirm.
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
# Blocks until the window is closed; the collected data is saved afterwards.
root.mainloop()
np.save("X.npy", X)
np.save("y.npy", y)
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
350,
4146,
1330,
7412,
48400,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
269,
85,
17,
201,
198,
11748,
640,
201,
198,
11748,
12972,
2306,
519,
9019,
355,
23241,
201,
198,... | 2.214876 | 242 |
# Generated by Django 2.2.5 on 2019-10-05 23:22
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
13130,
12,
940,
12,
2713,
2242,
25,
1828,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from django.template.loaders.base import Loader as BaseLoader
from django.template.base import TemplateDoesNotExist
| [
6738,
42625,
14208,
13,
28243,
13,
2220,
364,
13,
8692,
1330,
8778,
263,
355,
7308,
17401,
198,
6738,
42625,
14208,
13,
28243,
13,
8692,
1330,
37350,
13921,
3673,
3109,
396,
628,
198
] | 3.6875 | 32 |
import sigvisa.database.db
from sigvisa.database.dataset import *
import sigvisa.utils.geog
# NOTE(review): this fragment is Python 2 code (print statements below).
# NOTE(review): the bare name `database` is not bound by
# `import sigvisa.database.db`; presumably it comes from the star-import of
# sigvisa.database.dataset -- confirm.
cursor = database.db.connect().cursor()

# Read one week (168 h) of leb arrivals starting at the given epoch time.
detections, arid2num = read_detections(cursor, 1237680000, 1237680000 + 168 * 3600, arrival_table="leb_arrival", noarrays=False)

# Scan arrivals per site and report pairs closer than 5 seconds.
last_det = dict()
overlaps = 0
for det in detections:
    site = det[0]
    time = det[2]  # NOTE: shadows any `time` module name in scope
    if site in last_det:
        gap = time - last_det[site]
        if gap < 5:
            print " arrival %d at siteid %d occured %f seconds after previous at %f : phase %s" % (det[1], site, gap, last_det[site], det[DET_PHASE_COL])
            overlaps = overlaps + 1
    last_det[site] = time

print "total overlaps: ", overlaps, " out of ", len(detections), " detections"
| [
11748,
43237,
4703,
64,
13,
48806,
13,
9945,
198,
6738,
43237,
4703,
64,
13,
48806,
13,
19608,
292,
316,
1330,
1635,
198,
11748,
43237,
4703,
64,
13,
26791,
13,
469,
519,
198,
198,
66,
21471,
796,
6831,
13,
9945,
13,
8443,
22446,
66... | 2.44 | 300 |
import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
# Permutation of transpose node
| [
11748,
12972,
9288,
198,
198,
6738,
319,
77,
87,
1330,
309,
22854,
2964,
1462,
198,
6738,
319,
77,
87,
1330,
31904,
355,
11752,
198,
198,
11748,
957,
77,
13,
7295,
13,
261,
77,
87,
62,
18558,
355,
12018,
68,
198,
6738,
957,
77,
13... | 3.088235 | 102 |
import json
from lib import authz
from lib.logger import logger
from lib.exclusions import exclusions, state_machine
def can_requirement_be_remediated(requirement: dict) -> bool:
    """
    Method to validate whether a requirement is capable of being remediated.

    A requirement counts as remediable when it carries a 'remediation'
    entry describing how to fix it.

    :param requirement: The dict representing the requirement to check.
    :returns bool: A boolean representing whether requirement can or cannot be remediated.
    """
    return 'remediation' in requirement
| [
11748,
33918,
198,
198,
6738,
9195,
1330,
6284,
89,
198,
6738,
9195,
13,
6404,
1362,
1330,
49706,
198,
6738,
9195,
13,
1069,
11539,
1330,
10293,
507,
11,
1181,
62,
30243,
628,
198,
198,
4299,
460,
62,
8897,
24615,
62,
1350,
62,
260,
... | 3.706349 | 126 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
7061,
6,
198,
26656,
15385,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
273,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
17080,
6169,
351,
... | 3.864754 | 244 |
import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import pickle
import datetime
from matplotlib import style
import matplotlib.pyplot as plot
# Config
isLoadFromLocal = True
# SECURITY NOTE(review): a live API key is hard-coded here; it should be
# moved to an environment variable or configuration file.
quandl.ApiConfig.api_key = '76eCnz6z9XTH8nfLWeQU'
style.use('ggplot')

# Loading data: use the local pickle cache when available, otherwise
# download from Quandl and cache the result.
if isLoadFromLocal:
    df = pickle.load(open("DataFromQuandl_Stock_Chap2.pickle", "rb"))
else:
    df = quandl.get('WIKI/GOOGL')
    pickle.dump(df, open("DataFromQuandl_Stock_Chap2.pickle", "wb+"))

# Data pre-processing: derive relative high-low spread and daily change.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecastCol = 'Adj. Close'
# NOTE(review): this fills NaNs with the *string* '-99999' in otherwise
# numeric columns -- presumably the numeric -99999 was intended; confirm.
df.fillna('-99999', inplace = True)
# Forecast horizon: 1% of the dataset length.
forecastOut = int(math.ceil(0.01*len(df)))
# The label is the forecast column shifted `forecastOut` rows into the future.
df['label'] = df[forecastCol].shift(-forecastOut)
# df['label'].plot()
# df[forecastCol].plot()
# plot.legend(loc = 4)
# plot.show()

# Feature matrix: everything except the label, standardised.
x = np.array(df.drop(['label'], 1))
print(x)
x = preprocessing.scale(x)
print(x)
# The last `forecastOut` rows have no label; they are what gets forecast.
xLately = x[-forecastOut:]
x = x[:-forecastOut]
df.dropna(inplace = True)
y = np.array(df['label'])

# Regression: hold out 10% of the data for scoring.
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1)
# classifier = svm.SVR(kernel='linear') # SVM SVR
classifier = LinearRegression(n_jobs=3) # Linear Regression
classifier.fit(x_train, y_train)
accuracy = classifier.score(x_test, y_test)
forecastSet = classifier.predict(xLately)
print('Accuracy is ', accuracy, '\nForecasted values are ', forecastSet, '\nNumber of values is ', forecastOut)

# Append the forecasts to the frame, one synthetic future date per value.
df['Forecast'] = np.nan
lastDate = df.iloc[-1].name
print(lastDate)
lastTime = lastDate.timestamp()
print(lastTime)
oneDay = 24 * 60 * 60 # seconds in a day
nextTime = lastTime + oneDay
for iter in forecastSet:  # NOTE: `iter` shadows the builtin of that name
    nextDate = datetime.datetime.fromtimestamp(nextTime)
    nextTime += oneDay
    df.loc[nextDate] = [np.nan for _ in range(len(df.columns) - 1)] + [iter]

# Plot history plus forecast.
df['Adj. Close'].plot()
df['Forecast'].plot()
plot.legend(loc = 4)
plot.xlabel('Date')
plot.ylabel('Price')
plot.show()
11748,
627,
392,
75,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
1330,
662,
36948,
11,
3272,
62,
12102,
341,
11,
264,
14761,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
198,
... | 2.509302 | 860 |
# Maps an ICC tag signature to the IccProfile reader used to decode its
# payload.  NOTE(review): the parentheses around each value are grouping,
# not tuples -- every entry is just a method reference.
tagtypes = {
    'chad': (IccProfile.read_s15Fixed16ArrayType),
    'cprt': (IccProfile.read_string),
    'desc': (IccProfile.read_string),
    'dmdd': (IccProfile.read_string),
    'tech': (IccProfile.read_signature_type),
    'vued': (IccProfile.read_string),
    'wtpt': (IccProfile.read_xyztype),
    'bkpt': (IccProfile.read_xyztype), # private type?
    'rTRC': (IccProfile.read_trctype),
    'gTRC': (IccProfile.read_trctype),
    'bTRC': (IccProfile.read_trctype),
    'rXYZ': (IccProfile.read_xyztype),
    'gXYZ': (IccProfile.read_xyztype),
    'bXYZ': (IccProfile.read_xyztype),
    }

if __name__=='__main__':
    # CLI entry: parse the ICC profile file given as argv[1] and dump it.
    import numpy as np
    import sys
    with open(sys.argv[1], 'rb') as file:
        data = np.fromfile(file, dtype="uint8")
    profile = IccProfile(data)
    print(profile.tostring())
198,
12985,
19199,
796,
1391,
198,
220,
220,
220,
705,
354,
324,
10354,
357,
40,
535,
37046,
13,
961,
62,
82,
1314,
13715,
1433,
19182,
6030,
828,
198,
220,
220,
220,
705,
66,
1050,
83,
10354,
357,
40,
535,
37046,
13,
961,
62,
884... | 2.124352 | 386 |
from .binary_search import binary_search
| [
6738,
764,
39491,
62,
12947,
1330,
13934,
62,
12947,
628,
628,
628,
628,
628,
628,
198
] | 3.3125 | 16 |
if __name__ == '__main__':
    # Simple smoke output when this module is executed directly.
    print('Module 2')
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
10786,
26796,
362,
11537
] | 2.380952 | 21 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 16:40:53 2017
@author: Sergio
"""
#Analisis de variables
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
import warnings
# Silence library warnings (pandas/sklearn deprecation noise).
warnings.filterwarnings('ignore')
# Load the data.
# `train` holds the labelled rows used to train the model.
train = pd.read_csv('train.csv')
# `test` holds the unlabelled rows used for the final test.
test = pd.read_csv('test.csv')
# First, variables with a high number of missing values must be removed.
# Fraction of missing values per variable in each dataset
# (1460 and 1459 are the row counts of train and test respectively).
NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])
#print(NAs)
# Drop every variable with more than 20% (0.2) missing values.
eliminar = []
nvars = 0
for index, row in NAs.iterrows():
    print(index)
    print(row['Test'])
    if (row['Test'] > 0.2) or (row ['Train'] > 0.2):
        eliminar.append(index)
# `eliminar` now holds the names of the variables to be dropped outright.
# Among the drop candidates, for the 'Alley' variable NA does not mean
# "unknown": it is simply one more legal value the variable can take,
# so that variable must stay in our dataset.
print(eliminar)
eliminar.remove('Alley')
eliminar.remove('FireplaceQu')# Same situation as with 'Alley'.
train.drop(eliminar,axis=1, inplace=True)
test.drop(eliminar,axis=1, inplace=True)

"""
A deeper analysis of the variables is now required.

First of all we find some variables that appear to have a numeric
representation, for example 'MSSubClass' or 'OverallCond'.
Reading the documentation about the information each variable carries
shows that 'OverallCond', although apparently a nominal variable,
expresses something measurable (quality): a score between 1 and 10.
"""
# Numeric-looking variables that must be converted to string (categories).
test['MSSubClass'] = test['MSSubClass'].astype(str)
train['MSSubClass'] = train['MSSubClass'].astype(str)
test['YrSold'] = test['YrSold'].astype(str)
train['YrSold'] = train['YrSold'].astype(str)
# Categorical variables that must become numeric because they express a
# score: it is logical to assume that raising the score has a direct
# effect on the final sale price.
ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}
HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}
# Replace the score strings with their numeric values in both tables.
train.replace(ExterQualvalues,inplace=True)
train.replace(ExterCondvalues,inplace=True)
train.replace(BsmQualvalues,inplace=True)
train.replace(BsmCondvalues,inplace=True)
train.replace(HeatingQCvalues,inplace=True)
train.replace(KitchenQualvalues,inplace=True)
train.replace(FireplaceQuvalues,inplace=True)
train.replace(GarageCondvalues,inplace=True)
train.replace(GarageQualvalues,inplace=True)
train.replace(PoolQCvalues,inplace=True)
test.replace(ExterQualvalues,inplace=True)
test.replace(ExterCondvalues,inplace=True)
test.replace(BsmQualvalues,inplace=True)
test.replace(BsmCondvalues,inplace=True)
test.replace(HeatingQCvalues,inplace=True)
test.replace(KitchenQualvalues,inplace=True)
test.replace(FireplaceQuvalues,inplace=True)
test.replace(GarageCondvalues,inplace=True)
test.replace(GarageQualvalues,inplace=True)
test.replace(PoolQCvalues,inplace=True)
# Every variable now has a 'correct' data type.
# Count how many variables of each type we have.
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
enteras = features.dtypes[features.dtypes == 'int64'].index
flotantes = features.dtypes[features.dtypes == 'float64'].index
nominales = features.dtypes[features.dtypes == 'object'].index
# Convert the index objects to plain lists for later use.
ent = []
for var in enteras:
    ent.append(var)
flot = []
for var in flotantes:
    flot.append(var)
nom = []
for var in nominales:
    nom.append(var)
numericas = ent+flot
# Now the missing values of every variable must be filled in.
"""For some of the variables that were converted to numeric scores,
NaN does not mean the value is missing: it expresses a score of 0."""
features['BsmtQual'] = features['BsmtQual'].fillna(0)
features['BsmtCond'] = features['BsmtCond'].fillna(0)
features['FireplaceQu'] = features['FireplaceQu'].fillna(0)
features['GarageQual'] = features['GarageQual'].fillna(0)
features['GarageCond'] = features['GarageCond'].fillna(0)
# The remaining numeric variables can be filled with the mean.
for var in numericas:
    if features[var].isnull().sum() > 0:
        features[var] = features[var].fillna(features[var].mean())
# The remaining nominal variables are filled with the most frequent value.
for var in nominales:
    if features[var].isnull().sum() > 0:
        features[var] = features[var].fillna(features[var].mode()[0])
"""Once the data table is in the correct format we study the correlation
of the variables with the price.  Variables showing a low correlation will
be discarded, since all they would do is make our model imprecise.
If too many variables are imputed we lose valuable information and the
model becomes imprecise again.
Drawing a heatmap lets us see the correlation of the variables."""
#train_labels = np.log(train_labels)# The log transform brings the data closer to a normal distribution.
complete = features.loc['train']# Only the training rows are used.
complete = pd.concat([complete,train_labels],axis=1)# Re-attach the price column.
correlationPlot = complete.corr()# Keep the correlation matrix in a DataFrame.
f,ax = plt.subplots(figsize=(12,9))# Configure the image size.
sns.heatmap(correlationPlot,vmax=.8,square=True)# Draw the heatmap of correlation values.
plt.yticks(rotation=0)# Rotate the axis labels so they are readable.
plt.xticks(rotation=90)# Rotate the axis labels so they are readable.
plt.show()# Display the plot.
f.savefig('Heatmap.png')# Save the plot to a file.
"""The correlation matrix shows the correlation between pairs of
variables; lighter cells mean two variables are highly correlated.
The next step of the analysis is to look for variables that are highly
correlated with each other and drop one of them, since that information is
redundant.  Another way to look at the problem is that keeping two
correlated variables can help dampen the effect of noise in one of them.
First we need to discover which variables determine the house price,
using the correlation.
"""
# Build the list of variables highly correlated with the sale price.
"""Aside:
computing the correlation before applying the log scale to the data makes
sense, because Pearson's correlation coefficient does not change with
scale and origin.  Besides, it is only used as an approximation of which
variables to use in the algorithm.  Later it will still be necessary to
make the variables follow a normalised distribution.
"""
HighCorrelation = []
for index, row in correlationPlot.iterrows():
    if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5):
        HighCorrelation.append(index)
        print(row['SalePrice'])
print("total de variables: "+str(len(HighCorrelation)))
print(HighCorrelation)
"""Now the nominal variables that will be taken into account must be
examined.  For this analysis a plot expressing the relation between the
price and the value of the house is used."""
complete = features.loc['train']
complete = pd.concat([complete,train_labels],axis=1)
malas = [#'MSSubClass',
        'LandContour',
        'LandSlope',
        #'RoofStyle',
        #'RoofMatl',
        'Exterior2nd',
        #'Exterior1st',
        'MasVnrType',
        'BsmtExposure',
        'Functional',
        'YrSold']
##################################
#malas = ['Utilities', 'RoofMatl','Heating','Functional']
for var in malas:
    data = pd.concat([complete[var],complete['SalePrice']],axis=1)
    f,ax = plt.subplots(figsize=(12,9))
    fig = sns.boxplot(x=var,y="SalePrice",data=data)
    fig.axis(ymin=0,ymax=800000)
    plt.xticks(rotation=90)
    f.savefig(str(var)+'_Price.png')
"""
Apparently bad variables:
LandContour
LandScope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold
"""

"""PCA analysis"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
19480,
4280,
2808,
1467,
25,
1821,
25,
4310,
2177,
201,
198,
201,
198,
31,
9800,
25,
36759,
201,
198,
37811,
201,
198,
201,
198,
2,
... | 2.595337 | 3,603 |
import random
import sys
# Size of the generated SQL workload: 100 tables x 100 columns x 10k rows.
ntables = 100
ncols = 100
nrows = 10000

# Emit CREATE TABLE / INSERT statements to stdout (progress to stderr).
# NOTE(review): printstderr() and get_value() are not defined in this part
# of the file -- presumably helpers defined alongside this script; confirm.
for t in range(ntables):
    printstderr(f'{t}/{ntables}')
    print(f"create table x ({','.join(['x int'] * ncols)});")
    for r in range(nrows):
        print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
        for c in range(ncols):
            print(get_value(), end=('' if c==ncols-1 else ','))
        print(');')

# 10 min to generate
# 3 min to process
11748,
4738,
198,
11748,
25064,
198,
198,
429,
2977,
796,
1802,
198,
77,
4033,
82,
796,
1802,
198,
77,
8516,
796,
33028,
198,
198,
1640,
256,
287,
2837,
7,
429,
2977,
2599,
198,
220,
3601,
301,
1082,
81,
7,
69,
6,
90,
83,
92,
14... | 2.347826 | 184 |
import os
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# Tests extension install/enable
| [
11748,
28686,
198,
198,
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2025,
82,
856,
49493,
7,
198,
220,
220,
... | 2.646341 | 82 |
import hashlib
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from textwrap import wrap
from typing import Dict, List
from pyroute2 import IPRoute, NDB, WireGuard
from wgskex.common.utils import mac2eui64
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# TODO make loglevel configurable
logger.setLevel("DEBUG")
# pyroute2 stuff
| [
11748,
12234,
8019,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
2420,
37150,
1330,
14441,
198,
6738,
19720,
1330,
360,
713,... | 3.139344 | 122 |
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
# Parse CLI/environment configuration before the app object is built.
config.parse_args()

app = FastAPI(
    title="API",
    description="API boilerplate",
    version="1.0.0",
    openapi_tags=API_TAGS_METADATA,
)

# SECURITY NOTE(review): wildcard origins combined with
# allow_credentials=True is the most permissive CORS setting; tighten the
# origin list before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(items.router)

if __name__ == "__main__":
    # Run a development server when executed directly.
    uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
| [
11748,
334,
25531,
1211,
198,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
3049,
15042,
13,
27171,
1574,
13,
66,
669,
1330,
23929,
12310,
2509,
1574,
198,
6738,
11926,
1330,
3709,
198,
198,
11748,
4566,
198,
198,
6738,
38491,
... | 2.553191 | 235 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
12582,
355,
4808,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
1317,
62,
21230,
628,
198,
2,
13610,
534,
49... | 3.422222 | 45 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-16 13:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2864,
12,
486,
12,
1433,
1511,
25,
2327,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,... | 2.791045 | 67 |
import argparse
import numpy as np
import os
import torch
from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments
from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Data/model path arguments.
    parser.add_argument("--data_dir", type=str, default="./data")
    parser.add_argument("--model_dir", type=str, default="./model")
    parser.add_argument("--output_dir", type=str, default="./output")
    parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
    parser.add_argument(
        "--train_filename", type=str, default="klue-sts-v1.1_train.json"
    )
    parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")
    # Training hyper-parameters.
    parser.add_argument("--num_labels", type=int, default=1)
    parser.add_argument("--seed", type=int, default=15)
    parser.add_argument("--num_train_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--learning_rate", type=float, default=5e-5)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    # Evaluation/checkpointing arguments.
    parser.add_argument("--evaluation_strategy", type=str, default="steps")
    parser.add_argument("--save_steps", type=int, default=250)
    parser.add_argument("--eval_steps", type=int, default=250)
    parser.add_argument("--save_total_limit", type=int, default=2)

    args = parser.parse_args()
    # NOTE(review): main() is not defined in this part of the file --
    # presumably defined above in the full module; confirm.
    main(args)
| [
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
28686,
198,
11748,
28034,
198,
6738,
6121,
364,
1330,
11160,
30642,
7509,
11,
11160,
16934,
11,
31924,
11,
13614,
28100,
2886,
198,
198,
6738,
2746,
1330,
5199,
64,
... | 2.752443 | 614 |
import unittest
from nanoservice import Responder
from nanoservice import Requester
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
15709,
418,
712,
501,
1330,
10328,
8623,
198,
6738,
15709,
418,
712,
501,
1330,
9394,
7834,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
... | 2.87234 | 47 |
from airtech_api.utils.auditable_model import AuditableBaseModel
from django.db import models
# Create your models here.
| [
6738,
1633,
13670,
62,
15042,
13,
26791,
13,
3885,
4674,
62,
19849,
1330,
7591,
4674,
14881,
17633,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198,
2,
13610,
534,
4981,
994,
13,
198
] | 3.617647 | 34 |
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Spot-check two known values of the density (expected results noted).
    # NOTE(review): gaussian() is defined elsewhere in this module.
    gauss_1 = gaussian(10, 8, 2) # 0.12098536225957168
    gauss_2 = gaussian(10, 10, 2) # 0.19947114020071635
    print("Gauss(10, 8, 2): {}".format(gauss_1))
    print("Gauss(10, 10, 2): {}".format(gauss_2))
    #
    # Standard normal parameters.
    mean = 0
    variance = 1
    std = np.sqrt(variance)
    # Plot between -5 and 5 with .001 steps.
    x = np.arange(-5, 5, 0.001)
    gauss = []
    for i in x:
        gauss.append(gaussian(i, mean, std))
    gauss = np.array(gauss)
    plt.plot(x, gauss)
    plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
31986,
1046,
62,
16,
796,
31986,
31562,
7,
940,
... | 2.075 | 280 |
import unittest
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
11748,
555,
715,
395,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.333333 | 30 |
import json
from typing import TypedDict
from .bot_emoji import AdditionalEmoji
# Build the module-level configuration object and load it at import time.
# NOTE(review): Configurator is not defined in this part of the file.
configurator = Configurator()
configurator.load()
| [
11748,
33918,
198,
6738,
19720,
1330,
17134,
276,
35,
713,
198,
6738,
764,
13645,
62,
368,
31370,
1330,
15891,
36,
5908,
7285,
628,
628,
198,
220,
220,
220,
220,
628,
628,
198,
11250,
333,
1352,
796,
17056,
333,
1352,
3419,
198,
11250... | 2.979167 | 48 |
from scipy.spatial import distance
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
from scipy import stats
import numpy as np
| [
6738,
629,
541,
88,
13,
2777,
34961,
1330,
5253,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
198,
6738,
629,
541,
88,
1330,
9756,
198,
11748,
299,
32... | 3.163265 | 49 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
| [
2,
19617,
28,
40477,
12,
23,
198,
81,
37811,
198,
1212,
2438,
373,
7560,
416,
198,
59,
1220,
4808,
220,
220,
220,
4808,
220,
4808,
91,
220,
220,
4808,
220,
4808,
198,
930,
44104,
8,
11139,
28264,
5769,
62,
91,
11139,
91,
930,
7,
... | 2.571429 | 112 |
if __name__ == "__main__":
    # Demo: check which author surnames appear in a bibliographic entry.
    # NOTE(review): contains_word() is defined elsewhere in this module.
    bibliographic_entry = "Peroni, S., Osborne, F., Di Iorio, A., Nuzzolese, A. G., Poggi, F., Vitali, F., " \
                          "Motta, E. (2017). Research Articles in Simplified HTML: a Web-first format for " \
                          "HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
                          "DOI: https://doi.org/10.7717/peerj-cs.132"
    print(contains_word("Peroni", "Osborne", bibliographic_entry))
    print(contains_word("Peroni", "Asprino", bibliographic_entry))
    print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
    print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
275,
29142,
6826,
62,
13000,
796,
366,
5990,
14651,
11,
311,
1539,
29667,
11,
376,
1539,
6031,
314,
40974,
11,
317,
1539,
399,
4715,
349,
2771,
1... | 2.255663 | 309 |
#!/usr/bin/env python
import responses
from selenium import webdriver
# This file contains/references the default JS
# used to provide functions dealing with input/output
SCRIPT_RUNNER = "runner.html"
ENCODING = 'utf-8'
PAGE_LOAD_TIMEOUT = 5
PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000
capabilities = webdriver.DesiredCapabilities.PHANTOMJS
capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS
capabilities["phantomjs.page.settings.loadImages"] = False
SCRIPT_TEMPLATE = """
window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}};
window.method = requestData.method;
window.headers = requestData.headers;
window.data = requestData.data;
window.params = requestData.params;
window.logs = [];
window.log = function(message) {{
window.logs.push({{
"time": (new Date).getTime(),
"message": message
}})
}};
"""
GET_LOGS_SCRIPT = 'return window.logs;'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
9109,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
198,
2,
770,
2393,
4909,
14,
5420,
4972,
262,
4277,
26755,
198,
2,
973,
284,
2148,
5499,
7219,
351,
5128,
14,
2... | 2.698061 | 361 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
23428,
2389,
62,
39,
42480,
62,
11674,
3698,
5258,
796,
685,
198,
220,
220,
220,
705,
19608,
8079,
3256,
705,
96... | 2.101551 | 709 |
"""
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful \_()_/
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
# abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: ommission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g, an image in byte
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
if __name__ == "__main__":
main()
| [
37811,
198,
26437,
662,
12,
36948,
329,
41139,
5569,
9473,
13,
198,
51,
1124,
287,
19449,
39559,
1366,
422,
5800,
10044,
325,
290,
23862,
257,
48700,
22105,
628,
198,
26687,
1672,
25,
198,
5450,
1378,
12567,
13,
785,
14,
83,
22854,
29... | 2.558935 | 2,104 |
__all__=['module1'] | [
834,
439,
834,
28,
17816,
21412,
16,
20520
] | 2.375 | 8 |
from pygame import Surface, font
from copy import copy
from random import randint, choice
import string
from lib.transactionButton import TransactionButton
SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]
| [
6738,
12972,
6057,
1330,
20321,
11,
10369,
198,
6738,
4866,
1330,
4866,
198,
6738,
4738,
1330,
43720,
600,
11,
3572,
198,
11748,
4731,
198,
198,
6738,
9195,
13,
7645,
2673,
21864,
1330,
45389,
21864,
198,
198,
9693,
3185,
62,
47,
31688,... | 2.648276 | 145 |
import logging
import numpy as np
import core.dataflow as dtf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
| [
11748,
18931,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
4755,
13,
7890,
11125,
355,
288,
27110,
198,
11748,
49385,
13,
20850,
62,
9288,
355,
40812,
198,
198,
62,
25294,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
... | 2.977778 | 45 |
import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract #Python wrapper for Google-owned OCR engine known by the name of Tesseract.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
| [
11748,
256,
74,
3849,
13,
20500,
3524,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
5717,
498,
519,
198,
11748,
299,
32152,
198,
11748,
12972,
83,
408,
263,
529,
1303... | 2.450766 | 457 |
"""loads the nasm library, used by TF."""
load("//third_party:repo.bzl", "tf_http_archive")
| [
37811,
46030,
262,
299,
8597,
5888,
11,
973,
416,
24958,
526,
15931,
198,
198,
2220,
7203,
1003,
17089,
62,
10608,
25,
260,
7501,
13,
65,
48274,
1600,
366,
27110,
62,
4023,
62,
17474,
4943,
198
] | 2.657143 | 35 |
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
| [
2,
15069,
357,
66,
8,
2177,
11,
13914,
45036,
3549,
2351,
4765,
11,
11419,
13,
21522,
771,
379,
198,
2,
262,
13914,
45036,
3549,
2351,
18643,
13,
27140,
32572,
12,
34,
16820,
12,
22,
2682,
24038,
13,
1439,
6923,
198,
2,
10395,
13,
... | 6.565789 | 684 |
"""Fixes for CESM2 model."""
from ..fix import Fix
from ..shared import (add_scalar_depth_coord, add_scalar_height_coord,
add_scalar_typeland_coord, add_scalar_typesea_coord)
| [
37811,
22743,
274,
329,
42700,
44,
17,
2746,
526,
15931,
198,
6738,
11485,
13049,
1330,
13268,
198,
6738,
11485,
28710,
1330,
357,
2860,
62,
1416,
282,
283,
62,
18053,
62,
37652,
11,
751,
62,
1416,
282,
283,
62,
17015,
62,
37652,
11,
... | 2.233333 | 90 |
from YouTubeFacesDB import generate_ytf_database
###############################################################################
# Create the dataset
###############################################################################
generate_ytf_database(
directory= '../data',#'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset
filename='ytfdb.h5', # Name of the HDF5 file to write to
labels=10, # Number of labels to randomly select
max_number=-1, # Maximum number of images to use
size=(100, 100), # Size of the images
color=False, # Black and white
bw_first=True, # Final shape is (1, w, h)
cropped=True # The original images are cropped to the faces
) | [
6738,
7444,
37,
2114,
11012,
1330,
7716,
62,
20760,
69,
62,
48806,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
13610,
262,
27039,
198,
29113,
29113,
7804,
4242,
21017,
198,
8612,
378,
62,
20760,
69,
62,
48806,
7,
220,
220,
198,... | 3.570707 | 198 |
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
| [
6738,
1479,
89,
1533,
403,
1330,
16611,
62,
2435,
198,
6738,
1334,
62,
30604,
1330,
1332,
198,
198,
6738,
266,
1940,
333,
62,
9866,
10155,
13,
65,
4509,
13,
41989,
13,
26791,
1330,
651,
62,
46921,
62,
13116,
62,
6371,
198,
6738,
266... | 3.534653 | 101 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for aea.cli.utils module."""
from builtins import FileNotFoundError
from typing import cast
from unittest import TestCase, mock
from click import BadParameter, ClickException
from jsonschema import ValidationError
from yaml import YAMLError
from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter
from aea.cli.utils.config import (
_init_cli_config,
get_or_create_cli_config,
update_cli_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import _validate_config_consistency, clean_after
from aea.cli.utils.formatting import format_items
from aea.cli.utils.generic import is_readme_present
from aea.cli.utils.package_utils import (
find_item_in_distribution,
find_item_locally,
is_fingerprint_correct,
try_get_balance,
try_get_item_source_path,
try_get_item_target_path,
validate_author_name,
validate_package_name,
)
from tests.conftest import FETCHAI
from tests.test_cli.tools_for_testing import (
ConfigLoaderMock,
ContextMock,
PublicIdMock,
StopTest,
raise_stoptest,
)
AUTHOR = "author"
class PublicIdParameterTestCase(TestCase):
"""Test case for PublicIdParameter class."""
def test_get_metavar_positive(self):
"""Test for get_metavar positive result."""
result = PublicIdParameter.get_metavar("obj", "param")
expected_result = "PUBLIC_ID"
self.assertEqual(result, expected_result)
def _raise_yamlerror(*args):
raise YAMLError()
def _raise_file_not_found_error(*args):
raise FileNotFoundError()
def _raise_validation_error(*args, **kwargs):
raise ValidationError("Message.")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
26171,
198,
2,
198,
2,
220,
220,
15069,
2864,
12,
23344,
376,
7569,
13,
20185,
15302,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
3... | 3.057247 | 821 |