blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b4cee01a7a4aad6bd4aa41ff11599feddafe8b0 | 14b5679d88afa782dc5d6b35878ab043089a060a | /students/LvTao/20200515/测试鼠标是否移动.py | 317ea2ada7e5968f4d2f40aad82bbbc2832c59da | [] | no_license | mutiangua/EIS2020 | c541ef32623f67f9277945cd39cff3c02f06e4dd | 92aa2711b763a2c93be238825c445bf2db8da391 | refs/heads/master | 2022-11-18T05:21:47.567342 | 2020-07-11T10:11:21 | 2020-07-11T10:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import time
import pyautogui
def pos():
pos_mouse=pyautogui.position()
time.sleep(1)
return pos_mouse
while True:
if pos()==pyautogui.position():
continue
else:
x,y=pyautogui.position()
print('当前位置X{},Y{}'.format(x,y))
| [
"noreply@github.com"
] | mutiangua.noreply@github.com |
22891206df5b4ecb1cd4e94be6977834bdc91d4b | 2e514a16e9da89c68e9222f85c88910a6747a714 | /pmds/utils.py | f7b7e02a24bcbe1a59a3414a514b206896d4b4ea | [] | no_license | vu-minh/probabilistic-mds | e97bb700471177901e4809debce805a29d542334 | 95e4f839f468d0b07d61fd8645db1d4abc46724c | refs/heads/master | 2023-02-22T02:29:39.861470 | 2021-01-27T12:48:37 | 2021-01-27T12:48:37 | 299,236,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | import random
from itertools import chain, islice
def chunks(in_list, size=100, shuffle=True):
"""Generator to chunk `in_list` in to small chunks of size `size`.
"""
if shuffle:
in_list = random.sample(list(in_list), k=len(in_list))
iterator = iter(in_list)
for first in iterator:
yield chain([first], islice(iterator, size - 1))
if __name__ == "__main__":
a = list(range(10))
for c in chunks(a, size=3, shuffle=True):
print(list(c))
for c in chunks(a, size=3, shuffle=False):
print(list(c))
| [
"vie.minhvu@gmail.com"
] | vie.minhvu@gmail.com |
74c590dee70d866754a3bfddb67a69646b5796c8 | 7837961d07a64aa1f73d88ed1012ec5e322ab370 | /src/generative_playground/molecules/lean_settings.py | 5af24245d63c36b4de49a57e16e9310343c74414 | [
"MIT"
] | permissive | markharley/generative_playground | 1281f13cc28c43ede9695e3ffa98713e613023d4 | 56e826e5ca453ee19b0d4298ed27b4db5efd6fd9 | refs/heads/master | 2020-05-18T09:50:27.820273 | 2019-05-05T12:03:26 | 2019-05-05T12:03:26 | 184,337,386 | 0 | 0 | null | 2019-04-30T22:01:43 | 2019-04-30T22:01:42 | null | UTF-8 | Python | false | false | 386 | py | import inspect
import os
molecules_root_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + '/'
def get_data_location(molecules=True):
if molecules:
return {'source_data': molecules_root_location + 'data/250k_rndm_zinc_drugs_clean.smi'}
else:
return {'source_data': molecules_root_location + 'data/equation2_15_dataset.txt'} | [
"egor.kraev@gmail.com"
] | egor.kraev@gmail.com |
5789bba57d449927a822097fee9dbc9781f0b257 | e68f6c07bf8c8347ded2901af0cc0b1f347364a9 | /src/language/help/helpAndDir.py | b6fd809934b0781a266d8bde5361f4b237b28504 | [
"Apache-2.0"
] | permissive | AldrichYang/HelloPython3 | 182de86f740a722987bfb27efb7e976b6f484315 | 3689dcddf668aa3cad185ca21cd8be079fdbc736 | refs/heads/master | 2023-08-17T05:28:55.484692 | 2023-08-15T09:35:11 | 2023-08-15T09:35:11 | 86,020,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | import sys
# help string for the built-in len() function; note that it's "len" not "len()",
# which is a call to the function, which we don't want
help(len)
help(sys)
# dir() is like help() but just gives a quick list of its defined symbols, or "attributes"
dir(sys)
# help string for the exit() function in the sys module
help(sys.exit)
# help string for the split() method for string objects.
# You can call help() with that object itself or an example of that object, plus its attribute.
# For example, calling help('xyz'.split) is the same as calling help(str.split).
help('xyz'.split)
# help string for list objects
help(list)
# displays list object attributes, including its methods
dir(list)
# help string for the append() method for list objects
help(list.append)
| [
"yanghengx@nonobank.com"
] | yanghengx@nonobank.com |
abb547f452b4b5e68d990bc054c61b57bad55471 | 809c42b165fb4d551d0ac87f57fa918975d6cc5d | /accounts/tests.py | e918e7dcc03aa69e72e36eaa178efb4d1c98a1b5 | [] | no_license | ramblingbarney/verbose-pancake | 6298bedf667e6559b0d23ec3541422875c0e6d90 | 30fc1c9db6a8c9a9a4c885b1250b57ee6fedff1c | refs/heads/master | 2022-12-10T22:07:27.190960 | 2019-02-26T00:02:03 | 2019-02-26T00:02:03 | 153,365,053 | 0 | 1 | null | 2022-12-08T01:16:51 | 2018-10-16T22:58:41 | HTML | UTF-8 | Python | false | false | 431 | py | from django.test import TestCase
class TestTreatmentViews(TestCase):
''' test the treatments view '''
def test_treatments_view(self):
# direct to the treatments view
page = self.client.get('/')
# check if it has a status code 200
self.assertEqual(page.status_code, 200)
# check that you are directed to the treatments.html page
self.assertTemplateUsed(page, "index.html")
| [
"c9dw5er@protonmail.com"
] | c9dw5er@protonmail.com |
e9b441d40e1a9776554b3bd1aeb99e0defc58dc9 | 440ea025790f1dac624f399f79a58113a6a612f5 | /setup.py | 9917579b48b79e133e41420823100f8ebd4e47a4 | [
"MIT"
] | permissive | jameshilliard/hlk-sw16 | e3023684d182eb6603683bae842f59c33a74bd0d | f3e2442beee57aebbbfc05c5088677847fe669e2 | refs/heads/master | 2021-07-04T20:08:41.698140 | 2020-08-20T13:06:49 | 2020-08-20T13:07:09 | 155,641,501 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/env python
from setuptools import setup
setup(
name='hlk-sw16',
version='0.0.9',
description='Python client for HLK-SW16',
url='https://github.com/jameshilliard/hlk-sw16',
author='James Hilliard',
author_email='james.hilliard1@gmail.com',
license='MIT',
packages=[
'hlk_sw16',
],
)
| [
"james.hilliard1@gmail.com"
] | james.hilliard1@gmail.com |
c21099e0e4bd6c580fc987579f69c77eba1616f5 | bc5ac79c9b03300e4fcec4f8ddff0d17230df213 | /geokey_geotagx/__init__.py | 38ffa6c44d9f5467fa7b01ee872b182d941ac09f | [
"Apache-2.0"
] | permissive | ExCiteS/geokey-geotagx | f30401b9ad0e874092831551fc30ec5665a72ea4 | 9302bb66eed7023d8cb845cd19336c3b637bde69 | refs/heads/master | 2020-12-26T02:50:00.397774 | 2017-03-13T10:23:51 | 2017-03-13T10:23:51 | 39,024,338 | 0 | 1 | null | 2017-03-10T17:25:59 | 2015-07-13T16:37:37 | Python | UTF-8 | Python | false | false | 369 | py | """Main initialisation for extension."""
VERSION = (0, 2, 0)
__version__ = '.'.join(map(str, VERSION))
try:
from geokey.extensions.base import register
register(
'geokey_geotagx',
'GeoTag-X',
display_admin=True,
superuser=False,
version=__version__
)
except BaseException:
print 'Please install GeoKey first'
| [
"j.osokinas@mappingforchange.org.uk"
] | j.osokinas@mappingforchange.org.uk |
6af841fb17dd20c39b1e70b06965fed9ff8a9455 | b287c232165bb0d248d619e978f57c864cc36a1c | /leo/plugins/importers/javascript.py | 38fa8e7d788aec325dd87180f3c2c238a1199e35 | [
"BSD-3-Clause",
"MIT"
] | permissive | tbpassin/leo-editor | 39ec8cfc6c35c867b89a21f495b8049b1254bcc6 | 76b60e5c58781f84c86a603b9c50b709250682b8 | refs/heads/master | 2023-08-09T05:05:54.064246 | 2023-03-01T14:12:14 | 2023-03-01T14:12:14 | 247,962,821 | 0 | 0 | NOASSERTION | 2020-03-17T12:22:03 | 2020-03-17T12:22:02 | null | UTF-8 | Python | false | false | 11,266 | py | #@+leo-ver=5-thin
#@+node:ekr.20140723122936.18144: * @file ../plugins/importers/javascript.py
"""The @auto importer for JavaScript."""
import re
from typing import Any, Dict, Generator
from leo.core import leoGlobals as g # Required
from leo.core.leoCommands import Commands as Cmdr
from leo.core.leoNodes import Position
from leo.plugins.importers.linescanner import Importer
#@+others
#@+node:ekr.20140723122936.18049: ** class JS_Importer
class JS_Importer(Importer):
def __init__(self, c: Cmdr) -> None:
"""The ctor for the JS_ImportController class."""
# Init the base class.
super().__init__(c, language='javascript')
#@+others
#@+node:ekr.20161101183354.1: *3* js_i.compute_headline
clean_regex_list1 = [
# (function name (
re.compile(r'\s*\(?(function\b\s*[\w]*)\s*\('),
# name: (function (
re.compile(r'\s*(\w+\s*\:\s*\(*\s*function\s*\()'),
# const|let|var name = .* =>
re.compile(r'\s*(?:const|let|var)\s*(\w+\s*(?:=\s*.*)=>)'),
]
clean_regex_list2 = [
re.compile(r'(.*\=)(\s*function)'), # .* = function
]
clean_regex_list3 = [
re.compile(r'(.*\=\s*new\s*\w+)\s*\(.*(=>)'), # .* = new name .* =>
re.compile(r'(.*)\=\s*\(.*(=>)'), # .* = ( .* =>
re.compile(r'(.*)\((\s*function)'), # .* ( function
re.compile(r'(.*)\(.*(=>)'), # .* ( .* =>
re.compile(r'(.*)(\(.*\,\s*function)'), # .* \( .*, function
]
clean_regex_list4 = [
re.compile(r'(.*)\(\s*(=>)'), # .* ( =>
]
def compute_headline(self, s: str) -> str:
"""Return a cleaned up headline s."""
s = s.strip()
# Don't clean a headline twice.
if s.endswith('>>') and s.startswith('<<'): # pragma: no cover (missing test)
return s
for ch in '{(=':
if s.endswith(ch):
s = s[:-1].strip()
# First regex cleanup. Use \1.
for pattern in self.clean_regex_list1:
m = pattern.match(s)
if m:
s = m.group(1)
break
# Second regex cleanup. Use \1 + \2
for pattern in self.clean_regex_list2:
m = pattern.match(s)
if m:
s = m.group(1) + m.group(2)
break
# Third regex cleanup. Use \1 + ' ' + \2
for pattern in self.clean_regex_list3:
m = pattern.match(s)
if m:
s = m.group(1) + ' ' + m.group(2)
break
# Fourth cleanup. Use \1 + ' ' + \2 again
for pattern in self.clean_regex_list4: # pragma: no cover (mysterious)
m = pattern.match(s)
if m:
s = m.group(1) + ' ' + m.group(2)
break
# Final whitespace cleanups.
s = s.replace(' ', ' ')
s = s.replace(' (', '(')
return g.truncate(s, 100)
#@-others
#@+node:ekr.20200131110322.2: ** JsLexer...
# JsLex: a lexer for Javascript
# Written by Ned Batchelder. Used by permission.
#
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/jslex/src/default/NOTICE.txt
#@+node:ekr.20200131110322.4: *3* class Tok
class Tok:
"""A specification for a token class."""
num = 0
def __init__(self, name: str, regex: str, next: str = None) -> None:
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
#@+node:ekr.20200131110322.7: *3* class Lexer
class Lexer:
"""A generic multi-state regex-based lexer."""
#@+others
#@+node:ekr.20200131110322.8: *4* Lexer.__init__
def __init__(self, states: Dict, first: Any) -> None:
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE) # |re.UNICODE)
self.state = first
#@+node:ekr.20200131110322.9: *4* Lexer.lex
def lex(self, text: str) -> Generator:
"""Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
# g.trace(state, start, text, match)
# g.printObj(regexes[state])
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield(tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
#@-others
#@+node:ekr.20200131110322.6: *3* function: literals
def literals(choices: str, prefix: str = "", suffix: str = "") -> str:
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
#@+node:ekr.20200131110322.10: *3* class JsLexer(Lexer)
class JsLexer(Lexer):
"""A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-Ascii characters in the Javascript source.
"""
# EKR: Happily, the JS importer doesn't need to handle id's carefully.
#@+<< constants >>
#@+node:ekr.20200131190707.1: *4* << constants >> (JsLexer)
# Because these tokens are matched as alternatives in a regex, longer possibilities
# must appear in the list before shorter ones, for example, '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly lex
# correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
# See https://stackoverflow.com/questions/6314614/match-any-unicode-letter
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
async await
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof new
return super switch this throw try typeof var
void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
#
# EKR: This would work if patterns were compiled with the re.UNICODE flag.
# However, \w is not the same as valid JS characters.
# In any case, the JS importer doesn't need to handle id's carefully.
#
# Tok("id", r"""([\w$])([\w\d]*)""", next='div'),
#
Tok("id", r"""
([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
'div': # slash will mean division
both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
'reg': # slash will mean regex
both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
#@-<< constants >>
def __init__(self) -> None:
super().__init__(self.states, 'reg')
#@-others
def do_import(c: Cmdr, parent: Position, s: str) -> None:
"""The importer callback for javascript."""
JS_Importer(c).import_from_string(parent, s)
importer_dict = {
'extensions': ['.js',],
'func': do_import,
}
#@@language python
#@@tabwidth -4
#@-leo
| [
"edreamleo@gmail.com"
] | edreamleo@gmail.com |
e2113a7ea5445429c1e44aeb9e26665ae73cf5dc | cce6dd55b66857761768c4f36fcd1e99da14e1ee | /mnist_cnn_gpu.py | 1e5b9fb3d66fa05e93a8e517d32057cd41f8d25c | [
"Apache-2.0"
] | permissive | siposbence/mnist-cnn-gpu | b98fec2df6e3472b536b90ed14dce16aed563274 | 7d01b7d909f795ceac540c69b239204d29c39dda | refs/heads/master | 2022-12-25T12:27:40.772000 | 2019-12-14T15:46:00 | 2019-12-14T15:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py | from __future__ import print_function
import tensorflow
import tensorflow.keras as keras
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras import backend as K
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
gpu_devices = tensorflow.config.experimental.list_physical_devices('GPU')
tensorflow.config.experimental.set_memory_growth(gpu_devices[0], True)
#print("GPUs: " + gpu_devices[0])
gpus = tensorflow.test.gpu_device_name()
print("GPUs: " + gpus)
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
best_check = ModelCheckpoint(filepath="model-best.h5", verbose=1, save_weights_only=True, save_best_only=True)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[best_check])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
"cannin@gmail.com"
] | cannin@gmail.com |
98cc0764581e92078db33632b9a8330ad97806de | 51d7e8c09793b50d45731bd5ab9b531b525cf6db | /tests/garage/torch/algos/test_maml_ppo.py | ea4ac63fd8c01d020ad7379470f45d65de0217bd | [
"MIT"
] | permissive | fangqyi/garage | 454247849a6a3f547557b3fac3787ba9eeb0391f | ddafba385ef005f46f913ab352f9638760e5b412 | refs/heads/master | 2023-02-25T00:43:18.903328 | 2021-01-26T01:52:15 | 2021-01-26T01:52:15 | 267,667,220 | 0 | 0 | MIT | 2020-05-28T18:35:08 | 2020-05-28T18:35:07 | null | UTF-8 | Python | false | false | 2,582 | py | """This script is a test that fails when MAML-TRPO performance is too low."""
import pytest
try:
# pylint: disable=unused-import
import mujoco_py # noqa: F401
except ImportError:
pytest.skip('To use mujoco-based features, please install garage[mujoco].',
allow_module_level=True)
except Exception: # pylint: disable=broad-except
pytest.skip(
'Skipping tests, failed to import mujoco. Do you have a '
'valid mujoco key installed?',
allow_module_level=True)
import torch
from garage.envs import GarageEnv
from garage.envs import normalize
from garage.envs.mujoco import HalfCheetahDirEnv
from garage.experiment import deterministic, LocalRunner
from garage.torch.algos import MAMLPPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
from tests.fixtures import snapshot_config
@pytest.mark.mujoco
class TestMAMLPPO:
"""Test class for MAML-PPO."""
def setup_method(self):
"""Setup method which is called before every test."""
self.env = GarageEnv(
normalize(HalfCheetahDirEnv(), expected_action_scale=10.))
self.policy = GaussianMLPPolicy(
env_spec=self.env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None,
)
self.value_function = GaussianMLPValueFunction(env_spec=self.env.spec,
hidden_sizes=(32, 32))
def teardown_method(self):
"""Teardown method which is called after every test."""
self.env.close()
def test_ppo_pendulum(self):
"""Test PPO with Pendulum environment."""
deterministic.set_seed(0)
rollouts_per_task = 5
max_path_length = 100
runner = LocalRunner(snapshot_config)
algo = MAMLPPO(env=self.env,
policy=self.policy,
value_function=self.value_function,
max_path_length=max_path_length,
meta_batch_size=5,
discount=0.99,
gae_lambda=1.,
inner_lr=0.1,
num_grad_updates=1)
runner.setup(algo, self.env)
last_avg_ret = runner.train(n_epochs=10,
batch_size=rollouts_per_task *
max_path_length)
assert last_avg_ret > -5
| [
"qiaoyi.fang@duke.edu"
] | qiaoyi.fang@duke.edu |
870bd9c0f9b119223bfade20649c0461cbd266fc | 787e0325fb6436ce010800154a0bf7d3dc2c0191 | /flask/app/user/user_resource.py | f3ef3f567375dbf36b51d02f5057857cf9e37ede | [
"MIT"
] | permissive | jj-style/FlaskReactTemplateApp | d538b4487a72266d02d4421eaff68fbf17a07571 | 7dc9ed014d33eb7d0355c5a4fee77b88f0bb3ed8 | refs/heads/master | 2023-03-02T15:45:15.779357 | 2021-02-06T13:39:58 | 2021-02-06T13:39:58 | 331,768,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | from flask import redirect
from flask import url_for
from flask import current_app
from flask_restful import Resource, marshal_with, reqparse
from flask_login import login_user
from flask_login import logout_user
from flask_login import current_user
from flask_login import login_required
from sqlalchemy import exc
from app.models import User as UserModel
from app import db
from app import login
from app.user.user_data import user_response, password_t
from datetime import datetime, timedelta
import jwt
@login.request_loader
def load_user(req):
auth_header = req.headers.get("X-Auth", "")
if auth_header == "":
return None
try:
token = auth_header
data = jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
user = UserModel.query.filter_by(username=data["sub"]).first_or_404()
if user:
return user
except jwt.ExpiredSignatureError:
print("token expired")
return None
except (jwt.InvalidTokenError, Exception) as e:
print("token invalid")
print(str(e))
return None
return None
class Users(Resource):
@marshal_with(user_response)
@login_required
def get(self):
"""Returns all users in the database
Returns:
List<Dict>: - List containing dictionary each representing a user model
"""
users = UserModel.query.all()
return users
class User(Resource):
@marshal_with(user_response)
@login_required
def get(self, id):
"""Get a user by their ID
Args:
id (int): unique ID of a user
Returns:
Dict: dictionary of user model or 404 if not found
"""
user = UserModel.query.filter_by(id=id).first_or_404()
return user
class Register(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument(
"username", type=str, location="json", required=True, nullable=False
)
self.reqparse.add_argument(
"email", type=str, location="json", required=True, nullable=False
)
self.reqparse.add_argument(
"password", type=password_t, location="json", required=True, nullable=False
)
super(Register, self).__init__()
def post(self):
args = self.reqparse.parse_args()
print(args)
try:
# add user to database
user = UserModel(username=args.username, email=args.email)
user.set_password(args.password)
db.session.add(user)
db.session.commit()
return "signed up", 200
except exc.IntegrityError:
# unique contraint violated
return "Error conflict", 409
class Login(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument("username", type=str, location="json", required=True)
self.reqparse.add_argument("password", type=str, location="json", required=True)
self.reqparse.add_argument(
"remember", type=bool, location="json", default=False
)
super(Login, self).__init__()
def post(self):
if current_user.is_authenticated:
print("already logged in")
return redirect(url_for("index"))
args = self.reqparse.parse_args()
user = UserModel.query.filter_by(username=args.username).first()
if user is None or not user.check_password(args.password):
return "Invalid username or password", 400
login_user(user, remember=args.remember)
token = jwt.encode(
{
"sub": user.username,
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=30),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"token": token}
class Logout(Resource):
@login_required
def get(self):
if not current_user.is_authenticated:
return "not logged in", 400
logout_user()
return redirect(url_for("index"))
| [
"style.jj@pm.me"
] | style.jj@pm.me |
fac3f04df019414ae685c3823333bcb2f171d65d | 52381a4fc02e90ce1fcfffd8d9876d9e8f44c248 | /core/jobs/batch_jobs/email_deletion_jobs.py | 895c4067d3191f9ccbef1490e639ea0c12d09bab | [
"Apache-2.0"
] | permissive | ankita240796/oppia | 18aa1609a0f237ce76142b2a0d3169e830e5bcdd | ba4f072e494fd59df53fecc37e67cea7f9727234 | refs/heads/develop | 2022-07-11T01:11:53.136252 | 2022-06-30T08:55:49 | 2022-06-30T08:55:49 | 160,626,761 | 0 | 0 | Apache-2.0 | 2020-04-28T16:12:26 | 2018-12-06T06:02:18 | Python | UTF-8 | Python | false | false | 5,406 | py | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation Jobs for blog models"""
from __future__ import annotations
from core.jobs import base_jobs
from core.jobs.io import ndb_io
from core.jobs.transforms import job_result_transforms
from core.jobs.types import job_run_result
from core.platform import models
import apache_beam as beam
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import email_models
from mypy_imports import feedback_models
from mypy_imports import user_models
(email_models, feedback_models, user_models) = models.Registry.import_models([
models.NAMES.email, models.NAMES.feedback, models.NAMES.user
])
class DeleteUnneededEmailRelatedModelsJob(base_jobs.JobBase):
"""Job that deletes emails models that belonged to users that were deleted
as part of the wipeout process.
"""
def run(self) -> beam.PCollection[job_run_result.JobRunResult]:
deleted_user_ids_collection = (
self.pipeline
| 'Get all deleted user models' >> ndb_io.GetModels(
user_models.DeletedUserModel.get_all())
| 'Extract user IDs' >> beam.Map(
lambda deleted_user_model: deleted_user_model.id)
)
deleted_user_ids = beam.pvalue.AsIter(deleted_user_ids_collection)
sent_email_models_to_delete = (
self.pipeline
| 'Get all sent email models' >> ndb_io.GetModels(
email_models.SentEmailModel.get_all())
| 'Filter sent email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: (
model.sender_id in ids or model.recipient_id in ids),
ids=deleted_user_ids
))
)
sent_email_models_to_delete_result = (
sent_email_models_to_delete
| 'Count sent email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult('SENT EMAILS'))
)
bulk_email_models_to_delete = (
self.pipeline
| 'Get all bulk email models' >> ndb_io.GetModels(
email_models.BulkEmailModel.get_all())
| 'Filter bulk email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.sender_id in ids,
ids=deleted_user_ids
))
)
bulk_email_models_to_delete_result = (
bulk_email_models_to_delete
| 'Count bulk email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult('BULK EMAILS'))
)
unsent_feedback_email_models_to_delete = (
self.pipeline
| 'Get all unsent feedback models' >> ndb_io.GetModels(
feedback_models.UnsentFeedbackEmailModel.get_all())
| 'Filter unsent feedback models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.id in ids, ids=deleted_user_ids))
)
unsent_feedback_email_models_to_delete_result = (
unsent_feedback_email_models_to_delete
| 'Count unsent feedback email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult(
'FEEDBACK EMAILS'))
)
user_bulk_emails_models_to_delete = (
self.pipeline
| 'Get all user bulk email models' >> ndb_io.GetModels(
user_models.UserBulkEmailsModel.get_all())
| 'Filter user bulk email models that belong to deleted users' >> (
beam.Filter(
lambda model, ids: model.id in ids, ids=deleted_user_ids))
)
user_bulk_emails_models_to_delete_result = (
user_bulk_emails_models_to_delete
| 'Count user bulk email models to be deleted' >> (
job_result_transforms.CountObjectsToJobRunResult(
'USER BULK EMAILS'))
)
unused_models_deletion = (
(
sent_email_models_to_delete,
bulk_email_models_to_delete,
unsent_feedback_email_models_to_delete,
user_bulk_emails_models_to_delete
)
| 'Merge models' >> beam.Flatten()
| 'Extract keys' >> beam.Map(lambda model: model.key)
| 'Delete models' >> ndb_io.DeleteModels()
)
return (
(
sent_email_models_to_delete_result,
bulk_email_models_to_delete_result,
unsent_feedback_email_models_to_delete_result,
user_bulk_emails_models_to_delete_result,
)
| 'Merge results' >> beam.Flatten()
)
| [
"noreply@github.com"
] | ankita240796.noreply@github.com |
301225f3d969658bd238c02066ff527524faf878 | 1454fcf1109549dcdf4ca294611c17067349514b | /010.py | fbcdb14af48d51ab704be1ad70a2de93c429893b | [] | no_license | maydhak/project-euler | 98c28499923ae31ef855276754b526df4030fbb6 | 987ad6a1768663a419d81d3376a2e1a260fa3c6d | refs/heads/master | 2020-07-05T11:07:15.466173 | 2020-01-31T04:29:00 | 2020-01-31T04:29:00 | 202,633,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | """
Problem 10:
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
"""
# My first solution was to just change the check in the while loop to 'max(primes_found) < 2000000'
# and change 'max' to 'sum' in the last line of the program. But it was taking WAY too long to compute.
# Then I realized n = max(primes_found) and that one of the while loops was really unnecessary. After making
# these changes, the program ran a bit faster but still took quite a while (about half an hour). Still, it does
# give the right result.
def solution10():
primes_found = {2}
n = 3
while n <= 2000000:
prime = True
for p in primes_found: # check the new number against all previously found primes
if p < n/2: # if the prime divisor > half the number, the quotient will be < 2 and it can no longer be prime; 2 is the smallest prime
if n % p == 0:
prime = False
break # breaks out of checking the number against previously found primes
if prime:
primes_found.add(n)
n += 2 # prime numbers must be odd, so we will increment by 2
print(sum(primes_found))
# Rewritten version of problem 7 using just one while loop:
def solution7_1():
primes_found = {2}
n = 3
while len(primes_found) < 10001:
prime = True
for p in primes_found: # check the new number against all previously found primes
if p < n/2: # if the prime divisor > half the number, the quotient will be < 2 and it can no longer be prime; 2 is the smallest prime
if n % p == 0:
prime = False
break # breaks out of checking the number against previously found primes
if prime:
primes_found.add(n)
n += 2 # prime numbers must be odd, so we will increment by 2
print(max(primes_found))
| [
"maydhak@gmail.com"
] | maydhak@gmail.com |
186b6ab5b4853fd0e6e79101be8a1dd942089fed | d4cd60d109f9e5955093fd3682deadfccf8c10cc | /Easy_or_hard.py | d0ecd54f501c9067a0d8b139f896dd0743300454 | [] | no_license | richards12306/leetcode | 3a52a01a53131b238b2c77b0721426dfbc254fb1 | 4f72eb3d2b1483d7c4646d9a91e4e7be15d166b1 | refs/heads/master | 2021-03-21T11:43:33.939399 | 2020-03-15T03:30:43 | 2020-03-15T03:30:43 | 247,289,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | #!/usr/bin/python3
#-*- coding=utf-8 -*-
name_num = int(input())
for i in range(name_num):
name = input().split( )
print('{} {}'.format(name[1],name[0]))
#include<stdio.h>
#include<string.h>
int main(){
int i,t,j,l,n;
scanf("%d",&t);
char name[2][200];
for(i=0;i<t;i++){
scanf("%s %s",&name[0],&name[1]);
for(j=0;j<2;j++){
l=strlen(name[j]);
n=0;
while(n<l){
if((n==0)&&(name[j][0]>='a')&&(name[j][0]<='z'))
name[j][0]-=32;
else if((n>0)&&(name[j][n]>='A')&&(name[j][n]<='Z'))
name[j][n]+=32;
n++;
}
}
printf("%s %s\n",name[1],name[0]);
}
return 0;
}
| [
"richards12306@gmail.com"
] | richards12306@gmail.com |
4afc588764dd5336564155aff3c892958612465f | be354ff04646ae2137320f039405f58438890579 | /VVS/TestCases/ChangeMaintenanceDir.py | dc3f65772577dfb1025aa99e2d98c256f580548e | [
"MIT"
] | permissive | virtual5311/ForthYear | 5a81e6c141100bd8b1d80671fe603020d5ca67d2 | d630ae9e6ac23214e6ecae7c9c843baafb424343 | refs/heads/master | 2021-05-27T17:29:00.270441 | 2015-04-15T16:15:18 | 2015-04-15T16:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | #{{{ Marathon
from default import *
#}}} Marathon
def test():
set_java_recorded_version("1.8.0_25")
if window(''):
assert_p('Start server', 'Text', 'Start server')
assert_p('lbl:Maintenance Dir', 'Text', 'Maintenance Dir:')
assert_p('lbl:./TestSite/maintenance', 'Text', './TestSite/maintenance')
click('..._2')
if window('Open'):
select('JFileChooser_0', '#H/Music')
close()
assert_p('lbl:./TestSite/maintenance', 'Text', './TestSite/maintenance')
click('SubmitConfig')
assert_p('lbl:/Users/salexandru/Music', 'Text', '/Users/salexandru/Music')
select('Maintenance directory', '/Users/lexanu/Music')
click('SubmitConfig')
if window('Configuration'):
assert_p('JPanel_3', 'Enabled', 'true')
assert_p('JPanel_2', 'Enabled', 'true')
assert_p('JOptionPane_0', 'Enabled', 'true')
click('OK')
close()
close()
pass | [
"salexandru.alex@gmail.com"
] | salexandru.alex@gmail.com |
dc5114d0245b09e0eaa2f2b05f054de35b7a8bba | 966b549fe69fa9dead21d162e3ebaa7eba97aa5c | /bespoke_tuition/bespoke_tuition/wsgi.py | cc91e24616ec33703ba10921d07cbff7b0989f8e | [] | no_license | SeanWaite/CETM67_ASSIGNMENT | ae4556f392f68dddc0e70b688f6fe4d8d29fda7d | b58daea0864bf8aa085fb6e00815aac8807e0576 | refs/heads/main | 2023-06-17T21:45:24.436083 | 2021-07-16T14:24:06 | 2021-07-16T14:24:06 | 386,660,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for bespoke_tuition project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bespoke_tuition.settings')
application = get_wsgi_application()
| [
"bh83dr@student.sunderland.ac.uk"
] | bh83dr@student.sunderland.ac.uk |
abcf7ebdc121e3c0b4067a1082bb237a7b9624ea | ac46749c6b1ffde83daf4449d862301bc3f31a8d | /app/sendSMS.py | 3739b2438e6b9489efcdbad8f6eda37eada8b59e | [] | no_license | petermassaro/flask-paint | 9d074d53761ae225d333950a98c9620411d933c0 | 64823eb25052481ff5adcc1bd9251523557d881f | refs/heads/master | 2022-12-10T15:51:14.319197 | 2018-04-21T15:43:37 | 2018-04-21T15:43:37 | 130,480,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from twilio.rest import Client
from flask import current_app
from threading import Thread
def sendSMS(recipientNumber, messageContent):
client = Client(current_app.config['TWILIO_ACCOUNT_SID'], app.config['TWILIO_AUTH_TOKEN'])
message = client.api.account.messages.create(
to='+1{}'.format(recipientNumber),
from_=current_app.config['TWILIO_NUMBER'],
body=messageContent)
| [
"petermassaro@Peters-MacBook-Air.local"
] | petermassaro@Peters-MacBook-Air.local |
58e763898710361ea138991802ef384274628d64 | f0681b8c129e8afce21e340697502230f45ce930 | /venv/Lib/site-packages/com/vmware/vcenter/vm_client.py | a08c07d9358ac1cb0cb378966e1199db49d71547 | [] | no_license | dungla2011/python_pyvmomi_working_sample_vmware_easy | 8852b6fdcd0f7d0f648f6f7b6c6e4f70c7213746 | a3b6d86a802f28c7ee249fc03523d5e5f0a2e3bd | refs/heads/main | 2023-07-05T14:56:46.551091 | 2021-08-20T12:19:39 | 2021-08-20T12:19:39 | 395,496,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109,741 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vcenter.vm.
#---------------------------------------------------------------------------
"""
The ``com.vmware.vcenter.vm_client`` module provides classes for managing
virtual machines.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class GuestOS(Enum):
"""
The ``GuestOS`` class defines the valid guest operating system types used
for configuring a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
DOS = None
"""
MS-DOS.
"""
WIN_31 = None
"""
Windows 3.1
"""
WIN_95 = None
"""
Windows 95
"""
WIN_98 = None
"""
Windows 98
"""
WIN_ME = None
"""
Windows Millennium Edition
"""
WIN_NT = None
"""
Windows NT 4
"""
WIN_2000_PRO = None
"""
Windows 2000 Professional
"""
WIN_2000_SERV = None
"""
Windows 2000 Server
"""
WIN_2000_ADV_SERV = None
"""
Windows 2000 Advanced Server
"""
WIN_XP_HOME = None
"""
Windows XP Home Edition
"""
WIN_XP_PRO = None
"""
Windows XP Professional
"""
WIN_XP_PRO_64 = None
"""
Windows XP Professional Edition (64 bit)
"""
WIN_NET_WEB = None
"""
Windows Server 2003, Web Edition
"""
WIN_NET_STANDARD = None
"""
Windows Server 2003, Standard Edition
"""
WIN_NET_ENTERPRISE = None
"""
Windows Server 2003, Enterprise Edition
"""
WIN_NET_DATACENTER = None
"""
Windows Server 2003, Datacenter Edition
"""
WIN_NET_BUSINESS = None
"""
Windows Small Business Server 2003
"""
WIN_NET_STANDARD_64 = None
"""
Windows Server 2003, Standard Edition (64 bit)
"""
WIN_NET_ENTERPRISE_64 = None
"""
Windows Server 2003, Enterprise Edition (64 bit)
"""
WIN_LONGHORN = None
"""
Windows Longhorn (experimental)
"""
WIN_LONGHORN_64 = None
"""
Windows Longhorn (64 bit) (experimental)
"""
WIN_NET_DATACENTER_64 = None
"""
Windows Server 2003, Datacenter Edition (64 bit) (experimental)
"""
WIN_VISTA = None
"""
Windows Vista
"""
WIN_VISTA_64 = None
"""
Windows Vista (64 bit)
"""
WINDOWS_7 = None
"""
Windows 7
"""
WINDOWS_7_64 = None
"""
Windows 7 (64 bit)
"""
WINDOWS_7_SERVER_64 = None
"""
Windows Server 2008 R2 (64 bit)
"""
WINDOWS_8 = None
"""
Windows 8
"""
WINDOWS_8_64 = None
"""
Windows 8 (64 bit)
"""
WINDOWS_8_SERVER_64 = None
"""
Windows 8 Server (64 bit)
"""
WINDOWS_9 = None
"""
Windows 10
"""
WINDOWS_9_64 = None
"""
Windows 10 (64 bit)
"""
WINDOWS_9_SERVER_64 = None
"""
Windows 10 Server (64 bit)
"""
WINDOWS_HYPERV = None
"""
Windows Hyper-V
"""
WINDOWS_SERVER_2019 = None
"""
Windows Server 2019. This class attribute was added in vSphere API 7.0.0.0.
"""
WINDOWS_SERVER_2021 = None
"""
Windows Server 2022. This class attribute was added in vSphere API 7.0.1.0.
"""
FREEBSD = None
"""
FreeBSD 10 or earlier
"""
FREEBSD_64 = None
"""
FreeBSD 10 x64 or earlier
"""
FREEBSD_11 = None
"""
FreeBSD 11. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_12 = None
"""
FreeBSD 12. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_13 = None
"""
FreeBSD 13 or later. This class attribute was added in vSphere API 7.0.1.0.
"""
FREEBSD_11_64 = None
"""
FreeBSD 11 x64. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_12_64 = None
"""
FreeBSD 12 x64. This class attribute was added in vSphere API 6.7.
"""
FREEBSD_13_64 = None
"""
FreeBSD 13 x64 or later. This class attribute was added in vSphere API
7.0.1.0.
"""
REDHAT = None
"""
Red Hat Linux 2.1
"""
RHEL_2 = None
"""
Red Hat Enterprise Linux 2
"""
RHEL_3 = None
"""
Red Hat Enterprise Linux 3
"""
RHEL_3_64 = None
"""
Red Hat Enterprise Linux 3 (64 bit)
"""
RHEL_4 = None
"""
Red Hat Enterprise Linux 4
"""
RHEL_4_64 = None
"""
Red Hat Enterprise Linux 4 (64 bit)
"""
RHEL_5 = None
"""
Red Hat Enterprise Linux 5
"""
RHEL_5_64 = None
"""
Red Hat Enterprise Linux 5 (64 bit) (experimental)
"""
RHEL_6 = None
"""
Red Hat Enterprise Linux 6
"""
RHEL_6_64 = None
"""
Red Hat Enterprise Linux 6 (64 bit)
"""
RHEL_7 = None
"""
Red Hat Enterprise Linux 7
"""
RHEL_7_64 = None
"""
Red Hat Enterprise Linux 7 (64 bit)
"""
RHEL_8_64 = None
"""
Red Hat Enterprise Linux 8 (64 bit). This class attribute was added in
vSphere API 6.7.
"""
RHEL_9_64 = None
"""
Red Hat Enterprise Linux 9 (64 bit). This class attribute was added in
vSphere API 7.0.1.0.
"""
CENTOS = None
"""
CentOS 4/5
"""
CENTOS_64 = None
"""
CentOS 4/5 (64-bit)
"""
CENTOS_6 = None
"""
CentOS 6
"""
CENTOS_6_64 = None
"""
CentOS 6 (64-bit)
"""
CENTOS_7 = None
"""
CentOS 7
"""
CENTOS_7_64 = None
"""
CentOS 7 (64-bit)
"""
CENTOS_8_64 = None
"""
CentOS 8 (64-bit). This class attribute was added in vSphere API 6.7.
"""
CENTOS_9_64 = None
"""
CentOS 9 (64-bit). This class attribute was added in vSphere API 7.0.1.0.
"""
ORACLE_LINUX = None
"""
Oracle Linux 4/5
"""
ORACLE_LINUX_64 = None
"""
Oracle Linux 4/5 (64-bit)
"""
ORACLE_LINUX_6 = None
"""
Oracle Linux 6
"""
ORACLE_LINUX_6_64 = None
"""
Oracle Linux 6 (64-bit)
"""
ORACLE_LINUX_7 = None
"""
Oracle Linux 7
"""
ORACLE_LINUX_7_64 = None
"""
Oracle Linux 7 (64-bit)
"""
ORACLE_LINUX_8_64 = None
"""
Oracle Linux 8 (64-bit). This class attribute was added in vSphere API 6.7.
"""
ORACLE_LINUX_9_64 = None
"""
Oracle Linux 9 (64-bit). This class attribute was added in vSphere API
7.0.1.0.
"""
SUSE = None
"""
Suse Linux
"""
SUSE_64 = None
"""
Suse Linux (64 bit)
"""
SLES = None
"""
Suse Linux Enterprise Server 9
"""
SLES_64 = None
"""
Suse Linux Enterprise Server 9 (64 bit)
"""
SLES_10 = None
"""
Suse linux Enterprise Server 10
"""
SLES_10_64 = None
"""
Suse Linux Enterprise Server 10 (64 bit) (experimental)
"""
SLES_11 = None
"""
Suse linux Enterprise Server 11
"""
SLES_11_64 = None
"""
Suse Linux Enterprise Server 11 (64 bit)
"""
SLES_12 = None
"""
Suse linux Enterprise Server 12
"""
SLES_12_64 = None
"""
Suse Linux Enterprise Server 12 (64 bit)
"""
SLES_15_64 = None
"""
Suse Linux Enterprise Server 15 (64 bit). This class attribute was added in
vSphere API 6.7.
"""
SLES_16_64 = None
"""
Suse Linux Enterprise Server 16 (64 bit). This class attribute was added in
vSphere API 7.0.1.0.
"""
NLD_9 = None
"""
Novell Linux Desktop 9
"""
OES = None
"""
Open Enterprise Server
"""
SJDS = None
"""
Sun Java Desktop System
"""
MANDRAKE = None
"""
Mandrake Linux
"""
MANDRIVA = None
"""
Mandriva Linux
"""
MANDRIVA_64 = None
"""
Mandriva Linux (64 bit)
"""
TURBO_LINUX = None
"""
Turbolinux
"""
TURBO_LINUX_64 = None
"""
Turbolinux (64 bit)
"""
UBUNTU = None
"""
Ubuntu Linux
"""
UBUNTU_64 = None
"""
Ubuntu Linux (64 bit)
"""
DEBIAN_4 = None
"""
Debian GNU/Linux 4
"""
DEBIAN_4_64 = None
"""
Debian GNU/Linux 4 (64 bit)
"""
DEBIAN_5 = None
"""
Debian GNU/Linux 5
"""
DEBIAN_5_64 = None
"""
Debian GNU/Linux 5 (64 bit)
"""
DEBIAN_6 = None
"""
Debian GNU/Linux 6
"""
DEBIAN_6_64 = None
"""
Debian GNU/Linux 6 (64 bit)
"""
DEBIAN_7 = None
"""
Debian GNU/Linux 7
"""
DEBIAN_7_64 = None
"""
Debian GNU/Linux 7 (64 bit)
"""
DEBIAN_8 = None
"""
Debian GNU/Linux 8
"""
DEBIAN_8_64 = None
"""
Debian GNU/Linux 8 (64 bit)
"""
DEBIAN_9 = None
"""
Debian GNU/Linux 9
"""
DEBIAN_9_64 = None
"""
Debian GNU/Linux 9 (64 bit)
"""
DEBIAN_10 = None
"""
Debian GNU/Linux 10
"""
DEBIAN_10_64 = None
"""
Debian GNU/Linux 10 (64 bit)
"""
DEBIAN_11 = None
"""
Debian GNU/Linux 11. This class attribute was added in vSphere API 7.0.0.0.
"""
DEBIAN_11_64 = None
"""
Debian GNU/Linux 11 (64 bit). This class attribute was added in vSphere API
7.0.0.0.
"""
ASIANUX_3 = None
"""
Asianux Server 3
"""
ASIANUX_3_64 = None
"""
Asianux Server 3 (64 bit)
"""
ASIANUX_4 = None
"""
Asianux Server 4
"""
ASIANUX_4_64 = None
"""
Asianux Server 4 (64 bit)
"""
ASIANUX_5_64 = None
"""
Asianux Server 5 (64 bit)
"""
ASIANUX_7_64 = None
"""
Asianux Server 7 (64 bit)
"""
ASIANUX_8_64 = None
"""
Asianux Server 8 (64 bit). This class attribute was added in vSphere API
6.7.
"""
ASIANUX_9_64 = None
"""
Asianux Server 9 (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
OPENSUSE = None
"""
OpenSUSE Linux
"""
OPENSUSE_64 = None
"""
OpenSUSE Linux (64 bit)
"""
FEDORA = None
"""
Fedora Linux
"""
FEDORA_64 = None
"""
Fedora Linux (64 bit)
"""
COREOS_64 = None
"""
CoreOS Linux (64 bit)
"""
VMWARE_PHOTON_64 = None
"""
VMware Photon (64 bit)
"""
OTHER_24X_LINUX = None
"""
Linux 2.4x Kernel
"""
OTHER_24X_LINUX_64 = None
"""
Linux 2.4x Kernel (64 bit) (experimental)
"""
OTHER_26X_LINUX = None
"""
Linux 2.6x Kernel
"""
OTHER_26X_LINUX_64 = None
"""
Linux 2.6x Kernel (64 bit) (experimental)
"""
OTHER_3X_LINUX = None
"""
Linux 3.x Kernel
"""
OTHER_3X_LINUX_64 = None
"""
Linux 3.x Kernel (64 bit)
"""
OTHER_4X_LINUX = None
"""
Linux 4.x Kernel. This class attribute was added in vSphere API 6.7.
"""
OTHER_4X_LINUX_64 = None
"""
Linux 4.x Kernel (64 bit). This class attribute was added in vSphere API
6.7.
"""
OTHER_5X_LINUX = None
"""
Linux 5.x Kernel. This class attribute was added in vSphere API 7.0.1.0.
"""
OTHER_5X_LINUX_64 = None
"""
Linux 5.x Kernel (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
OTHER_LINUX = None
"""
Linux 2.2x Kernel
"""
GENERIC_LINUX = None
"""
Other Linux
"""
OTHER_LINUX_64 = None
"""
Linux (64 bit) (experimental)
"""
SOLARIS_6 = None
"""
Solaris 6
"""
SOLARIS_7 = None
"""
Solaris 7
"""
SOLARIS_8 = None
"""
Solaris 8
"""
SOLARIS_9 = None
"""
Solaris 9
"""
SOLARIS_10 = None
"""
Solaris 10 (32 bit) (experimental)
"""
SOLARIS_10_64 = None
"""
Solaris 10 (64 bit) (experimental)
"""
SOLARIS_11_64 = None
"""
Solaris 11 (64 bit)
"""
OS2 = None
"""
OS/2
"""
ECOMSTATION = None
"""
eComStation 1.x
"""
ECOMSTATION_2 = None
"""
eComStation 2.0
"""
NETWARE_4 = None
"""
Novell NetWare 4
"""
NETWARE_5 = None
"""
Novell NetWare 5.1
"""
NETWARE_6 = None
"""
Novell NetWare 6.x
"""
OPENSERVER_5 = None
"""
SCO OpenServer 5
"""
OPENSERVER_6 = None
"""
SCO OpenServer 6
"""
UNIXWARE_7 = None
"""
SCO UnixWare 7
"""
DARWIN = None
"""
Mac OS 10.5
"""
DARWIN_64 = None
"""
Mac OS 10.5 (64 bit)
"""
DARWIN_10 = None
"""
Mac OS 10.6
"""
DARWIN_10_64 = None
"""
Mac OS 10.6 (64 bit)
"""
DARWIN_11 = None
"""
Mac OS 10.7
"""
DARWIN_11_64 = None
"""
Mac OS 10.7 (64 bit)
"""
DARWIN_12_64 = None
"""
Mac OS 10.8 (64 bit)
"""
DARWIN_13_64 = None
"""
Mac OS 10.9 (64 bit)
"""
DARWIN_14_64 = None
"""
Mac OS 10.10 (64 bit)
"""
DARWIN_15_64 = None
"""
Mac OS 10.11 (64 bit)
"""
DARWIN_16_64 = None
"""
Mac OS 10.12 (64 bit)
"""
DARWIN_17_64 = None
"""
Mac OS 10.13 (64 bit). This class attribute was added in vSphere API 6.7.
"""
DARWIN_18_64 = None
"""
Mac OS 10.14 (64 bit). This class attribute was added in vSphere API 6.7.
"""
DARWIN_19_64 = None
"""
Mac OS 10.15 (64 bit). This class attribute was added in vSphere API
7.0.0.0.
"""
DARWIN_20_64 = None
"""
Mac OS 11 (64 bit). This class attribute was added in vSphere API 7.0.1.0.
"""
DARWIN_21_64 = None
"""
Mac OS 12 (64 bit). This class attribute was added in vSphere API 7.0.1.0.
"""
VMKERNEL = None
"""
VMware ESX 4
"""
VMKERNEL_5 = None
"""
VMware ESX 5
"""
VMKERNEL_6 = None
"""
VMware ESX 6
"""
VMKERNEL_65 = None
"""
VMware ESX 6.5
"""
VMKERNEL_7 = None
"""
VMware ESX 7. This class attribute was added in vSphere API 7.0.0.0.
"""
AMAZONLINUX2_64 = None
"""
Amazon Linux 2 (64 bit). This class attribute was added in vSphere API
6.7.1.
"""
AMAZONLINUX3_64 = None
"""
Amazon Linux 3 (64 bit). This class attribute was added in vSphere API
7.0.1.0.
"""
CRXPOD_1 = None
"""
CRX Pod 1. This class attribute was added in vSphere API 7.0.0.0.
"""
OTHER = None
"""
Other Operating System
"""
OTHER_64 = None
"""
Other Operating System (64 bit) (experimental)
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`GuestOS` instance.
"""
Enum.__init__(string)
GuestOS._set_values([
GuestOS('DOS'),
GuestOS('WIN_31'),
GuestOS('WIN_95'),
GuestOS('WIN_98'),
GuestOS('WIN_ME'),
GuestOS('WIN_NT'),
GuestOS('WIN_2000_PRO'),
GuestOS('WIN_2000_SERV'),
GuestOS('WIN_2000_ADV_SERV'),
GuestOS('WIN_XP_HOME'),
GuestOS('WIN_XP_PRO'),
GuestOS('WIN_XP_PRO_64'),
GuestOS('WIN_NET_WEB'),
GuestOS('WIN_NET_STANDARD'),
GuestOS('WIN_NET_ENTERPRISE'),
GuestOS('WIN_NET_DATACENTER'),
GuestOS('WIN_NET_BUSINESS'),
GuestOS('WIN_NET_STANDARD_64'),
GuestOS('WIN_NET_ENTERPRISE_64'),
GuestOS('WIN_LONGHORN'),
GuestOS('WIN_LONGHORN_64'),
GuestOS('WIN_NET_DATACENTER_64'),
GuestOS('WIN_VISTA'),
GuestOS('WIN_VISTA_64'),
GuestOS('WINDOWS_7'),
GuestOS('WINDOWS_7_64'),
GuestOS('WINDOWS_7_SERVER_64'),
GuestOS('WINDOWS_8'),
GuestOS('WINDOWS_8_64'),
GuestOS('WINDOWS_8_SERVER_64'),
GuestOS('WINDOWS_9'),
GuestOS('WINDOWS_9_64'),
GuestOS('WINDOWS_9_SERVER_64'),
GuestOS('WINDOWS_HYPERV'),
GuestOS('WINDOWS_SERVER_2019'),
GuestOS('WINDOWS_SERVER_2021'),
GuestOS('FREEBSD'),
GuestOS('FREEBSD_64'),
GuestOS('FREEBSD_11'),
GuestOS('FREEBSD_12'),
GuestOS('FREEBSD_13'),
GuestOS('FREEBSD_11_64'),
GuestOS('FREEBSD_12_64'),
GuestOS('FREEBSD_13_64'),
GuestOS('REDHAT'),
GuestOS('RHEL_2'),
GuestOS('RHEL_3'),
GuestOS('RHEL_3_64'),
GuestOS('RHEL_4'),
GuestOS('RHEL_4_64'),
GuestOS('RHEL_5'),
GuestOS('RHEL_5_64'),
GuestOS('RHEL_6'),
GuestOS('RHEL_6_64'),
GuestOS('RHEL_7'),
GuestOS('RHEL_7_64'),
GuestOS('RHEL_8_64'),
GuestOS('RHEL_9_64'),
GuestOS('CENTOS'),
GuestOS('CENTOS_64'),
GuestOS('CENTOS_6'),
GuestOS('CENTOS_6_64'),
GuestOS('CENTOS_7'),
GuestOS('CENTOS_7_64'),
GuestOS('CENTOS_8_64'),
GuestOS('CENTOS_9_64'),
GuestOS('ORACLE_LINUX'),
GuestOS('ORACLE_LINUX_64'),
GuestOS('ORACLE_LINUX_6'),
GuestOS('ORACLE_LINUX_6_64'),
GuestOS('ORACLE_LINUX_7'),
GuestOS('ORACLE_LINUX_7_64'),
GuestOS('ORACLE_LINUX_8_64'),
GuestOS('ORACLE_LINUX_9_64'),
GuestOS('SUSE'),
GuestOS('SUSE_64'),
GuestOS('SLES'),
GuestOS('SLES_64'),
GuestOS('SLES_10'),
GuestOS('SLES_10_64'),
GuestOS('SLES_11'),
GuestOS('SLES_11_64'),
GuestOS('SLES_12'),
GuestOS('SLES_12_64'),
GuestOS('SLES_15_64'),
GuestOS('SLES_16_64'),
GuestOS('NLD_9'),
GuestOS('OES'),
GuestOS('SJDS'),
GuestOS('MANDRAKE'),
GuestOS('MANDRIVA'),
GuestOS('MANDRIVA_64'),
GuestOS('TURBO_LINUX'),
GuestOS('TURBO_LINUX_64'),
GuestOS('UBUNTU'),
GuestOS('UBUNTU_64'),
GuestOS('DEBIAN_4'),
GuestOS('DEBIAN_4_64'),
GuestOS('DEBIAN_5'),
GuestOS('DEBIAN_5_64'),
GuestOS('DEBIAN_6'),
GuestOS('DEBIAN_6_64'),
GuestOS('DEBIAN_7'),
GuestOS('DEBIAN_7_64'),
GuestOS('DEBIAN_8'),
GuestOS('DEBIAN_8_64'),
GuestOS('DEBIAN_9'),
GuestOS('DEBIAN_9_64'),
GuestOS('DEBIAN_10'),
GuestOS('DEBIAN_10_64'),
GuestOS('DEBIAN_11'),
GuestOS('DEBIAN_11_64'),
GuestOS('ASIANUX_3'),
GuestOS('ASIANUX_3_64'),
GuestOS('ASIANUX_4'),
GuestOS('ASIANUX_4_64'),
GuestOS('ASIANUX_5_64'),
GuestOS('ASIANUX_7_64'),
GuestOS('ASIANUX_8_64'),
GuestOS('ASIANUX_9_64'),
GuestOS('OPENSUSE'),
GuestOS('OPENSUSE_64'),
GuestOS('FEDORA'),
GuestOS('FEDORA_64'),
GuestOS('COREOS_64'),
GuestOS('VMWARE_PHOTON_64'),
GuestOS('OTHER_24X_LINUX'),
GuestOS('OTHER_24X_LINUX_64'),
GuestOS('OTHER_26X_LINUX'),
GuestOS('OTHER_26X_LINUX_64'),
GuestOS('OTHER_3X_LINUX'),
GuestOS('OTHER_3X_LINUX_64'),
GuestOS('OTHER_4X_LINUX'),
GuestOS('OTHER_4X_LINUX_64'),
GuestOS('OTHER_5X_LINUX'),
GuestOS('OTHER_5X_LINUX_64'),
GuestOS('OTHER_LINUX'),
GuestOS('GENERIC_LINUX'),
GuestOS('OTHER_LINUX_64'),
GuestOS('SOLARIS_6'),
GuestOS('SOLARIS_7'),
GuestOS('SOLARIS_8'),
GuestOS('SOLARIS_9'),
GuestOS('SOLARIS_10'),
GuestOS('SOLARIS_10_64'),
GuestOS('SOLARIS_11_64'),
GuestOS('OS2'),
GuestOS('ECOMSTATION'),
GuestOS('ECOMSTATION_2'),
GuestOS('NETWARE_4'),
GuestOS('NETWARE_5'),
GuestOS('NETWARE_6'),
GuestOS('OPENSERVER_5'),
GuestOS('OPENSERVER_6'),
GuestOS('UNIXWARE_7'),
GuestOS('DARWIN'),
GuestOS('DARWIN_64'),
GuestOS('DARWIN_10'),
GuestOS('DARWIN_10_64'),
GuestOS('DARWIN_11'),
GuestOS('DARWIN_11_64'),
GuestOS('DARWIN_12_64'),
GuestOS('DARWIN_13_64'),
GuestOS('DARWIN_14_64'),
GuestOS('DARWIN_15_64'),
GuestOS('DARWIN_16_64'),
GuestOS('DARWIN_17_64'),
GuestOS('DARWIN_18_64'),
GuestOS('DARWIN_19_64'),
GuestOS('DARWIN_20_64'),
GuestOS('DARWIN_21_64'),
GuestOS('VMKERNEL'),
GuestOS('VMKERNEL_5'),
GuestOS('VMKERNEL_6'),
GuestOS('VMKERNEL_65'),
GuestOS('VMKERNEL_7'),
GuestOS('AMAZONLINUX2_64'),
GuestOS('AMAZONLINUX3_64'),
GuestOS('CRXPOD_1'),
GuestOS('OTHER'),
GuestOS('OTHER_64'),
])
GuestOS._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.guest_OS',
GuestOS))
class GuestOSFamily(Enum):
"""
The ``GuestOSFamily`` class defines the valid guest operating system family
types reported by a virtual machine. This enumeration was added in vSphere
API 6.7.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
WINDOWS = None
"""
Windows operating system. This class attribute was added in vSphere API
6.7.
"""
LINUX = None
"""
Linux operating system. This class attribute was added in vSphere API 6.7.
"""
NETWARE = None
"""
Novell Netware. This class attribute was added in vSphere API 6.7.
"""
SOLARIS = None
"""
Solaris operating system. This class attribute was added in vSphere API
6.7.
"""
DARWIN = None
"""
Mac OS operating system. This class attribute was added in vSphere API 6.7.
"""
OTHER = None
"""
Other operating systems. This class attribute was added in vSphere API 6.7.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`GuestOSFamily` instance.
"""
Enum.__init__(string)
GuestOSFamily._set_values([
GuestOSFamily('WINDOWS'),
GuestOSFamily('LINUX'),
GuestOSFamily('NETWARE'),
GuestOSFamily('SOLARIS'),
GuestOSFamily('DARWIN'),
GuestOSFamily('OTHER'),
])
GuestOSFamily._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.guest_OS_family',
GuestOSFamily))
class Hardware(VapiInterface):
"""
The ``Hardware`` class provides methods for configuring the virtual
hardware of a virtual machine.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.hardware'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _HardwareStub)
self._VAPI_OPERATION_IDS = {}
class Version(Enum):
"""
The ``Hardware.Version`` class defines the valid virtual hardware versions
for a virtual machine. See https://kb.vmware.com/s/article/1003746 (Virtual
machine hardware versions (1003746)).
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
VMX_03 = None
"""
Hardware version 3, first supported in ESXi 2.5.
"""
VMX_04 = None
"""
Hardware version 4, first supported in ESXi 3.0.
"""
VMX_06 = None
"""
Hardware version 6, first supported in WS 6.0.
"""
VMX_07 = None
"""
Hardware version 7, first supported in ESXi 4.0.
"""
VMX_08 = None
"""
Hardware version 8, first supported in ESXi 5.0.
"""
VMX_09 = None
"""
Hardware version 9, first supported in ESXi 5.1.
"""
VMX_10 = None
"""
Hardware version 10, first supported in ESXi 5.5.
"""
VMX_11 = None
"""
Hardware version 11, first supported in ESXi 6.0.
"""
VMX_12 = None
"""
Hardware version 12, first supported in Workstation 12.0.
"""
VMX_13 = None
"""
Hardware version 13, first supported in ESXi 6.5.
"""
VMX_14 = None
"""
Hardware version 14, first supported in ESXi 6.7. This class attribute was
added in vSphere API 6.7.
"""
VMX_15 = None
"""
Hardware version 15, first supported in ESXi 6.7 Update 2. This class
attribute was added in vSphere API 6.7.2.
"""
VMX_16 = None
"""
Hardware version 16, first supported in Workstation 15.0. This class
attribute was added in vSphere API 7.0.0.0.
"""
VMX_17 = None
"""
Hardware version 17, first supported in ESXi 7.0.0-0. This class attribute
was added in vSphere API 7.0.0.0.
"""
VMX_18 = None
"""
Hardware version 18, first supported in ESXi 7.0 U1. This class attribute
was added in vSphere API 7.0.1.0.
"""
VMX_19 = None
"""
Hardware version 19, first supported in ESXi 7.0 U2. This class attribute
was added in vSphere API 7.0.2.0.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`Version` instance.
"""
Enum.__init__(string)
Version._set_values([
Version('VMX_03'),
Version('VMX_04'),
Version('VMX_06'),
Version('VMX_07'),
Version('VMX_08'),
Version('VMX_09'),
Version('VMX_10'),
Version('VMX_11'),
Version('VMX_12'),
Version('VMX_13'),
Version('VMX_14'),
Version('VMX_15'),
Version('VMX_16'),
Version('VMX_17'),
Version('VMX_18'),
Version('VMX_19'),
])
Version._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.version',
Version))
class UpgradePolicy(Enum):
"""
The ``Hardware.UpgradePolicy`` class defines the valid virtual hardware
upgrade policies for a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NEVER = None
"""
Do not upgrade the virtual machine when it is powered on.
"""
AFTER_CLEAN_SHUTDOWN = None
"""
Run scheduled upgrade when the virtual machine is powered on after a clean
shutdown of the guest operating system.
"""
ALWAYS = None
"""
Run scheduled upgrade when the virtual machine is powered on.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`UpgradePolicy` instance.
"""
Enum.__init__(string)
UpgradePolicy._set_values([
UpgradePolicy('NEVER'),
UpgradePolicy('AFTER_CLEAN_SHUTDOWN'),
UpgradePolicy('ALWAYS'),
])
UpgradePolicy._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.upgrade_policy',
UpgradePolicy))
class UpgradeStatus(Enum):
"""
The ``Hardware.UpgradeStatus`` class defines the valid virtual hardware
upgrade statuses for a virtual machine.
.. note::
This class represents an enumerated type in the interface language
definition. The class contains class attributes which represent the
values in the current version of the enumerated type. Newer versions of
the enumerated type may contain new values. To use new values of the
enumerated type in communication with a server that supports the newer
version of the API, you instantiate this class. See :ref:`enumerated
type description page <enumeration_description>`.
"""
NONE = None
"""
No scheduled upgrade has been attempted.
"""
PENDING = None
"""
Upgrade is scheduled but has not yet been run.
"""
SUCCESS = None
"""
The most recent scheduled upgrade was successful.
"""
FAILED = None
"""
The most recent scheduled upgrade was not successful.
"""
def __init__(self, string):
"""
:type string: :class:`str`
:param string: String value for the :class:`UpgradeStatus` instance.
"""
Enum.__init__(string)
UpgradeStatus._set_values([
UpgradeStatus('NONE'),
UpgradeStatus('PENDING'),
UpgradeStatus('SUCCESS'),
UpgradeStatus('FAILED'),
])
UpgradeStatus._set_binding_type(type.EnumType(
'com.vmware.vcenter.vm.hardware.upgrade_status',
UpgradeStatus))
class Info(VapiStruct):
    """
    The ``Hardware.Info`` class contains information related to the virtual
    hardware of a virtual machine.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    # Tagged-union constraints: for each value of the discriminant attribute,
    # the listed optional attributes must be present (True) or absent.
    _validator_list = [
        UnionValidator(
            'upgrade_policy',
            {
                'AFTER_CLEAN_SHUTDOWN' : [('upgrade_version', True)],
                'ALWAYS' : [('upgrade_version', True)],
                'NEVER' : [],
            }
        ),
        UnionValidator(
            'upgrade_status',
            {
                'FAILED' : [('upgrade_error', True)],
                'NONE' : [],
                'PENDING' : [],
                'SUCCESS' : [],
            }
        ),
    ]

    def __init__(self,
                 version=None,
                 upgrade_policy=None,
                 upgrade_version=None,
                 upgrade_status=None,
                 upgrade_error=None,
                ):
        """
        :type version: :class:`Hardware.Version`
        :param version: Virtual hardware version.
        :type upgrade_policy: :class:`Hardware.UpgradePolicy`
        :param upgrade_policy: Scheduled upgrade policy.
        :type upgrade_version: :class:`Hardware.Version`
        :param upgrade_version: Target hardware version to be used on the next scheduled virtual
            hardware upgrade.
            This attribute is optional and it is only relevant when the value
            of ``upgradePolicy`` is one of
            :attr:`Hardware.UpgradePolicy.AFTER_CLEAN_SHUTDOWN` or
            :attr:`Hardware.UpgradePolicy.ALWAYS`.
        :type upgrade_status: :class:`Hardware.UpgradeStatus`
        :param upgrade_status: Scheduled upgrade status.
        :type upgrade_error: :class:`Exception`
        :param upgrade_error: Reason for the scheduled upgrade failure.
            This attribute is optional and it is only relevant when the value
            of ``upgradeStatus`` is :attr:`Hardware.UpgradeStatus.FAILED`.
        """
        self.version = version
        self.upgrade_policy = upgrade_policy
        self.upgrade_version = upgrade_version
        self.upgrade_status = upgrade_status
        # VapiStruct.__init__ runs the _validator_list checks, so all
        # attributes must be assigned before this call.
        self.upgrade_error = upgrade_error
        VapiStruct.__init__(self)

# NOTE(review): generated binding registration — maps attribute names to wire
# types for (de)serialization; must follow the class definition.
Info._set_binding_type(type.StructType(
    'com.vmware.vcenter.vm.hardware.info', {
        'version': type.ReferenceType(__name__, 'Hardware.Version'),
        'upgrade_policy': type.ReferenceType(__name__, 'Hardware.UpgradePolicy'),
        'upgrade_version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
        'upgrade_status': type.ReferenceType(__name__, 'Hardware.UpgradeStatus'),
        'upgrade_error': type.OptionalType(type.AnyErrorType()),
    },
    Info,
    False,
    None))
class UpdateSpec(VapiStruct):
    """
    The ``Hardware.UpdateSpec`` class describes the updates to virtual hardware
    settings of a virtual machine.

    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    # Tagged-union constraint: upgrade_version is allowed (False = optional)
    # only when upgrade_policy is AFTER_CLEAN_SHUTDOWN or ALWAYS; it must be
    # absent when the policy is NEVER.
    _validator_list = [
        UnionValidator(
            'upgrade_policy',
            {
                'AFTER_CLEAN_SHUTDOWN' : [('upgrade_version', False)],
                'ALWAYS' : [('upgrade_version', False)],
                'NEVER' : [],
            }
        ),
    ]

    def __init__(self,
                 upgrade_policy=None,
                 upgrade_version=None,
                ):
        """
        :type upgrade_policy: :class:`Hardware.UpgradePolicy` or ``None``
        :param upgrade_policy: Scheduled upgrade policy.
            If set to :attr:`Hardware.UpgradePolicy.NEVER`, the
            :attr:`Hardware.Info.upgrade_version` attribute will be reset to
            None.
            If None, the value is unchanged.
        :type upgrade_version: :class:`Hardware.Version` or ``None``
        :param upgrade_version: Target hardware version to be used on the next scheduled virtual
            hardware upgrade.
            If specified, this attribute must represent a newer virtual
            hardware version than the current virtual hardware version reported
            in :attr:`Hardware.Info.version`.
            If :attr:`Hardware.UpdateSpec.upgrade_policy` is set to
            :attr:`Hardware.UpgradePolicy.NEVER`, this attribute must be None.
            Otherwise, if this attribute is None, default to the most recent
            virtual hardware version supported by the server.
        """
        self.upgrade_policy = upgrade_policy
        self.upgrade_version = upgrade_version
        VapiStruct.__init__(self)

# NOTE(review): generated binding registration; must follow the class body.
UpdateSpec._set_binding_type(type.StructType(
    'com.vmware.vcenter.vm.hardware.update_spec', {
        'upgrade_policy': type.OptionalType(type.ReferenceType(__name__, 'Hardware.UpgradePolicy')),
        'upgrade_version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
    },
    UpdateSpec,
    False,
    None))
def get(self, vm):
    """
    Return the virtual hardware settings of a virtual machine.

    :type vm: :class:`str`
    :param vm: Virtual machine identifier. The parameter must be an
        identifier for the resource type: ``VirtualMachine``.
    :rtype: :class:`Hardware.Info`
    :return: Virtual hardware settings of the virtual machine.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        if the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the virtual machine is not found.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        if the virtual machine's configuration state cannot be accessed.
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        if the system is unable to communicate with a service to complete
        the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the user can not be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if the user doesn't have the required privileges.
    """
    # Marshalling and transport are handled entirely by the vAPI stub
    # layer; this wrapper only names the remote operation and its inputs.
    operation_args = {'vm': vm}
    return self._invoke('get', operation_args)
def update(self, vm, spec):
    """
    Update the virtual hardware settings of a virtual machine.

    :type vm: :class:`str`
    :param vm: Virtual machine identifier. The parameter must be an
        identifier for the resource type: ``VirtualMachine``.
    :type spec: :class:`Hardware.UpdateSpec`
    :param spec: Specification for updating the virtual hardware settings of
        the virtual machine.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        if the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the virtual machine is not found.
    :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
        if the virtual machine is already configured for the desired
        hardware version.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if the requested virtual hardware version is not newer than the
        current version.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
        if the requested virtual hardware version is not supported by the
        server.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
        if the virtual machine is busy performing another operation.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        if the virtual machine's configuration state cannot be accessed.
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        if the system is unable to communicate with a service to complete
        the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the user can not be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if the user doesn't have the required privileges.
    """
    # Package both inputs for the stub; validation of spec's tagged-union
    # rules happens in the binding layer, not here.
    operation_args = {'vm': vm, 'spec': spec}
    return self._invoke('update', operation_args)
def upgrade(self, vm, version=None):
    """
    Upgrade the virtual machine to a newer virtual hardware version.

    :type vm: :class:`str`
    :param vm: Virtual machine identifier. The parameter must be an
        identifier for the resource type: ``VirtualMachine``.
    :type version: :class:`Hardware.Version` or ``None``
    :param version: New virtual machine version. If None, defaults to the
        most recent virtual hardware version supported by the server.
    :raise: :class:`com.vmware.vapi.std.errors_client.Error`
        if the system reports an error while responding to the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the virtual machine is not found.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
        if the virtual machine is not powered off.
    :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
        if the virtual machine is already configured for the desired
        hardware version.
    :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
        if ``version`` is older than the current virtual hardware version.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
        if ``version`` is not supported by the server.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
        if the virtual machine is busy performing another operation.
    :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
        if the virtual machine's configuration state cannot be accessed.
    :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
        if the system is unable to communicate with a service to complete
        the request.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
        if the user can not be authenticated.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if the user doesn't have the required privileges.
    """
    # A None version is forwarded as-is; the server substitutes its most
    # recent supported hardware version in that case.
    operation_args = {'vm': vm, 'version': version}
    return self._invoke('upgrade', operation_args)
class Identity(VapiInterface):
    """
    The ``Identity`` class provides methods for managing the identity of a
    virtual machine. This class was added in vSphere API 6.7.1.
    """
    # Canonical vAPI service identifier; the stub uses it to route requests.
    _VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.identity'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _IdentityStub)
        # No per-operation task identifiers are registered for this service.
        self._VAPI_OPERATION_IDS = {}

    class Info(VapiStruct):
        """
        The ``Identity.Info`` class contains information about the identity of a
        virtual machine. This class was added in vSphere API 6.7.1.

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        def __init__(self,
                     name=None,
                     bios_uuid=None,
                     instance_uuid=None,
                    ):
            """
            :type name: :class:`str`
            :param name: Virtual machine name. This attribute was added in vSphere API
                6.7.1.
            :type bios_uuid: :class:`str`
            :param bios_uuid: 128-bit SMBIOS UUID of a virtual machine represented as a
                hexadecimal string in "12345678-abcd-1234-cdef-123456789abc"
                format. This attribute was added in vSphere API 6.7.1.
            :type instance_uuid: :class:`str`
            :param instance_uuid: VirtualCenter-specific 128-bit UUID of a virtual machine,
                represented as a hexadecimal string. This identifier is used by
                VirtualCenter to uniquely identify all virtual machine instances,
                including those that may share the same SMBIOS UUID. This attribute
                was added in vSphere API 6.7.1.
            """
            self.name = name
            self.bios_uuid = bios_uuid
            self.instance_uuid = instance_uuid
            VapiStruct.__init__(self)

    # NOTE(review): generated binding registration; must follow the class body.
    Info._set_binding_type(type.StructType(
        'com.vmware.vcenter.vm.identity.info', {
            'name': type.StringType(),
            'bios_uuid': type.StringType(),
            'instance_uuid': type.StringType(),
        },
        Info,
        False,
        None))
class LibraryItem(VapiInterface):
    """
    The ``LibraryItem`` class provides methods to identify virtual machines
    managed by Content Library. This class was added in vSphere API 6.9.1.
    """
    # Canonical vAPI service identifier; the stub uses it to route requests.
    _VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.library_item'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _LibraryItemStub)
        # No per-operation task identifiers are registered for this service.
        self._VAPI_OPERATION_IDS = {}

    class Info(VapiStruct):
        """
        The ``LibraryItem.Info`` class contains information about the library item
        associated with a virtual machine. This class was added in vSphere API
        6.9.1.

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        def __init__(self,
                     check_out=None,
                    ):
            """
            :type check_out: :class:`LibraryItem.CheckOutInfo` or ``None``
            :param check_out: Information about the checked out virtual machine. This attribute
                was added in vSphere API 6.9.1.
                If None, the virtual machine is not checked out from a library
                item.
            """
            self.check_out = check_out
            VapiStruct.__init__(self)

    # NOTE(review): generated binding registration; must follow the class body.
    Info._set_binding_type(type.StructType(
        'com.vmware.vcenter.vm.library_item.info', {
            'check_out': type.OptionalType(type.ReferenceType(__name__, 'LibraryItem.CheckOutInfo')),
        },
        Info,
        False,
        None))

    class CheckOutInfo(VapiStruct):
        """
        The ``LibraryItem.CheckOutInfo`` class contains information about a virtual
        machine checked out of a content library item. This class was added in
        vSphere API 6.9.1.

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        def __init__(self,
                     library_item=None,
                    ):
            """
            :type library_item: :class:`str`
            :param library_item: Identifier of the library item that the virtual machine is checked
                out from. This attribute was added in vSphere API 6.9.1.
                When clients pass a value of this class as a parameter, the
                attribute must be an identifier for the resource type:
                ``com.vmware.content.library.Item``. When methods return a value of
                this class as a return value, the attribute will be an identifier
                for the resource type: ``com.vmware.content.library.Item``.
            """
            self.library_item = library_item
            VapiStruct.__init__(self)

    CheckOutInfo._set_binding_type(type.StructType(
        'com.vmware.vcenter.vm.library_item.check_out_info', {
            'library_item': type.IdType(resource_types='com.vmware.content.library.Item'),
        },
        CheckOutInfo,
        False,
        None))

    def get(self,
            vm,
            ):
        """
        Returns the information about the library item associated with the
        virtual machine. This method was added in vSphere API 6.9.1.

        :type vm: :class:`str`
        :param vm: Identifier of the virtual machine.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :rtype: :class:`LibraryItem.Info`
        :return: Information about the library item associated with the virtual
            machine.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user that requested the method cannot be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user that requested the method is not authorized to perform
            the method.
        """
        # Thin wrapper: the vAPI stub performs marshalling and transport.
        return self._invoke('get',
                            {
                            'vm': vm,
                            })
class Power(VapiInterface):
    """
    The ``Power`` class provides methods for managing the power state of a
    virtual machine.
    """
    # Canonical vAPI service identifier; the stub uses it to route requests.
    _VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.power'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _PowerStub)
        # No per-operation task identifiers are registered for this service.
        self._VAPI_OPERATION_IDS = {}

    class State(Enum):
        """
        The ``Power.State`` class defines the valid power states for a virtual
        machine.

        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        # Placeholder class attributes; the real enum instances are installed
        # by the State._set_values(...) call below the class body.
        POWERED_OFF = None
        """
        The virtual machine is powered off.

        """
        POWERED_ON = None
        """
        The virtual machine is powered on.

        """
        SUSPENDED = None
        """
        The virtual machine is suspended.

        """

        def __init__(self, string):
            """
            :type string: :class:`str`
            :param string: String value for the :class:`State` instance.
            """
            Enum.__init__(string)

    # NOTE(review): generated registration calls — must directly follow the
    # class body so the vAPI runtime learns the value set and wire type name.
    State._set_values([
        State('POWERED_OFF'),
        State('POWERED_ON'),
        State('SUSPENDED'),
    ])
    State._set_binding_type(type.EnumType(
        'com.vmware.vcenter.vm.power.state',
        State))

    class Info(VapiStruct):
        """
        The ``Power.Info`` class contains information about the power state of a
        virtual machine.

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        # Tagged-union constraint: clean_power_off must be present only when
        # state is POWERED_OFF.
        _validator_list = [
            UnionValidator(
                'state',
                {
                    'POWERED_OFF' : [('clean_power_off', True)],
                    'POWERED_ON' : [],
                    'SUSPENDED' : [],
                }
            ),
        ]

        def __init__(self,
                     state=None,
                     clean_power_off=None,
                    ):
            """
            :type state: :class:`Power.State`
            :param state: Power state of the virtual machine.
            :type clean_power_off: :class:`bool`
            :param clean_power_off: Flag indicating whether the virtual machine was powered off
                cleanly. This attribute may be used to detect that the virtual
                machine crashed unexpectedly and should be restarted.
                This attribute is optional and it is only relevant when the value
                of ``state`` is :attr:`Power.State.POWERED_OFF`.
            """
            self.state = state
            self.clean_power_off = clean_power_off
            VapiStruct.__init__(self)

    # NOTE(review): generated binding registration; must follow the class body.
    Info._set_binding_type(type.StructType(
        'com.vmware.vcenter.vm.power.info', {
            'state': type.ReferenceType(__name__, 'Power.State'),
            'clean_power_off': type.OptionalType(type.BooleanType()),
        },
        Info,
        False,
        None))

    def get(self,
            vm,
            ):
        """
        Returns the power state information of a virtual machine.

        :type vm: :class:`str`
        :param vm: Virtual machine identifier.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :rtype: :class:`Power.Info`
        :return: Power state information for the specified virtual machine.
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            if the system reports an error while responding to the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
            if the virtual machine's configuration or execution state cannot be
            accessed.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            if the system is unable to communicate with a service to complete
            the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user can not be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user doesn't have the required privileges.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * The resource ``VirtualMachine`` referenced by the parameter
              ``vm`` requires ``System.Read``.
        """
        # Thin wrapper: the vAPI stub performs marshalling and transport.
        return self._invoke('get',
                            {
                            'vm': vm,
                            })

    def start(self,
              vm,
              ):
        """
        Powers on a powered-off or suspended virtual machine.

        :type vm: :class:`str`
        :param vm: Virtual machine identifier.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            if the system reports an error while responding to the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
            if the virtual machine is already powered on.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unsupported`
            if the virtual machine does not support being powered on (e.g.
            marked as a template, serving as a fault-tolerance secondary
            virtual machine).
        :raise: :class:`com.vmware.vapi.std.errors_client.UnableToAllocateResource`
            if resources cannot be allocated for the virtual machine (e.g.
            physical resource allocation policy cannot be satisfied,
            insufficient licenses are available to run the virtual machine).
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInaccessible`
            if resources required by the virtual machine are not accessible
            (e.g. virtual machine configuration files or virtual disks are on
            inaccessible storage, no hosts are available to run the virtual
            machine).
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceInUse`
            if resources required by the virtual machine are in use (e.g.
            virtual machine configuration files or virtual disks are locked,
            host containing the virtual machine is an HA failover host).
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
            if the virtual machine is performing another operation.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            if the system is unable to communicate with a service to complete
            the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user can not be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user doesn't have the required privileges.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * The resource ``VirtualMachine`` referenced by the parameter
              ``vm`` requires ``VirtualMachine.Interact.PowerOn``.
        """
        return self._invoke('start',
                            {
                            'vm': vm,
                            })

    def stop(self,
             vm,
             ):
        """
        Powers off a powered-on or suspended virtual machine.

        :type vm: :class:`str`
        :param vm: Virtual machine identifier.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            if the system reports an error while responding to the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
            if the virtual machine is already powered off.
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
            if the virtual machine is performing another operation.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            if the system is unable to communicate with a service to complete
            the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user can not be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user doesn't have the required privileges.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * The resource ``VirtualMachine`` referenced by the parameter
              ``vm`` requires ``VirtualMachine.Interact.PowerOff``.
        """
        return self._invoke('stop',
                            {
                            'vm': vm,
                            })

    def suspend(self,
                vm,
                ):
        """
        Suspends a powered-on virtual machine.

        :type vm: :class:`str`
        :param vm: Virtual machine identifier.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            if the system reports an error while responding to the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
            if the virtual machine is already suspended.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
            if the virtual machine is powered off.
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
            if the virtual machine is performing another operation.
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            if the system is unable to communicate with a service to complete
            the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user can not be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user doesn't have the required privileges.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * The resource ``VirtualMachine`` referenced by the parameter
              ``vm`` requires ``VirtualMachine.Interact.Suspend``.
        """
        return self._invoke('suspend',
                            {
                            'vm': vm,
                            })

    def reset(self,
              vm,
              ):
        """
        Resets a powered-on virtual machine.

        :type vm: :class:`str`
        :param vm: Virtual machine identifier.
            The parameter must be an identifier for the resource type:
            ``VirtualMachine``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Error`
            if the system reports an error while responding to the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the virtual machine is not found.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
            if the virtual machine is powered off or suspended.
        :raise: :class:`com.vmware.vapi.std.errors_client.ResourceBusy`
            if the virtual machine is performing another operation
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            if the system is unable to communicate with a service to complete
            the request.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthenticated`
            if the user can not be authenticated.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if the user doesn't have the required privileges.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have all of the privileges described as follows:

            * The resource ``VirtualMachine`` referenced by the parameter
              ``vm`` requires ``VirtualMachine.Interact.Reset``.
        """
        return self._invoke('reset',
                            {
                            'vm': vm,
                            })
class Tools(VapiInterface):
"""
The ``Tools`` class provides methods for managing VMware Tools in the guest
operating system. This class was added in vSphere API 7.0.0.0.
"""
_VAPI_SERVICE_ID = 'com.vmware.vcenter.vm.tools'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
    """
    :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
    :param config: Configuration to be used for creating the stub.
    """
    # _ToolsStub binds this interface to the Tools service endpoint.
    VapiInterface.__init__(self, config, _ToolsStub)
    # No per-operation task identifiers are registered for this service.
    self._VAPI_OPERATION_IDS = {}
class RunState(Enum):
    """
    Current run state of VMware Tools in the guest operating system. This
    enumeration was added in vSphere API 7.0.0.0.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    # Placeholder class attributes; the real enum instances are installed by
    # the RunState._set_values(...) call below the class body.
    NOT_RUNNING = None
    """
    VMware Tools is not running. This class attribute was added in vSphere API
    7.0.0.0.

    """
    RUNNING = None
    """
    VMware Tools is running. This class attribute was added in vSphere API
    7.0.0.0.

    """
    EXECUTING_SCRIPTS = None
    """
    VMware Tools is running scripts as part of a state transition. This class
    attribute was added in vSphere API 7.0.0.0.

    """

    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`RunState` instance.
        """
        Enum.__init__(string)

# NOTE(review): generated registration calls — must directly follow the class
# body so the vAPI runtime learns the value set and the wire type name.
RunState._set_values([
    RunState('NOT_RUNNING'),
    RunState('RUNNING'),
    RunState('EXECUTING_SCRIPTS'),
])
RunState._set_binding_type(type.EnumType(
    'com.vmware.vcenter.vm.tools.run_state',
    RunState))
class UpgradePolicy(Enum):
    """
    The ``Tools.UpgradePolicy`` class defines when Tools are auto-upgraded for
    a virtual machine. This enumeration was added in vSphere API 7.0.0.0.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    # Placeholder class attributes; the real enum instances are installed by
    # the UpgradePolicy._set_values(...) call below the class body.
    MANUAL = None
    """
    No auto-upgrades for Tools will be performed for this virtual machine.
    Users must manually invoke the :func:`Tools.upgrade` method to update
    Tools. This class attribute was added in vSphere API 7.0.0.0.

    """
    UPGRADE_AT_POWER_CYCLE = None
    """
    When the virtual machine is power-cycled, the system checks for a newer
    version of Tools when the virtual machine is powered on. If it is
    available, a Tools upgrade is automatically performed on the virtual
    machine and it is rebooted if necessary. This class attribute was added in
    vSphere API 7.0.0.0.

    """

    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`UpgradePolicy` instance.
        """
        Enum.__init__(string)

# NOTE(review): generated registration calls — must directly follow the class
# body so the vAPI runtime learns the value set and the wire type name.
UpgradePolicy._set_values([
    UpgradePolicy('MANUAL'),
    UpgradePolicy('UPGRADE_AT_POWER_CYCLE'),
])
UpgradePolicy._set_binding_type(type.EnumType(
    'com.vmware.vcenter.vm.tools.upgrade_policy',
    UpgradePolicy))
class VersionStatus(Enum):
    """
    The ``Tools.VersionStatus`` class defines the version status types of
    VMware Tools installed in the guest operating system. This enumeration was
    added in vSphere API 7.0.0.0.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    # Placeholder class attributes; the real enum instances are installed by
    # the VersionStatus._set_values(...) call below the class body.
    NOT_INSTALLED = None
    """
    VMware Tools has never been installed. This class attribute was added in
    vSphere API 7.0.0.0.

    """
    CURRENT = None
    """
    VMware Tools is installed, and the version is current. This class attribute
    was added in vSphere API 7.0.0.0.

    """
    UNMANAGED = None
    """
    VMware Tools is installed, but it is not managed by VMware. This includes
    open-vm-tools or OSPs which should be managed by the guest operating
    system. This class attribute was added in vSphere API 7.0.0.0.

    """
    TOO_OLD_UNSUPPORTED = None
    """
    VMware Tools is installed, but the version is too old. This class attribute
    was added in vSphere API 7.0.0.0.

    """
    SUPPORTED_OLD = None
    """
    VMware Tools is installed, supported, but a newer version is available.
    This class attribute was added in vSphere API 7.0.0.0.

    """
    SUPPORTED_NEW = None
    """
    VMware Tools is installed, supported, and newer than the version available
    on the host. This class attribute was added in vSphere API 7.0.0.0.

    """
    TOO_NEW = None
    """
    VMware Tools is installed, and the version is known to be too new to work
    correctly with this virtual machine. This class attribute was added in
    vSphere API 7.0.0.0.

    """
    BLACKLISTED = None
    """
    VMware Tools is installed, but the installed version is known to have a
    grave bug and should be immediately upgraded. This class attribute was
    added in vSphere API 7.0.0.0.

    """

    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`VersionStatus` instance.
        """
        Enum.__init__(string)

# NOTE(review): generated registration calls — must directly follow the class
# body so the vAPI runtime learns the value set and the wire type name.
VersionStatus._set_values([
    VersionStatus('NOT_INSTALLED'),
    VersionStatus('CURRENT'),
    VersionStatus('UNMANAGED'),
    VersionStatus('TOO_OLD_UNSUPPORTED'),
    VersionStatus('SUPPORTED_OLD'),
    VersionStatus('SUPPORTED_NEW'),
    VersionStatus('TOO_NEW'),
    VersionStatus('BLACKLISTED'),
])
VersionStatus._set_binding_type(type.EnumType(
    'com.vmware.vcenter.vm.tools.version_status',
    VersionStatus))
class ToolsInstallType(Enum):
    """
    The ``Tools.ToolsInstallType`` class defines the installation type of the
    Tools in the guest operating system. This enumeration was added in vSphere
    API 7.0.0.0.

    .. note::
        This class represents an enumerated type in the interface language
        definition. The class contains class attributes which represent the
        values in the current version of the enumerated type. Newer versions of
        the enumerated type may contain new values. To use new values of the
        enumerated type in communication with a server that supports the newer
        version of the API, you instantiate this class. See :ref:`enumerated
        type description page <enumeration_description>`.
    """
    # Placeholder class attributes; the real enum instances are installed by
    # the ToolsInstallType._set_values(...) call below the class body.
    UNKNOWN = None
    """
    Installation type is not known. Most likely tools have been installed by
    OSPs or open-vm-tools, but a version that does not report its install type
    or an install type that we do not recognize. This class attribute was added
    in vSphere API 7.0.0.0.

    """
    MSI = None
    """
    MSI is the installation type used for VMware Tools on Windows. This class
    attribute was added in vSphere API 7.0.0.0.

    """
    TAR = None
    """
    Tools have been installed by the tar installer. This class attribute was
    added in vSphere API 7.0.0.0.

    """
    OSP = None
    """
    OSPs are RPM or Debian packages tailored for the OS in the VM. See
    http://packages.vmware.com. This class attribute was added in vSphere API
    7.0.0.0.

    """
    OPEN_VM_TOOLS = None
    """
    open-vm-tools are the open-source version of VMware Tools, may have been
    packaged by the OS vendor. This class attribute was added in vSphere API
    7.0.0.0.

    """

    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`ToolsInstallType` instance.
        """
        Enum.__init__(string)

# NOTE(review): generated registration calls — must directly follow the class
# body so the vAPI runtime learns the value set and the wire type name.
ToolsInstallType._set_values([
    ToolsInstallType('UNKNOWN'),
    ToolsInstallType('MSI'),
    ToolsInstallType('TAR'),
    ToolsInstallType('OSP'),
    ToolsInstallType('OPEN_VM_TOOLS'),
])
ToolsInstallType._set_binding_type(type.EnumType(
    'com.vmware.vcenter.vm.tools.tools_install_type',
    ToolsInstallType))
class Info(VapiStruct):
"""
The ``Tools.Info`` class describes the VMWare Tools properties of a virtual
machine. This class was added in vSphere API 7.0.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
auto_update_supported=None,
install_attempt_count=None,
error=None,
version_number=None,
version=None,
upgrade_policy=None,
version_status=None,
install_type=None,
run_state=None,
):
"""
:type auto_update_supported: :class:`bool`
:param auto_update_supported: Set if the virtual machine supports auto-upgrading Tools via
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
:type install_attempt_count: :class:`long` or ``None``
:param install_attempt_count: Number of attempts that have been made to install or upgrade the
version of Tools installed on this virtual machine. This attribute
was added in vSphere API 7.0.0.0.
This attribute will be None if there have been no Tools install or
upgrade attempt.
:type error: :class:`Exception` or ``None``
:param error: Error that happened, if any, during last attempt to upgrade or
install Tools. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if a the last Tools install or upgrade
attempt succeeded.
:type version_number: :class:`long` or ``None``
:param version_number: Version of VMware Tools installed on the guest operating system.
This attribute was added in vSphere API 7.0.0.0.
This attribute wil be None if VMWare Tools is not installed. This
is an integer constructed as follows: (((MJR) << 10) + ((MNR) << 5)
+ (REV)) Where MJR is tha major verson, MNR is the minor version
and REV is the revision. Tools version = T Tools Version Major =
MJR = (T / 1024) Tools Version Minor = MNR = ((T % 1024) / 32)
Tools Version Revision = BASE = ((T % 1024) % 32) Tools actual
version = MJR.MNR.REV
:type version: :class:`str` or ``None``
:param version: Version of VMware Tools installed on the guest operating system.
This is a human-readable value that should not be parsed. This
attribute was added in vSphere API 7.0.0.0.
This attribute wil be None if VMWare Tools is not installed.
:type upgrade_policy: :class:`Tools.UpgradePolicy`
:param upgrade_policy: Tools upgrade policy setting for the virtual machine.
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
:type version_status: :class:`Tools.VersionStatus` or ``None``
:param version_status: Current version status of VMware Tools in the guest operating
system, if known. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if the version status is not known, for
example if VMware Tools is too old to report the information.
:type install_type: :class:`Tools.ToolsInstallType` or ``None``
:param install_type: Current installation type of VMware Tools in the guest operating
system. This attribute was added in vSphere API 7.0.0.0.
This attribute will be None if the installation type is not known,
for example if VMware Tools is too old to report the information.
:type run_state: :class:`Tools.RunState`
:param run_state: Current run state of VMware Tools in the guest operating system.
This attribute was added in vSphere API 7.0.0.0.
"""
self.auto_update_supported = auto_update_supported
self.install_attempt_count = install_attempt_count
self.error = error
self.version_number = version_number
self.version = version
self.upgrade_policy = upgrade_policy
self.version_status = version_status
self.install_type = install_type
self.run_state = run_state
VapiStruct.__init__(self)
Info._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.tools.info', {
'auto_update_supported': type.BooleanType(),
'install_attempt_count': type.OptionalType(type.IntegerType()),
'error': type.OptionalType(type.AnyErrorType()),
'version_number': type.OptionalType(type.IntegerType()),
'version': type.OptionalType(type.StringType()),
'upgrade_policy': type.ReferenceType(__name__, 'Tools.UpgradePolicy'),
'version_status': type.OptionalType(type.ReferenceType(__name__, 'Tools.VersionStatus')),
'install_type': type.OptionalType(type.ReferenceType(__name__, 'Tools.ToolsInstallType')),
'run_state': type.ReferenceType(__name__, 'Tools.RunState'),
},
Info,
False,
None))
class UpdateSpec(VapiStruct):
"""
The (\\\\@name UpdateSpec} class describes the VMware Tools properties of a
virtual machine that can be updated. This class was added in vSphere API
7.0.0.0.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
upgrade_policy=None,
):
"""
:type upgrade_policy: :class:`Tools.UpgradePolicy` or ``None``
:param upgrade_policy: Tools upgrade policy setting for the virtual machine.
:class:`Tools.UpgradePolicy`. This attribute was added in vSphere
API 7.0.0.0.
If None the upgrade policy will not be modified.
"""
self.upgrade_policy = upgrade_policy
VapiStruct.__init__(self)
UpdateSpec._set_binding_type(type.StructType(
'com.vmware.vcenter.vm.tools.update_spec', {
'upgrade_policy': type.OptionalType(type.ReferenceType(__name__, 'Tools.UpgradePolicy')),
},
UpdateSpec,
False,
None))
def get(self,
vm,
):
"""
Get the properties of VMware Tools. This method was added in vSphere
API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:rtype: :class:`Tools.Info`
:return: VMware Tools properties.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
"""
return self._invoke('get',
{
'vm': vm,
})
def update(self,
vm,
spec,
):
"""
Update the properties of VMware Tools. This method was added in vSphere
API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type spec: :class:`Tools.UpdateSpec`
:param spec: The new values.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the system reports an error while responding to the request.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if the :attr:`Tools.UpdateSpec.upgrade_policy` attribute contains a
value that is not supported by the server.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
"""
return self._invoke('update',
{
'vm': vm,
'spec': spec,
})
def upgrade(self,
vm,
command_line_options=None,
):
"""
Begins the Tools upgrade process. To monitor the status of the Tools
upgrade, clients should check the Tools status by calling
:func:`Tools.get` and examining ``versionStatus`` and ``runState``.
This method was added in vSphere API 7.0.0.0.
:type vm: :class:`str`
:param vm: Identifier of the virtual machine.
The parameter must be an identifier for the resource type:
``VirtualMachine``.
:type command_line_options: :class:`str` or ``None``
:param command_line_options: Command line options passed to the installer to modify the
installation procedure for Tools.
Set if any additional options are desired.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the virtual machine is not found.
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
if the VMware Tools are not running.
:raise: :class:`com.vmware.vapi.std.errors_client.NotAllowedInCurrentState`
if the virtual machine is not powered on.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyInDesiredState`
is an upgrade is already in progress.
:raise: :class:`com.vmware.vapi.std.errors_client.Error`
if the upgrade process fails inside the guest operating system.
"""
return self._invoke('upgrade',
{
'vm': vm,
'command_line_options': command_line_options,
})
class _HardwareStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/hardware',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'spec': type.ReferenceType(__name__, 'Hardware.UpdateSpec'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/vcenter/vm/{vm}/hardware',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for upgrade operation
upgrade_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'version': type.OptionalType(type.ReferenceType(__name__, 'Hardware.Version')),
})
upgrade_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
upgrade_input_value_validator_list = [
]
upgrade_output_validator_list = [
]
upgrade_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/hardware/action/upgrade',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Hardware.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
'upgrade': {
'input_type': upgrade_input_type,
'output_type': type.VoidType(),
'errors': upgrade_error_dict,
'input_value_validator_list': upgrade_input_value_validator_list,
'output_validator_list': upgrade_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'update': update_rest_metadata,
'upgrade': upgrade_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.hardware',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _IdentityStub(ApiInterfaceStub):
def __init__(self, config):
operations = {
}
rest_metadata = {
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.identity',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _LibraryItemStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/library-item',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'LibraryItem.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.library_item',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _PowerStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/power',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for start operation
start_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
start_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.unsupported':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unsupported'),
'com.vmware.vapi.std.errors.unable_to_allocate_resource':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'UnableToAllocateResource'),
'com.vmware.vapi.std.errors.resource_inaccessible':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInaccessible'),
'com.vmware.vapi.std.errors.resource_in_use':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceInUse'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
start_input_value_validator_list = [
]
start_output_validator_list = [
]
start_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/start',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for stop operation
stop_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
stop_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
stop_input_value_validator_list = [
]
stop_output_validator_list = [
]
stop_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/stop',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for suspend operation
suspend_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
suspend_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
suspend_input_value_validator_list = [
]
suspend_output_validator_list = [
]
suspend_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/suspend',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for reset operation
reset_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
reset_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.resource_busy':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ResourceBusy'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.unauthenticated':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthenticated'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
reset_input_value_validator_list = [
]
reset_output_validator_list = [
]
reset_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/power/reset',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Power.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'start': {
'input_type': start_input_type,
'output_type': type.VoidType(),
'errors': start_error_dict,
'input_value_validator_list': start_input_value_validator_list,
'output_validator_list': start_output_validator_list,
'task_type': TaskType.NONE,
},
'stop': {
'input_type': stop_input_type,
'output_type': type.VoidType(),
'errors': stop_error_dict,
'input_value_validator_list': stop_input_value_validator_list,
'output_validator_list': stop_output_validator_list,
'task_type': TaskType.NONE,
},
'suspend': {
'input_type': suspend_input_type,
'output_type': type.VoidType(),
'errors': suspend_error_dict,
'input_value_validator_list': suspend_input_value_validator_list,
'output_validator_list': suspend_output_validator_list,
'task_type': TaskType.NONE,
},
'reset': {
'input_type': reset_input_type,
'output_type': type.VoidType(),
'errors': reset_error_dict,
'input_value_validator_list': reset_input_value_validator_list,
'output_validator_list': reset_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'start': start_rest_metadata,
'stop': stop_rest_metadata,
'suspend': suspend_rest_metadata,
'reset': reset_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.power',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _ToolsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for get operation
get_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'spec': type.ReferenceType(__name__, 'Tools.UpdateSpec'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PATCH',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
# properties for upgrade operation
upgrade_input_type = type.StructType('operation-input', {
'vm': type.IdType(resource_types='VirtualMachine'),
'command_line_options': type.OptionalType(type.StringType()),
})
upgrade_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.not_allowed_in_current_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotAllowedInCurrentState'),
'com.vmware.vapi.std.errors.already_in_desired_state':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyInDesiredState'),
'com.vmware.vapi.std.errors.error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Error'),
}
upgrade_input_value_validator_list = [
]
upgrade_output_validator_list = [
]
upgrade_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/vcenter/vm/{vm}/tools',
path_variables={
'vm': 'vm',
},
query_parameters={
}
)
operations = {
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'Tools.Info'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
'upgrade': {
'input_type': upgrade_input_type,
'output_type': type.VoidType(),
'errors': upgrade_error_dict,
'input_value_validator_list': upgrade_input_value_validator_list,
'output_validator_list': upgrade_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'get': get_rest_metadata,
'update': update_rest_metadata,
'upgrade': upgrade_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vcenter.vm.tools',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class StubFactory(StubFactoryBase):
_attrs = {
'Hardware': Hardware,
'Identity': Identity,
'LibraryItem': LibraryItem,
'Power': Power,
'Tools': Tools,
'console': 'com.vmware.vcenter.vm.console_client.StubFactory',
'guest': 'com.vmware.vcenter.vm.guest_client.StubFactory',
'hardware': 'com.vmware.vcenter.vm.hardware_client.StubFactory',
'storage': 'com.vmware.vcenter.vm.storage_client.StubFactory',
'tools': 'com.vmware.vcenter.vm.tools_client.StubFactory',
}
| [
"dungla2011@gmail.com"
] | dungla2011@gmail.com |
df30628cc49ab0acf09a8074719c45fb6db065c2 | 8f1f93aa290662737d769b64d845d2e8e9d5cb5d | /robotGriffin.py | b809849961a22e334c72a8f8d671a2c45f0a3b62 | [] | no_license | RoboticsTeam4904/2014-Code | 49caa7fd24c936f19c9dbcec5c81ab166af9303e | f0367a5f4e468e63e055dd850f4cb02230ef516b | refs/heads/master | 2021-01-25T05:34:40.891194 | 2014-12-19T01:03:11 | 2014-12-19T01:03:11 | 26,616,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import wpilib
cstick = wpilib.Joystick(1)
lstick = wpilib.Joystick(2)
rstick = wpilib.Joystick(3)
lfmotor = wpilib.Victor(1)
rfmotor = wpilib.Victor(2)
lbmotor = wpilib.Victor(3)
rbmotor = wpilib.Victor(4)
spmotor = wpilib.Jaguar(5)
wdmotor = wpilib.Victor(6)
sbmotor = wpilib.Victor(7)
compressor = wpilib.Compressor(2,2)
solenoidA1 = wpilib.Solenoid(1)
solenoidA2 = wpilib.Solenoid(2)
solenoidB1 = wpilib.Solenoid(3)
solenoidB2 = wpilib.Solenoid(4)
solenoidC1 = wpilib.Solenoid(5)
solenoidC2 = wpilib.Solenoid(6)
def CheckRestart():
#if "6L" in PressedButton():
# print("Operator killed robot with button 6 on the left joystick")
# raise RuntimeError("OperatorLeftRestart")
#if "9R" in PressedButton():
# print("Operator killed robot with button 9 on the right joystick")
# raise RuntimeError("OperatorRightRestart")
if "7C" in PressedButton():
print("Driver killed robot with back button on the xBox controller")
raise RuntimeError("DriverRestart")
def PressedButton():
list = []
for i in range(20):
if cstick.GetRawButton(i):
list.append(str(i)+"C")
if rstick.GetRawButton(i):
list.append(str(i)+"R")
if lstick.GetRawButton(i):
list.append(str(i)+"L")
return list
class MyRobot(wpilib.SimpleRobot):
def RobotInit(self):
print("Initializing robot")
def Disabled(self):
print("**** DISABLED ****")
while self.IsDisabled():
CheckRestart()
wpilib.Wait(0.01)
def Autonomous(self):
print("**** AUTONOMOUS ****")
self.GetWatchdog().SetEnabled(False)
wpilib.Wait(2)
setMotors(-1,-1)
setJaguar(0)
wpilib.Wait(1)
while self.IsAutonomous() and self.IsEnabled():
CheckRestart()
setMotors(0,0)
wpilib.Wait(0.02)
def OperatorControl(self):
print("**** TELEOP ****")
dog = self.GetWatchdog()
dog.SetEnabled(True)
dog.SetExpiration(0.25)
while self.IsOperatorControl() and self.IsEnabled():
dog.Feed()
CheckRestart()
# Motor control
setMotorsFromStick(cstick)
setJaguar(rstick.GetY())
solenoidC1.Set(False)
solenoidC2.Set(True)
handleButtons(PressedButton())
wpilib.Wait(0.04)
def handleButtons(buttons):
for button in buttons:
if button == "1R":
print("Extending alligator mouth piston")
solenoidC1.Set(True)
solenoidC2.Set(False)
elif button == "2L":
print("Lowering alligator arm")
solenoidA1.Set(False)
solenoidA2.Set(True)
elif button == "3L":
print("Lifting alligator arm")
solenoidA1.Set(True)
solenoidA2.Set(False)
elif button == "4L":
print("Lowering forklift")
solenoidB1.Set(False)
solenoidB2.Set(True)
elif button == "5L":
print("Lifting forklift")
solenoidB1.Set(True)
solenoidB2.Set(False)
def setMotorsFromStick(stick):
throttle = stick.GetThrottle()
X = stick.GetX()
throttleGain = 1
throttleExp = 2
xGain = 1
xExp = 2
if throttle < 0:
throttle = throttle ** throttleExp
throttle *= -throttleGain
else:
throttle = throttle ** throttleExp
throttle *= throttleGain
if X < 0:
X = X ** xExp
X *= -xGain
else:
X = X ** xExp
X *= xGain
setMotors(throttle-X, throttle+X)
def setMotors(left, right):
lbmotor.Set(left * -1)
lfmotor.Set(left * -1)
rbmotor.Set(right * -1)
rfmotor.Set(right * 1)
def setJaguar(speed):
spmotor.Set(speed * 0.75)
def startCompressor():
print("Starting compressor")
compressor.Start()
def run():
startCompressor()
robot = MyRobot()
robot.StartCompetition() | [
"botprovoking@nuevaschool.org"
] | botprovoking@nuevaschool.org |
c5382963180478fd862fc67e67e37e67fa689e13 | f829d2c4347ce85ae6dd769f0aab2491d8ee4751 | /old/.history/a_20201125194051.py | f79c3e45da0a2e2a0076008652c3fd3694e249f9 | [
"LicenseRef-scancode-mulanpsl-2.0-en",
"MulanPSL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pscly/bisai1 | 0ef18d4aa12541322947a5be250ef7f260c93276 | 257c02c8f23e373834a6275683470f3f081b7373 | refs/heads/18ti | 2023-01-21T22:02:15.582281 | 2020-11-29T09:30:33 | 2020-11-29T09:30:33 | 316,918,262 | 0 | 0 | NOASSERTION | 2020-11-29T12:33:33 | 2020-11-29T09:39:21 | Python | UTF-8 | Python | false | false | 969 | py | # for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# 第一道题(16)
# input("请输入(第一次):")
# s1 = input("请输入(第二次):")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
# if i.isdigit():
# l2.append(int(i))
# for i in l2:
# if not (i % 6):
# print(i, end=" ")
# 第二道题(17)
out_l1 = []
def bian_int_list(l1):
re_l1 = [] # 返回出去的列表
for i in l1:
re_l1.append(int(i))
return re_l1
def jisuan(int_num):
he1 = 0
global out_l1
for i in str(int_num):
he1 += int(i)**2
if he1 > int(str_num):
out_l1.append(str_num)
return True
return None
while 1:
in_1 = input("请输入数值:")
nums_l1 = in_1.split(' ')
for i in range(nums_l1[0, nums_l1[1]+1]):
if jisuan(i):
out_l1.append(i)
print(i)
| [
"pscly@outlook.com"
] | pscly@outlook.com |
86cbb13474c17d9a968e3b4c5c92c3701a99c9ea | 6b9aa789652ba1316814a2fef46e01818dd817a9 | /TypeConversion.py | 005b931875b0de49c629740d1dab880be38549e9 | [] | no_license | amudalalalith/PythonTraining | 233397222b4a60ec475f6e5164a20f270541fdc8 | 0878dbb5d3ae3d2c0d6efba6d54a1c77a6bc3866 | refs/heads/master | 2020-07-27T09:23:11.504436 | 2019-09-18T13:02:35 | 2019-09-18T13:02:35 | 209,044,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | var1 = "7.5"
print (type(var1))
print (var1)
var2 = int(var1) # type cast code
print (type(var2))
print (var2)
# anything to str
# int to float
# float to int
# int, str become only numbers, no float
# float, str become numbers or float
'''
var1 = "5"
print (type(var1))
print (var1)
var2 = int(var1)
print (type(var2))
print (var2)
'''
'''
var1 = 5
print (type(var1))
print (var1)
var2 = str(var1)
print (type(var2))
print (var2)
'''
| [
"noreply@github.com"
] | amudalalalith.noreply@github.com |
499eb107ace978219587c3d7d08432c1e69aed4d | f4703011437f271d8c7cf09f6754368664edd995 | /TestModel.py | a6dbf27c14711b891aeacecb363b884f61f7efb5 | [] | no_license | xiongfang/tf_learn | e89656eb6e9b0bc460ec39097ea7cebbcc7fc7bc | da584c3b07c70065033840afab85c94fef620b53 | refs/heads/master | 2023-06-03T03:57:03.847841 | 2021-06-16T11:53:09 | 2021-06-16T11:53:09 | 304,235,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | import cv2
import os
import pathlib
import random
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.keras as keras
import time
IMAGE_WIDTH = int(384/4)
IMAGE_HEIGHT = int(286/4)
path_root = "E:/DataSet/BioID_Face/data/BioID-FaceDatabase-V1.2"
data_root = pathlib.Path(path_root)
all_image_paths = list(data_root.glob('*.jpg'))
all_image_paths = [str(path) for path in all_image_paths]
train_image_paths = all_image_paths[:1000]
val_image_paths = all_image_paths[1000:]
def get_eye_pos(eye_file):
with open(eye_file,"r") as f:
eye = f.readline()
eye = f.readline()
pos_list = eye.split('\t')
LX = float(pos_list[0])/384
LY = float(pos_list[1])/286
RX = float(pos_list[2])/384
RY = float(pos_list[3])/286
return [LX,LY,RX,RY]
def pos_to_cv(eye_pos):
return [[int(eye_pos[0]*384),int(eye_pos[1]*286)],[int(eye_pos[2]*384),int(eye_pos[3]*286)]]
all_label_paths = list(data_root.glob('*.eye'))
all_label_paths = [str(path) for path in all_label_paths]
all_image_labels = [ get_eye_pos(path) for path in all_label_paths]
train_image_labels = all_image_labels[:1000]
val_image_labels = all_image_labels[1000:]
def preprocess_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [IMAGE_WIDTH, IMAGE_HEIGHT])
image /= 255.0 # normalize to [0,1] range
return image
def load_and_preprocess_image(path):
img = tf.io.read_file(path)
return preprocess_image(img)
model_path_name = "E:/tf_learn/ProcessDataWeights.model"
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='elu', input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, 3)),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='elu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Conv2D(128, (3, 3), activation='elu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='elu',kernel_regularizer = tf.keras.regularizers.L1(0.01)),
tf.keras.layers.Dense(4, activation = 'elu')])
model.load_weights(model_path_name)
path_ds = tf.data.Dataset.from_tensor_slices(val_image_paths)
val_image_ds = path_ds.map(load_and_preprocess_image)
val_label_ds = tf.data.Dataset.from_tensor_slices(val_image_labels)
val_image_label_ds = tf.data.Dataset.zip((val_image_ds, val_label_ds))
val_image_label_ds = val_image_label_ds.batch(1,drop_remainder = True)
img_list = []
for i in range(5):
index = random.randint(0,len(val_image_paths)-1)
val_image_label_ds_list = list(val_image_label_ds.as_numpy_iterator())
x_batch_train = val_image_label_ds_list[index][0]
y_batch_train = val_image_label_ds_list[index][1]
logits = model(x_batch_train, training=False) # Logits for this minibatch
img = cv2.imread(val_image_paths[index])
eye = pos_to_cv(y_batch_train[0])
cv2.circle(img,eye[0],2,(0,0,255))
cv2.circle(img,eye[1],2,(0,0,255))
eye = pos_to_cv(logits[0])
cv2.circle(img,eye[0],2,(0,255,0))
cv2.circle(img,eye[1],2,(0,255,0))
img_list.append(img)
cv2.imshow("a%d"%i,img)
cv2.waitKey()
| [
"xg_55@126.com"
] | xg_55@126.com |
0a64700b1408521e4cb652493afa4c3773da70d3 | fde90006ac56f38863ebbff75fe0da7296d8d4b6 | /src/cfehome/old_settings.py | c58c730fb22a082126c92754a918c204c7235049 | [] | no_license | carter3689/django-intro | c1c32d742548e27732580d32321648f054d1338d | 155042398d9f2505e44dfa9cfe0a2f7ad3f8131d | refs/heads/master | 2021-01-01T04:32:49.347962 | 2017-07-14T04:57:02 | 2017-07-14T04:57:02 | 97,194,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | """
Django settings for cfehome project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p!@78+nocob7yj%nean8wwes$s_vmp2$!sahv8#gopd0mi20zn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cfehome.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cfehome.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"carter3689@gmail.com"
] | carter3689@gmail.com |
1b5cfbbbd9c2bda32860b5aa02de874a92e8bf6f | b24b9a67b94c1c46e4eaf9ee928ffc65a6d382d3 | /Settings.py | 8a72a8085a6323589f91029db36e4850bb0b23bb | [
"MIT"
] | permissive | tomaarsen/TwitchAIDungeon | a7c5977ec7e04209ce6069dffe2971d3f42b15e3 | 0414f6a90e3ad71b86f28602de05ed14bb390067 | refs/heads/master | 2023-04-23T11:19:50.036323 | 2021-05-11T19:11:12 | 2021-05-11T19:11:12 | 236,295,116 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,537 | py |
import json, os, logging
logger = logging.getLogger(__name__)
# Note, this logger will be overridden due the logger
# itself using this module for a logging file name
class FileErrorHandler:
"""
This class acts as a Context Manager for handling,
guiding and modifying errors regarding the settings.json file.
"""
def __init__(self):
super().__init__()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type:
if exc_type in (ValueError, json.decoder.JSONDecodeError):
# If there is a ValueError or json.decoder.JSONDecodeError,
# we want to let the user know their settings.json file is incorrect.
raise ValueError("There is an error in your settings file.")
elif exc_type is FileNotFoundError:
# If the file is missing, create a standardised settings.json file
# With all parameters required.
with open(Settings.PATH, "w") as f:
standard_dict = {
"Host": "irc.chat.twitch.tv",
"Port": 6667,
"Channel": "#<channel>",
"Nickname": "<name>",
"Authentication": "oauth:<auth>",
"Cooldown": 20,
"X-Access-Token": "<accessToken>",
"AllowedRanks": [
"broadcaster",
"moderator",
"vip"
],
"AllowedUsers": [],
"CustomPrompt": "You are Bot, a wizard living in the kingdom of Larion. You have a staff and a spellbook. You finish your long journey and finally arrive at the ruin you've been looking for. You look around and see that it's not much different than when you left it. A few more trees here and there, but nothing has changed."
}
f.write(json.dumps(standard_dict, indent=4, separators=(",", ": ")))
raise ValueError("Please fix your settings.json file that was just generated.")
return False
class Settings:
""" Loads data from settings.json into the bot """
PATH = os.path.join(os.getcwd(), "settings.json")
def __init__(self, bot):
with FileErrorHandler():
# Try to load the file using json.
# And pass the data to the Bot class instance if this succeeds.
logger.debug("Starting setting settings...")
with open(Settings.PATH, "r") as f:
settings = f.read()
data = json.loads(settings)
bot.set_settings(data["Host"],
data["Port"],
data["Channel"],
data["Nickname"],
data["Authentication"],
data["Cooldown"],
data["X-Access-Token"],
data["AllowedRanks"],
data["AllowedUsers"],
data["CustomPrompt"])
logger.debug("Finished setting settings.")
@staticmethod
def update_cooldown(cooldown):
with FileErrorHandler():
logger.info(f"Updating cooldown to {cooldown}s...")
with open(Settings.PATH, "r") as f:
settings = f.read()
data = json.loads(settings)
data["Cooldown"] = cooldown
with open(Settings.PATH, "w") as f:
f.write(json.dumps(data, indent=4, separators=(",", ": ")))
logger.info(f"Finished updating cooldown.")
@staticmethod
def get_channel():
with FileErrorHandler():
with open(Settings.PATH, "r") as f:
settings = f.read()
data = json.loads(settings)
return data["Channel"].replace("#", "").lower()
@staticmethod
def set_logger():
# Update logger. This is required as this class is used to set up the logging file
global logger
logger = logging.getLogger(__name__)
| [
"cubiegamedev@gmail.com"
] | cubiegamedev@gmail.com |
c047eb7f015d7ac9bf99d314da52c0404ebeec3e | 0364b8a66cae5f291e6c00f4acb9bd1819cac0d1 | /project0_boxofficemojo/bom_parallel_sql.py | 7f17c7277a7e8828a398caad01685c66a9ecfcaf | [] | no_license | vinyasmusic/data_science | 22406ee5f187f3799a24df36c1294e1cf2ff5084 | 1b90840210072bc11e26531cde79751658dddcd9 | refs/heads/master | 2021-01-15T18:21:27.238795 | 2016-03-21T18:19:29 | 2016-03-21T18:19:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110,170 | py |
<!DOCTYPE html>
<html lang="en" class=" is-copy-enabled">
<head prefix="og: http://ogp.me/ns# fb: http://ogp.me/ns/fb# object: http://ogp.me/ns/object# article: http://ogp.me/ns/article# profile: http://ogp.me/ns/profile#">
<meta charset='utf-8'>
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/frameworks-2cbc4060c4ac5481bfbedea9bb8fc752a93db97fca12f72ed99555906f567387.css" integrity="sha256-LLxAYMSsVIG/vt6pu4/HUqk9uX/KEvcu2ZVVkG9Wc4c=" media="all" rel="stylesheet" />
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github-96ad60c299ea29f60354f3cd04103143fde49fa6c81f6c8573b8535c594ee9b0.css" integrity="sha256-lq1gwpnqKfYDVPPNBBAxQ/3kn6bIH2yFc7hTXFlO6bA=" media="all" rel="stylesheet" />
<link as="script" href="https://assets-cdn.github.com/assets/frameworks-d7233eaabb7531aaf275fa71e7bca3c43cb11eae12c8359622bd13d725320aee.js" rel="preload" />
<link as="script" href="https://assets-cdn.github.com/assets/github-be09baa865e75e7094b35490a7eaf276a214c9e78489fd8a161a61b10da8fca8.js" rel="preload" />
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta http-equiv="Content-Language" content="en">
<meta name="viewport" content="width=1020">
<title>Box-Office-Mojo-Scrapper/bom_parallel_sql.py at master · csredino/Box-Office-Mojo-Scrapper</title>
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
<link rel="apple-touch-icon" href="/apple-touch-icon.png">
<link rel="apple-touch-icon" sizes="57x57" href="/apple-touch-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="/apple-touch-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="/apple-touch-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="/apple-touch-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="/apple-touch-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="/apple-touch-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="/apple-touch-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="/apple-touch-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon-180x180.png">
<meta property="fb:app_id" content="1401488693436528">
<meta content="https://avatars3.githubusercontent.com/u/6742721?v=3&s=400" name="twitter:image:src" /><meta content="@github" name="twitter:site" /><meta content="summary" name="twitter:card" /><meta content="csredino/Box-Office-Mojo-Scrapper" name="twitter:title" /><meta content="Box-Office-Mojo-Scrapper - For fun and self teaching projects" name="twitter:description" />
<meta content="https://avatars3.githubusercontent.com/u/6742721?v=3&s=400" property="og:image" /><meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="csredino/Box-Office-Mojo-Scrapper" property="og:title" /><meta content="https://github.com/csredino/Box-Office-Mojo-Scrapper" property="og:url" /><meta content="Box-Office-Mojo-Scrapper - For fun and self teaching projects" property="og:description" />
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
<link rel="assets" href="https://assets-cdn.github.com/">
<link rel="web-socket" href="wss://live.github.com/_sockets/Njc0MjcyMTpmZTI3YmZhYTUzNGI4NDFjYTU4ZmUzNzE4ZmMzNjZlZTpiNzA2YzRmMGUxZGU3MTI0M2M3YmQ5Mjc4MDcxMTFlMWUxNGUwNWY5MTFhOGEyMzI2ZmIyODRmM2NjOWRlYjFl--27de37a3b504c511c1f5991df44cefc07b2d8c8b">
<meta name="pjax-timeout" content="1000">
<link rel="sudo-modal" href="/sessions/sudo_modal">
<meta name="msapplication-TileImage" content="/windows-tile.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="selected-link" value="repo_source" data-pjax-transient>
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
<meta name="google-analytics" content="UA-3769691-2">
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="github" name="octolytics-app-id" /><meta content="AD443D70:38F9:8FF2C35:56F025C2" name="octolytics-dimension-request_id" /><meta content="6742721" name="octolytics-actor-id" /><meta content="csredino" name="octolytics-actor-login" /><meta content="2662531c780fb3bcf4f09d1959b2cce0d86eee5cea3f509ecfcb0c161e345fce" name="octolytics-actor-hash" />
<meta content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" name="analytics-location" />
<meta class="js-ga-set" name="dimension1" content="Logged In">
<meta name="hostname" content="github.com">
<meta name="user-login" content="csredino">
<meta name="expected-hostname" content="github.com">
<meta name="js-proxy-site-detection-payload" content="OGUyYWQ5OGJjZTJmNjVjMmRjMzE5MGEzOTQ0MDM5YWUzYzFhMjA1M2JmNjdjY2M5M2I2Y2ViZmU0ODI0Y2YxN3x7InJlbW90ZV9hZGRyZXNzIjoiMTczLjY4LjYxLjExMiIsInJlcXVlc3RfaWQiOiJBRDQ0M0Q3MDozOEY5OjhGRjJDMzU6NTZGMDI1QzIifQ==">
<link rel="mask-icon" href="https://assets-cdn.github.com/pinned-octocat.svg" color="#4078c0">
<link rel="icon" type="image/x-icon" href="https://assets-cdn.github.com/favicon.ico">
<meta content="7c82ff970511ce1e84e10f05313828aac8a44489" name="form-nonce" />
<meta http-equiv="x-pjax-version" content="0d093e2d1eab9d4032686ae1025986f5">
<meta name="description" content="Box-Office-Mojo-Scrapper - For fun and self teaching projects">
<meta name="go-import" content="github.com/csredino/Box-Office-Mojo-Scrapper git https://github.com/csredino/Box-Office-Mojo-Scrapper.git">
<meta content="6742721" name="octolytics-dimension-user_id" /><meta content="csredino" name="octolytics-dimension-user_login" /><meta content="39412489" name="octolytics-dimension-repository_id" /><meta content="csredino/Box-Office-Mojo-Scrapper" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="39412489" name="octolytics-dimension-repository_network_root_id" /><meta content="csredino/Box-Office-Mojo-Scrapper" name="octolytics-dimension-repository_network_root_nwo" />
<link href="https://github.com/csredino/Box-Office-Mojo-Scrapper/commits/master.atom" rel="alternate" title="Recent Commits to Box-Office-Mojo-Scrapper:master" type="application/atom+xml">
<link rel="canonical" href="https://github.com/csredino/Box-Office-Mojo-Scrapper/blob/master/bom_parallel_sql.py" data-pjax-transient>
</head>
<body class="logged_in env-production linux vis-public page-blob">
<a href="#start-of-content" tabindex="1" class="accessibility-aid js-skip-to-content">Skip to content</a>
<div class="header header-logged-in true" role="banner">
<div class="container clearfix">
<a class="header-logo-invertocat" href="https://github.com/" data-hotkey="g d" aria-label="Homepage" data-ga-click="Header, go to dashboard, icon:logo">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="28" role="img" version="1.1" viewBox="0 0 16 16" width="28"><path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59 0.4 0.07 0.55-0.17 0.55-0.38 0-0.19-0.01-0.82-0.01-1.49-2.01 0.37-2.53-0.49-2.69-0.94-0.09-0.23-0.48-0.94-0.82-1.13-0.28-0.15-0.68-0.52-0.01-0.53 0.63-0.01 1.08 0.58 1.23 0.82 0.72 1.21 1.87 0.87 2.33 0.66 0.07-0.52 0.28-0.87 0.51-1.07-1.78-0.2-3.64-0.89-3.64-3.95 0-0.87 0.31-1.59 0.82-2.15-0.08-0.2-0.36-1.02 0.08-2.12 0 0 0.67-0.21 2.2 0.82 0.64-0.18 1.32-0.27 2-0.27 0.68 0 1.36 0.09 2 0.27 1.53-1.04 2.2-0.82 2.2-0.82 0.44 1.1 0.16 1.92 0.08 2.12 0.51 0.56 0.82 1.27 0.82 2.15 0 3.07-1.87 3.75-3.65 3.95 0.29 0.25 0.54 0.73 0.54 1.48 0 1.07-0.01 1.93-0.01 2.2 0 0.21 0.15 0.46 0.55 0.38C13.71 14.53 16 11.53 16 8 16 3.58 12.42 0 8 0z"></path></svg>
</a>
<div class="site-search scoped-search js-site-search" role="search">
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/search" class="js-site-search-form" data-scoped-search-url="/csredino/Box-Office-Mojo-Scrapper/search" data-unscoped-search-url="/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<label class="js-chromeless-input-container form-control">
<div class="scope-badge">This repository</div>
<input type="text"
class="form-control js-site-search-focus js-site-search-field is-clearable chromeless-input"
data-hotkey="s"
name="q"
placeholder="Search"
aria-label="Search this repository"
data-unscoped-placeholder="Search GitHub"
data-scoped-placeholder="Search"
tabindex="1"
autocapitalize="off">
</label>
</form>
</div>
<ul class="header-nav left" role="navigation">
<li class="header-nav-item">
<a href="/pulls" class="js-selected-navigation-item header-nav-link" data-ga-click="Header, click, Nav menu - item:pulls context:user" data-hotkey="g p" data-selected-links="/pulls /pulls/assigned /pulls/mentioned /pulls">
Pull requests
</a> </li>
<li class="header-nav-item">
<a href="/issues" class="js-selected-navigation-item header-nav-link" data-ga-click="Header, click, Nav menu - item:issues context:user" data-hotkey="g i" data-selected-links="/issues /issues/assigned /issues/mentioned /issues">
Issues
</a> </li>
<li class="header-nav-item">
<a class="header-nav-link" href="https://gist.github.com/" data-ga-click="Header, go to gist, text:gist">Gist</a>
</li>
</ul>
<ul class="header-nav user-nav right" id="user-links">
<li class="header-nav-item">
<a href="/notifications" aria-label="You have no unread notifications" class="header-nav-link notification-indicator tooltipped tooltipped-s js-socket-channel js-notification-indicator" data-channel="notification-changed-v2:6742721" data-ga-click="Header, go to notifications, icon:read" data-hotkey="g n">
<span class="mail-status "></span>
<svg aria-hidden="true" class="octicon octicon-bell" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M14 12v1H0v-1l0.73-0.58c0.77-0.77 0.81-2.55 1.19-4.42 0.77-3.77 4.08-5 4.08-5 0-0.55 0.45-1 1-1s1 0.45 1 1c0 0 3.39 1.23 4.16 5 0.38 1.88 0.42 3.66 1.19 4.42l0.66 0.58z m-7 4c1.11 0 2-0.89 2-2H5c0 1.11 0.89 2 2 2z"></path></svg>
</a>
</li>
<li class="header-nav-item dropdown js-menu-container">
<a class="header-nav-link tooltipped tooltipped-s js-menu-target" href="/new"
aria-label="Create new…"
data-ga-click="Header, create new, icon:add">
<svg aria-hidden="true" class="octicon octicon-plus left" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M12 9H7v5H5V9H0V7h5V2h2v5h5v2z"></path></svg>
<span class="dropdown-caret"></span>
</a>
<div class="dropdown-menu-content js-menu-content">
<ul class="dropdown-menu dropdown-menu-sw">
<a class="dropdown-item" href="/new" data-ga-click="Header, create new repository">
New repository
</a>
<a class="dropdown-item" href="/organizations/new" data-ga-click="Header, create new organization">
New organization
</a>
<div class="dropdown-divider"></div>
<div class="dropdown-header">
<span title="csredino/Box-Office-Mojo-Scrapper">This repository</span>
</div>
<a class="dropdown-item" href="/csredino/Box-Office-Mojo-Scrapper/issues/new" data-ga-click="Header, create new issue">
New issue
</a>
<a class="dropdown-item" href="/csredino/Box-Office-Mojo-Scrapper/settings/collaboration" data-ga-click="Header, create new collaborator">
New collaborator
</a>
</ul>
</div>
</li>
<li class="header-nav-item dropdown js-menu-container">
<a class="header-nav-link name tooltipped tooltipped-sw js-menu-target" href="/csredino"
aria-label="View profile and more"
data-ga-click="Header, show menu, icon:avatar">
<img alt="@csredino" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/6742721?v=3&s=40" width="20" />
<span class="dropdown-caret"></span>
</a>
<div class="dropdown-menu-content js-menu-content">
<div class="dropdown-menu dropdown-menu-sw">
<div class=" dropdown-header header-nav-current-user css-truncate">
Signed in as <strong class="css-truncate-target">csredino</strong>
</div>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="/csredino" data-ga-click="Header, go to profile, text:your profile">
Your profile
</a>
<a class="dropdown-item" href="/stars" data-ga-click="Header, go to starred repos, text:your stars">
Your stars
</a>
<a class="dropdown-item" href="/explore" data-ga-click="Header, go to explore, text:explore">
Explore
</a>
<a class="dropdown-item" href="/integrations" data-ga-click="Header, go to integrations, text:integrations">
Integrations
</a>
<a class="dropdown-item" href="https://help.github.com" data-ga-click="Header, go to help, text:help">
Help
</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="/settings/profile" data-ga-click="Header, go to settings, icon:settings">
Settings
</a>
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/logout" class="logout-form" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="hEqzFliwy3p2NvF51N3o2mGNrVpzhwvJ/akhzjpQH4kIShRBdtPiXuayJd1hrbZXRzDV0DxVrip/3lQOdhy4lg==" /></div>
<button class="dropdown-item dropdown-signout" data-ga-click="Header, sign out, icon:logout">
Sign out
</button>
</form>
</div>
</div>
</li>
</ul>
</div>
</div>
<div id="start-of-content" class="accessibility-aid"></div>
<div id="js-flash-container">
</div>
<div role="main" class="main-content">
<div itemscope itemtype="http://schema.org/SoftwareSourceCode">
<div id="js-repo-pjax-container" class="context-loader-container js-repo-nav-next" data-pjax-container>
<div class="pagehead repohead instapaper_ignore readability-menu experiment-repo-nav">
<div class="container repohead-details-container">
<ul class="pagehead-actions">
<li>
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/notifications/subscribe" class="js-social-container" data-autosubmit="true" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="QEta690Gb9ewiRmihxzR9gfU1e6u3Vs0LBTaYGsGoosV5Ew8797K+InZWDwXz6Wx1vPTGX7RCaozHusBaLO89g==" /></div> <input class="form-control" id="repository_id" name="repository_id" type="hidden" value="39412489" />
<div class="select-menu js-menu-container js-select-menu">
<a href="/csredino/Box-Office-Mojo-Scrapper/subscription"
class="btn btn-sm btn-with-count select-menu-button js-menu-target" role="button" tabindex="0" aria-haspopup="true"
data-ga-click="Repository, click Watch settings, action:blob#show">
<span class="js-select-button">
<svg aria-hidden="true" class="octicon octicon-eye" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6c4.94 0 7.94-6 7.94-6S13 2 8.06 2z m-0.06 10c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4z m2-4c0 1.11-0.89 2-2 2s-2-0.89-2-2 0.89-2 2-2 2 0.89 2 2z"></path></svg>
Unwatch
</span>
</a>
<a class="social-count js-social-count" href="/csredino/Box-Office-Mojo-Scrapper/watchers">
1
</a>
<div class="select-menu-modal-holder">
<div class="select-menu-modal subscription-menu-modal js-menu-content" aria-hidden="true">
<div class="select-menu-header">
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M7.48 8l3.75 3.75-1.48 1.48-3.75-3.75-3.75 3.75-1.48-1.48 3.75-3.75L0.77 4.25l1.48-1.48 3.75 3.75 3.75-3.75 1.48 1.48-3.75 3.75z"></path></svg>
<span class="select-menu-title">Notifications</span>
</div>
<div class="select-menu-list js-navigation-container" role="menu">
<div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M12 5L4 13 0 9l1.5-1.5 2.5 2.5 6.5-6.5 1.5 1.5z"></path></svg>
<div class="select-menu-item-text">
<input id="do_included" name="do" type="radio" value="included" />
<span class="select-menu-item-heading">Not watching</span>
<span class="description">Be notified when participating or @mentioned.</span>
<span class="js-select-button-text hidden-select-button-text">
<svg aria-hidden="true" class="octicon octicon-eye" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6c4.94 0 7.94-6 7.94-6S13 2 8.06 2z m-0.06 10c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4z m2-4c0 1.11-0.89 2-2 2s-2-0.89-2-2 0.89-2 2-2 2 0.89 2 2z"></path></svg>
Watch
</span>
</div>
</div>
<div class="select-menu-item js-navigation-item selected" role="menuitem" tabindex="0">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M12 5L4 13 0 9l1.5-1.5 2.5 2.5 6.5-6.5 1.5 1.5z"></path></svg>
<div class="select-menu-item-text">
<input checked="checked" id="do_subscribed" name="do" type="radio" value="subscribed" />
<span class="select-menu-item-heading">Watching</span>
<span class="description">Be notified of all conversations.</span>
<span class="js-select-button-text hidden-select-button-text">
<svg aria-hidden="true" class="octicon octicon-eye" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6c4.94 0 7.94-6 7.94-6S13 2 8.06 2z m-0.06 10c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4z m2-4c0 1.11-0.89 2-2 2s-2-0.89-2-2 0.89-2 2-2 2 0.89 2 2z"></path></svg>
Unwatch
</span>
</div>
</div>
<div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M12 5L4 13 0 9l1.5-1.5 2.5 2.5 6.5-6.5 1.5 1.5z"></path></svg>
<div class="select-menu-item-text">
<input id="do_ignore" name="do" type="radio" value="ignore" />
<span class="select-menu-item-heading">Ignoring</span>
<span class="description">Never be notified.</span>
<span class="js-select-button-text hidden-select-button-text">
<svg aria-hidden="true" class="octicon octicon-mute" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M8 2.81v10.38c0 0.67-0.81 1-1.28 0.53L3 10H1c-0.55 0-1-0.45-1-1V7c0-0.55 0.45-1 1-1h2l3.72-3.72c0.47-0.47 1.28-0.14 1.28 0.53z m7.53 3.22l-1.06-1.06-1.97 1.97-1.97-1.97-1.06 1.06 1.97 1.97-1.97 1.97 1.06 1.06 1.97-1.97 1.97 1.97 1.06-1.06-1.97-1.97 1.97-1.97z"></path></svg>
Stop ignoring
</span>
</div>
</div>
</div>
</div>
</div>
</div>
</form>
</li>
<li>
<div class="js-toggler-container js-social-container starring-container ">
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/unstar" class="js-toggler-form starred" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="63xO7aRZgxGDe3ed/UDob2cZv5AFnx7nJZLsuexSUfv81p9FvuKJcJVBC7qXCc0zHWyMCHfy/AtdWC5q4rjlUg==" /></div>
<button
class="btn btn-sm btn-with-count js-toggler-target"
aria-label="Unstar this repository" title="Unstar csredino/Box-Office-Mojo-Scrapper"
data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar">
<svg aria-hidden="true" class="octicon octicon-star" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M14 6l-4.9-0.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14l4.33-2.33 4.33 2.33L10.4 9.26 14 6z"></path></svg>
Unstar
</button>
<a class="social-count js-social-count" href="/csredino/Box-Office-Mojo-Scrapper/stargazers">
0
</a>
</form>
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/star" class="js-toggler-form unstarred" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="NOKrQrxIo+rC6pHJCJXHbmLasI09QXq0Y5qVueXND9SKrOBf52Lwp9RZ1sAEBMNMQZi68qLIyfwz36zeYiSoRQ==" /></div>
<button
class="btn btn-sm btn-with-count js-toggler-target"
aria-label="Star this repository" title="Star csredino/Box-Office-Mojo-Scrapper"
data-ga-click="Repository, click star button, action:blob#show; text:Star">
<svg aria-hidden="true" class="octicon octicon-star" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M14 6l-4.9-0.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14l4.33-2.33 4.33 2.33L10.4 9.26 14 6z"></path></svg>
Star
</button>
<a class="social-count js-social-count" href="/csredino/Box-Office-Mojo-Scrapper/stargazers">
0
</a>
</form> </div>
</li>
<li>
<a href="#fork-destination-box" class="btn btn-sm btn-with-count"
title="Fork your own copy of csredino/Box-Office-Mojo-Scrapper to your account"
aria-label="Fork your own copy of csredino/Box-Office-Mojo-Scrapper to your account"
rel="facebox"
data-ga-click="Repository, show fork modal, action:blob#show; text:Fork">
<svg aria-hidden="true" class="octicon octicon-repo-forked" height="16" role="img" version="1.1" viewBox="0 0 10 16" width="10"><path d="M8 1c-1.11 0-2 0.89-2 2 0 0.73 0.41 1.38 1 1.72v1.28L5 8 3 6v-1.28c0.59-0.34 1-0.98 1-1.72 0-1.11-0.89-2-2-2S0 1.89 0 3c0 0.73 0.41 1.38 1 1.72v1.78l3 3v1.78c-0.59 0.34-1 0.98-1 1.72 0 1.11 0.89 2 2 2s2-0.89 2-2c0-0.73-0.41-1.38-1-1.72V9.5l3-3V4.72c0.59-0.34 1-0.98 1-1.72 0-1.11-0.89-2-2-2zM2 4.2c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z m3 10c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z m3-10c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z"></path></svg>
Fork
</a>
<div id="fork-destination-box" style="display: none;">
<h2 class="facebox-header" data-facebox-id="facebox-header">Where should we fork this repository?</h2>
<include-fragment src=""
class="js-fork-select-fragment fork-select-fragment"
data-url="/csredino/Box-Office-Mojo-Scrapper/fork?fragment=1">
<img alt="Loading" height="64" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-128.gif" width="64" />
</include-fragment>
</div>
<a href="/csredino/Box-Office-Mojo-Scrapper/network" class="social-count">
0
</a>
</li>
</ul>
<h1 class="entry-title public ">
<svg aria-hidden="true" class="octicon octicon-repo" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M4 9h-1v-1h1v1z m0-3h-1v1h1v-1z m0-2h-1v1h1v-1z m0-2h-1v1h1v-1z m8-1v12c0 0.55-0.45 1-1 1H6v2l-1.5-1.5-1.5 1.5V14H1c-0.55 0-1-0.45-1-1V1C0 0.45 0.45 0 1 0h10c0.55 0 1 0.45 1 1z m-1 10H1v2h2v-1h3v1h5V11z m0-10H2v9h9V1z"></path></svg>
<span class="author" itemprop="author"><a href="/csredino" class="url fn" rel="author">csredino</a></span><!--
--><span class="path-divider">/</span><!--
--><strong itemprop="name"><a href="/csredino/Box-Office-Mojo-Scrapper" data-pjax="#js-repo-pjax-container">Box-Office-Mojo-Scrapper</a></strong>
<span class="page-context-loader">
<img alt="" height="16" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-32.gif" width="16" />
</span>
</h1>
</div>
<div class="container">
<nav class="reponav js-repo-nav js-sidenav-container-pjax js-octicon-loaders"
itemscope
itemtype="http://schema.org/BreadcrumbList"
role="navigation"
data-pjax="#js-repo-pjax-container">
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/csredino/Box-Office-Mojo-Scrapper" aria-selected="true" class="js-selected-navigation-item selected reponav-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches /csredino/Box-Office-Mojo-Scrapper" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-code" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M9.5 3l-1.5 1.5 3.5 3.5L8 11.5l1.5 1.5 4.5-5L9.5 3zM4.5 3L0 8l4.5 5 1.5-1.5L2.5 8l3.5-3.5L4.5 3z"></path></svg>
<span itemprop="name">Code</span>
<meta itemprop="position" content="1">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/csredino/Box-Office-Mojo-Scrapper/issues" class="js-selected-navigation-item reponav-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /csredino/Box-Office-Mojo-Scrapper/issues" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-issue-opened" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7S10.14 13.7 7 13.7 1.3 11.14 1.3 8s2.56-5.7 5.7-5.7m0-1.3C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7S10.86 1 7 1z m1 3H6v5h2V4z m0 6H6v2h2V10z"></path></svg>
<span itemprop="name">Issues</span>
<span class="counter">0</span>
<meta itemprop="position" content="2">
</a> </span>
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
<a href="/csredino/Box-Office-Mojo-Scrapper/pulls" class="js-selected-navigation-item reponav-item" data-hotkey="g p" data-selected-links="repo_pulls /csredino/Box-Office-Mojo-Scrapper/pulls" itemprop="url">
<svg aria-hidden="true" class="octicon octicon-git-pull-request" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M11 11.28c0-1.73 0-6.28 0-6.28-0.03-0.78-0.34-1.47-0.94-2.06s-1.28-0.91-2.06-0.94c0 0-1.02 0-1 0V0L4 3l3 3V4h1c0.27 0.02 0.48 0.11 0.69 0.31s0.3 0.42 0.31 0.69v6.28c-0.59 0.34-1 0.98-1 1.72 0 1.11 0.89 2 2 2s2-0.89 2-2c0-0.73-0.41-1.38-1-1.72z m-1 2.92c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2zM4 3c0-1.11-0.89-2-2-2S0 1.89 0 3c0 0.73 0.41 1.38 1 1.72 0 1.55 0 5.56 0 6.56-0.59 0.34-1 0.98-1 1.72 0 1.11 0.89 2 2 2s2-0.89 2-2c0-0.73-0.41-1.38-1-1.72V4.72c0.59-0.34 1-0.98 1-1.72z m-0.8 10c0 0.66-0.55 1.2-1.2 1.2s-1.2-0.55-1.2-1.2 0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2z m-1.2-8.8c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z"></path></svg>
<span itemprop="name">Pull requests</span>
<span class="counter">0</span>
<meta itemprop="position" content="3">
</a> </span>
<a href="/csredino/Box-Office-Mojo-Scrapper/wiki" class="js-selected-navigation-item reponav-item" data-hotkey="g w" data-selected-links="repo_wiki /csredino/Box-Office-Mojo-Scrapper/wiki">
<svg aria-hidden="true" class="octicon octicon-book" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M2 5h4v1H2v-1z m0 3h4v-1H2v1z m0 2h4v-1H2v1z m11-5H9v1h4v-1z m0 2H9v1h4v-1z m0 2H9v1h4v-1z m2-6v9c0 0.55-0.45 1-1 1H8.5l-1 1-1-1H1c-0.55 0-1-0.45-1-1V3c0-0.55 0.45-1 1-1h5.5l1 1 1-1h5.5c0.55 0 1 0.45 1 1z m-8 0.5l-0.5-0.5H1v9h6V3.5z m7-0.5H8.5l-0.5 0.5v8.5h6V3z"></path></svg>
Wiki
</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/pulse" class="js-selected-navigation-item reponav-item" data-selected-links="pulse /csredino/Box-Office-Mojo-Scrapper/pulse">
<svg aria-hidden="true" class="octicon octicon-pulse" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M11.5 8L8.8 5.4 6.6 8.5 5.5 1.6 2.38 8H0V10h3.6L4.5 8.2l0.9 5.4L9 8.5l1.6 1.5H14V8H11.5z"></path></svg>
Pulse
</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/graphs" class="js-selected-navigation-item reponav-item" data-selected-links="repo_graphs repo_contributors /csredino/Box-Office-Mojo-Scrapper/graphs">
<svg aria-hidden="true" class="octicon octicon-graph" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M16 14v1H0V0h1v14h15z m-11-1H3V8h2v5z m4 0H7V3h2v10z m4 0H11V6h2v7z"></path></svg>
Graphs
</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/settings" class="js-selected-navigation-item reponav-item" data-selected-links="repo_settings repo_branch_settings hooks /csredino/Box-Office-Mojo-Scrapper/settings">
<svg aria-hidden="true" class="octicon octicon-gear" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M14 8.77V7.17l-1.94-0.64-0.45-1.09 0.88-1.84-1.13-1.13-1.81 0.91-1.09-0.45-0.69-1.92H6.17l-0.63 1.94-1.11 0.45-1.84-0.88-1.13 1.13 0.91 1.81-0.45 1.09L0 7.23v1.59l1.94 0.64 0.45 1.09-0.88 1.84 1.13 1.13 1.81-0.91 1.09 0.45 0.69 1.92h1.59l0.63-1.94 1.11-0.45 1.84 0.88 1.13-1.13-0.92-1.81 0.47-1.09 1.92-0.69zM7 11c-1.66 0-3-1.34-3-3s1.34-3 3-3 3 1.34 3 3-1.34 3-3 3z"></path></svg>
Settings
</a>
</nav>
</div>
</div>
<div class="container new-discussion-timeline experiment-repo-nav">
<div class="repository-content">
<a href="/csredino/Box-Office-Mojo-Scrapper/blob/cb35ecc6cb65579762f67d4969140dcc1737d5fe/bom_parallel_sql.py" class="hidden js-permalink-shortcut" data-hotkey="y">Permalink</a>
<!-- blob contrib key: blob_contributors:v21:2778b2ddaa38e9689e2b42133dd338d5 -->
<div class="file-navigation js-zeroclipboard-container">
<div class="select-menu js-menu-container js-select-menu left">
<button class="btn btn-sm select-menu-button js-menu-target css-truncate" data-hotkey="w"
title="master"
type="button" aria-label="Switch branches or tags" tabindex="0" aria-haspopup="true">
<i>Branch:</i>
<span class="js-select-button css-truncate-target">master</span>
</button>
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax aria-hidden="true">
<div class="select-menu-modal">
<div class="select-menu-header">
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M7.48 8l3.75 3.75-1.48 1.48-3.75-3.75-3.75 3.75-1.48-1.48 3.75-3.75L0.77 4.25l1.48-1.48 3.75 3.75 3.75-3.75 1.48 1.48-3.75 3.75z"></path></svg>
<span class="select-menu-title">Switch branches/tags</span>
</div>
<div class="select-menu-filters">
<div class="select-menu-text-filter">
<input type="text" aria-label="Find or create a branch…" id="context-commitish-filter-field" class="form-control js-filterable-field js-navigation-enable" placeholder="Find or create a branch…">
</div>
<div class="select-menu-tabs">
<ul>
<li class="select-menu-tab">
<a href="#" data-tab-filter="branches" data-filter-placeholder="Find or create a branch…" class="js-select-menu-tab" role="tab">Branches</a>
</li>
<li class="select-menu-tab">
<a href="#" data-tab-filter="tags" data-filter-placeholder="Find a tag…" class="js-select-menu-tab" role="tab">Tags</a>
</li>
</ul>
</div>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches" role="menu">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
<a class="select-menu-item js-navigation-item js-navigation-open selected"
href="/csredino/Box-Office-Mojo-Scrapper/blob/master/bom_parallel_sql.py"
data-name="master"
data-skip-pjax="true"
rel="nofollow">
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M12 5L4 13 0 9l1.5-1.5 2.5 2.5 6.5-6.5 1.5 1.5z"></path></svg>
<span class="select-menu-item-text css-truncate-target js-select-menu-filter-text" title="master">
master
</span>
</a>
</div>
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/branches" class="js-create-branch select-menu-item select-menu-new-item-form js-navigation-item js-new-item-form" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="3XmyuC6qhhvv7JH5J3P+ByqR5aE8Pk5+RHSMhB+UbHvZHreGtnYknW9iTOe4HpqU/+qmY5ZNxbiSKWI2pjhE+A==" /></div>
<svg aria-hidden="true" class="octicon octicon-git-branch select-menu-item-icon" height="16" role="img" version="1.1" viewBox="0 0 10 16" width="10"><path d="M10 5c0-1.11-0.89-2-2-2s-2 0.89-2 2c0 0.73 0.41 1.38 1 1.72v0.3c-0.02 0.52-0.23 0.98-0.63 1.38s-0.86 0.61-1.38 0.63c-0.83 0.02-1.48 0.16-2 0.45V4.72c0.59-0.34 1-0.98 1-1.72 0-1.11-0.89-2-2-2S0 1.89 0 3c0 0.73 0.41 1.38 1 1.72v6.56C0.41 11.63 0 12.27 0 13c0 1.11 0.89 2 2 2s2-0.89 2-2c0-0.53-0.2-1-0.53-1.36 0.09-0.06 0.48-0.41 0.59-0.47 0.25-0.11 0.56-0.17 0.94-0.17 1.05-0.05 1.95-0.45 2.75-1.25s1.2-1.98 1.25-3.02h-0.02c0.61-0.36 1.02-1 1.02-1.73zM2 1.8c0.66 0 1.2 0.55 1.2 1.2s-0.55 1.2-1.2 1.2-1.2-0.55-1.2-1.2 0.55-1.2 1.2-1.2z m0 12.41c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z m6-8c-0.66 0-1.2-0.55-1.2-1.2s0.55-1.2 1.2-1.2 1.2 0.55 1.2 1.2-0.55 1.2-1.2 1.2z"></path></svg>
<div class="select-menu-item-text">
<span class="select-menu-item-heading">Create branch: <span class="js-new-item-name"></span></span>
<span class="description">from ‘master’</span>
</div>
<input type="hidden" name="name" id="name" class="js-new-item-value">
<input type="hidden" name="branch" id="branch" value="master">
<input type="hidden" name="path" id="path" value="bom_parallel_sql.py">
</form>
</div>
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
</div>
<div class="select-menu-no-results">Nothing to show</div>
</div>
</div>
</div>
</div>
<div class="btn-group right">
<a href="/csredino/Box-Office-Mojo-Scrapper/find/master"
class="js-pjax-capture-input btn btn-sm"
data-pjax
data-hotkey="t">
Find file
</a>
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard btn btn-sm zeroclipboard-button tooltipped tooltipped-s" data-copied-hint="Copied!" type="button">Copy path</button>
</div>
<div class="breadcrumb js-zeroclipboard-target">
<span class="repo-root js-repo-root"><span class="js-path-segment"><a href="/csredino/Box-Office-Mojo-Scrapper"><span>Box-Office-Mojo-Scrapper</span></a></span></span><span class="separator">/</span><strong class="final-path">bom_parallel_sql.py</strong>
</div>
</div>
<div class="commit-tease">
<span class="right">
<a class="commit-tease-sha" href="/csredino/Box-Office-Mojo-Scrapper/commit/cb35ecc6cb65579762f67d4969140dcc1737d5fe" data-pjax>
cb35ecc
</a>
<time datetime="2015-07-29T15:42:21Z" is="relative-time">Jul 29, 2015</time>
</span>
<div>
<img alt="@csredino" class="avatar" height="20" src="https://avatars1.githubusercontent.com/u/6742721?v=3&s=40" width="20" />
<a href="/csredino" class="user-mention" rel="author">csredino</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/commit/cb35ecc6cb65579762f67d4969140dcc1737d5fe" class="message" data-pjax="true" title="parallel and sql
-added multiprocessing to scrapper
-added a couple analysis scripts that produce plots
-added versions that use SQL database instead of csv for storing data">parallel and sql</a>
</div>
<div class="commit-tease-contributors">
<button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box">
<strong>1</strong>
contributor
</button>
</div>
<div id="blob_contributors_box" style="display:none">
<h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
<ul class="facebox-user-list" data-facebox-id="facebox-description">
<li class="facebox-user-list-item">
<img alt="@csredino" height="24" src="https://avatars3.githubusercontent.com/u/6742721?v=3&s=48" width="24" />
<a href="/csredino">csredino</a>
</li>
</ul>
</div>
</div>
<div class="file">
<div class="file-header">
<div class="file-actions">
<div class="btn-group">
<a href="/csredino/Box-Office-Mojo-Scrapper/raw/master/bom_parallel_sql.py" class="btn btn-sm " id="raw-url">Raw</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/blame/master/bom_parallel_sql.py" class="btn btn-sm js-update-url-with-hash">Blame</a>
<a href="/csredino/Box-Office-Mojo-Scrapper/commits/master/bom_parallel_sql.py" class="btn btn-sm " rel="nofollow">History</a>
</div>
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/edit/master/bom_parallel_sql.py" class="inline-form js-update-url-with-hash" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="uAqVhZ6gF6wY3Scpgxs90N9NJqgMLp+s9X/VUJpA83k8vPygrh3p40a433f61m4MRxl6HTslmEvBwafz1kWjkQ==" /></div>
<button class="btn-octicon tooltipped tooltipped-nw" type="submit"
aria-label="Edit this file" data-hotkey="e" data-disable-with>
<svg aria-hidden="true" class="octicon octicon-pencil" height="16" role="img" version="1.1" viewBox="0 0 14 16" width="14"><path d="M0 12v3h3l8-8-3-3L0 12z m3 2H1V12h1v1h1v1z m10.3-9.3l-1.3 1.3-3-3 1.3-1.3c0.39-0.39 1.02-0.39 1.41 0l1.59 1.59c0.39 0.39 0.39 1.02 0 1.41z"></path></svg>
</button>
</form> <!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="/csredino/Box-Office-Mojo-Scrapper/delete/master/bom_parallel_sql.py" class="inline-form" data-form-nonce="7c82ff970511ce1e84e10f05313828aac8a44489" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="5eTHDvqa6U2VBvncVbyMF37bvcfyWl5vE6nMzPep5c9N/n5b+d0XWWEzca1WK61CGTH8XOkHYK7Zk1+ClbFrdw==" /></div>
<button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit"
aria-label="Delete this file" data-disable-with>
<svg aria-hidden="true" class="octicon octicon-trashcan" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M10 2H8c0-0.55-0.45-1-1-1H4c-0.55 0-1 0.45-1 1H1c-0.55 0-1 0.45-1 1v1c0 0.55 0.45 1 1 1v9c0 0.55 0.45 1 1 1h7c0.55 0 1-0.45 1-1V5c0.55 0 1-0.45 1-1v-1c0-0.55-0.45-1-1-1z m-1 12H2V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9z m1-10H1v-1h9v1z"></path></svg>
</button>
</form> </div>
<div class="file-info">
199 lines (169 sloc)
<span class="file-info-divider"></span>
8.13 KB
</div>
</div>
<div itemprop="text" class="blob-wrapper data type-python">
<table class="highlight tab-size js-file-line-container" data-tab-size="8">
<tr>
<td id="L1" class="blob-num js-line-number" data-line-number="1"></td>
<td id="LC1" class="blob-code blob-code-inner js-file-line"><span class="pl-k">from</span> bs4 <span class="pl-k">import</span> BeautifulSoup</td>
</tr>
<tr>
<td id="L2" class="blob-num js-line-number" data-line-number="2"></td>
<td id="LC2" class="blob-code blob-code-inner js-file-line"><span class="pl-k">from</span> urllib2 <span class="pl-k">import</span> urlopen</td>
</tr>
<tr>
<td id="L3" class="blob-num js-line-number" data-line-number="3"></td>
<td id="LC3" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> csv</td>
</tr>
<tr>
<td id="L4" class="blob-num js-line-number" data-line-number="4"></td>
<td id="LC4" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> re</td>
</tr>
<tr>
<td id="L5" class="blob-num js-line-number" data-line-number="5"></td>
<td id="LC5" class="blob-code blob-code-inner js-file-line"><span class="pl-k">from</span> retrypy <span class="pl-k">import</span> retry</td>
</tr>
<tr>
<td id="L6" class="blob-num js-line-number" data-line-number="6"></td>
<td id="LC6" class="blob-code blob-code-inner js-file-line"><span class="pl-k">from</span> multiprocessing <span class="pl-k">import</span> Pool </td>
</tr>
<tr>
<td id="L7" class="blob-num js-line-number" data-line-number="7"></td>
<td id="LC7" class="blob-code blob-code-inner js-file-line"><span class="pl-k">from</span> multiprocessing <span class="pl-k">import</span> cpu_count</td>
</tr>
<tr>
<td id="L8" class="blob-num js-line-number" data-line-number="8"></td>
<td id="LC8" class="blob-code blob-code-inner js-file-line"><span class="pl-k">import</span> MySQLdb <span class="pl-k">as</span> mdb</td>
</tr>
<tr>
<td id="L9" class="blob-num js-line-number" data-line-number="9"></td>
<td id="LC9" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L10" class="blob-num js-line-number" data-line-number="10"></td>
<td id="LC10" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L11" class="blob-num js-line-number" data-line-number="11"></td>
<td id="LC11" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L12" class="blob-num js-line-number" data-line-number="12"></td>
<td id="LC12" class="blob-code blob-code-inner js-file-line"><span class="pl-en">@retry.decorate</span>(<span class="pl-v">times</span><span class="pl-k">=</span><span class="pl-c1">4</span>)</td>
</tr>
<tr>
<td id="L13" class="blob-num js-line-number" data-line-number="13"></td>
<td id="LC13" class="blob-code blob-code-inner js-file-line"><span class="pl-k">def</span> <span class="pl-en">urlopen_with_retry</span>(<span class="pl-smi">some_url</span>):</td>
</tr>
<tr>
<td id="L14" class="blob-num js-line-number" data-line-number="14"></td>
<td id="LC14" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> urlopen(some_url)</td>
</tr>
<tr>
<td id="L15" class="blob-num js-line-number" data-line-number="15"></td>
<td id="LC15" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L16" class="blob-num js-line-number" data-line-number="16"></td>
<td id="LC16" class="blob-code blob-code-inner js-file-line"><span class="pl-k">def</span> <span class="pl-en">crawlToCSV</span>(<span class="pl-smi">url</span>):</td>
</tr>
<tr>
<td id="L17" class="blob-num js-line-number" data-line-number="17"></td>
<td id="LC17" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">try</span>:</td>
</tr>
<tr>
<td id="L18" class="blob-num js-line-number" data-line-number="18"></td>
<td id="LC18" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">"</span>elizabeth<span class="pl-pds">"</span></span> <span class="pl-k">in</span> url <span class="pl-k">and</span> <span class="pl-s"><span class="pl-pds">"</span>elizabethtown<span class="pl-pds">"</span></span> <span class="pl-k">not</span> <span class="pl-k">in</span> url:<span class="pl-c">#fixes an annoying encoding error in an inelagent way</span></td>
</tr>
<tr>
<td id="L19" class="blob-num js-line-number" data-line-number="19"></td>
<td id="LC19" class="blob-code blob-code-inner js-file-line"> url<span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>http://www.boxofficemojo.com/movies/?id=elizabeth%A0.htm<span class="pl-pds">'</span></span></td>
</tr>
<tr>
<td id="L20" class="blob-num js-line-number" data-line-number="20"></td>
<td id="LC20" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">"</span>simpleplan<span class="pl-pds">"</span></span> <span class="pl-k">in</span> url:</td>
</tr>
<tr>
<td id="L21" class="blob-num js-line-number" data-line-number="21"></td>
<td id="LC21" class="blob-code blob-code-inner js-file-line"> url<span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>http://www.boxofficemojo.com/movies/?id=simpleplan%A0.htm<span class="pl-pds">'</span></span></td>
</tr>
<tr>
<td id="L22" class="blob-num js-line-number" data-line-number="22"></td>
<td id="LC22" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">print</span> url</td>
</tr>
<tr>
<td id="L23" class="blob-num js-line-number" data-line-number="23"></td>
<td id="LC23" class="blob-code blob-code-inner js-file-line"> <span class="pl-c">#time.sleep(0.1) #pause for courtesy? not sure if neccesary,i'm new at this</span></td>
</tr>
<tr>
<td id="L24" class="blob-num js-line-number" data-line-number="24"></td>
<td id="LC24" class="blob-code blob-code-inner js-file-line"> current_url <span class="pl-k">=</span> (url <span class="pl-k">+</span> <span class="pl-s"><span class="pl-pds">"</span>&adjust_yr=2015&p=.htm<span class="pl-pds">"</span></span>) <span class="pl-c">#do all movies in 2015 dollars (done automatically by site with correct URL)</span></td>
</tr>
<tr>
<td id="L25" class="blob-num js-line-number" data-line-number="25"></td>
<td id="LC25" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L26" class="blob-num js-line-number" data-line-number="26"></td>
<td id="LC26" class="blob-code blob-code-inner js-file-line"> soup <span class="pl-k">=</span> BeautifulSoup(urlopen(current_url).read())</td>
</tr>
<tr>
<td id="L27" class="blob-num js-line-number" data-line-number="27"></td>
<td id="LC27" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L28" class="blob-num js-line-number" data-line-number="28"></td>
<td id="LC28" class="blob-code blob-code-inner js-file-line"> directors<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>Director&id<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L29" class="blob-num js-line-number" data-line-number="29"></td>
<td id="LC29" class="blob-code blob-code-inner js-file-line"> director_list<span class="pl-k">=</span>[]</td>
</tr>
<tr>
<td id="L30" class="blob-num js-line-number" data-line-number="30"></td>
<td id="LC30" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> directors:</td>
</tr>
<tr>
<td id="L31" class="blob-num js-line-number" data-line-number="31"></td>
<td id="LC31" class="blob-code blob-code-inner js-file-line"> director_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L32" class="blob-num js-line-number" data-line-number="32"></td>
<td id="LC32" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">2</span>):</td>
</tr>
<tr>
<td id="L33" class="blob-num js-line-number" data-line-number="33"></td>
<td id="LC33" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> i<span class="pl-k">>=</span><span class="pl-c1">len</span>(director_list):</td>
</tr>
<tr>
<td id="L34" class="blob-num js-line-number" data-line-number="34"></td>
<td id="LC34" class="blob-code blob-code-inner js-file-line"> director_list.append(<span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span>)<span class="pl-c">#fill rest of list</span></td>
</tr>
<tr>
<td id="L35" class="blob-num js-line-number" data-line-number="35"></td>
<td id="LC35" class="blob-code blob-code-inner js-file-line"> director1<span class="pl-k">=</span>director_list[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L36" class="blob-num js-line-number" data-line-number="36"></td>
<td id="LC36" class="blob-code blob-code-inner js-file-line"> director2<span class="pl-k">=</span>director_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L37" class="blob-num js-line-number" data-line-number="37"></td>
<td id="LC37" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L38" class="blob-num js-line-number" data-line-number="38"></td>
<td id="LC38" class="blob-code blob-code-inner js-file-line"> writers<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>Writer&id<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L39" class="blob-num js-line-number" data-line-number="39"></td>
<td id="LC39" class="blob-code blob-code-inner js-file-line"> writer_list<span class="pl-k">=</span>[]</td>
</tr>
<tr>
<td id="L40" class="blob-num js-line-number" data-line-number="40"></td>
<td id="LC40" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> writers:</td>
</tr>
<tr>
<td id="L41" class="blob-num js-line-number" data-line-number="41"></td>
<td id="LC41" class="blob-code blob-code-inner js-file-line"> writer_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L42" class="blob-num js-line-number" data-line-number="42"></td>
<td id="LC42" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">2</span>):</td>
</tr>
<tr>
<td id="L43" class="blob-num js-line-number" data-line-number="43"></td>
<td id="LC43" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> i<span class="pl-k">>=</span><span class="pl-c1">len</span>(writer_list):</td>
</tr>
<tr>
<td id="L44" class="blob-num js-line-number" data-line-number="44"></td>
<td id="LC44" class="blob-code blob-code-inner js-file-line"> writer_list.append(<span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L45" class="blob-num js-line-number" data-line-number="45"></td>
<td id="LC45" class="blob-code blob-code-inner js-file-line"> writer1<span class="pl-k">=</span>writer_list[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L46" class="blob-num js-line-number" data-line-number="46"></td>
<td id="LC46" class="blob-code blob-code-inner js-file-line"> writer2<span class="pl-k">=</span>writer_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L47" class="blob-num js-line-number" data-line-number="47"></td>
<td id="LC47" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L48" class="blob-num js-line-number" data-line-number="48"></td>
<td id="LC48" class="blob-code blob-code-inner js-file-line"> composers<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>Composer&id<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L49" class="blob-num js-line-number" data-line-number="49"></td>
<td id="LC49" class="blob-code blob-code-inner js-file-line"> composer_list<span class="pl-k">=</span>[]</td>
</tr>
<tr>
<td id="L50" class="blob-num js-line-number" data-line-number="50"></td>
<td id="LC50" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> composers:</td>
</tr>
<tr>
<td id="L51" class="blob-num js-line-number" data-line-number="51"></td>
<td id="LC51" class="blob-code blob-code-inner js-file-line"> composer_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L52" class="blob-num js-line-number" data-line-number="52"></td>
<td id="LC52" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">2</span>):</td>
</tr>
<tr>
<td id="L53" class="blob-num js-line-number" data-line-number="53"></td>
<td id="LC53" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> i<span class="pl-k">>=</span><span class="pl-c1">len</span>(composer_list):</td>
</tr>
<tr>
<td id="L54" class="blob-num js-line-number" data-line-number="54"></td>
<td id="LC54" class="blob-code blob-code-inner js-file-line"> composer_list.append(<span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L55" class="blob-num js-line-number" data-line-number="55"></td>
<td id="LC55" class="blob-code blob-code-inner js-file-line"> composer1<span class="pl-k">=</span>composer_list[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L56" class="blob-num js-line-number" data-line-number="56"></td>
<td id="LC56" class="blob-code blob-code-inner js-file-line"> composer2<span class="pl-k">=</span>composer_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L57" class="blob-num js-line-number" data-line-number="57"></td>
<td id="LC57" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L58" class="blob-num js-line-number" data-line-number="58"></td>
<td id="LC58" class="blob-code blob-code-inner js-file-line"> actors<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>Actor&id<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L59" class="blob-num js-line-number" data-line-number="59"></td>
<td id="LC59" class="blob-code blob-code-inner js-file-line"> actor_list<span class="pl-k">=</span>[]</td>
</tr>
<tr>
<td id="L60" class="blob-num js-line-number" data-line-number="60"></td>
<td id="LC60" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> actors:</td>
</tr>
<tr>
<td id="L61" class="blob-num js-line-number" data-line-number="61"></td>
<td id="LC61" class="blob-code blob-code-inner js-file-line"> actor_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L62" class="blob-num js-line-number" data-line-number="62"></td>
<td id="LC62" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">6</span>):</td>
</tr>
<tr>
<td id="L63" class="blob-num js-line-number" data-line-number="63"></td>
<td id="LC63" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> i<span class="pl-k">>=</span><span class="pl-c1">len</span>(actor_list):</td>
</tr>
<tr>
<td id="L64" class="blob-num js-line-number" data-line-number="64"></td>
<td id="LC64" class="blob-code blob-code-inner js-file-line"> actor_list.append(<span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L65" class="blob-num js-line-number" data-line-number="65"></td>
<td id="LC65" class="blob-code blob-code-inner js-file-line"> actor1<span class="pl-k">=</span>actor_list[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L66" class="blob-num js-line-number" data-line-number="66"></td>
<td id="LC66" class="blob-code blob-code-inner js-file-line"> actor2<span class="pl-k">=</span>actor_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L67" class="blob-num js-line-number" data-line-number="67"></td>
<td id="LC67" class="blob-code blob-code-inner js-file-line"> actor3<span class="pl-k">=</span>actor_list[<span class="pl-c1">2</span>]</td>
</tr>
<tr>
<td id="L68" class="blob-num js-line-number" data-line-number="68"></td>
<td id="LC68" class="blob-code blob-code-inner js-file-line"> actor4<span class="pl-k">=</span>actor_list[<span class="pl-c1">3</span>]</td>
</tr>
<tr>
<td id="L69" class="blob-num js-line-number" data-line-number="69"></td>
<td id="LC69" class="blob-code blob-code-inner js-file-line"> actor5<span class="pl-k">=</span>actor_list[<span class="pl-c1">4</span>]</td>
</tr>
<tr>
<td id="L70" class="blob-num js-line-number" data-line-number="70"></td>
<td id="LC70" class="blob-code blob-code-inner js-file-line"> actor6<span class="pl-k">=</span>actor_list[<span class="pl-c1">5</span>]</td>
</tr>
<tr>
<td id="L71" class="blob-num js-line-number" data-line-number="71"></td>
<td id="LC71" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L72" class="blob-num js-line-number" data-line-number="72"></td>
<td id="LC72" class="blob-code blob-code-inner js-file-line"> producers<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>Producer&id<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L73" class="blob-num js-line-number" data-line-number="73"></td>
<td id="LC73" class="blob-code blob-code-inner js-file-line"> producer_list<span class="pl-k">=</span>[]</td>
</tr>
<tr>
<td id="L74" class="blob-num js-line-number" data-line-number="74"></td>
<td id="LC74" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> producers:</td>
</tr>
<tr>
<td id="L75" class="blob-num js-line-number" data-line-number="75"></td>
<td id="LC75" class="blob-code blob-code-inner js-file-line"> producer_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L76" class="blob-num js-line-number" data-line-number="76"></td>
<td id="LC76" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">6</span>):</td>
</tr>
<tr>
<td id="L77" class="blob-num js-line-number" data-line-number="77"></td>
<td id="LC77" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> i<span class="pl-k">>=</span><span class="pl-c1">len</span>(producer_list):</td>
</tr>
<tr>
<td id="L78" class="blob-num js-line-number" data-line-number="78"></td>
<td id="LC78" class="blob-code blob-code-inner js-file-line"> producer_list.append(<span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L79" class="blob-num js-line-number" data-line-number="79"></td>
<td id="LC79" class="blob-code blob-code-inner js-file-line"> producer1<span class="pl-k">=</span>producer_list[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L80" class="blob-num js-line-number" data-line-number="80"></td>
<td id="LC80" class="blob-code blob-code-inner js-file-line"> producer2<span class="pl-k">=</span>producer_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L81" class="blob-num js-line-number" data-line-number="81"></td>
<td id="LC81" class="blob-code blob-code-inner js-file-line"> producer3<span class="pl-k">=</span>producer_list[<span class="pl-c1">2</span>]</td>
</tr>
<tr>
<td id="L82" class="blob-num js-line-number" data-line-number="82"></td>
<td id="LC82" class="blob-code blob-code-inner js-file-line"> producer4<span class="pl-k">=</span>producer_list[<span class="pl-c1">3</span>]</td>
</tr>
<tr>
<td id="L83" class="blob-num js-line-number" data-line-number="83"></td>
<td id="LC83" class="blob-code blob-code-inner js-file-line"> producer5<span class="pl-k">=</span>producer_list[<span class="pl-c1">4</span>]</td>
</tr>
<tr>
<td id="L84" class="blob-num js-line-number" data-line-number="84"></td>
<td id="LC84" class="blob-code blob-code-inner js-file-line"> producer6<span class="pl-k">=</span>producer_list[<span class="pl-c1">5</span>]</td>
</tr>
<tr>
<td id="L85" class="blob-num js-line-number" data-line-number="85"></td>
<td id="LC85" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L86" class="blob-num js-line-number" data-line-number="86"></td>
<td id="LC86" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L87" class="blob-num js-line-number" data-line-number="87"></td>
<td id="LC87" class="blob-code blob-code-inner js-file-line"> all_bs<span class="pl-k">=</span>soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>b<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L88" class="blob-num js-line-number" data-line-number="88"></td>
<td id="LC88" class="blob-code blob-code-inner js-file-line"> b_list<span class="pl-k">=</span>[] <span class="pl-c">#lots of the information we want is in bold, and appears in the same order on each page</span></td>
</tr>
<tr>
<td id="L89" class="blob-num js-line-number" data-line-number="89"></td>
<td id="LC89" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> all_bs:</td>
</tr>
<tr>
<td id="L90" class="blob-num js-line-number" data-line-number="90"></td>
<td id="LC90" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">'</span>Domestic Lifetime<span class="pl-pds">'</span></span> <span class="pl-k">not</span> <span class="pl-k">in</span> t.encode_contents():<span class="pl-c">#want to ignore the lifetime box office</span></td>
</tr>
<tr>
<td id="L91" class="blob-num js-line-number" data-line-number="91"></td>
<td id="LC91" class="blob-code blob-code-inner js-file-line"> b_list.append(t.encode_contents())</td>
</tr>
<tr>
<td id="L92" class="blob-num js-line-number" data-line-number="92"></td>
<td id="LC92" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">len</span>(b_list)<span class="pl-k">>=</span><span class="pl-c1">10</span>:<span class="pl-c">#avoids bad entries with no box office data</span></td>
</tr>
<tr>
<td id="L93" class="blob-num js-line-number" data-line-number="93"></td>
<td id="LC93" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">'</span>$<span class="pl-pds">'</span></span><span class="pl-k">in</span> b_list[<span class="pl-c1">2</span>] <span class="pl-k">or</span> <span class="pl-s"><span class="pl-pds">'</span>n/a<span class="pl-pds">'</span></span> <span class="pl-k">in</span> b_list[<span class="pl-c1">9</span>]:<span class="pl-c">#avoid movies w/o box office data, or unadjustable box office data, if not caught above</span></td>
</tr>
<tr>
<td id="L94" class="blob-num js-line-number" data-line-number="94"></td>
<td id="LC94" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">'</span>n/a<span class="pl-pds">'</span></span> <span class="pl-k">in</span> b_list[<span class="pl-c1">9</span>]:<span class="pl-c">#has a foreign release only, order is shifted</span></td>
</tr>
<tr>
<td id="L95" class="blob-num js-line-number" data-line-number="95"></td>
<td id="LC95" class="blob-code blob-code-inner js-file-line"> title<span class="pl-k">=</span>b_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L96" class="blob-num js-line-number" data-line-number="96"></td>
<td id="LC96" class="blob-code blob-code-inner js-file-line"> domestic<span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span></td>
</tr>
<tr>
<td id="L97" class="blob-num js-line-number" data-line-number="97"></td>
<td id="LC97" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span> <span class="pl-k">not</span> <span class="pl-k">in</span> b_list[<span class="pl-c1">2</span>]:</td>
</tr>
<tr>
<td id="L98" class="blob-num js-line-number" data-line-number="98"></td>
<td id="LC98" class="blob-code blob-code-inner js-file-line"> distributor<span class="pl-k">=</span>b_list[<span class="pl-c1">2</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">1</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L99" class="blob-num js-line-number" data-line-number="99"></td>
<td id="LC99" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L100" class="blob-num js-line-number" data-line-number="100"></td>
<td id="LC100" class="blob-code blob-code-inner js-file-line"> distributor<span class="pl-k">=</span>b_list[<span class="pl-c1">2</span>]</td>
</tr>
<tr>
<td id="L101" class="blob-num js-line-number" data-line-number="101"></td>
<td id="LC101" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">len</span>(b_list[<span class="pl-c1">3</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>))<span class="pl-k">></span><span class="pl-c1">3</span>:<span class="pl-c">#sometimes the release date is not in a hyperlink</span></td>
</tr>
<tr>
<td id="L102" class="blob-num js-line-number" data-line-number="102"></td>
<td id="LC102" class="blob-code blob-code-inner js-file-line"> release<span class="pl-k">=</span>b_list[<span class="pl-c1">3</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">2</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L103" class="blob-num js-line-number" data-line-number="103"></td>
<td id="LC103" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L104" class="blob-num js-line-number" data-line-number="104"></td>
<td id="LC104" class="blob-code blob-code-inner js-file-line"> release<span class="pl-k">=</span>b_list[<span class="pl-c1">3</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">1</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L105" class="blob-num js-line-number" data-line-number="105"></td>
<td id="LC105" class="blob-code blob-code-inner js-file-line"> genre<span class="pl-k">=</span>b_list[<span class="pl-c1">4</span>]</td>
</tr>
<tr>
<td id="L106" class="blob-num js-line-number" data-line-number="106"></td>
<td id="LC106" class="blob-code blob-code-inner js-file-line"> runtime<span class="pl-k">=</span>b_list[<span class="pl-c1">5</span>]</td>
</tr>
<tr>
<td id="L107" class="blob-num js-line-number" data-line-number="107"></td>
<td id="LC107" class="blob-code blob-code-inner js-file-line"> rating<span class="pl-k">=</span>b_list[<span class="pl-c1">6</span>]</td>
</tr>
<tr>
<td id="L108" class="blob-num js-line-number" data-line-number="108"></td>
<td id="LC108" class="blob-code blob-code-inner js-file-line"> budget<span class="pl-k">=</span>b_list[<span class="pl-c1">7</span>]</td>
</tr>
<tr>
<td id="L109" class="blob-num js-line-number" data-line-number="109"></td>
<td id="LC109" class="blob-code blob-code-inner js-file-line"> worldwide<span class="pl-k">=</span>b_list[<span class="pl-c1">12</span>]</td>
</tr>
<tr>
<td id="L110" class="blob-num js-line-number" data-line-number="110"></td>
<td id="LC110" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>: <span class="pl-c">#has a domestic release</span></td>
</tr>
<tr>
<td id="L111" class="blob-num js-line-number" data-line-number="111"></td>
<td id="LC111" class="blob-code blob-code-inner js-file-line"> title<span class="pl-k">=</span>b_list[<span class="pl-c1">1</span>]</td>
</tr>
<tr>
<td id="L112" class="blob-num js-line-number" data-line-number="112"></td>
<td id="LC112" class="blob-code blob-code-inner js-file-line"> domestic<span class="pl-k">=</span>b_list[<span class="pl-c1">2</span>]</td>
</tr>
<tr>
<td id="L113" class="blob-num js-line-number" data-line-number="113"></td>
<td id="LC113" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-s"><span class="pl-pds">'</span>n/a<span class="pl-pds">'</span></span> <span class="pl-k">not</span> <span class="pl-k">in</span> b_list[<span class="pl-c1">3</span>]:</td>
</tr>
<tr>
<td id="L114" class="blob-num js-line-number" data-line-number="114"></td>
<td id="LC114" class="blob-code blob-code-inner js-file-line"> distributor<span class="pl-k">=</span>b_list[<span class="pl-c1">3</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">1</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L115" class="blob-num js-line-number" data-line-number="115"></td>
<td id="LC115" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L116" class="blob-num js-line-number" data-line-number="116"></td>
<td id="LC116" class="blob-code blob-code-inner js-file-line"> distributor<span class="pl-k">=</span>b_list[<span class="pl-c1">3</span>]</td>
</tr>
<tr>
<td id="L117" class="blob-num js-line-number" data-line-number="117"></td>
<td id="LC117" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">len</span>(b_list[<span class="pl-c1">4</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>))<span class="pl-k">></span><span class="pl-c1">3</span>:<span class="pl-c">#sometimes the release date is not in a hyperlink</span></td>
</tr>
<tr>
<td id="L118" class="blob-num js-line-number" data-line-number="118"></td>
<td id="LC118" class="blob-code blob-code-inner js-file-line"> release<span class="pl-k">=</span>b_list[<span class="pl-c1">4</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">2</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L119" class="blob-num js-line-number" data-line-number="119"></td>
<td id="LC119" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L120" class="blob-num js-line-number" data-line-number="120"></td>
<td id="LC120" class="blob-code blob-code-inner js-file-line"> release<span class="pl-k">=</span>b_list[<span class="pl-c1">4</span>].split(<span class="pl-s"><span class="pl-pds">'</span>><span class="pl-pds">'</span></span>)[<span class="pl-c1">1</span>].split(<span class="pl-s"><span class="pl-pds">'</span><<span class="pl-pds">'</span></span>)[<span class="pl-c1">0</span>]</td>
</tr>
<tr>
<td id="L121" class="blob-num js-line-number" data-line-number="121"></td>
<td id="LC121" class="blob-code blob-code-inner js-file-line"> genre<span class="pl-k">=</span>b_list[<span class="pl-c1">5</span>]</td>
</tr>
<tr>
<td id="L122" class="blob-num js-line-number" data-line-number="122"></td>
<td id="LC122" class="blob-code blob-code-inner js-file-line"> runtime<span class="pl-k">=</span>b_list[<span class="pl-c1">6</span>]</td>
</tr>
<tr>
<td id="L123" class="blob-num js-line-number" data-line-number="123"></td>
<td id="LC123" class="blob-code blob-code-inner js-file-line"> rating<span class="pl-k">=</span>b_list[<span class="pl-c1">7</span>]</td>
</tr>
<tr>
<td id="L124" class="blob-num js-line-number" data-line-number="124"></td>
<td id="LC124" class="blob-code blob-code-inner js-file-line"> budget<span class="pl-k">=</span>b_list[<span class="pl-c1">8</span>]</td>
</tr>
<tr>
<td id="L125" class="blob-num js-line-number" data-line-number="125"></td>
<td id="LC125" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> <span class="pl-c1">len</span>(b_list)<span class="pl-k">==</span><span class="pl-c1">11</span> <span class="pl-k">or</span> <span class="pl-s"><span class="pl-pds">'</span>%<span class="pl-pds">'</span></span> <span class="pl-k">not</span> <span class="pl-k">in</span> b_list[<span class="pl-c1">11</span>]:<span class="pl-c">#this means it only has a domestic release</span></td>
</tr>
<tr>
<td id="L126" class="blob-num js-line-number" data-line-number="126"></td>
<td id="LC126" class="blob-code blob-code-inner js-file-line"> worldwide<span class="pl-k">=</span><span class="pl-s"><span class="pl-pds">'</span>N/A<span class="pl-pds">'</span></span></td>
</tr>
<tr>
<td id="L127" class="blob-num js-line-number" data-line-number="127"></td>
<td id="LC127" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L128" class="blob-num js-line-number" data-line-number="128"></td>
<td id="LC128" class="blob-code blob-code-inner js-file-line"> worldwide<span class="pl-k">=</span>b_list[<span class="pl-c1">13</span>]</td>
</tr>
<tr>
<td id="L129" class="blob-num js-line-number" data-line-number="129"></td>
<td id="LC129" class="blob-code blob-code-inner js-file-line"> <span class="pl-c">#print release</span></td>
</tr>
<tr>
<td id="L130" class="blob-num js-line-number" data-line-number="130"></td>
<td id="LC130" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> (title,director1,director2,domestic,distributor,release,genre,runtime,rating,budget,worldwide,actor1,actor2,actor3,actor4,actor5,actor6,producer1,producer2,producer3,producer4,producer5,producer6,writer1,writer2,composer1,composer2)<span class="pl-c">#since this is in the big "if" it wont write to file if it is formated incorrectly</span></td>
</tr>
<tr>
<td id="L131" class="blob-num js-line-number" data-line-number="131"></td>
<td id="LC131" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L132" class="blob-num js-line-number" data-line-number="132"></td>
<td id="LC132" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">else</span>:</td>
</tr>
<tr>
<td id="L133" class="blob-num js-line-number" data-line-number="133"></td>
<td id="LC133" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">0</span> <span class="pl-c">#bad record will be removed</span></td>
</tr>
<tr>
<td id="L134" class="blob-num js-line-number" data-line-number="134"></td>
<td id="LC134" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">except</span>:</td>
</tr>
<tr>
<td id="L135" class="blob-num js-line-number" data-line-number="135"></td>
<td id="LC135" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">return</span> <span class="pl-c1">0</span> <span class="pl-c">#if there is an exception, treat it like a bad record and move on</span></td>
</tr>
<tr>
<td id="L136" class="blob-num js-line-number" data-line-number="136"></td>
<td id="LC136" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L137" class="blob-num js-line-number" data-line-number="137"></td>
<td id="LC137" class="blob-code blob-code-inner js-file-line"><span class="pl-k">if</span> <span class="pl-c1">__name__</span> <span class="pl-k">==</span> <span class="pl-s"><span class="pl-pds">"</span>__main__<span class="pl-pds">"</span></span>:</td>
</tr>
<tr>
<td id="L138" class="blob-num js-line-number" data-line-number="138"></td>
<td id="LC138" class="blob-code blob-code-inner js-file-line"> current_url<span class="pl-k">=</span>(<span class="pl-s"><span class="pl-pds">"</span>http://www.boxofficemojo.com/movies/alphabetical.htm?letter=NUM&p=.html<span class="pl-pds">"</span></span>)<span class="pl-c"># starting point for search, can be any letter</span></td>
</tr>
<tr>
<td id="L139" class="blob-num js-line-number" data-line-number="139"></td>
<td id="LC139" class="blob-code blob-code-inner js-file-line"> movie_links<span class="pl-k">=</span>[]<span class="pl-c">#initialize as an empty list</span></td>
</tr>
<tr>
<td id="L140" class="blob-num js-line-number" data-line-number="140"></td>
<td id="LC140" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L141" class="blob-num js-line-number" data-line-number="141"></td>
<td id="LC141" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L142" class="blob-num js-line-number" data-line-number="142"></td>
<td id="LC142" class="blob-code blob-code-inner js-file-line"> soup <span class="pl-k">=</span> BeautifulSoup(urlopen_with_retry(current_url).read()) <span class="pl-c">#generate list of links for the letter indices</span></td>
</tr>
<tr>
<td id="L143" class="blob-num js-line-number" data-line-number="143"></td>
<td id="LC143" class="blob-code blob-code-inner js-file-line"> letters <span class="pl-k">=</span> soup.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>letter=<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L144" class="blob-num js-line-number" data-line-number="144"></td>
<td id="LC144" class="blob-code blob-code-inner js-file-line"> letter_index<span class="pl-k">=</span>[] <span class="pl-c">#intialize as an empty list</span></td>
</tr>
<tr>
<td id="L145" class="blob-num js-line-number" data-line-number="145"></td>
<td id="LC145" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> letters:</td>
</tr>
<tr>
<td id="L146" class="blob-num js-line-number" data-line-number="146"></td>
<td id="LC146" class="blob-code blob-code-inner js-file-line"> letter_index.append(<span class="pl-s"><span class="pl-pds">"</span>http://www.boxofficemojo.com<span class="pl-pds">"</span></span> <span class="pl-k">+</span> t[<span class="pl-s"><span class="pl-pds">'</span>href<span class="pl-pds">'</span></span>])</td>
</tr>
<tr>
<td id="L147" class="blob-num js-line-number" data-line-number="147"></td>
<td id="LC147" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L148" class="blob-num js-line-number" data-line-number="148"></td>
<td id="LC148" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> i <span class="pl-k">in</span> <span class="pl-c1">range</span>(<span class="pl-c1">0</span>,<span class="pl-c1">27</span>): <span class="pl-c">#loop through all letter indices for movies</span></td>
</tr>
<tr>
<td id="L149" class="blob-num js-line-number" data-line-number="149"></td>
<td id="LC149" class="blob-code blob-code-inner js-file-line"> current_url<span class="pl-k">=</span>letter_index[i]</td>
</tr>
<tr>
<td id="L150" class="blob-num js-line-number" data-line-number="150"></td>
<td id="LC150" class="blob-code blob-code-inner js-file-line"> soup <span class="pl-k">=</span> BeautifulSoup(urlopen_with_retry(current_url).read())</td>
</tr>
<tr>
<td id="L151" class="blob-num js-line-number" data-line-number="151"></td>
<td id="LC151" class="blob-code blob-code-inner js-file-line"> navbar<span class="pl-k">=</span>soup.find(<span class="pl-s"><span class="pl-pds">'</span>div<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>alpha-nav-holder<span class="pl-pds">'</span></span>)</td>
</tr>
<tr>
<td id="L152" class="blob-num js-line-number" data-line-number="152"></td>
<td id="LC152" class="blob-code blob-code-inner js-file-line"> pages <span class="pl-k">=</span> navbar.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>alphabetical<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L153" class="blob-num js-line-number" data-line-number="153"></td>
<td id="LC153" class="blob-code blob-code-inner js-file-line"> page_list<span class="pl-k">=</span>[] <span class="pl-c"># page_list is reset for each letter index</span></td>
</tr>
<tr>
<td id="L154" class="blob-num js-line-number" data-line-number="154"></td>
<td id="LC154" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L155" class="blob-num js-line-number" data-line-number="155"></td>
<td id="LC155" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> pages:</td>
</tr>
<tr>
<td id="L156" class="blob-num js-line-number" data-line-number="156"></td>
<td id="LC156" class="blob-code blob-code-inner js-file-line"> page_list.append(<span class="pl-s"><span class="pl-pds">"</span>http://www.boxofficemojo.com<span class="pl-pds">"</span></span> <span class="pl-k">+</span> t[<span class="pl-s"><span class="pl-pds">'</span>href<span class="pl-pds">'</span></span>])</td>
</tr>
<tr>
<td id="L157" class="blob-num js-line-number" data-line-number="157"></td>
<td id="LC157" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L158" class="blob-num js-line-number" data-line-number="158"></td>
<td id="LC158" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L159" class="blob-num js-line-number" data-line-number="159"></td>
<td id="LC159" class="blob-code blob-code-inner js-file-line"> movietable<span class="pl-k">=</span>soup.find(<span class="pl-s"><span class="pl-pds">'</span>div<span class="pl-pds">'</span></span>,{<span class="pl-s"><span class="pl-pds">'</span>id<span class="pl-pds">'</span></span>:<span class="pl-s"><span class="pl-pds">'</span>main<span class="pl-pds">'</span></span>})</td>
</tr>
<tr>
<td id="L160" class="blob-num js-line-number" data-line-number="160"></td>
<td id="LC160" class="blob-code blob-code-inner js-file-line"> movies <span class="pl-k">=</span> movietable.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>id=<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L161" class="blob-num js-line-number" data-line-number="161"></td>
<td id="LC161" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> movies:</td>
</tr>
<tr>
<td id="L162" class="blob-num js-line-number" data-line-number="162"></td>
<td id="LC162" class="blob-code blob-code-inner js-file-line"> movie_links.append(<span class="pl-s"><span class="pl-pds">"</span>http://www.boxofficemojo.com<span class="pl-pds">"</span></span> <span class="pl-k">+</span> t[<span class="pl-s"><span class="pl-pds">'</span>href<span class="pl-pds">'</span></span>])</td>
</tr>
<tr>
<td id="L163" class="blob-num js-line-number" data-line-number="163"></td>
<td id="LC163" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L164" class="blob-num js-line-number" data-line-number="164"></td>
<td id="LC164" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">if</span> pages <span class="pl-k">!=</span> <span class="pl-c1">None</span>: <span class="pl-c">#this only runs if there is a 2nd page for this letter </span></td>
</tr>
<tr>
<td id="L165" class="blob-num js-line-number" data-line-number="165"></td>
<td id="LC165" class="blob-code blob-code-inner js-file-line"> i<span class="pl-k">=</span><span class="pl-c1">0</span> <span class="pl-c">#page list starts at 2 (consequence of page layout)</span></td>
</tr>
<tr>
<td id="L166" class="blob-num js-line-number" data-line-number="166"></td>
<td id="LC166" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">while</span> i<span class="pl-k"><</span><span class="pl-c1">len</span>(page_list): <span class="pl-c">#loop over multiple pages for each letter index</span></td>
</tr>
<tr>
<td id="L167" class="blob-num js-line-number" data-line-number="167"></td>
<td id="LC167" class="blob-code blob-code-inner js-file-line"> current_url<span class="pl-k">=</span>page_list[i] </td>
</tr>
<tr>
<td id="L168" class="blob-num js-line-number" data-line-number="168"></td>
<td id="LC168" class="blob-code blob-code-inner js-file-line"> soup <span class="pl-k">=</span> BeautifulSoup(urlopen_with_retry(current_url).read())</td>
</tr>
<tr>
<td id="L169" class="blob-num js-line-number" data-line-number="169"></td>
<td id="LC169" class="blob-code blob-code-inner js-file-line"> movietable<span class="pl-k">=</span>soup.find(<span class="pl-s"><span class="pl-pds">'</span>div<span class="pl-pds">'</span></span>,{<span class="pl-s"><span class="pl-pds">'</span>id<span class="pl-pds">'</span></span>:<span class="pl-s"><span class="pl-pds">'</span>main<span class="pl-pds">'</span></span>})</td>
</tr>
<tr>
<td id="L170" class="blob-num js-line-number" data-line-number="170"></td>
<td id="LC170" class="blob-code blob-code-inner js-file-line"> movies <span class="pl-k">=</span> movietable.findAll(<span class="pl-s"><span class="pl-pds">'</span>a<span class="pl-pds">'</span></span>, <span class="pl-v">href</span><span class="pl-k">=</span> re.compile(<span class="pl-s"><span class="pl-pds">'</span>id=<span class="pl-pds">'</span></span>))</td>
</tr>
<tr>
<td id="L171" class="blob-num js-line-number" data-line-number="171"></td>
<td id="LC171" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> t <span class="pl-k">in</span> movies:</td>
</tr>
<tr>
<td id="L172" class="blob-num js-line-number" data-line-number="172"></td>
<td id="LC172" class="blob-code blob-code-inner js-file-line"> movie_links.append(<span class="pl-s"><span class="pl-pds">"</span>http://www.boxofficemojo.com<span class="pl-pds">"</span></span> <span class="pl-k">+</span> t[<span class="pl-s"><span class="pl-pds">'</span>href<span class="pl-pds">'</span></span>])</td>
</tr>
<tr>
<td id="L173" class="blob-num js-line-number" data-line-number="173"></td>
<td id="LC173" class="blob-code blob-code-inner js-file-line"> i<span class="pl-k">+=</span><span class="pl-c1">1</span></td>
</tr>
<tr>
<td id="L174" class="blob-num js-line-number" data-line-number="174"></td>
<td id="LC174" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L175" class="blob-num js-line-number" data-line-number="175"></td>
<td id="LC175" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L176" class="blob-num js-line-number" data-line-number="176"></td>
<td id="LC176" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L177" class="blob-num js-line-number" data-line-number="177"></td>
<td id="LC177" class="blob-code blob-code-inner js-file-line"> pool <span class="pl-k">=</span> Pool(cpu_count() <span class="pl-k">*</span> <span class="pl-c1">2</span>) <span class="pl-c"># Creates a Pool with cpu_count * 2 threads</span></td>
</tr>
<tr>
<td id="L178" class="blob-num js-line-number" data-line-number="178"></td>
<td id="LC178" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">print</span> <span class="pl-s"><span class="pl-pds">"</span>start scrape<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L179" class="blob-num js-line-number" data-line-number="179"></td>
<td id="LC179" class="blob-code blob-code-inner js-file-line"> results <span class="pl-k">=</span> pool.map(crawlToCSV, movie_links) <span class="pl-c"># results is a list of all scrapped data returned from each call to crawlToCSV</span></td>
</tr>
<tr>
<td id="L180" class="blob-num js-line-number" data-line-number="180"></td>
<td id="LC180" class="blob-code blob-code-inner js-file-line"> pool.close()</td>
</tr>
<tr>
<td id="L181" class="blob-num js-line-number" data-line-number="181"></td>
<td id="LC181" class="blob-code blob-code-inner js-file-line"> pool.join() </td>
</tr>
<tr>
<td id="L182" class="blob-num js-line-number" data-line-number="182"></td>
<td id="LC182" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L183" class="blob-num js-line-number" data-line-number="183"></td>
<td id="LC183" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">print</span> <span class="pl-s"><span class="pl-pds">"</span>start writing file . . .<span class="pl-pds">"</span></span></td>
</tr>
<tr>
<td id="L184" class="blob-num js-line-number" data-line-number="184"></td>
<td id="LC184" class="blob-code blob-code-inner js-file-line"> results<span class="pl-k">=</span>[result <span class="pl-k">for</span> result <span class="pl-k">in</span> results <span class="pl-k">if</span> result <span class="pl-k">is</span> <span class="pl-k">not</span> <span class="pl-c1">0</span>]<span class="pl-c">#remove bad records</span></td>
</tr>
<tr>
<td id="L185" class="blob-num js-line-number" data-line-number="185"></td>
<td id="LC185" class="blob-code blob-code-inner js-file-line"> results<span class="pl-k">=</span>[result <span class="pl-k">for</span> result <span class="pl-k">in</span> results <span class="pl-k">if</span> result <span class="pl-k">is</span> <span class="pl-k">not</span> <span class="pl-c1">None</span>]<span class="pl-c">#remove bad records</span></td>
</tr>
<tr>
<td id="L186" class="blob-num js-line-number" data-line-number="186"></td>
<td id="LC186" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L187" class="blob-num js-line-number" data-line-number="187"></td>
<td id="LC187" class="blob-code blob-code-inner js-file-line">
</td>
</tr>
<tr>
<td id="L188" class="blob-num js-line-number" data-line-number="188"></td>
<td id="LC188" class="blob-code blob-code-inner js-file-line"> con <span class="pl-k">=</span> mdb.connect(<span class="pl-s"><span class="pl-pds">'</span>localhost<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>movie_user1<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>movie616<span class="pl-pds">'</span></span>, <span class="pl-s"><span class="pl-pds">'</span>movie_data<span class="pl-pds">'</span></span>)<span class="pl-c">#</span></td>
</tr>
<tr>
<td id="L189" class="blob-num js-line-number" data-line-number="189"></td>
<td id="LC189" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">with</span> con:</td>
</tr>
<tr>
<td id="L190" class="blob-num js-line-number" data-line-number="190"></td>
<td id="LC190" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L191" class="blob-num js-line-number" data-line-number="191"></td>
<td id="LC191" class="blob-code blob-code-inner js-file-line"> cur <span class="pl-k">=</span> con.cursor()</td>
</tr>
<tr>
<td id="L192" class="blob-num js-line-number" data-line-number="192"></td>
<td id="LC192" class="blob-code blob-code-inner js-file-line"> cur.execute(<span class="pl-s"><span class="pl-pds">"</span>DROP TABLE IF EXISTS BoxOfficeMojo<span class="pl-pds">"</span></span>)<span class="pl-c">#delete table if it already exists</span></td>
</tr>
<tr>
<td id="L193" class="blob-num js-line-number" data-line-number="193"></td>
<td id="LC193" class="blob-code blob-code-inner js-file-line"> cur.execute(<span class="pl-s"><span class="pl-pds">"</span>CREATE TABLE BoxOfficeMojo(Id INT PRIMARY KEY AUTO_INCREMENT,title VARCHAR(25),director1 VARCHAR(25),director2 VARCHAR(25),domestic VARCHAR(25),distributor VARCHAR(25),release_date VARCHAR(25),genre VARCHAR(25),runtime VARCHAR(25),rating VARCHAR(25),budget VARCHAR(25),worldwide VARCHAR(25),actor1 VARCHAR(25),actor2 VARCHAR(25),actor3 VARCHAR(25),actor4 VARCHAR(25),actor5 VARCHAR(25),actor6 VARCHAR(25),producer1 VARCHAR(25),producer2 VARCHAR(25),producer3 VARCHAR(25),producer4 VARCHAR(25),producer5 VARCHAR(25),producer6 VARCHAR(25),writer1 VARCHAR(25),writer2 VARCHAR(25),composer1 VARCHAR(25),composer2 VARCHAR(25))<span class="pl-pds">"</span></span>)<span class="pl-c">#all columns will be treated as strings, will work smoothly with previous version of analysis</span></td>
</tr>
<tr>
<td id="L194" class="blob-num js-line-number" data-line-number="194"></td>
<td id="LC194" class="blob-code blob-code-inner js-file-line"> <span class="pl-k">for</span> result <span class="pl-k">in</span> results:</td>
</tr>
<tr>
<td id="L195" class="blob-num js-line-number" data-line-number="195"></td>
<td id="LC195" class="blob-code blob-code-inner js-file-line"> <span class="pl-c1">print</span> result</td>
</tr>
<tr>
<td id="L196" class="blob-num js-line-number" data-line-number="196"></td>
<td id="LC196" class="blob-code blob-code-inner js-file-line"> cur.execute(<span class="pl-s"><span class="pl-pds">"</span>INSERT INTO BoxOfficeMojo(title,director1,director2,domestic,distributor,release_date,genre,runtime,rating,budget,worldwide,actor1,actor2,actor3,actor4,actor5,actor6,producer1,producer2,producer3,producer4,producer5,producer6,writer1,writer2,composer1,composer2) VALUES(<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>,<span class="pl-c1">%s</span>)<span class="pl-pds">"</span></span>,result)</td>
</tr>
<tr>
<td id="L197" class="blob-num js-line-number" data-line-number="197"></td>
<td id="LC197" class="blob-code blob-code-inner js-file-line"> </td>
</tr>
<tr>
<td id="L198" class="blob-num js-line-number" data-line-number="198"></td>
<td id="LC198" class="blob-code blob-code-inner js-file-line"><span class="pl-c1">print</span> <span class="pl-s"><span class="pl-pds">"</span>Done writing file<span class="pl-pds">"</span></span></td>
</tr>
</table>
</div>
</div>
<button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="hidden">Jump to Line</button>
<div id="jump-to-line" style="display:none">
<!-- </textarea> --><!-- '"` --><form accept-charset="UTF-8" action="" class="js-jump-to-line-form" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
<input class="form-control linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
<button type="submit" class="btn">Go</button>
</form></div>
</div>
<div class="modal-backdrop"></div>
</div>
</div>
</div>
</div>
<div class="container site-footer-container">
<div class="site-footer" role="contentinfo">
<ul class="site-footer-links right">
<li><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
<li><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
<li><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
<li><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
<li><a href="https://github.com/blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
<li><a href="https://github.com/about" data-ga-click="Footer, go to about, text:about">About</a></li>
</ul>
<a href="https://github.com" aria-label="Homepage" class="site-footer-mark">
<svg aria-hidden="true" class="octicon octicon-mark-github" height="24" role="img" title="GitHub " version="1.1" viewBox="0 0 16 16" width="24"><path d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59 0.4 0.07 0.55-0.17 0.55-0.38 0-0.19-0.01-0.82-0.01-1.49-2.01 0.37-2.53-0.49-2.69-0.94-0.09-0.23-0.48-0.94-0.82-1.13-0.28-0.15-0.68-0.52-0.01-0.53 0.63-0.01 1.08 0.58 1.23 0.82 0.72 1.21 1.87 0.87 2.33 0.66 0.07-0.52 0.28-0.87 0.51-1.07-1.78-0.2-3.64-0.89-3.64-3.95 0-0.87 0.31-1.59 0.82-2.15-0.08-0.2-0.36-1.02 0.08-2.12 0 0 0.67-0.21 2.2 0.82 0.64-0.18 1.32-0.27 2-0.27 0.68 0 1.36 0.09 2 0.27 1.53-1.04 2.2-0.82 2.2-0.82 0.44 1.1 0.16 1.92 0.08 2.12 0.51 0.56 0.82 1.27 0.82 2.15 0 3.07-1.87 3.75-3.65 3.95 0.29 0.25 0.54 0.73 0.54 1.48 0 1.07-0.01 1.93-0.01 2.2 0 0.21 0.15 0.46 0.55 0.38C13.71 14.53 16 11.53 16 8 16 3.58 12.42 0 8 0z"></path></svg>
</a>
<ul class="site-footer-links">
<li>© 2016 <span title="0.19007s from github-fe125-cp1-prd.iad.github.net">GitHub</span>, Inc.</li>
<li><a href="https://github.com/site/terms" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
<li><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
<li><a href="https://github.com/security" data-ga-click="Footer, go to security, text:security">Security</a></li>
<li><a href="https://github.com/contact" data-ga-click="Footer, go to contact, text:contact">Contact</a></li>
<li><a href="https://help.github.com" data-ga-click="Footer, go to help, text:help">Help</a></li>
</ul>
</div>
</div>
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M15.72 12.5l-6.85-11.98C8.69 0.21 8.36 0.02 8 0.02s-0.69 0.19-0.87 0.5l-6.85 11.98c-0.18 0.31-0.18 0.69 0 1C0.47 13.81 0.8 14 1.15 14h13.7c0.36 0 0.69-0.19 0.86-0.5S15.89 12.81 15.72 12.5zM9 12H7V10h2V12zM9 9H7V5h2V9z"></path></svg>
<button type="button" class="flash-close js-flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
<svg aria-hidden="true" class="octicon octicon-x" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M7.48 8l3.75 3.75-1.48 1.48-3.75-3.75-3.75 3.75-1.48-1.48 3.75-3.75L0.77 4.25l1.48-1.48 3.75 3.75 3.75-3.75 1.48 1.48-3.75 3.75z"></path></svg>
</button>
Something went wrong with that request. Please try again.
</div>
<script crossorigin="anonymous" integrity="sha256-1yM+qrt1Marydfpx57yjxDyxHq4SyDWWIr0T1yUyCu4=" src="https://assets-cdn.github.com/assets/frameworks-d7233eaabb7531aaf275fa71e7bca3c43cb11eae12c8359622bd13d725320aee.js"></script>
<script async="async" crossorigin="anonymous" integrity="sha256-vgm6qGXnXnCUs1SQp+rydqIUyeeEif2KFhphsQ2o/Kg=" src="https://assets-cdn.github.com/assets/github-be09baa865e75e7094b35490a7eaf276a214c9e78489fd8a161a61b10da8fca8.js"></script>
<div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner hidden">
<svg aria-hidden="true" class="octicon octicon-alert" height="16" role="img" version="1.1" viewBox="0 0 16 16" width="16"><path d="M15.72 12.5l-6.85-11.98C8.69 0.21 8.36 0.02 8 0.02s-0.69 0.19-0.87 0.5l-6.85 11.98c-0.18 0.31-0.18 0.69 0 1C0.47 13.81 0.8 14 1.15 14h13.7c0.36 0 0.69-0.19 0.86-0.5S15.89 12.81 15.72 12.5zM9 12H7V10h2V12zM9 9H7V5h2V9z"></path></svg>
<span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
<span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
</div>
<div class="facebox" id="facebox" style="display:none;">
<div class="facebox-popup">
<div class="facebox-content" role="dialog" aria-labelledby="facebox-header" aria-describedby="facebox-description">
</div>
<button type="button" class="facebox-close js-facebox-close" aria-label="Close modal">
<svg aria-hidden="true" class="octicon octicon-x" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path d="M7.48 8l3.75 3.75-1.48 1.48-3.75-3.75-3.75 3.75-1.48-1.48 3.75-3.75L0.77 4.25l1.48-1.48 3.75 3.75 3.75-3.75 1.48 1.48-3.75 3.75z"></path></svg>
</button>
</div>
</div>
</body>
</html>
| [
"csredino@gmail.com"
] | csredino@gmail.com |
4c16db9da335b2e33f796f3ffd468c5c0e564b7e | c66311c23395e562074af4edfa21c7d94bf07602 | /sovrin_node/server/upgrader.py | 804a597c8b709f8a86062918277e479cae587589 | [
"Apache-2.0"
] | permissive | kyc-chain/sovrin-node | e25aad08d05d0c4c8c36d870d1e061a8c616bdd8 | f4ce648d9030215ed5e63ce310d1063210f19416 | refs/heads/master | 2021-01-09T05:37:43.766819 | 2017-01-26T10:44:16 | 2017-01-26T10:44:16 | 80,806,139 | 0 | 0 | null | 2017-02-03T07:15:19 | 2017-02-03T07:15:19 | null | UTF-8 | Python | false | false | 9,065 | py | import os
from collections import deque
from datetime import datetime
from functools import partial
from typing import Tuple, Union, Optional
import dateutil.parser
import dateutil.tz
from plenum.common.log import getlogger
from plenum.common.txn import NAME, TXN_TYPE
from plenum.common.txn import VERSION
from plenum.server.has_action_queue import HasActionQueue
from sovrin_common.txn import ACTION, POOL_UPGRADE, START, SCHEDULE, CANCEL
logger = getlogger()
class Upgrader(HasActionQueue):
def __init__(self, nodeId, config, baseDir, ledger):
self.nodeId = nodeId
self.config = config
self.baseDir = baseDir
self.ledger = ledger
# TODO: Rename to `upgradedVersion`
self.hasCodeBeenUpgraded = self._hasCodeBeenUpgraded()
self.storeCurrentVersion()
# TODO: Rename to `failedToUpgrade`
self.didLastUpgradeFail = self._didLastUpgradeFail()
if self.didLastUpgradeFail:
# TODO: Call `lastUpgradeFailed` to tell the agent and then agent
# should remove file
pass
else:
self.removeNextVersionFile()
self.scheduledUpgrade = None # type: Tuple[str, int]
HasActionQueue.__init__(self)
def service(self):
return self._serviceActions()
def processLedger(self):
# Assumption: Only version is enough to identify a release, no hash
# checking is done
currentVer = self.getVersion()
upgrades = {} # Map of version to scheduled time
for txn in self.ledger.getAllTxn().values():
if txn[TXN_TYPE] == POOL_UPGRADE:
if txn[ACTION] == START:
if self.isVersionHigher(currentVer, txn[VERSION]):
if self.nodeId not in txn[SCHEDULE]:
logger.warn('{} not present in schedule {}'.
format(self.nodeId, txn[SCHEDULE]))
else:
upgrades[txn[VERSION]] = txn[SCHEDULE][self.nodeId]
elif txn[ACTION] == CANCEL:
if txn[VERSION] not in upgrades:
logger.warn('{} encountered before {}'.
format(CANCEL, START))
else:
upgrades.pop(txn[VERSION])
else:
logger.error('{} cannot be {}'.format(ACTION, txn[ACTION]))
upgrades = sorted(upgrades.items(),
key=lambda x: self.getNumericValueOfVersion(x[0]),
reverse=True)
if upgrades:
latestVer, upgradeAt = upgrades[0]
self._upgrade(latestVer, upgradeAt)
@staticmethod
def getVersion():
from sovrin_node.__metadata__ import __version__
return __version__
@staticmethod
def getNumericValueOfVersion(version):
version = list(map(int, version.split('.')))
return sum([v*(10**i) for i, v in enumerate(version)])
@staticmethod
def isVersionHigher(oldVer, newVer):
assert oldVer.count('.') == newVer.count('.'), 'Cannot compare {} ' \
'and {}'.format(
oldVer, newVer)
oldVerVal = Upgrader.getNumericValueOfVersion(oldVer)
newVerVal = Upgrader.getNumericValueOfVersion(newVer)
return newVerVal > oldVerVal
@property
def lastVersionFilePath(self):
return os.path.join(self.baseDir, self.config.lastRunVersionFile)
@property
def nextVersionFilePath(self):
return os.path.join(self.baseDir, self.config.nextVersionFile)
def storeCurrentVersion(self):
version = self.getVersion()
with open(self.lastVersionFilePath, 'w') as f:
f.write(version)
f.flush()
def storeNextVersionToUpgrade(self, version):
with open(self.nextVersionFilePath, 'w') as f:
f.write(version)
f.flush()
def isCurrentVersionLower(self, version):
return not self.isVersionHigher(self.getVersion(), version)
def _hasCodeBeenUpgraded(self) -> Optional[str]:
if not os.path.isfile(self.lastVersionFilePath):
# If last version file not found means node starting on a fresh
# machine
return None
else:
with open(self.lastVersionFilePath, 'r') as f:
version = f.read()
if self.isVersionHigher(version, self.getVersion()):
return self.getVersion()
def _didLastUpgradeFail(self) -> Optional[str]:
if not os.path.isfile(self.nextVersionFilePath):
# If next version file not found means the file has been processed
# and deleted
return None
else:
with open(self.nextVersionFilePath, 'r') as f:
version = f.read()
if self.isVersionHigher(version, self.getVersion()):
return version
def isScheduleValid(self, schedule, nodeIds):
times = []
if set(schedule.keys()) != nodeIds:
return False, 'Schedule should contain id of all nodes'
unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
for dateStr in schedule.values():
try:
dt = dateutil.parser.parse(dateStr)
if dt <= unow:
return False, '{} is less than current time'.format(dt)
times.append(dt)
except ValueError:
return False, '{} cannot be parsed to a time'.format(dateStr)
times = sorted(times)
for i in range(len(times)):
if i == len(times) - 1:
break
diff = (times[i+1] - times[i]).seconds
if diff < self.config.MinSepBetweenNodeUpgrades:
return False, 'time span between upgrades is {} seconds which' \
' is less than {}, specified in the config'.\
format(diff, self.config.MinSepBetweenNodeUpgrades)
return True, ''
def statusInLedger(self, name, version):
t = {}
for txn in self.ledger.getAllTxn().values():
if txn[NAME] == name and txn[VERSION] == version:
t = txn
if not t:
return None
else:
return t[ACTION]
def handleUpgradeTxn(self, txn):
if txn[TXN_TYPE] == POOL_UPGRADE:
if txn[ACTION] == START:
if self.nodeId not in txn[SCHEDULE]:
logger.warn('{} not present in schedule {}'.
format(self.nodeId, txn[SCHEDULE]))
else:
if not self.scheduledUpgrade and \
self.isVersionHigher(self.getVersion(), txn[VERSION]):
# If no upgrade has been scheduled
self._upgrade(txn[VERSION], txn[SCHEDULE][self.nodeId])
elif self.scheduledUpgrade and self.isVersionHigher(
self.scheduledUpgrade[0], txn[VERSION]):
# If upgrade has been scheduled but for version lower
# than current transaction
self.aqStash = deque()
self.scheduledUpgrade = None
self._upgrade(txn[VERSION], txn[SCHEDULE][self.nodeId])
elif txn[ACTION] == CANCEL:
if self.scheduledUpgrade and self.scheduledUpgrade[0] == txn[VERSION]:
self.scheduledUpgrade = None
self.aqStash = deque()
# An efficient way would be to enqueue all upgrades to do
# and then for each cancel keep dequeuing them
self.processLedger()
def _upgrade(self, version, when: Union[datetime, str]):
assert isinstance(when, (str, datetime))
logger.info(
"{}'s upgrader processing upgrade for version".
format(self.nodeId, version))
if isinstance(when, str):
when = dateutil.parser.parse(when)
unow = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
if when > unow:
delay = (when - unow).seconds
self._schedule(partial(self.callUpgradeAgent, version), delay)
self.scheduledUpgrade = (version, delay)
else:
self.callUpgradeAgent(version)
return True
def callUpgradeAgent(self, version):
# TODO: Call upgrade agent
logger.info("{}'s upgrader calling agent for upgrade".format(self.nodeId))
self.storeNextVersionToUpgrade(version)
self.scheduledUpgrade = None
def lastUpgradeFailed(self):
# TODO: Tell the agent that upgrade failed
self.removeNextVersionFile()
def removeNextVersionFile(self):
try:
os.remove(self.nextVersionFilePath)
except OSError:
pass
| [
"rajesh.kalaria@gmail.com"
] | rajesh.kalaria@gmail.com |
d99a2c5eda75e382873cbddb9783b05cb3bcfe1e | a3127ca75744dc83a0ef23c558638be857c05063 | /data_loader/stl_10_logits_loader.py | f3523a5cebc684449015e7900a8871ea46760916 | [
"Apache-2.0"
] | permissive | BillyGun27/keras_template | b7170ce4a3cbbbcacd8c650199b774f8732cdec9 | 2dec6ad9d5cbb538d1d7b54c3cde78bb15281e9a | refs/heads/master | 2020-04-27T07:25:56.135423 | 2019-03-10T16:20:02 | 2019-03-10T16:20:02 | 174,135,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | from base.base_data_loader import BaseDataLoader
from utils.image_preprocessing_logits import ImageDataGenerator
from keras.applications.imagenet_utils import preprocess_input
class Stl10LogitsLoader(BaseDataLoader):
def __init__(self, config):
super(Stl10LogitsLoader, self).__init__(config)
self.data_generator = ImageDataGenerator(
data_format='channels_last',
preprocessing_function=preprocess_input
)
self.train_generator = self.data_generator.flow_from_directory(
'datasets/img/train',
target_size=(self.config.data_loader.image_size , self.config.data_loader.image_size ),
batch_size=self.config.trainer.batch_size
)
self.test_generator = self.data_generator.flow_from_directory(
'datasets/img/test', shuffle=False,
target_size=(self.config.data_loader.image_size , self.config.data_loader.image_size),
batch_size=self.config.trainer.batch_size
)
def get_train_data_generator(self):
return self.train_generator
def get_test_data_generator(self):
return self.test_generator
| [
"billygun27@gmail.com"
] | billygun27@gmail.com |
8f779ae7bd790997e2a3fce3a42a64b70bbd7709 | 3047f66549c5928cf07bc14bd3ff276ce8458f22 | /config.py | bf1021d3b9f955d335b7c9d6608e18fcdcae53d8 | [] | no_license | 2429581027/spe2018 | b47faf01b5954552cbfe4caed32923663c716396 | 3649104935fc8b519450d6d12c78110a40f5aaec | refs/heads/master | 2022-12-06T17:12:08.324913 | 2020-08-09T16:34:07 | 2020-08-09T16:34:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,514 | py | '''
file: config.py
date: 2018_09_19
author: Junjie Cao
'''
import argparse
###################################
## shared parameters
parser = argparse.ArgumentParser(description = 'spe 2019, reconstruction from incompleted points')
#parser.add_argument('--data_root', type = str, default = '/data/spe_database_old', help = 'it is a shared parameter')
parser.add_argument('--data_root', type = str,default = '../../data', help = 'it is a shared parameter') # for my macbook
parser.add_argument('--outf', type=str, default='../../data/spe_out', help='output folder')# /Users/jjcao/data/spe_data_train_11348
parser.add_argument('--model', type=str, default = './model/0.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelBeta', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelPose', type=str, default = './model/SPENetSiam.pkl', help='saved/pre_trained model')
# parser.add_argument('--modelGen', type=str, default = './model/SPENetSiam_pointnetmini_PointGenCon_84_0.109_s0.106_p0.001_3d0.0004_decoded0.0002_j0.0001-centerBinput-stnOutput.pkl', help='saved/pre_trained model')
parser.add_argument('--center_input', default = True, type = bool, help = 'center input in dataset')
parser.add_argument('--trans_smpl_generated', default = 'stn', type = str, help = 'None, stn, center')
# should >= number of GPU*2. e.g. 72 batch in 3 GPU leads to 24 batch in each GPU. # If the batches number on each GPU == 1, nn.BatchNorm1d fails.
# large batch size => better convergence. # 16 for 6-9G gpu with decoder, 24 for ? without decoder
#parser.add_argument('--batch_size', type=int, default=128, help='input batch size') #72=24*3=18*4, 96=24*4
parser.add_argument('--batch_size', type=int, default=2, help='input batch size') # for debug on mac
parser.add_argument('--start_epoch', type=int, default = 0, help='')
parser.add_argument('--no_epoch', type=int, default = 121, help='number of epochs to train for')#121
parser.add_argument('--lr',type = float,default = 0.001,help = 'learning rate')#0.001
parser.add_argument('--step_lr', type = float, default = 10, help = 'encoder learning rate.')
parser.add_argument('--step_save', type = float, default = 2, help = 'step for saving model.')
parser.add_argument('--shape_ratio',type = float, default = 40.0 ,help = 'weight of shape loss') #40 for GMOF loss function
parser.add_argument('--pose_ratio',type = float, default = 400.0, help = 'weight of pose')# 400 for GMOF loss function
#default: 400. 20 is enough for making sure that predicated pose parameter does not contain global rotation
parser.add_argument('--threeD_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by smpl')
#default: 200. 20 is enough for making sure that predicated pose parameter does not contain global rotation
parser.add_argument('--j3d_ratio',type = float, default = 0.0, help = 'weight of 3d key points decoded by smpl') #200
parser.add_argument('--decoded_ratio',type = float, default = 400.0, help = 'weight of vertices decoded by decoder')#400,
#parser.add_argument('--with_chamfer',default = False, type = bool,help = 'use chamfer loss')
#parser.add_argument('--chamfer_ratio',type = float, default = 0.0, help = 'weight of 3d chamfer distance')#50
###################################
## parameters for training
parser.add_argument('--network', type = str,default = 'SPENet',help = 'SPENet, SPENetSiam, SPENetBeta, SPENetPose')
parser.add_argument('--encoder', type = str,default = 'pointnetmini',help = 'pointnetmini, pointnet or pointnet2')
parser.add_argument('--decoder', type = str,default = 'None',help = 'None, PointGenCon or pointnet2 or dispNet?')
parser.add_argument('--with_stn', default = 'STN3dTR', type = str, help = 'use STN3dR, STN3dRQuad, STN3dTR, or None in encoder')
parser.add_argument('--with_stn_feat', default = False, type = bool, help = 'use stn feature transform in encoder or not')
parser.add_argument('--pervertex_weight', type = str, default = 'None', help = 'None or ')#./data/pervertex_weight_sdf.npz
parser.add_argument('--point_count', type=int, default=2500, help='the count of vertices in the input pointcloud for training')
parser.add_argument('--workers', type=int, default=0, help='number of data loading workers - 0 means same thread as main execution')
parser.add_argument('--momentum',type = float,default = 0.9,help = 'momentum')
# weight decay = 0.0001, it is very important for training the network using adam
parser.add_argument('--wd', type = float, default = 0.0001, help = 'encoder weight decay rate.')
parser.add_argument('--ls', type = str, default = 'L2', help = 'loss function: L2, L1, or GMOF (from less robust to more robust).')
parser.add_argument('--vis', type=str, default= 'spe', help='visdom environment, use visualization in training')
parser.add_argument('--smpl_mean_theta_path', type = str, default = './data/neutral_smpl_mean_params.h5', help = 'the path for mean smpl theta value')
parser.add_argument('--smpl_model',type = str,
default = './data/neutral_smpl_with_cocoplus_reg.txt',
help = 'smpl model path')
########
# for reconstruction, correspondence
parser.add_argument('--HR', type=int, default=0, help='Use high Resolution template for better precision in the nearest neighbor step ?')
parser.add_argument('--nepoch', type=int, default=3000, help='number of epochs to train for during the regression step')
# parser.add_argument('--inputA', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_021.ply", help='your path to mesh 0')
# parser.add_argument('--inputB', type=str, default = "/data/MPI-FAUST/test/scans/test_scan_011.ply", help='your path to mesh 1')
parser.add_argument('--inputA', type=str, default = "data/example_0.ply", help='your path to mesh 0')
parser.add_argument('--inputB', type=str, default = "data/example_1.ply", help='your path to mesh 1')
#parser.add_argument('--num_points', type=int, default = 6890, help='number of points fed to poitnet') # point_count
#parser.add_argument('--num_angles', type=int, default = 300, help='number of angle in the search of optimal reconstruction. Set to 1, if you mesh are already facing the cannonical direction as in data/example_1.ply')
parser.add_argument('--clean', type=int, default=1, help='if 1, remove points that dont belong to any edges')
parser.add_argument('--scale', type=int, default=1, help='if 1, scale input mesh to have same volume as the template')
parser.add_argument('--project_on_target', type=int, default=0, help='if 1, projects predicted correspondences point on target mesh')
########
# for data generation
parser.add_argument('--human_count', type = int, default = 30000, help = 'the count of male/femal in generated database')
parser.add_argument('--sample_count', type = int, default = 0, help = 'the count of samples of a SMPL template mesh') # 2500
parser.add_argument('--op', type = str, default = 'generate', help = 'generate, distill, unify')
parser.add_argument('--gender', type = str, default = 'm', help = 'm for male, f for female, b for both')
parser.add_argument('--data_type', type = str, default = 'w', help = 'w for whole, f for front view, fb for front & back view')
# spe_dataset_train_specifiedPose
parser.add_argument('--database_train', type = str, default = 'spe_dataset_train', help = 'name')
parser.add_argument('--database_val', type = str, default = 'spe_dataset_val', help = 'name')
args = parser.parse_args() | [
"jjcao1231@gmail.com"
] | jjcao1231@gmail.com |
1b76c942e3de6e3fe0bf580d33e52777e4e3576a | d864baee49f407cc785d66066bdd1777c44b2a20 | /ProjectServer/ProjectServer/migrations/versions/c44e79f81237_.py | 53f499a1028cdd8033503ef414e22f6b5ac5e3b5 | [] | no_license | 7aplus/Project_ServerAPI | 912e4ca0a50250b3db6800e78889887e430a7fde | 28c49e1618375a938862f76f4794ee0aba1299ff | refs/heads/master | 2022-12-12T17:17:46.823425 | 2019-05-14T03:44:28 | 2019-05-14T03:44:28 | 177,141,448 | 0 | 0 | null | 2022-12-08T05:02:49 | 2019-03-22T13:04:21 | Python | UTF-8 | Python | false | false | 1,336 | py | """empty message
Revision ID: c44e79f81237
Revises:
Create Date: 2019-03-26 13:48:10.829766
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c44e79f81237'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('employees',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('employ_email', sa.String(length=24), nullable=False),
sa.Column('employ_name', sa.String(length=24), nullable=False),
sa.Column('empliye_password', sa.String(length=100), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('user_email', sa.String(length=24), nullable=False),
sa.Column('user_name', sa.String(length=24), nullable=False),
sa.Column('user_password', sa.String(length=100), nullable=False),
sa.Column('user_phone', sa.String(length=12), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
op.drop_table('employees')
# ### end Alembic commands ###
| [
"noreply@github.com"
] | 7aplus.noreply@github.com |
7286572ca41a65b315c05e18e48584a9641c4a8d | a1aef92f9678c3ff01cba67d9c9e7f5fe2532796 | /MinMaxPruning/agent.py | 9365f3747b0c3e64726824beb986bbbc1d5249fc | [] | no_license | currybur/AU332-AI-Principle-Application-HW | 535bc8c57a3a3634ffdad06a10d83b896314c3b7 | f902eddf4b7c9d1f58e46b770ac639d923882e75 | refs/heads/master | 2022-09-13T01:34:40.240708 | 2020-06-05T15:41:57 | 2020-06-05T15:41:57 | 269,679,827 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,432 | py | import random, re, datetime
from queue import PriorityQueue
import time
class Agent(object):
def __init__(self, game):
self.game = game
def getAction(self, state):
raise Exception("Not implemented yet")
class RandomAgent(Agent):
def getAction(self, state):
legal_actions = self.game.actions(state)
self.action = random.choice(legal_actions)
class SimpleGreedyAgent(Agent):
# a one-step-lookahead greedy agent that returns action with max vertical advance
def getAction(self, state):
legal_actions = self.game.actions(state)
self.action = random.choice(legal_actions)
player = self.game.player(state)
if player == 1:
max_vertical_advance_one_step = max([action[0][0] - action[1][0] for action in legal_actions])
max_actions = [action for action in legal_actions if
action[0][0] - action[1][0] == max_vertical_advance_one_step]
else:
max_vertical_advance_one_step = max([action[1][0] - action[0][0] for action in legal_actions])
max_actions = [action for action in legal_actions if
action[1][0] - action[0][0] == max_vertical_advance_one_step]
self.action = random.choice(max_actions)
class MyTeam(Agent):
def getAction(self, state):
legal_actions = self.game.actions(state)
self.action = random.choice(legal_actions)
player = self.game.player(state)
### START CODE HERE ###
depth = 1
max_step = 30
alpha = float('-inf')
beta = float('inf')
action_queue = PriorityQueue() # search with bounded memory and preference
update_queue = PriorityQueue()
for action in legal_actions:
action_queue.put((-(3-2*player)*(action[0][0] - action[1][0]),action)) # all actions sorted by vertical displacement
start = time.time()
while True:
now = time.time()
if now-start>=0.2:
#print("depth",depth)
break
count = 0
while (not action_queue.empty()) and max_step > count:
action = action_queue.get()[1]
count += 1
if self.cal_value(self.game.succ(state, action), player) == 10086: # evaluate after-action state
self.action = action
break
#print(count)
child_value = self.min_op(player, self.game.succ(state, action), depth, alpha, beta, max_step)
update_queue.put((-child_value,action))
if child_value > alpha:
alpha = child_value
self.action = action
depth += 1
#now = time.time()
#if now-start>=1:
# break
while not action_queue.empty():
action_queue.get()
while not update_queue.empty():
action_queue.put(update_queue.get())
#print(time.time()-start,depth)
def cal_value(self, state, player):
"""
evaluates the state, if win, 1000; if lose, -1000; else, a value(larger=better).
:param state:
:param player:
:return:
"""
board = state[1]
player_pieces_position = board.getPlayerPiecePositions(player)
enemy_pieces_position = board.getPlayerPiecePositions(3-player)
player_vertical = 0
for position in player_pieces_position:
player_vertical += position[0]
enemy_vertical = 0
for position in enemy_pieces_position:
enemy_vertical += position[0]
player_horizontal = 0
for position in player_pieces_position:
player_horizontal += abs(abs(position[1] - min(position[0],20-position[0])/2)-1)
enemy_horizontal = 0
for position in enemy_pieces_position:
enemy_horizontal += abs(abs(position[1] - min(position[0],20-position[0])/2)-1)
if player == 1:
if player_vertical == 30: # the state is win ending
return 10086
if enemy_vertical == 170: # lose ending
return -10086
else:
return 400-(player_vertical + enemy_vertical)+(enemy_horizontal - player_horizontal)/2
else:
if player_vertical == 170:
return 10086
if enemy_vertical == 30:
return -10086
else:
return (player_vertical + enemy_vertical)+(enemy_horizontal - player_horizontal)/2
def maxi_op(self, player, state, depth, alpha, beta, max_step):
if depth == 0:
return self.cal_value(state, player)
if self.cal_value(state, player) == -10086:
return -10086
depth -= 1
node_value = float('-inf')
action_queue = PriorityQueue()
for action in self.game.actions(state):
action_queue.put((-(3-2*player)*(action[0][0] - action[1][0]),action))
count = 0
while (not action_queue.empty()) and count < max_step:
action = action_queue.get()[1]
count += 1
node_value = max(node_value, self.min_op(player, self.game.succ(state, action), depth, alpha, beta, max_step))
if node_value >= beta: # pruning
return node_value
alpha = max(alpha, node_value)
return node_value
def min_op(self, player, state, depth, alpha, beta, max_step):
if depth == 0:
return self.cal_value(state, player)
if self.cal_value(state, player) == 10086:
return 10086
depth -= 1
node_value = float('inf')
action_queue = PriorityQueue()
for action in self.game.actions(state):
action_queue.put(((3-2*player)*(action[0][0] - action[1][0]),action))#search from the worst state
count = 0
while (not action_queue.empty()) and count < max_step:
action = action_queue.get()[1]
count += 1
node_value = min(node_value, self.maxi_op(player, self.game.succ(state, action), depth, alpha, beta, max_step))
if node_value <= alpha:
return node_value
beta = min(beta, node_value)
return node_value
### END CODE HERE ###
| [
"curryjam_cg@sjtu.edu.cn"
] | curryjam_cg@sjtu.edu.cn |
b3d260b543db795759aa80a3817a7c826afa7a54 | cffb771a1cac3a6ad9651e21725cff79011c6b76 | /slot/templatetags/getData_tags.py | f230b8c591ea2e3d234e95e9870222d722bb07be | [] | no_license | Reni-masa/analyze-to-slot | c6d8e0798afb42c3166b1413a139e43bc991e51f | e71ccee84cbf16274bcaf239013ddaae5610c1f5 | refs/heads/main | 2023-03-14T13:23:10.368566 | 2021-03-07T14:26:58 | 2021-03-07T14:26:58 | 330,069,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | from django import template
register = template.Library() # Djangoのテンプレートタグライブラリ
# カスタムタグとして登録する
@register.simple_tag
def getData(column_name, date, number, slot_data_list):
'''
arg1:取得するカラム名
arg2:日付
arg3:台番号
return:日付・台番号に一致するデータの指定したカラム名のデータを返す
'''
return_value = ""
for slot_data in slot_data_list:
if slot_data.get('number') == number and str(slot_data.get('date_time')) == date:
return_value = slot_data.get(column_name)
return return_value
| [
"mstk.ambit@gmail.com"
] | mstk.ambit@gmail.com |
5836ad6384982599fa5386c942f276b1fcbd7022 | 05fc3134da52ab0f1d95d9c4304bde68fc2a56cc | /tasks.py | a5661e372b313f07d146231967b867407d64dc2f | [
"AGPL-3.0-only"
] | permissive | lino-framework/extjs6 | b046d43bac3676afd2bbad825a8c478c2007471f | 6c8cf927e265bf23ad15d07da0b01c087c7bff07 | refs/heads/master | 2023-07-21T15:39:04.616082 | 2023-07-10T20:35:39 | 2023-07-10T20:35:39 | 46,885,420 | 6 | 1 | BSD-2-Clause | 2018-02-13T05:52:43 | 2015-11-25T20:40:26 | CSS | UTF-8 | Python | false | false | 448 | py | from atelier.invlib import setup_from_tasks
ns = setup_from_tasks(
globals(), "lino_extjs6",
languages="en de fr et".split(),
# tolerate_sphinx_warnings=True,
blogref_url = 'https://luc.lino-framework.org',
revision_control_system='git',
# locale_dir='lino_extjs/extjs/locale',
cleanable_files=['docs/api/lino_extjs6.*'],
demo_projects=[
'lino_extjs6.projects.team6',
'lino_extjs6.projects.lydia6'])
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
677352f08e920cb21713ec2f072334eb23f02ebb | a56e5570ab57e4d3c44c9c6ba44bdacac9fa1ad8 | /insertion_sort.py | 027c54743f008f5ce2dac82c48a2eeee27837080 | [] | no_license | teknofage/CS-2.1-Sorting_Algorithms | a7db54c29af5c939022d4dd6453a0529256a3bc1 | e42b64c4d606d76102b5930ae8e74822a75999ae | refs/heads/main | 2023-01-20T16:52:00.816333 | 2020-12-05T07:50:55 | 2020-12-05T07:50:55 | 308,201,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | def insertionSort(alist):
for i in range(1,len(alist)):
#element to be compared
current = alist[i]
#comparing the current element with the sorted portion and swapping
while i>0 and alist[i-1]>current:
alist[i] = alist[i-1]
i = i-1
alist[i] = current
#print(alist)
return alist
print([5,2,1,9,0,4,6])
print(insertionSort([5,2,1,9,0,4,6])) | [
"teknofage@gmail.com"
] | teknofage@gmail.com |
1748096adcaee16136b03577c7d95b443e1f7467 | 78b4cccd1a29c55310b7fad953e71ad0d7dd137a | /python/codeforces/problems/1_71A.py | 8658de3a61fa5240b6d95c28d8a69e28823d05b5 | [] | no_license | amirhossain2k9/coding_practice | fb8ad936d569dc92626fb4b1252a55017b7f001c | bce953018fe5ba6caf5fa581ddb4aac04c96caa7 | refs/heads/master | 2021-06-17T03:27:57.056518 | 2021-03-02T19:54:54 | 2021-03-02T19:54:54 | 172,905,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | """
Codeforces problem : Way Too Long Words
url : https://codeforces.com/problemset/problem/71/A
"""
# take number of input words
number_of_inputs = int(input())
# storage to keep the input and output
outputs = []
for _ in range(number_of_inputs):
word = input()
if len(word) <= 10:
outputs.append(word)
continue
outputs.append(word.replace(word[1:-1], str(len(word[1:-1]))))
for output in outputs:
print(output) | [
"amirhossain2k9@gmail.com"
] | amirhossain2k9@gmail.com |
9008db0dcde390fe77582d341a61c022fcdcae95 | 5a1c9825a77877e53604a02411a57740406176f9 | /edit_vin_masa.py | c5e8f31b79237b0d23fabdd680cdd579c4838078 | [] | no_license | AlexandruPopa97/App-for-a-wine-shop | 5580c43d2394da4ff724d82345fc4734fac25cc4 | b3bf013ce0f445cd42f97314680a30d469b2db65 | refs/heads/master | 2020-04-25T17:26:10.868877 | 2019-02-27T16:41:05 | 2019-02-27T16:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,484 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'edit_vin_masa.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(504, 305)
self.layoutWidget = QtWidgets.QWidget(Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(10, 10, 478, 285))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.label_6 = QtWidgets.QLabel(self.layoutWidget)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1)
self.denumire = QtWidgets.QLineEdit(self.layoutWidget)
self.denumire.setObjectName("denumire")
self.gridLayout.addWidget(self.denumire, 0, 1, 1, 1)
self.label_16 = QtWidgets.QLabel(self.layoutWidget)
self.label_16.setObjectName("label_16")
self.gridLayout.addWidget(self.label_16, 0, 2, 1, 1)
self.pret = QtWidgets.QLineEdit(self.layoutWidget)
self.pret.setObjectName("pret")
self.gridLayout.addWidget(self.pret, 0, 4, 1, 1)
self.label_7 = QtWidgets.QLabel(self.layoutWidget)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 1, 0, 1, 1)
self.soi_struguri = QtWidgets.QLineEdit(self.layoutWidget)
self.soi_struguri.setObjectName("soi_struguri")
self.gridLayout.addWidget(self.soi_struguri, 1, 1, 1, 1)
self.label_17 = QtWidgets.QLabel(self.layoutWidget)
self.label_17.setObjectName("label_17")
self.gridLayout.addWidget(self.label_17, 1, 2, 1, 2)
self.label_8 = QtWidgets.QLabel(self.layoutWidget)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 2, 0, 1, 1)
self.tara_origine = QtWidgets.QLineEdit(self.layoutWidget)
self.tara_origine.setObjectName("tara_origine")
self.gridLayout.addWidget(self.tara_origine, 2, 1, 1, 1)
self.label_18 = QtWidgets.QLabel(self.layoutWidget)
self.label_18.setObjectName("label_18")
self.gridLayout.addWidget(self.label_18, 2, 2, 1, 2)
self.label_9 = QtWidgets.QLabel(self.layoutWidget)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 3, 0, 1, 1)
self.producator = QtWidgets.QLineEdit(self.layoutWidget)
self.producator.setObjectName("producator")
self.gridLayout.addWidget(self.producator, 3, 1, 1, 1)
self.label_19 = QtWidgets.QLabel(self.layoutWidget)
self.label_19.setObjectName("label_19")
self.gridLayout.addWidget(self.label_19, 3, 2, 1, 2)
self.label_10 = QtWidgets.QLabel(self.layoutWidget)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 4, 0, 1, 1)
self.procent_alcool = QtWidgets.QLineEdit(self.layoutWidget)
self.procent_alcool.setObjectName("procent_alcool")
self.gridLayout.addWidget(self.procent_alcool, 4, 1, 1, 1)
self.descriere = QtWidgets.QTextEdit(self.layoutWidget)
self.descriere.setObjectName("descriere")
self.gridLayout.addWidget(self.descriere, 4, 2, 6, 3)
self.label_11 = QtWidgets.QLabel(self.layoutWidget)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 5, 0, 1, 1)
self.cantitate_zahar = QtWidgets.QLineEdit(self.layoutWidget)
self.cantitate_zahar.setObjectName("cantitate_zahar")
self.gridLayout.addWidget(self.cantitate_zahar, 5, 1, 1, 1)
self.label_12 = QtWidgets.QLabel(self.layoutWidget)
self.label_12.setObjectName("label_12")
self.gridLayout.addWidget(self.label_12, 6, 0, 1, 1)
self.culoare = QtWidgets.QLineEdit(self.layoutWidget)
self.culoare.setObjectName("culoare")
self.gridLayout.addWidget(self.culoare, 6, 1, 1, 1)
self.label_13 = QtWidgets.QLabel(self.layoutWidget)
self.label_13.setObjectName("label_13")
self.gridLayout.addWidget(self.label_13, 7, 0, 1, 1)
self.recipient = QtWidgets.QLineEdit(self.layoutWidget)
self.recipient.setObjectName("recipient")
self.gridLayout.addWidget(self.recipient, 7, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.layoutWidget)
self.label_14.setObjectName("label_14")
self.gridLayout.addWidget(self.label_14, 8, 0, 1, 1)
self.volum_recipient = QtWidgets.QLineEdit(self.layoutWidget)
self.volum_recipient.setObjectName("volum_recipient")
self.gridLayout.addWidget(self.volum_recipient, 8, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.layoutWidget)
self.label_15.setObjectName("label_15")
self.gridLayout.addWidget(self.label_15, 9, 0, 1, 1)
self.numar_unitati = QtWidgets.QLineEdit(self.layoutWidget)
self.numar_unitati.setObjectName("numar_unitati")
self.gridLayout.addWidget(self.numar_unitati, 9, 1, 1, 1)
self.pushButton_8 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_8.setObjectName("pushButton_8")
self.gridLayout.addWidget(self.pushButton_8, 10, 3, 1, 1)
self.pushButton_11 = QtWidgets.QPushButton(self.layoutWidget)
self.pushButton_11.setObjectName("pushButton_11")
self.gridLayout.addWidget(self.pushButton_11, 10, 4, 1, 1)
self.an_productie = QtWidgets.QLineEdit(self.layoutWidget)
self.an_productie.setObjectName("an_productie")
self.gridLayout.addWidget(self.an_productie, 1, 4, 1, 1)
self.timp_pastrare = QtWidgets.QLineEdit(self.layoutWidget)
self.timp_pastrare.setObjectName("timp_pastrare")
self.gridLayout.addWidget(self.timp_pastrare, 2, 4, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label_6.setText(_translate("Dialog", "Denumire"))
self.label_16.setText(_translate("Dialog", "Pret"))
self.label_7.setText(_translate("Dialog", "Soi struguri"))
self.label_17.setText(_translate("Dialog", "An productie"))
self.label_8.setText(_translate("Dialog", "Tara origine"))
self.label_18.setText(_translate("Dialog", "Timp pastrare"))
self.label_9.setText(_translate("Dialog", "Producator"))
self.label_19.setText(_translate("Dialog", "Descriere"))
self.label_10.setText(_translate("Dialog", "Procent alcool"))
self.label_11.setText(_translate("Dialog", "Cantitate zahar"))
self.label_12.setText(_translate("Dialog", "Culoare"))
self.label_13.setText(_translate("Dialog", "Recipient"))
self.label_14.setText(_translate("Dialog", "Volum recipient"))
self.label_15.setText(_translate("Dialog", "Numar unitati"))
self.pushButton_8.setText(_translate("Dialog", "Clear"))
self.pushButton_11.setText(_translate("Dialog", "Save in DB"))
| [
"noreply@github.com"
] | AlexandruPopa97.noreply@github.com |
f513095477676cf4ac8803e8be246f20f45272db | 61fa045ff748b1baed3516abb5c3dbd373e922da | /coordinate_system.py | 07b13acec00ea53615f0924722c6d9e64638d291 | [] | no_license | Soosang-9/Dusan | d89e74da090dd897878445cf8afe3cf55886183c | 3bff1b8c8685e03f0e42aa29a34a5238289ab9b8 | refs/heads/master | 2021-04-06T20:12:17.537672 | 2018-04-24T02:37:02 | 2018-04-24T02:37:02 | 125,290,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | # -*- coding: utf-8 -*-
# made by Leni.
# 2017.11.14.Tuesday - test start.
# import uiFile.
from uiFile.main import Ui_MainWindow
# import PyQt5 modules.
from PyQt5.QtGui import QBrush, QPen
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QGraphicsScene, QGraphicsRectItem, QGraphicsEllipseItem, QGraphicsLineItem
# import other modules.
import sys
import numpy as np
from Dusan.test import DrawCircles
# make Main_Function class.
class MainFunction(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.round = 50
# set range.
self.gScene = QGraphicsScene(0, 0, self.ui.graphicsView.width()-5, self.ui.graphicsView.height()-5, self.ui.graphicsView)
print 'graphics View x %f', self.ui.graphicsView.width()
print 'graphics View y %f', self.ui.graphicsView.height()
self.ui.graphicsView.setScene(self.gScene)
# test circle
self.circle = QGraphicsEllipseItem()
red = QBrush(Qt.red)
pen = QPen(Qt.black)
pen.setWidth(6)
# test line
self.x_line = QGraphicsLineItem(self.gScene.width()/2, 0, self.gScene.width()/2, self.gScene.height())
self.gScene.addItem(self.x_line)
self.y_line = QGraphicsLineItem(0, self.gScene.width()/2, self.gScene.height(), self.gScene.width()/2)
self.gScene.addItem(self.y_line)
#self.circle2 = DrawCircles(int(self.gScene.width()/2), int(self.gScene.height()/2))
#self.gScene.addItem(self.circle2)
print 'gScene View x %f', self.gScene.width()/2
print 'gScene View y %f', self.gScene.height()/2
self.circle = self.gScene.addEllipse(self.gScene.width()/2-self.round, self.gScene.height()/2-self.round,
self.round*2, self.round*2, pen, red)
# check Item argv.
self.g_item = QGraphicsRectItem(self.gScene.width()/2, self.gScene.height()/2, 100, 100)
self.gScene.addItem(self.g_item)
self.g1_item = QGraphicsRectItem(self.gScene.width()/2, self.gScene.height()/2, 100, 100)
self.gScene.addItem(self.g1_item)
# self.gScene.addItem(self.circles)
self.show()
def slot_ok(self):
random_x = np.random.random_integers(-300, 300)
random_y = np.random.random_integers(-300, 300)
# 값 조정은 display로 한다.
self.ui.layer_a.display(100)
tip = ''
self.circle.setPos(float(random_x), float(random_y))
self.g1_item.setPos(float(random_x), float(random_y))
print 'x > %s' % self.g_item.x()
print 'circle -> %d' % self.circle.x()
print 'circle -> %d' % self.circle.y()
if self.g_item.x() != self.circle.x():
tip += ' move x > %f\n' % (self.g_item.x() - self.circle.x())
if self.g_item.y() != self.circle.y():
tip += ' move y > %f' % (self.g_item.y() - self.circle.y())
self.ui.information.setText(tip)
# start Main process.
if __name__ == '__main__':
app = QApplication(sys.argv)
mainFunction = MainFunction()
sys.exit(app.exec_())
| [
"sigld1004@naver.com"
] | sigld1004@naver.com |
61d67338da326c0b82ae9ef359f504ccba54da59 | ed298f7b16e0a1fcc4d5ddc9da324247d200bc8a | /cleanup.py | 03ca72d1bca9728c96256d120fb9e0c22c7a7d14 | [] | no_license | stella-gao/deepfunc | ed1a67f0a0e682a2e0d1fde05a13fe190ec6f07e | a587512519c234c7ab70eb3fd504a98cd935b4ab | refs/heads/master | 2021-01-21T00:11:48.502524 | 2016-04-28T17:18:44 | 2016-04-28T17:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | #!/usr/bin/env python
'''
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python gen_next_level_data.py
'''
import numpy
from keras.models import Sequential
from keras.layers.core import (
Dense, Dropout, Activation, Flatten)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.embeddings import Embedding
from keras.optimizers import SGD
from sklearn.metrics import classification_report
from keras.utils import np_utils
from utils import (
shuffle, train_val_test_split,
get_gene_ontology,
get_model_max_features,
encode_seq_one_hot)
import sys
import os
from collections import deque
LAMBDA = 24
DATA_ROOT = 'data/cnn/'
CUR_LEVEL = 'level_2/'
NEXT_LEVEL = 'level_3/'
MAXLEN = 1000
def get_model(
go_id,
parent_id,
nb_filter=64,
nb_row=3,
nb_col=3,
pool_length=2):
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
model = Sequential()
model.add(Convolution2D(nb_filter, nb_row, nb_col,
border_mode='valid',
input_shape=(1, MAXLEN, 20)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_length, pool_length)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(
loss='binary_crossentropy', optimizer='adam', class_mode='binary')
# Loading saved weights
print 'Loading weights for ' + go_id
model.load_weights(filepath)
return model
def main(*args, **kwargs):
if len(args) < 3:
raise Exception('Please provide function id')
parent_id = args[1]
go_id = args[2]
if len(args) == 4:
level = int(args[3])
global CUR_LEVEL
global NEXT_LEVEL
CUR_LEVEL = 'level_' + str(level) + '/'
NEXT_LEVEL = 'level_' + str(level + 1) + '/'
try:
model = get_model(go_id, parent_id)
except Exception, e:
print e
filepath = DATA_ROOT + CUR_LEVEL + parent_id + '/' + go_id + '.hdf5'
print "Removing " + filepath
os.remove(filepath)
if __name__ == '__main__':
main(*sys.argv)
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
6c11c9f28c7cb984ba87b066f90b8d56b41d9d05 | 0054277b83ef6a365d95bcffca4bcc37b7cd663d | /Hangle.py | c827f76c4e6bf994c5c9b8ab229bf882edecb004 | [] | no_license | Azam4204/WorldScrabble | c6f9eabd1fdd970ca72f1f7ba515d7881df48ed4 | 79f5dacfedddd3d1f7d7a7b8590604529ef807b4 | refs/heads/main | 2023-08-21T22:47:19.353474 | 2021-10-15T13:05:07 | 2021-10-15T13:05:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,883 | py | import random
from words import words
from Hangle_visual import lives_visual_dict
import string
def get_valid_word(words):
word = random.choice(words) # randomly chooses something from the list
while '-' in word or ' ' in word:
word = random.choice(words)
return word.upper()
def hangle():
word = get_valid_word(words)
word_letters = set(word) # letters in the word
alphabet = set(string.ascii_uppercase)
used_letters = set() # what the user has guessed
lives = 7
# getting user input
while len(word_letters) > 0 and lives > 0:
# letters used
# ' '.join(['a', 'b', 'cd']) --> 'a b cd'
print('You have', lives, 'lives left and you have used these letters: ', ' '.join(used_letters))
# what current word is (ie W - R D)
word_list = [letter if letter in used_letters else '-' for letter in word]
print(lives_visual_dict[lives])
print('Current word: ', ' '.join(word_list))
user_letter = input('Guess a letter: ').upper()
if user_letter in alphabet - used_letters:
used_letters.add(user_letter)
if user_letter in word_letters:
word_letters.remove(user_letter)
print('')
else:
lives = lives - 1 # takes away a life if wrong
print('\nYour letter,', user_letter, 'is not in the word.')
elif user_letter in used_letters:
print('\nYou have already used that letter. Guess another letter.')
else:
print('\nThat is not a valid letter.')
# gets here when len(word_letters) == 0 OR when lives == 0
if lives == 0:
print(lives_visual_dict[lives])
print('You lost, sorry. The word was', word)
else:
print('YAY! You guessed the word', word, '!!')
#if __name__ == '__main__':
hangle()
| [
"noreply@github.com"
] | Azam4204.noreply@github.com |
d1dbfc9b792006bdaec8fa8bd964e190e4b49afb | d5fa803e4e2d61c3a7eb456cd72d0b1647375f13 | /items.py | 28b78cd4a1385e8672c3cdfbba0ab7d452182af5 | [
"Apache-2.0"
] | permissive | yscoder-github/xueqiu_crawl | 6fe4b0a45133993d6cac609e00cf4ed2be2a4b7b | be4a912ffe518a390312e0cdd7029d294cfa419d | refs/heads/master | 2020-04-01T04:55:47.575970 | 2019-07-14T08:46:16 | 2019-07-14T08:46:16 | 152,882,286 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # encoding=utf-8
# ------------------------------------------
# 版本:1.0
# 日期:2018-10-17
# 作者:殷帅
# ------------------------------------------
from scrapy import Item, Field
class TopicBriefItem(Item):
"""话题信息"""
feedback = Field()
pic = Field() # 图片
reply_count = Field() # 回复数
id = Field() # id
topic_pic = Field()
title = Field()
first_pic = Field()
cover_pic = Field()
source = Field()
link_stock_desc = Field()
score = Field()
retweet_count = Field()
topic_pic_hd = Field()
description = Field()
reweeted_status = Field()
view_count = Field()
quote_cards = Field()
topic_title = Field() # 话题标题
user_profile = Field() # 用户主页
target = Field() # 文章地址
created_at = Field() # 文章创建时间
promotion = Field()
tag = Field()
link_stock_symbol = Field()
topic_desc = Field() # 话题描述
class TopicInfoItem(Item):
"""话题详情"""
target = Field() # 文章地址
text = Field() # 网页文本
| [
"yinshuai001@ke.com"
] | yinshuai001@ke.com |
bb6d2326537c1ac345d6d3d6810c1604401c8807 | 61d3d0472ddd642e13e3ef1e0b8c19eefd8de758 | /Main for submission.py | 4ce872f56d315b3a40202964478891911fdb4f68 | [] | no_license | yingdanwu/High-way-Traffic-Flow-Simulation | b2c729df21bf2a9a1783933a04386f9836597455 | aea811b68e9ed6af17db40dca3d148bb2dac5f0c | refs/heads/master | 2022-11-27T00:06:15.094545 | 2020-08-02T19:35:47 | 2020-08-02T19:35:47 | 284,528,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,192 | py | from decimal import Decimal, ROUND_UP
import heapq
import Car as Car
import Road as Road
# generate random integer
from random import random
class Solution:
def __init__(self):
self.carDic={}
self.roadDic={}
self.ID=1
self.data=[]
self.enter_n=[0]*4
self.exit_n=[0]*4
self.exitTrue=[0]*4
self.change=0
self.carHistory=[]
self.carChanged=set()
self.carChangeEvent=[]
self.roadHistory=[]
self.roadClock=[0+9*i for i in range(100)]
self.carWaited=set()
self.carWaitedEvent=[]
def genQueue(self,startT,endT,onRoad):
queue=[]
t=0
while t<endT:
newCar=Car.Car()
newCar.ID=self.ID
newCar.speed=1
newCar.enterT=t
newCar.T=t
newCar.mainRoad=onRoad
queue.append(newCar)
t+=Decimal(str(random()*0.8+0.0001)).quantize(Decimal('.01'), rounding=ROUND_UP)
self.carDic[self.ID]=newCar
self.ID+=1
return queue
    def update_velocity(self,Road,car,i,j):
        """Recompute section (i, j)'s speed from its congestion and advance
        `car`'s clock by the time needed to traverse that section.

        Note: the parameter `Road` shadows the imported `Road` module; here
        it is a Road instance, and (i, j) index (section, lane).  Mutates
        Road.velocity, car.velocity_history and car.T in place.
        """
        curtime=car.T
        # Roughly every 9 time units (self.roadClock) sample the current
        # occupancy (enter minus exit counts) into self.roadHistory.
        if self.roadClock and abs(curtime-self.roadClock[0])<0.1:
            t=self.roadClock.pop(0)
            M=0
            # NOTE(review): range(0, 1) covers only road 0, so only the
            # first road's occupancy is sampled — confirm this is intended.
            for n in range(0,1):
                M+=self.enter_n[n]-self.exit_n[n]
            self.roadHistory.append(M)
        # Linear density->speed model (see comment below); the 1/3600 floor
        # keeps the speed strictly positive so division below is safe.
        Road.velocity[i][j]=max(1/3600,(1.8*len(Road.cartime[i][j])/Road.intersection_length*(-0.079244)+53.035999)/3600)
        #x:number of car in 1mile, y:average speed mile/hour, y=-0.079244x+53.035999
        car.velocity_history.append(round(Road.velocity[i][j],5))
        # Traversal time for one section, rounded up onto the 0.01 grid,
        # pushes the car's clock forward.
        t= Road.intersection_length/Road.velocity[i][j]
        t=Decimal(str(t)).quantize(Decimal('.01'), rounding=ROUND_UP)
        car.T+=t
def exitCheck(self):
a=random()
if a>0.3:return False
else:return True
def enter(self,car,FEL,road,j):
ID=road.ID
self.enter_n[ID]+=1
road.intersection_car_number[0][j]+=1
self.update_velocity(road,car,0,j)
road.cartime[0][j].append(car.T)
heapq.heappush(FEL,[car.T,"proceed"+str(ID)+"_1",car.ID,j])
    def exit(self,car,FEL,road,j):#j is the lane number
        """Handle `car` leaving the last section of `road` in lane `j`.

        Road 3 ends the highway: return (car.ID, car.T); main() ignores the
        return value.  On roads 0-2 a lane-0 car takes the off-ramp with
        probability 0.3 (exitCheck), logging its finish into self.data;
        otherwise it is scheduled to enter the next road.
        """
        road.intersection_car_number[-1][j]-=1
        ID=road.ID
        # NOTE(review): this pops from the END of the section queue, while
        # proceed() pops from the front (pop(0)) — confirm the asymmetry
        # is intended.
        if road.cartime[-1][j]:road.cartime[-1][j].pop()
        self.exit_n[ID]+=1
        if ID==3:return car.ID, car.T
        if j==0 and self.exitCheck():
            # Car leaves the main road here; record its completion.
            self.exitTrue[ID]+=1
            self.data.append([car.ID,car.T])
        else:heapq.heappush(FEL,[car.T,"enter"+str(ID+1),car.ID,j])
def proceed(self,car,FEL,Road_n,intersection_n):
i,j=intersection_n
road=self.roadDic[Road_n]
if road.cartime[i-1][j]:road.cartime[i-1][j].pop(0)
road.intersection_car_number[i-1][j]-=1
if i<road.division-1:
self.changelane(car,FEL,Road_n,intersection_n)
else:
road.intersection_car_number[i][j]+=1
self.update_velocity(road,car,i,j)
heapq.heappush(FEL, [car.T, "exit"+str(Road_n), car.ID,j])
road.cartime[i][j].append(car.T)
    def changelane(self,car,FEL,Road_n,intersection_n):
        """Advance `car` into section i of road `Road_n`, possibly switching
        from lane j to the other lane first (intersection_n == [i, j]).

        With probability 0.5 a lane change is considered; it is taken only
        when the timing gaps look safe:
          D: this car's projected arrival at section i (current lane),
          C: arrival time of the car ahead in the current lane/section,
          B: arrival time of the last car already in the target lane/section,
          A: projected arrival of the trailing car in the target lane.
        The change requires tailgating in the current lane (D-C < 0.29) and
        clear gaps in the target lane (D-B > 0.3 and A-D > 0.3).
        """
        i,j=intersection_n
        road=self.roadDic[Road_n]
        if random()>0.5:
            # k is the index of the other (target) lane.
            if j==0:k=1
            else:k=0
            # NOTE(review): D uses quantize() with the default rounding,
            # while A uses ROUND_UP — confirm the inconsistency is intended.
            A,B,C,D=0,0,0,car.T+Decimal(str(road.intersection_length/road.velocity[i-1][j])).quantize(Decimal('.01'))
            if road.cartime[i][j]:C=road.cartime[i][j][-1]
            if road.cartime[i-1][k]:A=road.cartime[i-1][k][0]+Decimal(str(road.intersection_length/road.velocity[i-1][k])).quantize(Decimal('.01'), rounding=ROUND_UP)
            if road.cartime[i][k]:B=road.cartime[i][k][-1]
            if D-C<0.29 and D-B>0.3 and A-D>0.3:
                # Safe gap found: record the change and switch lanes.
                self.change+=1
                self.carChanged.add(car.ID)
                self.carChangeEvent.append([car.ID,road.ID,D,C,B,A])
                j=k
        # Occupy section i in the (possibly new) lane and schedule the next
        # 'proceed' event.
        road.intersection_car_number[i][j]+=1
        self.update_velocity(road,car,i,j)
        road.cartime[i][j].append(car.T)
        heapq.heappush(FEL,[car.T,"proceed"+str(Road_n)+"_"+str(i+1),car.ID,j])
def conflict(self,list,sofar,last,FEL):
mainroad=[False]*3 ##check whether there is event on the mainroad
for event in list:
if event[1] in {"exit0","exit1","exit2"} and event[-1]==0:
mainroad[int(event[1][-1])]=True
for event in list:
if event[1] in {"enter1","enter2","enter3"}:
i=int(event[1][-1])-1
if last[i]>event[0]:
#print("Back to FEL",list)
heapq.heappush(FEL,[event[0]+sofar[i],event[1],event[2],0])
list.remove(event)
else:
if mainroad[i]==True:
# print("conflict",list)
carID=event[2]
self.carWaited.add(carID)
#print(list,carID)
sofar[i]+=Decimal(str(0.5)).quantize(Decimal('.01'), rounding=ROUND_UP)
heapq.heappush(FEL,[event[0]+sofar[i],event[1],event[2],0])
list.remove(event)
else:
#print("no conflict",list)
sofar[i]=0
last[i]=event[0]
#print("afterlist",list)
return
    def main(self,startT,endT):
        """Run the discrete-event simulation from startT to endT.

        Builds arrival queues for the main-road entrance (road 0, both
        lanes) and the three on-ramps, creates the four road segments,
        seeds the Future Event List (FEL) with every arrival, then pops
        events in time order.  Events sharing a timestamp are batched,
        logged per car, screened for entry conflicts (conflict()), and
        dispatched to enter()/exit()/proceed().
        """
        FEL=[]
        #Generate Queue at enters
        Q0=self.genQueue(startT,endT,True)
        Q1=self.genQueue(startT,endT,False)
        Q2=self.genQueue(startT,endT,False)
        Q3=self.genQueue(startT,endT,False)
        #Generate four roads with its length and number of sections and IDs
        Road0=Road.Road(0.2,2,2,0)
        Road1=Road.Road(0.8,8,2,1)
        Road2=Road.Road(0.8,8,2,2)
        Road3=Road.Road(0.4,4,2,3)
        self.roadDic[0]=Road0
        self.roadDic[1]=Road1
        self.roadDic[2]=Road2
        self.roadDic[3]=Road3
        #Put the queue at enters into the Future Event List(FEL)
        for car in Q0:
            # Main-road arrivals pick lane 0 or 1 with equal probability.
            j=int(random()//(1/2))
            heapq.heappush(FEL,[car.T,"enter0",car.ID,j])
        for car in Q1:
            heapq.heappush(FEL,[car.T,"enter1",car.ID,0])
        for car in Q2:
            heapq.heappush(FEL,[car.T,"enter2",car.ID,0])
        for car in Q3:
            heapq.heappush(FEL,[car.T,"enter3",car.ID,0])
        #Use carHistory to record information of event with the index of Car's ID
        self.carHistory=[[] for _ in range(self.ID+1)]
        #record wait time at entry due to conflict
        Waittime_sofar=[0]*3
        Lastvehicle_outtime=[0]*3
        #Start FEL
        while FEL:
            # Pop the earliest event, then gather every event that shares
            # its timestamp into one batch.
            event=heapq.heappop(FEL)
            curtime=event[0]
            event_list=[event]
            while FEL and FEL[0][0]==curtime:
                event_list.append(heapq.heappop(FEL))
            # Log each event under its car's ID before dispatching.
            for event in event_list:
                ID=event[-2]
                self.carHistory[ID].append(event)
            # conflict() may remove (and reschedule) merge events in place.
            self.conflict(event_list,Waittime_sofar,Lastvehicle_outtime,FEL)
            for event in event_list:
                ID=event[-2]
                car=self.carDic[event[2]]
                # Stop dispatching once past the simulation horizon.
                if event[0]>endT:break
                if event[1] in {"enter0","enter1","enter2","enter3"}:
                    road=self.roadDic[int(event[1][-1])]
                    self.enter(car,FEL,road,event[-1])
                elif event[1] in {"exit0","exit1","exit2","exit3"}:
                    road=self.roadDic[int(event[1][-1])]
                    self.exit(car,FEL,road,event[-1])
                else:
                    # 'proceed{road}_{section}' events advance the car.
                    Road_n=int(event[1][-3])
                    intersection_n=[int(event[1][-1]),event[-1]]
                    self.proceed(car,FEL,Road_n,intersection_n)
def get_carTotal(self):
    """Print the total number of cars generated (stored in self.ID)."""
    print(f"Total number of car {self.ID}")
def get_conflictTotal(self):
    """Print how many distinct cars were involved in an entry conflict."""
    print(f"Total number of conflict {len(self.carWaited)}")
def get_laneChange(self):
    """Print the total count of lane-change events (self.change)."""
    print(f"Total number of lanechange {self.change}")
def get_laneChangeCar(self):
    """Print how many distinct cars changed lane at least once."""
    print(f"Total number of car changed lane {len(self.carChanged)}")
def get_car_Passing_Enter_Exit(self):
    """Print per-gate entry/exit counts and the main-road departure total."""
    print(f"Number of car passed the four enters {self.enter_n}")
    print(f"Number of car passed the four exits {self.exit_n}")
    left_main_road = sum(self.exitTrue) - self.exitTrue[-1] + self.exit_n[-1]
    print(f"Number of car leaves the main road {left_main_road}")
def get_change_lane_event(self, n):
    """Print the event history and lane-change records for n cars sampled
    (destructively, via set.pop) from self.carChanged."""
    sampled_ids = [self.carChanged.pop() for _ in range(n)]
    for car_id in sampled_ids:
        print(self.carHistory[car_id])
        for event in (e for e in self.carChangeEvent if e[0] == car_id):
            print("Changelane: carID,roadID,car_time,car_ahead_time,next_lane_car_time,next_lane_car_behind_time", event)
def get_conflict_event(self, n):
    """Print the first five recorded events for n cars sampled
    (destructively, via set.pop) from self.carWaited."""
    for car_id in [self.carWaited.pop() for _ in range(n)]:
        print(self.carHistory[car_id][0:5])
def get_car_velocity(self, list):
    """Print start/end events and the velocity history for the given car IDs.

    (`list` shadows the builtin; the parameter name is kept for interface
    compatibility.)
    """
    for car_id in list:
        tracked_car = self.carDic[car_id]
        history = self.carHistory[car_id]
        # shows where the selected car starts and end
        print("ID:", car_id, "Start:", history[0][1], "End:", history[-1][1])
        print(tracked_car.velocity_history)
    print("finish")
    return
# Driver: run a 15-minute simulation, then report selected statistics.
test=Solution()
test.main(0,15*60) #(startT,endT)
# Optional reports -- uncomment as needed:
# test.get_carTotal()
# test.get_conflictTotal()
# test.get_laneChange()
# test.get_laneChangeCar()
# test.get_car_Passing_Enter_Exit()
#test.get_change_lane_event(1)#input is how many event do you want
# test.get_conflict_event(1)#input is how many event do you want
# Print the velocity traces of three sample cars.
test.get_car_velocity([301,401,501])
| [
"noreply@github.com"
] | yingdanwu.noreply@github.com |
5f87f8a1dd51bbbd5ab67cacd2e7b1bf4819ff49 | 6cd2afb703f0037c38ebaaa7e3fc132d7acbfa31 | /viewer_19580/urls.py | 76bf364518c599d0bd749c685c1e2f4d16291727 | [] | no_license | crowdbotics-apps/viewer-19580 | 46aeb079e7cf8214232d2491e1f15f8ffdaab538 | 8cdc0dd08a7f66dc4cd58299c42b83bd59f6b692 | refs/heads/master | 2022-11-29T05:23:02.074502 | 2020-08-15T15:54:35 | 2020-08-15T15:54:35 | 287,777,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | """viewer_19580 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi

# Top-level routing: the home app, allauth account pages, the versioned
# API, the admin, and rest_auth endpoints.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Viewer"
admin.site.site_title = "Viewer Admin Portal"
admin.site.index_title = "Viewer Admin"

# swagger
# drf-yasg schema view; viewing the API docs requires authentication.
api_info = openapi.Info(
    title="Viewer API",
    default_version="v1",
    description="API documentation for Viewer App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7bf7b2ca5ecda4aaf627db68ed8acd4fda3d748a | 0a087260d71d9a9ba95063d0a0a28c96e93b8869 | /CoinTossing/CoinToss.py | d287014d3ed07dc8773db3047ce851138af3af0b | [] | no_license | johnhopkins/PythonToolkit | 865508fe1b6588ac68664d83712570ddc333b750 | 8ae248f7f91333f6ba2bd4aee8f228de99fd4ece | refs/heads/master | 2022-04-29T09:42:15.792061 | 2022-04-14T16:18:49 | 2022-04-14T16:18:49 | 51,039,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | import random
# Flip a fair two-sided coin repeatedly, stopping the first time the
# running heads and tails counts are equal again, then report how many
# flips that took.
heads = 0
tails = 0
while True:
    # One flip per iteration: 1 means heads, 2 means tails.
    if random.randint(1, 2) == 1:
        heads += 1
    else:
        tails += 1
    # Stop as soon as the counts balance out.
    if heads == tails:
        break
print("The number of flips was {count}".format(count = heads + tails))
| [
"johnhopkins@gmx.co.uk"
] | johnhopkins@gmx.co.uk |
ec0ae5cdcc72db44f196c2b77106a879ce172c59 | 157e19fb20be103e21d7bb7b1549b5863fdeb602 | /剑指offer/35_复杂链表的复制/code.py | 3d7f5e702577be591fa89ab0fc05fe0535f12db6 | [] | no_license | Darr-en1/practice | a226be21d0305b624946dc534058a6b15772fd05 | 9648096c8508884c348a55ff4967d61773ef1a0c | refs/heads/master | 2022-09-17T18:59:26.588513 | 2021-04-21T13:37:14 | 2021-04-21T13:37:14 | 147,457,500 | 2 | 0 | null | 2022-08-23T17:43:23 | 2018-09-05T04:01:03 | Python | UTF-8 | Python | false | false | 1,303 | py | from collections import deque
class Node:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left, self.right = left, right
def arr_to_tree(li):
    """Build a binary tree from a level-order value list.

    Falsy entries stand for missing children.  A FIFO of nodes still
    awaiting children is consumed while values are taken two at a time
    (left child, then right child).
    """
    root = Node(li[0]) if li else None
    pending = deque([root])
    idx = 1
    while idx < len(li):
        parent = pending.popleft()
        if li[idx]:
            parent.left = Node(li[idx])
            pending.append(parent.left)
        if idx + 1 < len(li) and li[idx + 1]:
            parent.right = Node(li[idx + 1])
            pending.append(parent.right)
        idx += 2
    return root
class Solution:
    """Deep copy of a linked list whose nodes carry an extra `random` pointer."""

    def copyRandomList(self, head: 'Node') -> 'Node':
        """Return a deep copy of the list starting at `head`.

        A memo dict maps original nodes to their copies so that `random`
        targets are shared with the `next`-chain copies.  The Node class
        declares no `next`/`random` attributes, so every copy is given
        explicit defaults; previously the tail copy had no `next` and
        copies whose original `random` was None had no `random`, making
        traversal of the result raise AttributeError.
        """
        exist = {}  # original node -> its copy
        pre = curr = Node(0)  # dummy head for the copied chain
        while head:
            if head not in exist:
                exist[head] = Node(head.val)
            copy = exist[head]
            # Default `next`; it is overwritten when the following node is
            # linked, so only the tail keeps next=None.
            copy.next = None
            if head.random is not None:
                if head.random not in exist:
                    exist[head.random] = Node(head.random.val)
                copy.random = exist[head.random]
            else:
                copy.random = None
            curr.next = copy
            curr = curr.next
            head = head.next
        return pre.next
if __name__ == '__main__':
    # Smoke test: build a two-node list with `random` pointers and copy it.
    # (The previous code called a nonexistent `treeToDoublyList` method on
    # a binary tree, which raised AttributeError at run time.)
    first = Node(1)
    second = Node(2)
    first.next, first.random = second, second
    second.next, second.random = None, first
    copied = Solution().copyRandomList(first)
    print(copied.val, copied.random.val)
| [
"darr_en1@126.com"
] | darr_en1@126.com |
f4277637101ca2452185a124b44a2047eef1c208 | b1931901a2599e170f4c0dbbecc1678ecd976904 | /Tools/Scripts/webkitpy/port/simulator_process.py | c1147b2bbf734a964fff63af7b4931702f8a1399 | [] | no_license | walmis/WPEWebKit-upstream | b75872f73073a2d58da0a9a51fc9aab891fb897d | 4b3a7b8cdd8afc12162fc2e0dcf474685e3fcf58 | refs/heads/master | 2023-03-10T11:19:26.173072 | 2017-03-22T09:28:59 | 2017-03-22T09:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,495 | py | # Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
import signal
import time
from webkitpy.port.server_process import ServerProcess
from webkitpy.xcode.simulator import Simulator
class SimulatorProcess(ServerProcess):
    """ServerProcess variant for an app launched inside an iOS simulator.

    stdin/stdout/stderr are carried over three named pipes under /tmp whose
    names derive from the app bundle id and the device udid; the app finds
    its ends of the fifos via the IPC_IDENTIFIER environment variable.
    (Python 2 code: note the `except OSError, err` syntax and 0600 octal.)
    """

    class Popen(object):
        """Minimal stand-in for subprocess.Popen wrapping an externally
        launched pid plus the fifo-backed standard streams."""

        def __init__(self, pid, stdin, stdout, stderr):
            self.stdin = stdin
            self.stdout = stdout
            self.stderr = stderr
            self.pid = pid
            self.returncode = None

        def poll(self):
            # Like subprocess.Popen.poll(): None while running, non-None
            # once the process has gone away.
            if self.returncode:
                return self.returncode
            try:
                # Signal 0 performs error checking only (no signal sent).
                os.kill(self.pid, 0)
            except OSError, err:
                assert err.errno == errno.ESRCH
                self.returncode = 1
            return self.returncode

        def wait(self):
            # Busy-wait in 10ms steps until the process exits.
            while not self.poll():
                time.sleep(0.01)  # In seconds
            return self.returncode

    def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False, treat_no_data_as_crash=False, worker_number=None):
        self._bundle_id = port_obj.app_identifier_from_bundle(cmd[0])
        self._device = port_obj.device_for_worker_number(worker_number)
        # The launched app uses this to locate its ends of the fifos.
        env['IPC_IDENTIFIER'] = self._bundle_id + '-' + self._device.udid

        # This location matches the location used by WebKitTestRunner and DumpRenderTree
        # for the other side of these fifos.
        file_location = '/tmp/' + env['IPC_IDENTIFIER']
        self._in_path = file_location + '_IN'
        self._out_path = file_location + '_OUT'
        self._error_path = file_location + '_ERROR'

        super(SimulatorProcess, self).__init__(port_obj, name, cmd, env, universal_newlines, treat_no_data_as_crash)

    def _reset(self):
        super(SimulatorProcess, self)._reset()

        # Unlinks are needed on reset in the event that the Python code unexpectedly
        # fails between _start() and kill().  This can be caused by a SIGKILL or a crash.
        # This ensures that os.mkfifo() will not be obstructed by previous fifos.
        # Other files will still cause os.mkfifo() to fail.
        try:
            os.unlink(self._in_path)
        except:
            pass
        try:
            os.unlink(self._out_path)
        except:
            pass
        try:
            os.unlink(self._error_path)
        except:
            pass

    def _start(self):
        if self._proc:
            raise ValueError('{} already running'.format(self._name))
        self._reset()

        FIFO_PERMISSION_FLAGS = 0600  # Only owner can read and write
        os.mkfifo(self._in_path, FIFO_PERMISSION_FLAGS)
        os.mkfifo(self._out_path, FIFO_PERMISSION_FLAGS)
        os.mkfifo(self._error_path, FIFO_PERMISSION_FLAGS)

        # Non-blocking opens so we do not hang waiting for the app to open
        # the write ends of the output fifos.
        stdout = os.fdopen(os.open(self._out_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')
        stderr = os.fdopen(os.open(self._error_path, os.O_RDONLY | os.O_NONBLOCK), 'rb')

        self._pid = self._device.launch_app(self._bundle_id, self._cmd[1:], env=self._env)

        # Watchdog: opening the stdin fifo blocks until the app opens the
        # other end; SIGALRM aborts the open if that takes too long.
        def handler(signum, frame):
            assert signum == signal.SIGALRM
            raise Exception('Timed out waiting for process to open {}'.format(self._in_path))
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(3)  # In seconds

        stdin = None
        try:
            stdin = open(self._in_path, 'w', 0)  # Opening with no buffering, like popen
        except:
            # We set self._proc as _reset() and _kill() depend on it.
            self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)
            if self._proc.poll() is not None:
                self._reset()
                raise Exception('App {} crashed before stdin could be attached'.format(os.path.basename(self._cmd[0])))
            self._kill()
            self._reset()
            raise
        signal.alarm(0)  # Cancel alarm

        self._proc = SimulatorProcess.Popen(self._pid, stdin, stdout, stderr)

    def stop(self, timeout_secs=3.0):
        # Ask the app to terminate before the base class tears things down;
        # ESRCH just means it is already gone.
        try:
            os.kill(self._pid, signal.SIGTERM)
        except OSError as err:
            assert err.errno == errno.ESRCH
            pass

        return super(SimulatorProcess, self).stop(timeout_secs)
| [
"jbedard@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc"
] | jbedard@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc |
6847a1eca7af1f947c92ad605d549a74a1471dca | ec1d8a4847f3fe07641d290d9161436ff8a65923 | /employProj/employProj/urls.py | 5f07c9b384c767ef4ca051f07106b100bae33beb | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-formclassv2-cw-ChelsGreg | 2e1265a78b3e98df9b3e93cf6f9a1d6ddb2f16a8 | 1eee2bcb74b30af722cf87a7641ba2c77ff47ed1 | refs/heads/master | 2020-04-25T17:28:39.843828 | 2019-03-01T20:06:09 | 2019-03-01T20:06:09 | 172,949,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """employProj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

# Delegate everything app-specific to employApp's URLconf; keep the
# Django admin at /admin/.
urlpatterns = [
    path('', include('employApp.urls')),
    path('admin/', admin.site.urls),
]
| [
"cgreg2217@gmail.com"
] | cgreg2217@gmail.com |
8d1ffa5ca70658d963d0923b36851dbb974c4a97 | 80c23490c7bbaf34581b92163dabafb6b0d373de | /src/neural_toolbox/cbn_pluggin.py | 8981097b7ae1ed3ff82e84f1dd710f02b5823dd1 | [
"Apache-2.0"
] | permissive | ash567/guess_what | c4133a1267c9413be8f4935656cc0a1f137e597d | 092653695cb2a14cbb12df619b894d983c065a67 | refs/heads/master | 2021-04-27T17:37:29.946649 | 2018-12-01T20:08:54 | 2018-12-01T20:08:54 | 112,757,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,400 | py | import tensorflow as tf
import neural_toolbox.utils as utils
from tensorflow.python.ops.init_ops import RandomUniform
class CBNAbtract(object):
    """
    Factory (Design pattern) to use cbn
    """

    def create_cbn_input(self, feature_maps):
        """
        This method is called every time conditional batchnorm is applied on cbn
        This factory enable to inject cbn to a pretrained resnet
        The good practice is to put the input of cbn (lstm embedding for instance) in the constructor.
        One may then use this variable in the create cbn.

        e.g.
        def __init__(self, lstm_state):
            self.lstm_state = lstm_state

        def create_cbn_input(feature_map):
            feat = int(feature_maps.get_shape()[3])
            delta_betas = tf.contrib.layers.fully_connected(lstm_state, num_outputs=feat)
            delta_gammas = tf.contrib.layers.fully_connected(lstm_state, num_outputs=feat)
            return delta_betas, delta_gammas

        :param feature_maps: (None,h,w,f)
        :return: deltas_betas, delta_gammas: (None, f), (None, f)
        """
        feat = int(feature_maps.get_shape()[3])
        # Build the zero tensors from the *dynamic* batch size (tf.shape),
        # the same way CBNfromLSTM.create_cbn_input does: the static batch
        # dimension is usually None at graph-construction time, so the
        # previous tf.zeros(shape=[None, feat]) failed to build (as the
        # old inline comment itself noted).
        batch_size = tf.shape(feature_maps)[0]
        delta_betas = tf.tile(tf.constant(0.0, shape=[1, feat]), tf.stack([batch_size, 1]))
        delta_gammas = tf.tile(tf.constant(0.0, shape=[1, feat]), tf.stack([batch_size, 1]))
        return delta_betas, delta_gammas
class CBNfromLSTM(CBNAbtract):
    """
    Basic LSTM for CBN

    Conditional batch-norm inputs computed from an LSTM state: a small
    MLP (one ReLU hidden layer) predicts per-feature-map offsets for the
    batch-norm betas and gammas.
    """

    def __init__(self, lstm_state, no_units, use_betas=True, use_gammas=True):
        # lstm_state: conditioning tensor fed to the prediction MLPs.
        # no_units: hidden-layer size of those MLPs.
        # use_betas/use_gammas: when False the corresponding delta is a
        # constant zero tensor instead of a learned prediction.
        self.lstm_state = lstm_state
        self.cbn_embedding_size = no_units
        self.use_betas = use_betas
        self.use_gammas = use_gammas

    def create_cbn_input(self, feature_maps):
        """Return (delta_betas, delta_gammas), each of shape (batch, no_features)."""
        no_features = int(feature_maps.get_shape()[3])
        batch_size = tf.shape(feature_maps)[0]  # dynamic batch size

        if self.use_betas:
            # RandomUniform(-1e-4, 1e-4) keeps the initial predictions
            # close to zero.
            h_betas = utils.fully_connected(self.lstm_state,
                                            self.cbn_embedding_size,
                                            weight_initializer=RandomUniform(-1e-4, 1e-4),
                                            scope="hidden_betas",
                                            activation='relu')
            delta_betas = utils.fully_connected(h_betas, no_features, scope="delta_beta",
                                                weight_initializer=RandomUniform(-1e-4, 1e-4), use_bias=False)
        else:
            # Disabled: a (batch, no_features) tensor of zeros.
            delta_betas = tf.tile(tf.constant(0.0, shape=[1, no_features]), tf.stack([batch_size, 1]))

        if self.use_gammas:
            h_gammas = utils.fully_connected(self.lstm_state,
                                             self.cbn_embedding_size,
                                             weight_initializer=RandomUniform(-1e-4, 1e-4),
                                             scope="hidden_gammas",
                                             activation='relu')
            delta_gammas = utils.fully_connected(h_gammas, no_features, scope="delta_gamma",
                                                 weight_initializer=RandomUniform(-1e-4, 1e-4))
        else:
            delta_gammas = tf.tile(tf.constant(0.0, shape=[1, no_features]), tf.stack([batch_size, 1]))

        return delta_betas, delta_gammas
"ishugarg567@gmail.com"
] | ishugarg567@gmail.com |
1a54ab850f7f501180a68c0fe5699cb8d2f26269 | 8876297ec918067a90de875ceccf24035b82534a | /RaspberryPi/DjangoSite/mysite/curling/sockets.py | baa2e8e3ef28bcdb252a0b5ab2c7ae056f165309 | [
"Apache-2.0"
] | permissive | zbassett/curling-robot | 142ac7d3e114732f0bb8dbeb8f14aabafb1d7519 | 188800588fa294e93522ea20fa7ddab7b5a642b3 | refs/heads/master | 2021-01-22T03:49:15.000613 | 2017-04-12T01:00:43 | 2017-04-12T01:00:43 | 81,463,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from socket_server.namespace import EventNamespace
class Namespace(EventNamespace):
    """Socket event namespace (Python 2): greets each new client with a
    'ping' event and logs the client's 'pong' reply."""

    def client_connected(self, client):
        # Let the base class do its bookkeeping, then start the
        # ping/pong round-trip with the new client.
        super(Namespace, self).client_connected(client)
        print 'Send ping'
        self.emit_to(client, 'ping')

    def register_callbacks(self):
        # Map incoming event names to handler methods.
        return {
            'pong': self.pong
        }

    def pong(self, client, **kwargs):
        # Handler for the client's 'pong' event.
        print 'Received pong event'
"zlbassett@gmail.com"
] | zlbassett@gmail.com |
ab3168a7ed6a211db35ec3e6069861560ba39898 | 1986f044d6476fab476a9b5eb9a95cc30d6a8eac | /Chapter07/pygal_1.py | c30537be5d2fb85031674c73d8f2dbb96a6b3e07 | [
"MIT"
] | permissive | PacktPublishing/Mastering-Python-Networking | 711f47ecff9ca2fec51f948badff22cd8c73ada4 | 52a2827919db1773f66700f3946390f200bd6dab | refs/heads/master | 2023-02-08T01:39:44.670413 | 2023-01-30T09:03:30 | 2023-01-30T09:03:30 | 82,666,812 | 138 | 127 | MIT | 2020-11-05T11:34:15 | 2017-02-21T10:25:34 | Python | UTF-8 | Python | false | false | 865 | py | #!/usr/bin/env python3
import pygal
import ast

# Time axis plus the four SNMP interface counters collected per poll.
x_time = []
out_octets = []
out_packets = []
in_octets = []
in_packets = []

with open('results.txt', 'r') as f:
    for line in f.readlines():
        # Each line of results.txt is the repr() of a dict.  Parse it with
        # ast.literal_eval instead of eval so arbitrary code embedded in
        # the file cannot be executed.  (NOTE: assumes all values are
        # plain Python literals, which the original eval()-based comment
        # also implied.)
        line = ast.literal_eval(line)
        x_time.append(line['Time'])
        out_packets.append(float(line['Gig0-0_Out_uPackets']))
        out_octets.append(float(line['Gig0-0_Out_Octet']))
        in_packets.append(float(line['Gig0-0_In_uPackets']))
        in_octets.append(float(line['Gig0-0_In_Octet']))

# Plot all four series on a single SVG line chart.
line_chart = pygal.Line()
line_chart.title = "Router 1 Gig0/0"
line_chart.x_labels = x_time
line_chart.add('out_octets', out_octets)
line_chart.add('out_packets', out_packets)
line_chart.add('in_octets', in_octets)
line_chart.add('in_packets', in_packets)
line_chart.render_to_file('pygal_example_2.svg')
| [
"echou@yahoo.com"
] | echou@yahoo.com |
338da416edb18b27cc044eed40fac6b4cb29c615 | 35bdcd17fccd462876b0896ba976362c9cc116ac | /ABDULFATAI_FOLDER/comparison.py | c7ec49c9accbb04f1080445c428e041ead8b47d3 | [] | no_license | toyinfa2884/parsel_tongue | 41f1def3d2add735ab8e38f387b6e0fe0dbee480 | 61d4263da603b4ed9cee2a116ef804a2f57caf48 | refs/heads/master | 2023-08-18T21:15:24.834699 | 2021-09-29T17:09:11 | 2021-09-29T17:09:11 | 397,195,811 | 0 | 0 | null | 2021-08-17T13:17:32 | 2021-08-17T09:44:07 | Python | UTF-8 | Python | false | false | 353 | py | first_number = input("Enter the first number:")
second_number = input("Enter the second numnber:")
third_number = input("Enter the third number:")

first_number_int = int(first_number)
second_number_int = int(second_number)
third_number_int = int(third_number)

# Compare the converted integers.  The previous version compared the raw
# input strings, so e.g. "9" < "10" evaluated to False because strings
# compare lexicographically, character by character.
if first_number_int < second_number_int < third_number_int:
    print(True)
else:
    print(False)
| [
"toyinfatai@gmail.com"
] | toyinfatai@gmail.com |
class Matter():
    """Domain object -- presumably a course subject ('materia'), holding an
    identifier and a display name."""

    def __init__(self, id, name):
        # `id` shadows the builtin; the parameter name is kept for
        # interface compatibility with existing callers.
        self.id = id
        self.name = name

    def __str__(self):
        return '''
        Identificación : {}
        Nombre : {} '''.format(self.id, self.name)
"wsg20043@hotmail.com"
] | wsg20043@hotmail.com |
4b5813798e4a92928c1e749080bc7219fb8c87a8 | 7cfc2adeef9fe24fdf4cf568112168aa0ed97ccf | /checkingfacedetection.py | de299e5f04551053a043fe574d96ffe364138443 | [] | no_license | anubhavsingh10/Face-Mask-Detection | 262fbe423e59e4b19d052ff01dcfbe298ba40b15 | a515fea47df44a500b4cc5768f7b2d2fe838f607 | refs/heads/main | 2023-01-23T12:54:51.194610 | 2020-11-29T12:01:11 | 2020-11-29T12:01:11 | 316,928,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | import os
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import cv2
import random
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
# Trained mask/no-mask classifier plus OpenCV's bundled frontal-face detector.
model = tf.keras.models.load_model(r"C:\Users\hschahar\Desktop\GUIDED_PROJECTS\Face Mask Detection\FaceDetectionModel.h5")
facedetector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
source = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# (Duplicate re-imports kept from the original file.)
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img

# Label text / BGR box colour per predicted class index.  These dicts were
# referenced below but never defined, which raised NameError on the first
# detection.  NOTE(review): assumes class 0 = mask (green) and 1 = no mask
# (red) -- confirm against the class ordering used when the model was trained.
labels_dict = {0: 'MASK', 1: 'NO MASK'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}

while True:
    ret, img = source.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face = facedetector.detectMultiScale(gray, 1.1, 6)
    for (x, y, w, h) in face:
        # Crop, preprocess and resize the face region to the model's
        # expected (1, 100, 100, 1) greyscale input.
        face_img = gray[y:y+w, x:x+w]
        face_img = img_to_array(face_img)
        face_img = preprocess_input(face_img)
        face_img = cv2.resize(face_img, (100, 100))
        face_img = np.reshape(face_img, (1, 100, 100, 1))
        result = model.predict(face_img)
        label = np.argmax(result, axis=1)[0]
        # Draw the bounding box, a filled label band above it, and the text.
        cv2.rectangle(img, (x, y), (x+w, y+h), color_dict[label], 2)
        cv2.rectangle(img, (x, y-40), (x+w, y), color_dict[label], -1)
        cv2.putText(img, labels_dict[label], (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
    cv2.imshow('LIVE', img)
    key = cv2.waitKey(1)
    # ESC (27) quits the capture loop.
    if (key == 27):
        break

cv2.destroyAllWindows()
source.release()
"noreply@github.com"
] | anubhavsingh10.noreply@github.com |
44c2a04829912890a32fd58b094366d87a23770c | c846cced4e15653b11f93e17a132e9c13149491e | /examples/03/3.1-2.py | 1a6a04f10391dd485bf37e89c759c7b2d0a819a1 | [] | no_license | soongon/python-auto | f082ade1136889cbc083f19169fde23435a6941c | c265526deaca7f0feab259b21d143b14826e6d77 | refs/heads/master | 2022-07-07T15:08:06.671268 | 2020-05-15T08:23:10 | 2020-05-15T08:23:10 | 203,484,663 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | num = 10
# Demo: an if/elif/else chain (at most one branch runs) ...
if num < 10:
    print("less than 10")
elif num > 10:
    print("more than 10")
else:
    print("same")
# ... versus separate if statements, where each condition is evaluated
# independently, so several branches may print.
if num < 10:
    print("less than 10")
if num < 15:
    print("less than 15")
if num < 20:
    print("less than 20")
| [
"kitri@a.com"
] | kitri@a.com |
ad6b0c76a134ab06bce7085a102acc5b090d7e5b | 41af8dab43f81674d93ed86e0499a19c836acbb4 | /2019/day11.py | 2ae86a570dde60a41e5a1a9fccf41aac2c1058b8 | [] | no_license | radoh/aoc | bc098a09848c31cac9c14176c6d7b058756701e5 | c3b0606a02baf51fd82103d0503584d8b119bca1 | refs/heads/master | 2021-07-09T15:19:27.930522 | 2019-12-24T08:26:29 | 2019-12-24T08:26:29 | 225,810,835 | 3 | 0 | null | 2019-12-23T10:35:54 | 2019-12-04T07:56:26 | Python | UTF-8 | Python | false | false | 2,259 | py | from utils import get_input_lines
import collections

# Puzzle input: one comma-separated line of Intcode integers.
numbers = list(map(int, next(get_input_lines(__file__)).split(',')))

# Intcode arithmetic/jump helpers: 1 add, 2 multiply, 7 less-than,
# 8 equals.  5/6 are jump-if-true / jump-if-false: given the current
# instruction pointer i they return the jump target b, or fall through to
# i + 3 (the instruction after the two operands) otherwise.
op_fns = {
    1: lambda a, b: a + b,
    2: lambda a, b: a * b,
    5: lambda a, b, i: b if a != 0 else i + 3,
    6: lambda a, b, i: b if a == 0 else i + 3,
    7: lambda a, b: 1 if a < b else 0,
    8: lambda a, b: 1 if a == b else 0,
}
def process(nums, input):
    """Run the Intcode program `nums` (a mutable index->value mapping).

    Works as a generator: yields every output value (opcode 4) and pulls
    opcode-3 reads from the `input` iterator.  (`input` shadows the
    builtin; kept as-is.)
    """
    i = 0  # instruction pointer
    relative = [0]  # relative base (opcode 9); a list so arg() can mutate via closure
    while nums[i] != 99:  # 99 = halt
        opcode = nums[i] % 100
        # Parameter modes for up to three operands:
        # 0 = position, 1 = immediate, 2 = relative.
        param_modes = [nums[i] // k % 10 for k in [100, 1000, 10000]]

        def arg(n, is_out):
            # Resolve operand n; output operands (is_out) yield an
            # address to write to rather than a value.
            if param_modes[n] == 2:
                return nums[relative[0] + nums[i + n + 1]] if not is_out else relative[0] + nums[i + n + 1]
            return nums[nums[i + n + 1]] if param_modes[n] == 0 and not is_out else nums[i + n + 1]

        if opcode in (1, 2, 7, 8):  # add / mul / less-than / equals
            a = arg(0, False)
            b = arg(1, False)
            c = arg(2, True)
            nums[c] = op_fns[opcode](a, b)
            i += 4
        elif opcode == 3:  # read one value from the input iterator
            a = arg(0, True)
            nums[a] = next(input)
            i += 2
        elif opcode == 4:  # emit one output value
            a = arg(0, False)
            yield a
            i += 2
        elif opcode in (5, 6):  # conditional jumps (op_fns returns the new pointer)
            a = arg(0, False)
            b = arg(1, False)
            i = op_fns[opcode](a, b, i)
        elif opcode == 9:  # adjust the relative base
            a = arg(0, False)
            relative[0] += a
            i += 2
    # In a generator `return` just ends iteration (the final memory is
    # attached to StopIteration as its value).
    return nums
# Hull-painting robot: the Intcode program reads the colour under the
# robot (via `inputs`) and emits (paint colour, turn direction) pairs.
inputs = []
num_dict = collections.defaultdict(int, {i: n for i, n in enumerate(numbers)})
out = process(num_dict, iter(inputs))
pos = (0, 0)
m = collections.defaultdict(int)  # panel position -> colour (0 black, 1 white)
m[pos] = 1  # the robot starts on a white panel
d = '^'
# Heading tables: dr = turn right, dl = turn left, do = (row, col) step.
dr = {'^': '>', '>': 'v', 'v': '<', '<': '^'}
dl = {'^': '<', '>': '^', 'v': '>', '<': 'v'}
do = {'^': (-1, 0), '>': (0, 1), 'v': (1, 0), '<': (0, -1)}
try:
    while True:
        inputs.append(m[pos])  # camera: colour of the current panel
        paint = next(out)
        turn = next(out)  # truthy = turn right, falsy = turn left
        m[pos] = paint
        d = dr[d] if turn else dl[d]
        pos = pos[0] + do[d][0], pos[1] + do[d][1]
except StopIteration:
    # The Intcode program halted.
    pass
print(len(m.keys()))  # number of panels ever read or painted
# Render the painted hull; white panels as full blocks.
for r in range(max(map(lambda e: e[0], m.keys())) + 1):
    for c in range(max(map(lambda e: e[1], m.keys())) + 1):
        print('█' if m[(r, c)] else ' ', end='')
    print()
| [
"radovan.halamicek@vacuumlabs.com"
] | radovan.halamicek@vacuumlabs.com |
17e37f36f229a232419e26ef1fe61fa715d0f2b4 | 86d433625a76bd899ffb86b7b86437cae9a07139 | /upfrontsystems/portlets/savedsearches/savedsearches.py | b1a4c704d565a575ee4050048b1ef6b680f1ed11 | [] | no_license | rijkstofberg/upfrontsystems.portlets.savedsearches | 57b672de29200b80a49b33c4171eb6cce6f14b30 | 5287dc6b800a8c5586a3cd78a392d290e3790ef1 | refs/heads/master | 2020-05-20T11:14:02.978717 | 2011-09-14T21:00:34 | 2011-09-14T21:00:34 | 2,385,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,469 | py | from zope.interface import implements
from Products.CMFCore.utils import getToolByName
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
# TODO: If you define any fields for the portlet configuration schema below
# do not forget to uncomment the following import
#from zope import schema
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from upfrontsystems.portlets.savedsearches.interfaces import ISavedSearch
# TODO: If you require i18n translation for any of your schema fields below,
# uncomment the following to import your package MessageFactory
#from upfrontsystems.portlets.savedsearches import SavedSearchesMessageFactory as _
class ISavedSearches(IPortletDataProvider):
    """Schema/marker interface for the saved-searches portlet.

    It inherits from IPortletDataProvider because for this portlet, the
    data that is being rendered and the portlet assignment itself are the
    same.
    """

    # TODO: Add any zope.schema fields here to capture portlet configuration
    # information. Alternatively, if there are no settings, leave this as an
    # empty interface - see also notes around the add form and edit form
    # below.

    # some_field = schema.TextLine(title=_(u"Some field"),
    #                              description=_(u"A field to use"),
    #                              required=True)
class Assignment(base.Assignment):
    """Portlet assignment.

    This is what is actually managed through the portlets UI and associated
    with columns.
    """

    implements(ISavedSearches)

    # The portlet has no configurable settings, so the constructor stores
    # nothing.
    def __init__(self):
        pass

    @property
    def title(self):
        """This property is used to give the title of the portlet in the
        "manage portlets" screen.
        """
        return "Saved Searches"
class Renderer(base.Renderer):
    """Portlet renderer.

    This is registered in configure.zcml. The referenced page template is
    rendered, and the implicit variable 'view' will refer to an instance
    of this class. Other methods can be added and referenced in the template.
    """

    render = ViewPageTemplateFile('savedsearches.pt')

    def getSavedSearches(self):
        """Return a dict of catalog results: 'My searches' (ISavedSearch
        objects under the member's home folder) and 'Shared searches'
        (ISavedSearch objects created by other users)."""
        searches = {}
        pmt = getToolByName(self.context, 'portal_membership')
        # Get the member home folder
        userid = pmt.getAuthenticatedMember().id
        pc = getToolByName(self.context, 'portal_catalog')
        if userid:
            homefolder = pmt.getHomeFolder(userid)
            # at first login the home folder does not exist yet.
            if not homefolder: return {}
            if 'savedsearches' in homefolder.objectIds():
                brains = pc(object_provides=ISavedSearch.__identifier__,
                            path='/'.join(homefolder.getPhysicalPath()))
                searches['My searches'] = brains
        # get all searches shared with the current user
        brains = pc(object_provides=ISavedSearch.__identifier__)
        brains = [b for b in brains if b.Creator != userid]
        if brains: searches['Shared searches'] = brains
        return searches
class AddForm(base.AddForm):
    """Portlet add form.

    This is registered in configure.zcml. The form_fields variable tells
    zope.formlib which fields to display. The create() method actually
    constructs the assignment that is being added.
    """

    form_fields = form.Fields(ISavedSearches)

    def create(self, data):
        # Build the assignment from the (currently empty) form data.
        return Assignment(**data)
# NOTE: If this portlet does not have any configurable parameters, you
# can use the next AddForm implementation instead of the previous.
# class AddForm(base.NullAddForm):
# """Portlet add form.
# """
# def create(self):
# return Assignment()
# NOTE: If this portlet does not have any configurable parameters, you
# can remove the EditForm class definition and delete the editview
# attribute from the <plone:portlet /> registration in configure.zcml
class EditForm(base.EditForm):
    """Portlet edit form.

    This is registered with configure.zcml. The form_fields variable tells
    zope.formlib which fields to display.
    """

    form_fields = form.Fields(ISavedSearches)
| [
"rijk.stofberg@gmail.com"
] | rijk.stofberg@gmail.com |
d616c9ac31f6b34ba0c1d64c0a527e44a5450332 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/layout/scene/zaxis/tickfont/_color.py | c9c85093d3ba20c31f8f2d30cc4ebd575af30377 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 449 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `color` property of
    `layout.scene.zaxis.tickfont` (generated plotly boilerplate)."""

    def __init__(
        self,
        plotly_name='color',
        parent_name='layout.scene.zaxis.tickfont',
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults for this property; callers may override via kwargs.
            edit_type='plot',
            role='style',
            **kwargs
        )
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
287a191c2572037ada7b9ea37bd0ecbd1f5e4bc0 | 856e9a8afcb81ae66dd998b0d2cc3556c9f315ea | /tests/plugins/test_git_filters.py | 2a4efe5e20573ce2e090ef58b7c78ce98a865449 | [
"MIT"
] | permissive | dexy/dexy | 1d5c999830de4663c05a09f4cd00b1628dfc8d46 | 323c1806e51f75435e11d2265703e68f46c8aef3 | refs/heads/develop | 2023-06-10T08:02:45.076551 | 2021-02-28T22:40:41 | 2021-02-28T22:40:41 | 1,506,989 | 141 | 34 | MIT | 2020-06-15T17:44:50 | 2011-03-21T14:48:28 | Python | UTF-8 | Python | false | false | 3,046 | py | from dexy.exceptions import UserFeedback
from dexy.filters.git import repo_from_path
from dexy.filters.git import repo_from_url
from dexy.filters.git import generate_commit_info
from tests.utils import assert_in_output
from tests.utils import runfilter
from tests.utils import tempdir
from nose.exc import SkipTest
import os
import json
REMOTE_REPO_HTTPS = "https://github.com/ananelson/dexy-templates"
PATH_TO_LOCAL_REPO = os.path.expanduser("~/dev/testrepo")
# TODO use subprocess to check out a repo to a temp dir, or have a repo in data
# dir, or use [gasp] submodules.
try:
import pygit2
import urllib
no_local_repo = not os.path.exists(PATH_TO_LOCAL_REPO)
try:
urllib.urlopen("http://google.com")
no_internet = False
except IOError:
no_internet = True
if no_local_repo:
SKIP = (True, "No local repo at %s." % PATH_TO_LOCAL_REPO)
elif no_internet:
SKIP = (True, "Internet not available.")
else:
SKIP = (False, None)
except ImportError:
SKIP = (True, "pygit2 not installed")
def skip():
if SKIP[0]:
raise SkipTest(SKIP[1])
skip()
def test_run_gitrepo():
with runfilter("repo", REMOTE_REPO_HTTPS) as doc:
assert len(doc.wrapper.nodes) > 20
def test_generate_commit_info():
repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
refs = repo.listall_references()
ref = repo.lookup_reference(refs[0])
commit = repo[ref.target]
commit_info = generate_commit_info(commit)
assert commit_info['author-name'] == "Ana Nelson"
assert commit_info['author-email'] == "ana@ananelson.com"
def test_git_commit():
with runfilter("gitcommit", REMOTE_REPO_HTTPS) as doc:
output = doc.output_data()
patches = json.loads(output['patches'])
assert output['author-name'] == "Ana Nelson"
assert output['author-email'] == "ana@ananelson.com"
#assert output['message'] == "Add README file."
#assert output['hex'] == "2f15837e64a70e4d34b924f6f8c371a266d16845"
def test_git_log():
assert_in_output("gitlog", PATH_TO_LOCAL_REPO,
"Add README file.")
def test_git_log_remote():
assert_in_output("gitlog", REMOTE_REPO_HTTPS,
"Rename")
def test_repo_from_url():
repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
assert remote.name == 'origin'
assert remote.url == REMOTE_REPO_HTTPS
def test_repo_from_path():
repo, remote = repo_from_path(PATH_TO_LOCAL_REPO)
assert ".git" in repo.path
#assert isinstance(repo.head, pygit2.Object)
# assert "README" in repo.head.message
def test_repo_from_invalid_path():
with tempdir():
try:
repo, remote = repo_from_path(".")
assert False
except UserFeedback as e:
assert "no git repository was found at '.'" in str(e)
def test_run_git():
with runfilter("git", PATH_TO_LOCAL_REPO) as doc:
doc.output_data()
def test_run_git_remote():
with runfilter("git", REMOTE_REPO_HTTPS) as doc:
doc.output_data()
| [
"ana@ananelson.com"
] | ana@ananelson.com |
6cc1dc4c8e6b81d2106b35562acc5a9448a76b64 | fd7a9faee9e2a6dbf89e54e1a7f228fcaf6911e1 | /tests/test_cnocr.py | 68b2776100394422842303886c7a0172e6ee7cb5 | [
"NCSA",
"Zlib",
"Intel",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-2-Clause-Views",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | wting861006/cnocr | f685c607e7ba84a8ead5a6a72301768c832a6320 | 9cb1cd57c2795007850bd25616880b15e4a3029d | refs/heads/master | 2023-09-04T18:36:30.822721 | 2021-11-05T12:03:23 | 2021-11-05T12:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,410 | py | # coding: utf-8
# Copyright (C) 2021, [Breezedeus](https://github.com/breezedeus).
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import pytest
import numpy as np
from PIL import Image
import Levenshtein
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
from cnocr import CnOcr
from cnocr.utils import read_img
from cnocr.consts import NUMBERS, AVAILABLE_MODELS
from cnocr.line_split import line_split
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
example_dir = os.path.join(root_dir, 'docs/examples')
CNOCR = CnOcr(model_name='densenet-s-fc', model_epoch=None)
SINGLE_LINE_CASES = [
('20457890_2399557098.jpg', ['就会哈哈大笑。3.0']),
('rand_cn1.png', ['笠淡嘿骅谧鼎皋姚歼蠢驼耳胬挝涯狗蒽孓犷']),
('rand_cn2.png', ['凉芦']),
('helloworld.jpg', ['Hello world!你好世界']),
]
MULTIPLE_LINE_CASES = [
('hybrid.png', ['o12345678']),
(
'multi-line_en_black.png',
[
'transforms the image many times. First, the image goes through many convolutional layers. In those',
'convolutional layers, the network learns new and increasingly complex features in its layers. Then the ',
'transformed image information goes through the fully connected layers and turns into a classification ',
'or prediction.',
],
),
(
'multi-line_en_white.png',
[
'This chapter is currently only available in this web version. ebook and print will follow.',
'Convolutional neural networks learn abstract features and concepts from raw image pixels. Feature',
'Visualization visualizes the learned features by activation maximization. Network Dissection labels',
'neural network units (e.g. channels) with human concepts.',
],
),
(
'multi-line_cn1.png',
[
'网络支付并无本质的区别,因为',
'每一个手机号码和邮件地址背后',
'都会对应着一个账户--这个账',
'户可以是信用卡账户、借记卡账',
'户,也包括邮局汇款、手机代',
'收、电话代收、预付费卡和点卡',
'等多种形式。',
],
),
(
'multi-line_cn2.png',
[
'当然,在媒介越来越多的情形下,',
'意味着传播方式的变化。过去主流',
'的是大众传播,现在互动性和定制',
'性带来了新的挑战——如何让品牌',
'与消费者更加互动。',
],
),
]
CASES = SINGLE_LINE_CASES + MULTIPLE_LINE_CASES
def print_preds(pred):
pred = [''.join(line_p) for line_p, _ in pred]
print("Predicted Chars:", pred)
def cal_score(preds, expected):
if len(preds) != len(expected):
return 0
total_cnt = 0
total_dist = 0
for real, (pred, _) in zip(expected, preds):
pred = ''.join(pred)
distance = Levenshtein.distance(real, pred)
total_dist += distance
total_cnt += len(real)
return 1.0 - float(total_dist) / total_cnt
@pytest.mark.parametrize('img_fp, expected', CASES)
def test_ocr(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
pred = ocr.ocr(img_fp)
print('\n')
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
img = read_img(img_fp)
pred = ocr.ocr(img)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
img = read_img(img_fp, gray=False)
pred = ocr.ocr(img)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
@pytest.mark.parametrize('img_fp, expected', SINGLE_LINE_CASES)
def test_ocr_for_single_line(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
pred = ocr.ocr_for_single_line(img_fp)
print('\n')
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = read_img(img_fp)
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = read_img(img_fp, gray=False)
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = np.array(Image.fromarray(img).convert('L'))
assert len(img.shape) == 2
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
img = np.expand_dims(img, axis=2)
assert len(img.shape) == 3 and img.shape[2] == 1
pred = ocr.ocr_for_single_line(img)
print_preds([pred])
assert cal_score([pred], expected) >= 0.8
@pytest.mark.parametrize('img_fp, expected', MULTIPLE_LINE_CASES)
def test_ocr_for_single_lines(img_fp, expected):
ocr = CNOCR
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
img_fp = os.path.join(root_dir, 'examples', img_fp)
img = read_img(img_fp)
if img.mean() < 145: # 把黑底白字的图片对调为白底黑字
img = 255 - img
line_imgs = line_split(np.squeeze(img, -1), blank=True)
line_img_list = [line_img for line_img, _ in line_imgs]
pred = ocr.ocr_for_single_lines(line_img_list)
print('\n')
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
line_img_list = [np.array(line_img) for line_img in line_img_list]
pred = ocr.ocr_for_single_lines(line_img_list)
print_preds(pred)
assert cal_score(pred, expected) >= 0.8
def test_cand_alphabet():
img_fp = os.path.join(example_dir, 'hybrid.png')
ocr = CnOcr(cand_alphabet=NUMBERS)
pred = ocr.ocr(img_fp)
pred = [''.join(line_p) for line_p, _ in pred]
print("Predicted Chars:", pred)
assert len(pred) == 1 and pred[0] == '012345678'
INSTANCE_ID = 0
@pytest.mark.parametrize('model_name', AVAILABLE_MODELS.keys())
def test_multiple_instances(model_name):
global INSTANCE_ID
print('test multiple instances for model_name: %s' % model_name)
img_fp = os.path.join(example_dir, 'hybrid.png')
INSTANCE_ID += 1
print('instance id: %d' % INSTANCE_ID)
cnocr1 = CnOcr(model_name, name='instance-%d' % INSTANCE_ID)
print_preds(cnocr1.ocr(img_fp))
INSTANCE_ID += 1
print('instance id: %d' % INSTANCE_ID)
cnocr2 = CnOcr(model_name, name='instance-%d' % INSTANCE_ID, cand_alphabet=NUMBERS)
print_preds(cnocr2.ocr(img_fp))
| [
"breezedeus@163.com"
] | breezedeus@163.com |
16b6ec1eb41eae6e49c1e21e23aa713adfdaf670 | b17d2e02d2f3f3051a67a4ee603871d0f5bc05af | /venv/Scripts/easy_install-script.py | c74436b53bd1954fd88e5e188be98b5652fdfd91 | [] | no_license | dcooper127/GeneSimulator | e0133f1d4ee64dcfa4d1b40f0e602967ba1d8e88 | 821a5b4687a467a76f16a6fbacec4081be5453d0 | refs/heads/master | 2022-11-21T14:18:23.519867 | 2020-07-29T17:12:34 | 2020-07-29T17:12:34 | 283,546,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!C:\Users\Roger\Documents\GitHub\GeneSimulator\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"dcooper127@gmail.com"
] | dcooper127@gmail.com |
d5761b937ac8fc57d7671c1b2ad867d7da734355 | acef45b3752b53ce6656fa20988c09e627db21d6 | /dowload ftp files and folder.py | ebf0e37a891112ecf008ef44f6427a9241f8d440 | [
"MIT"
] | permissive | RRJena/FTP-PYTHON | c94d57632e55e7fd935ba5bd165798588a276706 | 8667876d0891cd327682c44efea689e6b437edb1 | refs/heads/main | 2023-03-20T07:45:45.629325 | 2021-03-18T05:17:00 | 2021-03-18T05:17:00 | 348,949,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | import sys
import ftplib
import os
import time
server = "192.168.3.145"
user = "ADMIN"
password = "1234"
source = "/"
destination = "D:/"
interval = 0.05
ftp = ftplib.FTP(server)
ftp.login(user, password)
def downloadFiles(path, destination):
try:
ftp.cwd(path)
os.chdir(destination)
mkdir_p(destination[0:len(destination)-1] + path)
print ("Created: " + destination[0:len(destination)-1] + path)
except OSError:
pass
except ftplib.error_perm:
print ("Error: could not change to " + path)
sys.exit("Ending Application")
filelist=ftp.nlst()
for file in filelist:
time.sleep(interval)
try:
ftp.cwd(path + file + "/")
downloadFiles(path + file + "/", destination)
except ftplib.error_perm:
os.chdir(destination[0:len(destination)-1] + path)
try:
ftp.retrbinary("RETR " + file, open(os.path.join(destination + path, file),"wb").write)
print ("Downloaded: " + file)
except:
print ("Error: File could not be downloaded " + file)
return
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
print("ERROR)]")
downloadFiles(source, destination)
| [
"noreply@github.com"
] | RRJena.noreply@github.com |
88f8b1496a92b386d3f5362f2a3adf58da85382b | 8a699595e7f156b1ade42f6042900b3331831fbf | /src/transformers/models/swin/modeling_swin.py | 81e91a19dccac92e0612504cac8f5c8dba33da2b | [
"Apache-2.0"
] | permissive | stas00/transformers | ab654371a387c5883fc882dd0286177875d6d3b4 | 7c5d79912a21880ce13d77881940458e90d98917 | refs/heads/master | 2023-02-16T00:22:41.298155 | 2022-04-08T20:55:42 | 2022-04-08T20:55:42 | 278,214,696 | 6 | 0 | Apache-2.0 | 2022-01-28T18:39:00 | 2020-07-08T23:24:49 | Python | UTF-8 | Python | false | false | 51,021 | py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Swin Transformer model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_swin import SwinConfig
logger = logging.get_logger(__name__)

# General docstring
# The constants below are interpolated into auto-generated code-sample docstrings
# (see `add_code_sample_docstrings` imported above).
_CONFIG_FOR_DOC = "SwinConfig"
_FEAT_EXTRACTOR_FOR_DOC = "AutoFeatureExtractor"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/swin-tiny-patch4-window7-224"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/swin-tiny-patch4-window7-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

# Hub checkpoints known to provide pretrained weights for this architecture.
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/swin-tiny-patch4-window7-224",
    # See all Swin models at https://huggingface.co/models?filter=swin
]

# to_2tuple, drop_path, SwinPatchEmbeddings, SwinPatchMerging and SwinDropPath are from the timm library.
@dataclass
class SwinEncoderOutput(ModelOutput):
    """
    Swin encoder's outputs, with potential hidden states and attentions.

    Fields other than `last_hidden_state` are only populated when the corresponding
    `output_hidden_states` / `output_attentions` flag is enabled; otherwise they are `None`.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinModelOutput(ModelOutput):
    """
    Swin model's outputs that also contains a pooling of the last hidden states.

    Fields other than `last_hidden_state` and `pooler_output` are only populated when the
    corresponding `output_hidden_states` / `output_attentions` flag is enabled.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinMaskedImageModelingOutput(ModelOutput):
    """
    Swin masked image model outputs.

    `loss` is only present when `bool_masked_pos` was provided to the model; the optional
    hidden-state/attention fields follow the usual `output_*` flags.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MLM) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SwinImageClassifierOutput(ModelOutput):
    """
    Swin outputs for image classification.

    `loss` is only present when `labels` were provided to the model; the optional
    hidden-state/attention fields follow the usual `output_*` flags.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.vit.modeling_vit.to_2tuple
def to_2tuple(x):
    """Return `x` unchanged if it is already an iterable, otherwise the pair `(x, x)`."""
    return x if isinstance(x, collections.abc.Iterable) else (x, x)
def window_partition(input_feature, window_size):
    """
    Partitions the given input into windows.

    Takes a `(batch_size, height, width, num_channels)` feature map and returns a
    `(num_windows * batch_size, window_size, window_size, num_channels)` tensor of
    non-overlapping square windows, ordered row-major over the window grid.
    """
    batch_size, height, width, num_channels = input_feature.shape
    grid_rows = height // window_size
    grid_cols = width // window_size

    tiled = input_feature.view(batch_size, grid_rows, window_size, grid_cols, window_size, num_channels)
    # Bring the two window-grid axes next to each other before flattening.
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, num_channels)
def window_reverse(windows, window_size, height, width):
    """
    Merges windows to produce higher resolution features.

    Inverse of `window_partition`: takes `(num_windows * batch_size, window_size, window_size,
    num_channels)` windows and reassembles the `(batch_size, height, width, num_channels)`
    feature map.
    """
    grid_rows = height // window_size
    grid_cols = width // window_size
    batch_size = windows.shape[0] // (grid_rows * grid_cols)

    grid = windows.view(batch_size, grid_rows, grid_cols, window_size, window_size, -1)
    # Interleave the window-grid axes with the intra-window axes to restore spatial order.
    grid = grid.permute(0, 1, 3, 2, 4, 5).contiguous()
    return grid.view(batch_size, height, width, -1)
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Each sample in the batch is zeroed with probability `drop_prob`; when `scale_by_keep` is
    set, the surviving samples are rescaled by `1 / (1 - drop_prob)` so the expectation is
    preserved. Acts as the identity at inference time or when `drop_prob == 0`.
    """
    if not training or drop_prob == 0.0:
        return input

    keep_prob = 1.0 - drop_prob
    # One Bernoulli draw per sample, broadcast over every remaining dimension.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = input.new_empty(mask_shape).bernoulli_(keep_prob)
    if scale_by_keep and keep_prob > 0.0:
        mask.div_(keep_prob)
    return input * mask
class SwinEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings. Optionally, also the mask token.
    """

    def __init__(self, config, use_mask_token=False):
        super().__init__()

        self.patch_embeddings = SwinPatchEmbeddings(
            image_size=config.image_size,
            patch_size=config.patch_size,
            num_channels=config.num_channels,
            embed_dim=config.embed_dim,
        )
        self.patch_grid = self.patch_embeddings.grid_size

        # Learnable token substituted at masked patch positions during masked-image pretraining.
        if use_mask_token:
            self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim))
        else:
            self.mask_token = None

        # Optional absolute position table. NOTE(review): the table is sized
        # `num_patches + 1`, which does not broadcast against `num_patches` patch tokens
        # in `forward` if `use_absolute_embeddings` is enabled — kept as-is, TODO confirm.
        if config.use_absolute_embeddings:
            num_patches = self.patch_embeddings.num_patches
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
        else:
            self.position_embeddings = None

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values, bool_masked_pos=None):
        """
        Embed `pixel_values` into patch tokens; returns the tokens and the post-patching
        `(height, width)` dimensions. When `bool_masked_pos` is given, the corresponding
        patch tokens are replaced by the learned mask token.
        """
        embeddings, output_dimensions = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)

        if bool_masked_pos is not None:
            batch_size, seq_len, _ = embeddings.shape
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # Blend: keep original tokens where the mask is 0, mask token where it is 1.
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings

        return self.dropout(embeddings), output_dimensions
class SwinPatchEmbeddings(nn.Module):
    """
    Image to Patch Embedding.

    Splits a `(batch_size, num_channels, height, width)` image into non-overlapping patches
    with a strided convolution and flattens them into a `(batch_size, num_patches, embed_dim)`
    sequence. Inputs whose spatial size is not divisible by the patch size are padded first.
    """

    def __init__(self, image_size=224, patch_size=16, num_channels=3, embed_dim=768):
        super().__init__()
        image_size = to_2tuple(image_size)
        patch_size = to_2tuple(patch_size)
        grid_height = image_size[0] // patch_size[0]
        grid_width = image_size[1] // patch_size[1]

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = grid_height * grid_width
        self.grid_size = (grid_height, grid_width)
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def maybe_pad(self, pixel_values, height, width):
        """Right/bottom-pad so that `height` and `width` become divisible by the patch size."""
        pad_right = (-width) % self.patch_size[1]
        if pad_right:
            pixel_values = nn.functional.pad(pixel_values, (0, pad_right))
        pad_bottom = (-height) % self.patch_size[0]
        if pad_bottom:
            pixel_values = nn.functional.pad(pixel_values, (0, 0, 0, pad_bottom))
        return pixel_values

    def forward(self, pixel_values):
        _, _, height, width = pixel_values.shape
        pixel_values = self.maybe_pad(pixel_values, height, width)

        patches = self.projection(pixel_values)
        _, _, out_height, out_width = patches.shape

        # (batch, embed_dim, H', W') -> (batch, H'*W', embed_dim)
        embeddings = patches.flatten(2).transpose(1, 2)
        return embeddings, (out_height, out_width)
class SwinPatchMerging(nn.Module):
    """
    Patch Merging Layer: halves the spatial resolution by concatenating each 2x2 block of
    tokens channel-wise (4*dim), normalizing, and projecting down to 2*dim.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def maybe_pad(self, input_feature, height, width):
        """Bottom/right-pad by one row/column when `height` or `width` is odd."""
        if height % 2 or width % 2:
            padding = (0, 0, 0, width % 2, 0, height % 2)
            input_feature = nn.functional.pad(input_feature, padding)
        return input_feature

    def forward(self, input_feature, input_dimensions):
        height, width = input_dimensions
        # Sequence dimension is height * width.
        batch_size, _, num_channels = input_feature.shape

        feature_map = input_feature.view(batch_size, height, width, num_channels)
        feature_map = self.maybe_pad(feature_map, height, width)

        # Gather the four corners of every 2x2 block, each (batch, H/2, W/2, C).
        top_left = feature_map[:, 0::2, 0::2, :]
        bottom_left = feature_map[:, 1::2, 0::2, :]
        top_right = feature_map[:, 0::2, 1::2, :]
        bottom_right = feature_map[:, 1::2, 1::2, :]

        # (batch, H/2, W/2, 4*C) -> (batch, H/2 * W/2, 4*C)
        merged = torch.cat([top_left, bottom_left, top_right, bottom_right], -1)
        merged = merged.view(batch_size, -1, 4 * num_channels)

        merged = self.norm(merged)
        return self.reduction(merged)
class SwinDropPath(nn.Module):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Thin `nn.Module` wrapper around the module-level `drop_path` function: it stores the drop
    probability and forwards the module's train/eval state (`self.training`) to the function.
    """

    def __init__(self, drop_prob=None, scale_by_keep=True):
        # Zero-argument super() for consistency with every other module in this file
        # (the original used the legacy Python 2 style `super(SwinDropPath, self)`).
        super().__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, input):
        return drop_path(input, self.drop_prob, self.training, self.scale_by_keep)
class SwinSelfAttention(nn.Module):
    """
    Window-based multi-head self-attention with a learned relative position bias: every
    attention head gets one learnable bias per possible relative offset between two
    positions inside a window.
    """

    def __init__(self, config, dim, num_heads):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention " f"heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = to_2tuple(config.window_size)

        # One learnable bias per head for each of the (2*Wh - 1) * (2*Ww - 1) possible
        # relative offsets inside a (Wh, Ww) window.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # (2, Wh, Ww); meshgrid default indexing
        coords_flatten = torch.flatten(coords, 1)  # (2, Wh*Ww)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # (2, Wh*Ww, Wh*Ww)
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # (Wh*Ww, Wh*Ww, 2)
        # Shift both offset axes to start at 0, then collapse the 2-D offset into a single
        # flat index into `relative_position_bias_table`.
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        # Registered as a buffer (not a parameter): fixed lookup table, moved with the module.
        self.register_buffer("relative_position_index", relative_position_index)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
    ):
        # Here `dim` is the sequence length (tokens per window); the leading dimension is
        # presumably batch * num_windows — see the mask reshape below. TODO confirm.
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Look up the per-head relative position bias for every (query, key) pair and
        # add it to the raw scores; shape becomes (num_heads, seq, seq) before broadcast.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )

        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in SwinModel forward() function).
            # The scores are temporarily unfolded so the per-window additive mask broadcasts
            # over the batch, then folded back.
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
class SwinSelfOutput(nn.Module):
    """Projects the attention context back to `dim` and applies dropout.

    Unlike BERT-style output modules there is no residual add here: the
    `input_tensor` argument is accepted for interface parity but unused
    (the residual connection is applied in SwinLayer).
    """

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        return self.dropout(projected)
class SwinAttention(nn.Module):
    """Window attention block: `SwinSelfAttention` followed by the output
    projection (`SwinSelfOutput`), with support for pruning attention heads."""

    def __init__(self, config, dim, num_heads):
        super().__init__()
        self.self = SwinSelfAttention(config, dim, num_heads)
        self.output = SwinSelfOutput(config, dim)
        # Head indices already removed by prune_heads(); prevents double-pruning.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given head indices from the query/key/value projections
        and the corresponding input columns of the output dense layer."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # The output projection consumes the concatenated heads, so its *input*
        # dimension (dim=1) is the one that shrinks.
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False):
        """Returns (attention_output,) plus attention probabilities when requested."""
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class SwinIntermediate(nn.Module):
    """First MLP projection: expands `dim` by `config.mlp_ratio`, then applies
    the configured activation (looked up in ACT2FN when given as a string)."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class SwinOutput(nn.Module):
    """Second MLP projection: contracts the expanded hidden size
    (`mlp_ratio * dim`) back to `dim`, followed by dropout."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states):
        return self.dropout(self.dense(hidden_states))
class SwinLayer(nn.Module):
    """One Swin transformer block: (shifted-)window multi-head self-attention
    followed by a 2-layer MLP, each preceded by LayerNorm and wrapped in a
    residual connection (the attention residual optionally uses stochastic depth).

    `shift_size == 0` gives regular window attention (W-MSA); `shift_size > 0`
    gives the shifted variant (SW-MSA) used on alternating blocks of a stage.
    """

    def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
        super().__init__()
        # Kept for API parity with other HF layers; not used in this block.
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.input_resolution = input_resolution
        # May zero the shift / shrink the window for small feature maps.
        self.set_shift_and_window_size(input_resolution)
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = SwinAttention(config, dim, num_heads)
        # Stochastic depth on the attention residual branch (identity when rate is 0).
        self.drop_path = SwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = SwinIntermediate(config, dim)
        self.output = SwinOutput(config, dim)

    def set_shift_and_window_size(self, input_resolution):
        # Re-invoked in forward() because the runtime resolution can differ from
        # the one supplied at construction time.
        if min(input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(input_resolution)

    def get_attn_mask(self, height, width):
        """Build the SW-MSA attention mask, or return None when shift_size == 0.

        After the cyclic shift, pixels that share a window but originated from
        different regions get a -100 additive bias so the softmax effectively
        zeroes attention between them.
        """
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            img_mask = torch.zeros((1, height, width, 1))
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            # Label each of the 3x3 shift regions with a distinct id.
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            # Non-zero pairwise difference => pixels from different regions => mask out.
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        """Right/bottom-pad the (batch, H, W, C) map so H and W become multiples
        of window_size; returns the padded tensor and the pad tuple used."""
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
        # hidden_states: (batch, height*width, channels); input_dimensions: (height, width).
        self.set_shift_and_window_size(input_dimensions)
        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states
        hidden_states = self.layernorm_before(hidden_states)
        hidden_states = hidden_states.view(batch_size, height, width, channels)
        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
        _, height_pad, width_pad, _ = hidden_states.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states
        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(height_pad, width_pad)
        if attn_mask is not None:
            attn_mask = attn_mask.to(hidden_states_windows.device)
        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )
        attention_output = attention_outputs[0]
        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows
        # Strip any padding added by maybe_pad before flattening back to a sequence.
        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()
        attention_windows = attention_windows.view(batch_size, height * width, channels)
        # Residual 1 (with stochastic depth), then pre-norm MLP and residual 2.
        hidden_states = shortcut + self.drop_path(attention_windows)
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.output(layer_output)
        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs
class SwinStage(nn.Module):
    """One resolution stage: `depth` SwinLayers alternating W-MSA / SW-MSA,
    optionally followed by a patch-merging downsample layer."""

    def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        # Even-indexed blocks use regular windows; odd-indexed blocks shift the
        # windows by half the window size (SW-MSA).
        self.blocks = nn.ModuleList(
            [
                SwinLayer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                )
                for i in range(depth)
            ]
        )
        # NOTE(review): the per-block `drop_path` schedule passed in is not
        # forwarded to SwinLayer (which reads config.drop_path_rate directly) —
        # confirm whether that is intentional.

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None
        self.pointing = False

    def forward(self, hidden_states, input_dimensions, head_mask=None, output_attentions=False):
        """Returns (hidden_states, (height, width, out_height, out_width)) plus,
        optionally, the attention tensors of the *last* block only."""
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
            hidden_states = layer_outputs[0]
        if self.downsample is not None:
            # Patch merging halves each spatial dimension (rounding up).
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(layer_outputs[0], input_dimensions)
        else:
            output_dimensions = (height, width, height, width)
        stage_outputs = (hidden_states, output_dimensions)
        if output_attentions:
            # layer_outputs still refers to the final block's outputs here.
            stage_outputs += layer_outputs[1:]
        return stage_outputs
class SwinEncoder(nn.Module):
    """Stack of SwinStages; channel width doubles and spatial resolution halves
    at each stage boundary (SwinPatchMerging on all but the last stage)."""

    def __init__(self, config, grid_size):
        super().__init__()
        self.num_layers = len(config.depths)
        self.config = config
        # Linearly increasing stochastic-depth rates, one per block across all stages.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.layers = nn.ModuleList(
            [
                SwinStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=SwinPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        input_dimensions,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Run all stages over (batch, height*width, embed_dim) patch embeddings.

        input_dimensions is the (height, width) of the patch grid. Returns a
        SwinEncoderOutput (or tuple when return_dict is False) with the final
        hidden states plus, optionally, per-stage hidden states — in sequence
        form and reshaped to (batch, channels, height, width) — and attentions.
        """
        all_input_dimensions = ()
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)
        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                # Trade compute for memory: recompute the stage during backward.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
                )
            else:
                layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions)
            hidden_states = layer_outputs[0]
            output_dimensions = layer_outputs[1]
            # The stage reports its (possibly downsampled) output resolution.
            input_dimensions = (output_dimensions[-2], output_dimensions[-1])
            all_input_dimensions += (input_dimensions,)
            if output_hidden_states:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            if output_attentions:
                all_self_attentions += layer_outputs[2:]
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return SwinEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )
class SwinPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SwinConfig
    base_model_prefix = "swin"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggled recursively by PreTrainedModel.gradient_checkpointing_enable/disable.
        if isinstance(module, SwinEncoder):
            module.gradient_checkpointing = value
# Docstring templates consumed by the @add_start_docstrings /
# @add_start_docstrings_to_model_forward decorators on the model classes below.
SWIN_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`SwinConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Shared description of the forward() arguments for all Swin model heads.
SWIN_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See
[`AutoFeatureExtractor.__call__`] for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Swin Model transformer outputting raw hidden-states without any specific head on top.",
    SWIN_START_DOCSTRING,
)
class SwinModel(SwinPreTrainedModel):
    """Backbone: patch embeddings -> SwinEncoder -> final LayerNorm, with an
    optional adaptive-average pooler over the sequence dimension."""

    def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
        super().__init__(config)
        self.config = config
        self.num_layers = len(config.depths)
        # Channel width of the last stage: embed_dim doubled once per stage boundary.
        self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
        self.embeddings = SwinEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = SwinEncoder(config, self.embeddings.patch_grid)
        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        # NOTE(review): SwinEncoder exposes `layers` (stages), not `layer`, and a
        # stage holds several blocks each with its own `.attention` — this lookup
        # looks like it would fail if called; confirm against upstream.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SwinModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        bool_masked_pos=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, len(self.config.depths))
        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = None
        if self.pooler is not None:
            # Average over the sequence dimension -> (batch, num_features).
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)
        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output
        return SwinModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )
@add_start_docstrings(
    "Swin Model with a decoder on top for masked image modeling, as proposed in `SimMIM <https://arxiv.org/abs/2111.09886>`__.",
    SWIN_START_DOCSTRING,
)
class SwinForMaskedImageModeling(SwinPreTrainedModel):
    """SimMIM head: reconstructs pixel values from the backbone features with a
    1x1 conv + PixelShuffle decoder; loss is L1 over the masked patches only."""

    def __init__(self, config):
        super().__init__(config)
        self.swin = SwinModel(config, add_pooling_layer=False, use_mask_token=True)
        num_features = int(config.embed_dim * 2 ** (config.num_layers - 1))
        # 1x1 conv expands channels to encoder_stride^2 * 3, then PixelShuffle
        # rearranges them into an upsampled 3-channel image.
        self.decoder = nn.Sequential(
            nn.Conv2d(in_channels=num_features, out_channels=config.encoder_stride**2 * 3, kernel_size=1),
            nn.PixelShuffle(config.encoder_stride),
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=SwinMaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        bool_masked_pos=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoFeatureExtractor, SwinForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
        >>> model = SwinForMaskedImageModeling.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.logits
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.swin(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        # Assumes a square feature map (sequence_length is a perfect square).
        height = width = int(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)
        masked_im_loss = None
        if bool_masked_pos is not None:
            # Expand the patch-level mask to pixel resolution, then average the
            # per-pixel L1 loss over masked pixels only.
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            # 1e-5 guards against division by zero when no patch is masked.
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output
        return SwinMaskedImageModelingOutput(
            loss=masked_im_loss,
            logits=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )
@add_start_docstrings(
    """
    Swin Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    SWIN_START_DOCSTRING,
)
class SwinForImageClassification(SwinPreTrainedModel):
    """Classification head: linear layer over SwinModel's pooled output; the
    loss function is chosen from config.problem_type (inferred on first use)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.swin = SwinModel(config)
        # Classifier head
        self.classifier = (
            nn.Linear(self.swin.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        processor_class=_FEAT_EXTRACTOR_FOR_DOC,
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=SwinImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        head_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.swin(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Index 1 is the pooled output in both tuple and ModelOutput form.
        pooled_output = outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype,
            # then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SwinImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )
| [
"noreply@github.com"
] | stas00.noreply@github.com |
bbe8b1a11a7c059a9d2379316136818ac1ac017a | 2724474441c6d349035aba6448a0e909f6ef2236 | /python/vaccine_notifier.py | e86df1682ec167f63feee9194ecd88986182e864 | [
"MIT"
] | permissive | abishekvashok/vaccine-checker | 90e8de23702f4acf1f747e0f876f79ec5d3d5d68 | 1c193db0ff9c13bfc30ffc89ef5ea5661f7b504a | refs/heads/main | 2023-05-12T04:57:19.667518 | 2021-06-03T17:17:32 | 2021-06-03T17:17:32 | 372,177,847 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | import time
import requests
from playsound import playsound
# --- Interactive setup: ask the user for search mode, location, date and age ---
print("Select mode: 1 - Pincode, 2 - District: ", end="")
mode = int(input())
pincode = ""
district = ""
district_code = ""
if mode == 1:
    print("Input Pincode: ", end="")
    pincode = input()
elif mode == 2:
    print("Enter District Name: ", end="")
    district = input()
    # Todo district mapping
    district_code = "307"
else:
    # Bug fix: previously execution continued with an empty pincode and
    # district_code, producing a malformed API URL; abort instead.
    print("Invalid choice!")
    raise SystemExit(1)
print("Enter date in DD-MM-YYYY format: ", end="")
date = input()
print("Enter Age: ", end="")
age = int(input())

# CoWIN public availability endpoints: [0] district calendar, [1] pincode search.
urls = [
    "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id="
    + district_code + "&date=" + date,
    "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="
    + pincode + "&date=" + date,
]

# Browser-like headers — the public API presumably filters requests without a
# plausible User-Agent/Origin (TODO confirm).
header = {
    "Accept": "application/json",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
    "Origin": "https://www.cowin.gov.in",
    "Sec-Fetch-Site": "cross-site",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Dest": "empty",
    "Referer": "https://www.cowin.gov.in/",
    "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
}
print("Checking for open slots. Will noitfy when available...")
def makeRequest():
    """Query the CoWIN endpoint matching the selected mode and scan its sessions.

    Bug fix: the original always queried urls[0] (the district calendar), so
    pincode mode (mode == 1) never used the pincode URL at all.
    """
    if mode == 2:
        # calendarByDistrict: {"centers": [{..., "sessions": [...]}, ...]}
        response = requests.get(urls[0], headers=header)
        for center in response.json()["centers"]:
            checkEachSession(center["sessions"])
    else:
        # findByPin: {"sessions": [...]} — a flat list, no "centers" wrapper.
        response = requests.get(urls[1], headers=header)
        checkEachSession(response.json().get("sessions", []))
def checkEachSession(sessions):
    """Print and play an audio alert for every open session the user's age qualifies for."""
    for session in sessions:
        age_ok = session["min_age_limit"] <= age
        if age_ok and session["available_capacity"] > 0:
            print("We have a vaccine slot available!")
            playsound('./alert.mp3')
# Poll forever; sleep between requests so the public API isn't hammered.
while(True):
    makeRequest()
    time.sleep(5)
"abishekvashok@gmail.com"
] | abishekvashok@gmail.com |
636e6b6d7f94b6edaf31d219a9a1fa90fe59c440 | 082dec15b5129ea3cd65e23f3a3144e46dbf2f83 | /pset7/houses/roster.py | 5a1459f67a46e95af15f7bd509a658b414465fc1 | [] | no_license | 0ssamaak0/CS50X | 8ae73377ef09d8c2354e73f1bffc895c73f89ab7 | 452c7a11e1ff6de3549b3a8d35aadc716694e807 | refs/heads/master | 2023-05-05T11:13:12.339273 | 2021-05-26T20:12:30 | 2021-05-26T20:12:30 | 371,152,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # TODO
from sys import argv, exit
import sqlite3
# Checking the right number of command line arguments (exactly one: the house name).
if len(argv) != 2:
    print("Enter a valid number of command line arguments")
    exit(1)

connect = sqlite3.connect("students.db")
curs = connect.cursor()
# Parameterized query (`?` placeholder) — the original interpolated argv[1]
# directly into the SQL string, which is an injection vector and breaks on
# names containing quotes.
curs.execute(
    "SELECT first, middle, last, birth FROM students WHERE house = ? ORDER BY last, first",
    (argv[1],),
)

# Print each student, omitting the middle name when it is NULL.
for first, middle, last, birth in curs.fetchall():
    if middle is None:
        print(f"{first} {last}, born {birth}")
    else:
        print(f"{first} {middle} {last}, born {birth}")

# Read-only query: commit is a no-op but kept for parity with the original.
connect.commit()
connect.close()
"0ssamaak0@gmail.com"
] | 0ssamaak0@gmail.com |
f8631d259c1277c1890704d217d2a61336e0cbbc | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/scaleform/daapi/view/lobby/customizationfilter_popover.py | 05360ff3fd0c540a1aff0057dc445aea0b6e0707 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,724 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/customization/filter_popover.py
from constants import IGR_TYPE
from debug_utils import LOG_DEBUG
from gui import GUI_SETTINGS
from gui.game_control import getIGRCtrl
from gui.shared.formatters import text_styles, icons
from gui.shared.utils.functions import makeTooltip
from gui.Scaleform.locale.VEHICLE_CUSTOMIZATION import VEHICLE_CUSTOMIZATION
from gui.Scaleform.daapi.view.meta.CustomizationFiltersPopoverMeta import CustomizationFiltersPopoverMeta
from helpers.i18n import makeString as _ms
from gui.customization import g_customizationController
from gui.customization.shared import CUSTOMIZATION_TYPE, getBonusIcon16x16, FILTER_TYPE, QUALIFIER_TYPE_INDEX, PURCHASE_TYPE, DEFAULT_GROUP_VALUE, EMBLEM_IGR_GROUP_NAME
# Crew-bonus tooltip strings, ordered to match QUALIFIER_TYPE_INDEX
# (the two are zipped together in _getBonusTypeVO).
_BONUS_TOOLTIPS = (VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_ENTIRECREW,
 VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_COMMANDER,
 VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_AIMER,
 VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_DRIVER,
 VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_RADIOMAN,
 VEHICLE_CUSTOMIZATION.CUSTOMIZATION_TOOLTIP_BONUS_LOADER)
# Labels for the purchase-type filter, ordered to match PURCHASE_TYPE.ALL
# (zipped together in _getPurchaseTypeVO).
_PURCHASE_TYPE_LABELS = (VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_BUY, VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_MISSIONS, icons.premiumIgrSmall())
def _getPurchaseTypeVO():
    """Build the 'ways to buy' filter entries.

    The IGR entry is omitted entirely when IGR is disabled in GUI settings,
    and shown disabled (with an explanatory tooltip) unless the player is in
    a premium IGR room.
    """
    entries = []
    for purchaseType, label in zip(PURCHASE_TYPE.ALL, _PURCHASE_TYPE_LABELS):
        entry = {'label': label,
         'enabled': True}
        if purchaseType == PURCHASE_TYPE.IGR:
            if not GUI_SETTINGS.igrEnabled:
                continue
            entry['enabled'] = getIGRCtrl().getRoomType() == IGR_TYPE.PREMIUM
            entry['tooltipDisabled'] = makeTooltip(_ms(VEHICLE_CUSTOMIZATION.FILTER_TOOLTIP_IGR_DISABLED_HEADER), _ms(VEHICLE_CUSTOMIZATION.FILTER_TOOLTIP_IGR_DISABLED_BODY, icon=_ms(icons.premiumIgrSmall())))
        entries.append(entry)

    return entries
def _getBonusTypeVO(selectedBonuses):
    """Build one filter entry per crew-qualifier bonus type.

    selectedBonuses maps bonus type -> bool (whether it is currently selected).
    """
    entries = []
    for bonusType, tooltipText in zip(QUALIFIER_TYPE_INDEX, _BONUS_TOOLTIPS):
        header = _ms(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_BONUSDESCRIPTION_HEADER, bonus=_ms(tooltipText))
        body = _ms(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_BONUSDESCRIPTION_BODY, bonus=_ms(tooltipText))
        entries.append({'selected': selectedBonuses[bonusType],
         'value': getBonusIcon16x16(bonusType),
         'tooltip': makeTooltip(header, body)})

    return entries
class FilterPopover(CustomizationFiltersPopoverMeta):
def __init__(self, ctx = None):
super(FilterPopover, self).__init__()
self.__filter = None
self.__groupsMap = []
return
def changeFilter(self, filterGroup, filterGroupValue):
applyFilter = True
if filterGroup == FILTER_TYPE.GROUP:
filterGroupValue = self.__groupsMap[self.__filter.currentType][filterGroupValue][0]
if self.__filter.currentGroup == filterGroupValue:
applyFilter = False
elif filterGroup == FILTER_TYPE.PURCHASE_TYPE:
filterGroupValue = PURCHASE_TYPE.ALL[filterGroupValue]
if self.__filter.purchaseType == filterGroupValue:
applyFilter = False
elif self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE:
self.__switchIGRFilter(filterGroupValue == PURCHASE_TYPE.IGR)
if applyFilter:
self.__filter.set(filterGroup, filterGroupValue)
self.as_enableDefBtnS(not self.__filter.isDefaultFilterSet())
def setDefaultFilter(self):
self.__filter.setDefault()
updateVO = self.__createUpdateVO()
self.as_setStateS({'bonusTypeSelected': updateVO['bonusTypeSelected'],
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'purchaseTypeSelectedIndex': updateVO['purchaseTypeSelectedIndex'],
'enableGroupFilter': updateVO['enableGroupFilter']})
self.as_enableDefBtnS(False)
def _populate(self):
super(FilterPopover, self)._populate()
self.__filter = g_customizationController.filter
self.__groupsMap = [[('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)], [('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)], [('all_groups', VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_ALL)]]
for cType in CUSTOMIZATION_TYPE.ALL:
for groupName, userName in self.__filter.availableGroupNames[cType]:
if groupName != EMBLEM_IGR_GROUP_NAME and groupName != 'IGR':
self.__groupsMap[cType].append((groupName, userName))
self.as_setInitDataS(self.__createInitialVO())
self.as_enableDefBtnS(not self.__filter.isDefaultFilterSet())
def _dispose(self):
self.__filter = None
self.__groupsMap = []
super(FilterPopover, self)._dispose()
return
def __createInitialVO(self):
isTypeNotCamouflage = self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE
groupsUserNames = []
for _, groupName in self.__groupsMap[self.__filter.currentType]:
groupsUserNames.append(groupName)
updateVO = self.__createUpdateVO()
return {'lblTitle': text_styles.highTitle(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_TITLE),
'lblBonusType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_BONUSTYPE_TITLE),
'lblCustomizationType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GROUPS_TITLE),
'lblPurchaseType': text_styles.standard(VEHICLE_CUSTOMIZATION.FILTER_POPOVER_WAYSTOBUY_TITLE),
'btnDefault': VEHICLE_CUSTOMIZATION.FILTER_POPOVER_GETDEFAULTSETTINGS,
'bonusTypeId': FILTER_TYPE.QUALIFIER,
'bonusType': _getBonusTypeVO(self.__filter.selectedBonuses),
'customizationBonusTypeVisible': isTypeNotCamouflage,
'enableGroupFilter': updateVO['enableGroupFilter'],
'customizationTypeId': FILTER_TYPE.GROUP,
'customizationType': groupsUserNames,
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'customizationTypeVisible': isTypeNotCamouflage,
'bonusTypeDisableTooltip': makeTooltip(VEHICLE_CUSTOMIZATION.TOOLTIP_FILTER_GROUPS_DISABLED_HEADER, VEHICLE_CUSTOMIZATION.TOOLTIP_FILTER_GROUPS_DISABLED_BODY),
'refreshTooltip': makeTooltip(VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_REFRESH_HEADER, VEHICLE_CUSTOMIZATION.CUSTOMIZATION_FILTERPOPOVER_REFRESH_BODY),
'purchaseTypeId': FILTER_TYPE.PURCHASE_TYPE,
'purchaseType': _getPurchaseTypeVO(),
'purchaseTypeSelectedIndex': PURCHASE_TYPE.ALL.index(self.__filter.purchaseType)}
def __createUpdateVO(self):
groupsList = []
bonusTypeSelected = []
for bonusType in QUALIFIER_TYPE_INDEX:
bonusTypeSelected.append(self.__filter.selectedBonuses[bonusType])
for group, _ in self.__groupsMap[self.__filter.currentType]:
groupsList.append(group)
if self.__filter.currentType != CUSTOMIZATION_TYPE.CAMOUFLAGE:
groupsSelectIndex = groupsList.index(self.__filter.currentGroup)
enableGroupFilter = self.__filter.isGroupFilterEnabled()
else:
groupsSelectIndex = 0
enableGroupFilter = True
return {'bonusTypeSelected': bonusTypeSelected,
'groupsSelectIndex': groupsSelectIndex,
'purchaseTypeSelectedIndex': PURCHASE_TYPE.ALL.index(self.__filter.purchaseType),
'enableGroupFilter': enableGroupFilter}
def __switchIGRFilter(self, disableGroupFilter):
""" Turn on/off group filter.
When IGR (purchase type) is selected, group filter has to become disabled, and
it has to change it's value to 'All groups', but when user selects another purchase
type, previous group value should be restored.
:param disableGroupFilter: enable or disable group filter.
"""
if self.__filter.isGroupFilterEnabled() == disableGroupFilter:
self.__filter.toggleGroupFilterEnabled()
if disableGroupFilter:
groupToSet = DEFAULT_GROUP_VALUE
else:
groupToSet = self.__filter.currentGroup
self.__filter.set(FILTER_TYPE.GROUP, groupToSet)
updateVO = self.__createUpdateVO()
self.as_setStateS({'bonusTypeSelected': updateVO['bonusTypeSelected'],
'customizationTypeSelectedIndex': updateVO['groupsSelectIndex'],
'purchaseTypeSelectedIndex': updateVO['purchaseTypeSelectedIndex'],
'enableGroupFilter': updateVO['enableGroupFilter']}) | [
"m4rtijn@gmail.com"
] | m4rtijn@gmail.com |
24caadb1da40e28f0a1b19027c888aef7f29a004 | 8983b23a25fcc3739fc977850d242ebcc64434ce | /jqurity/urls.py | a1b034bb4034894993d2bac31814d1ce65d4a60f | [] | no_license | jakiiii/django-blog | 595d834c44c4b45817091da812b90b6fa7a34aab | 260aa75b89cd9875a2e0ab1e0f9588dffd8f5281 | refs/heads/master | 2020-03-29T19:53:57.752279 | 2018-09-25T15:39:21 | 2018-09-25T15:42:39 | 150,286,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | """jqurity URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('accounts/', include('accounts.urls'))
]
if settings.DEBUG:
urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"me.jaki@outlook.com"
] | me.jaki@outlook.com |
39bb3f70f110e62019f833c85310fbc3289e577e | 9472e00cf37233604ee403de3c1fb988b3ba4709 | /final.py | b58c12ba4914a30a3357c9cd3a7d192ad5fa5912 | [] | no_license | samialabri/KheperaImageGrabber | 2b4c6b92366bcabf4434c50f2d43aa40ea5aac46 | 97a68a5e8151269aaa9a28ab1938738225bfc593 | refs/heads/master | 2021-01-19T11:02:47.698658 | 2015-04-16T12:40:31 | 2015-04-16T12:40:31 | 34,039,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import socket
import os
from time import sleep
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = 2015
listen_addr = ("",port)
s.bind(listen_addr)
while 1:
data , addr = s.recvfrom(1024)
if data == "hello":
os.system("v4l2grab -d /dev/video1 -o image.jpg -W 1280 -H 720 -q 100")
| [
"sami.alabri@gmail.com"
] | sami.alabri@gmail.com |
2bcf707780928798fb6fd13449afaa3099617b48 | 6973340475bcdde077698f36dc6638eef73624f1 | /pico2019/web/picobrowser/solve.py | 8b41f84f5cdd7af1ac5872898ddbcda126561502 | [] | no_license | BigB00st/ctf-solutions | a2e53af3fe1c601060d1752974f16a140aa0bb3c | 7c1b43f086e22c70e3f52420504fe4307b842f5c | refs/heads/master | 2021-05-21T02:53:47.123611 | 2020-09-28T10:23:42 | 2020-09-28T10:23:42 | 252,509,468 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import requests
url = "https://2019shell1.picoctf.com/problem/37829/flag"
h = { "User-Agent": "picobrowser" }
print(requests.get(url, headers=h).text) | [
"botzer.2002@gmail.com"
] | botzer.2002@gmail.com |
061070f1544c8426171b8badf96164eadb2e5abf | e90ab3d7a6c4dde0778c920fbeac6fe38d7f1fb3 | /test.py | d94d4eae4643e8cbc35479111b300e5f9fff0f74 | [] | no_license | aj00200/drove | 8693324269164254934732edee3993304e559643 | e657feb9f03b278b2f9325f24c7af0be0c844626 | refs/heads/master | 2021-01-22T12:02:30.498798 | 2011-08-15T01:13:18 | 2011-08-15T01:13:18 | 2,205,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #! /usr/bin/env python
import unittest
# Import test classes
from unittests.libs.api.query import *
from unittests.libs.api.recent_changes import *
# Run the tests
if (__name__ == '__main__'):
unittest.main()
| [
"aj0020@aj00200.org"
] | aj0020@aj00200.org |
6e87334c09602898afadbb226ee7ed2910781cf9 | e56faac16d061f4b00b3b6087535f22e2156ced1 | /ros_ws/devel/lib/python2.7/dist-packages/intro_pkg1/srv/_FloatIO.py | b49543b8002e5a2e19f76c7917a63d90a21a250f | [
"MIT"
] | permissive | TheProjectsGuy/Learning-ROS | f305e4f65fbc22bf01680ecc931b2d47b8bb149a | 612f8eeeed0d3308cfff9084dbf7dda4732ec1ae | refs/heads/master | 2022-04-01T21:50:33.664235 | 2022-02-19T06:29:24 | 2022-02-19T06:29:24 | 160,826,721 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,221 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from intro_pkg1/FloatIORequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FloatIORequest(genpy.Message):
_md5sum = "235c8ad2b88a9725a5a2ad2a9541a007"
_type = "intro_pkg1/FloatIORequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
float64 input
"""
__slots__ = ['input']
_slot_types = ['float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
input
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FloatIORequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.input is None:
self.input = 0.
else:
self.input = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_d().pack(self.input))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 8
(self.input,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_d().pack(self.input))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 8
(self.input,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from intro_pkg1/FloatIOResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FloatIOResponse(genpy.Message):
_md5sum = "5dd87a43ba76105996c6c8cafb738498"
_type = "intro_pkg1/FloatIOResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
float64 output
"""
__slots__ = ['output']
_slot_types = ['float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
output
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FloatIOResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.output is None:
self.output = 0.
else:
self.output = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_d().pack(self.output))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 8
(self.output,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_d().pack(self.output))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 8
(self.output,) = _get_struct_d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_d = None
def _get_struct_d():
global _struct_d
if _struct_d is None:
_struct_d = struct.Struct("<d")
return _struct_d
class FloatIO(object):
_type = 'intro_pkg1/FloatIO'
_md5sum = '6c59364dede48a4429627e3e0efa7049'
_request_class = FloatIORequest
_response_class = FloatIOResponse
| [
"123avneesh@gmail.com"
] | 123avneesh@gmail.com |
e60f86108b5843d33444572629ed26e539397441 | a05f2d84a3c418c16976a04426eed58488362431 | /all iterable.py | 5e2d08a0a768539239064f10550c81cb8e22d81c | [] | no_license | codeproy/newPython | 8819180d81740c0a7c10f8cb18c1cdb9407ffa93 | 4f2097949d8f8de714aaf77a5d1f145ec9938ef4 | refs/heads/master | 2023-05-08T20:25:27.596265 | 2021-05-23T04:48:01 | 2021-05-23T04:48:01 | 69,076,074 | 0 | 0 | null | 2021-05-23T04:14:38 | 2016-09-24T03:01:19 | Python | UTF-8 | Python | false | false | 224 | py | # all iterable
def all(arr):
for elem in arr:
if not elem:
return False
break
else:
return True
a = [1,2,0,4]
if all(a):
print ("true")
else:
print ("false")
| [
"partho.ece@gmail.com"
] | partho.ece@gmail.com |
384339a14d72cafb57e028d6b4112d06e5c27362 | 5774101105b47d78adb7a57eefdfa21502bbd70c | /project-follow/MadKing-master/assets/serializers.py | d39be9254ac56c2e85c54ce840290990ba81359f | [] | no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 736 | py | #_*_coding:utf-8_*_
__author__ = 'jieli'
from assets.myauth import UserProfile
from assets import models
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = UserProfile
# fields = ('url', 'name', 'email')
fields = ('url', 'name', 'email','is_admin')
class AssetSerializer(serializers.ModelSerializer):
class Meta:
model = models.Asset
#加上这个,可以同时显示server中的详细的信息;
depth=2
fields = ('name', 'sn','server','networkdevice')
class ServerSerializer(serializers.ModelSerializer):
class Meta:
model = models.Server
#fields = ('name', 'sn','server') | [
"zhlthunder@163.com"
] | zhlthunder@163.com |
3d3b2d37cfbcbe305d8daf9e4d945a5ad7e17b9a | 72f836cf67d15a8fb883da5bfa2d5b2f7a610ef1 | /encodercontrol03.py | 770176085ae2e84851715944d8f7c57b8a9e9207 | [] | no_license | PatanSanaulla/AutonomousVehicleEncoderCoding | b708a13c81d8585c4ef0358c920d869de9b0d4c8 | 65810a46d67198d7c59b27576be3431142037b74 | refs/heads/master | 2023-03-30T20:33:46.965698 | 2021-04-10T02:39:36 | 2021-04-10T02:39:36 | 356,378,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | import RPi.GPIO as gpio
import numpy as np
import time
##### INit the pins
def init():
gpio.setmode(gpio.BOARD)
gpio.setup(31, gpio.OUT)
gpio.setup(33, gpio.OUT)
gpio.setup(35, gpio.OUT)
gpio.setup(37, gpio.OUT)
gpio.setup(36, gpio.OUT)
gpio.output(36, False)
gpio.setup(7, gpio.IN, pull_up_down = gpio.PUD_UP)
gpio.setup(12, gpio.IN, pull_up_down = gpio.PUD_UP)
def gameover():
gpio.output(31, False)
gpio.output(33, False)
gpio.output(35, False)
gpio.output(37, False)
gpio.cleanup()
#to write time delta in a file
file = open('encoder03.txt','a')
# main code
init()
counterBR = np.uint64(0)
counterFL = np.uint64(0)
buttonBR = int(0)
buttonFL = int(0)
# Initialize pwm signal to control meter
pwm = gpio.PWM(37, 50)
val = 16
pwm.start(val)
time.sleep(0.1)
while True:
#print("counterBR = ", counterBR,"counterFL = ", counterFL, "BR state: ", gpio.input(12), "FL state: ", gpio.input(7))
file.write(str(counterBR)+","+str(counterFL)+","+str(gpio.input(12))+","+str(gpio.input(7))+'\n')
if int(gpio.input(12)) != int(buttonBR):
buttonBR = int(gpio.input(12))
counterBR += 1
if int(gpio.input(7)) != int(buttonFL):
buttonFL = int(gpio.input(7))
counterFL += 1
#print(counter)
if counterBR >= 960:
pwm.stop()
gameover()
print("Thanks for playing")
break
file.close()
#print("counter = ", counter, "GPIO state: ", gpio.input(12))
| [
"spatankhan@gmail.com"
] | spatankhan@gmail.com |
9b2cdeb86d06087f1f5fa0e0cfb88b8fab1f3579 | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/bin/cwutil | 280d53f33edf02cafec34709b3684b22dfcc950c | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0... | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 5,046 | #!/usr/bin/env python
# Author: Chris Moyer <cmoyer@newstex.com>
# Description: CloudWatch Utility
# For listing stats, creating alarms, and managing
# other CloudWatch aspects
import boto
cw = boto.connect_cloudwatch()
from datetime import datetime, timedelta
def _parse_time(time_string):
"""Internal function to parse a time string"""
def _parse_dict(d_string):
result = {}
if d_string:
for d in d_string.split(","):
d = d.split(":")
result[d[0]] = d[1]
return result
def ls(namespace=None):
"""
List metrics, optionally filtering by a specific namespace
namespace: Optional Namespace to filter on
"""
print "%-10s %-50s %s" % ("Namespace", "Metric Name", "Dimensions")
print "-"*80
for m in cw.list_metrics():
if namespace is None or namespace.upper() in m.namespace:
print "%-10s %-50s %s" % (m.namespace, m.name, m.dimensions)
def stats(namespace, metric_name, dimensions=None, statistics="Average", start_time=None, end_time=None, period=60, unit=None):
"""
Lists the statistics for a specific metric
namespace: The namespace to use, usually "AWS/EC2", "AWS/SQS", etc.
metric_name: The name of the metric to track, pulled from `ls`
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
statistics: The statistics to measure, defaults to "Average"
'Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount'
start_time: Start time, default to now - 1 day
end_time: End time, default to now
period: Period/interval for counts, default to 60 minutes
unit: Unit to track, default depends on what metric is being tracked
"""
# Parse the dimensions
dimensions = _parse_dict(dimensions)
# Parse the times
if end_time:
end_time = _parse_time(end_time)
else:
end_time = datetime.utcnow()
if start_time:
start_time = _parse_time(start_time)
else:
start_time = datetime.utcnow() - timedelta(days=1)
print "%-30s %s" % ('Timestamp', statistics)
print "-"*50
data = {}
for m in cw.get_metric_statistics(int(period), start_time, end_time, metric_name, namespace, statistics, dimensions, unit):
data[m['Timestamp']] = m[statistics]
keys = data.keys()
keys.sort()
for k in keys:
print "%-30s %s" % (k, data[k])
def put(namespace, metric_name, dimensions=None, value=None, unit=None, statistics=None, timestamp=None):
"""
Publish custom metrics
namespace: The namespace to use; values starting with "AWS/" are reserved
metric_name: The name of the metric to update
dimensions: The dimensions to use, formatted as Name:Value (such as QueueName:myQueue)
value: The value to store, mutually exclusive with `statistics`
statistics: The statistics to store, mutually exclusive with `value`
(must specify all of "Minimum", "Maximum", "Sum", "SampleCount")
timestamp: The timestamp of this measurement, default is current server time
unit: Unit to track, default depends on what metric is being tracked
"""
def simplify(lst):
return lst[0] if len(lst) == 1 else lst
print cw.put_metric_data(namespace, simplify(metric_name.split(';')),
dimensions = simplify(map(_parse_dict, dimensions.split(';'))) if dimensions else None,
value = simplify(value.split(';')) if value else None,
statistics = simplify(map(_parse_dict, statistics.split(';'))) if statistics else None,
timestamp = simplify(timestamp.split(';')) if timestamp else None,
unit = simplify(unit.split(';')) if unit else None)
def help(fnc=None):
"""
Print help message, optionally about a specific function
"""
import inspect
self = sys.modules['__main__']
if fnc:
try:
cmd = getattr(self, fnc)
except:
cmd = None
if not inspect.isfunction(cmd):
print "No function named: %s found" % fnc
sys.exit(2)
(args, varargs, varkw, defaults) = inspect.getargspec(cmd)
print cmd.__doc__
print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args]))
else:
print "Usage: cwutil [command]"
for cname in dir(self):
if not cname.startswith("_") and not cname == "cmd":
cmd = getattr(self, cname)
if inspect.isfunction(cmd):
doc = cmd.__doc__
print "\t%s - %s" % (cname, doc)
sys.exit(1)
if __name__ == "__main__":
import sys
self = sys.modules['__main__']
if len(sys.argv) >= 2:
try:
cmd = getattr(self, sys.argv[1])
except:
cmd = None
args = sys.argv[2:]
else:
cmd = help
args = []
if not cmd:
cmd = help
try:
cmd(*args)
except TypeError as e:
print e
help(cmd.__name__)
| [
"noreply@github.com"
] | cloudera.noreply@github.com | |
82cdd53d1dcf9e33c62000824cbb3912abc74ad3 | 5f22ddbd3eeb99709e43e7b9a7958c9987c7efa4 | /interview_bits/level_2/02_binary_search/02_search_step_simulation/01_implement-power-function.py | 7f76f3870d716a1ce3475e367399e4163af05c04 | [] | no_license | salvador-dali/algorithms_general | 04950bd823fc354adc58a4f23b7d2f3d39664798 | aeee3356e2488c6fab08741b1ac26e8bd5e4ac0d | refs/heads/master | 2020-12-14T06:24:10.466601 | 2016-07-17T06:00:17 | 2016-07-17T06:00:17 | 47,397,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # https://www.interviewbit.com/problems/implement-power-function/
def power(a, b, m):
if a == 0:
return 0
res, mul = 1, a % m
while b:
if b % 2:
res = (res * mul) % m
mul = (mul * mul) % m
b /= 2
return res | [
"dmytro@knowlabs.com"
] | dmytro@knowlabs.com |
a48345655e91b63f5ae905da3ad7b8a15ef14273 | edcc0afdff7a7d01fa05664006d495627e9568e0 | /tests/snapshot/test_destroy.py | 420d09cd7da71f55fe79d6edcc08b8eaaf999984 | [] | no_license | b-a-t/zettarepl | 871538cc83e9e0ec3cf0c7f4a66bba21559127e4 | 6596fb85f31919edf8eadeee47552d14f3d62db3 | refs/heads/master | 2020-04-01T23:22:27.097027 | 2018-10-16T18:45:10 | 2018-10-16T18:45:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # -*- coding=utf-8 -*-
from unittest.mock import call, Mock
from zettarepl.snapshot.destroy import destroy_snapshots
from zettarepl.snapshot.snapshot import Snapshot
def test__destroy_snapshots__works():
shell = Mock()
destroy_snapshots(shell, [Snapshot("data", "snap-1"), Snapshot("data/work", "snap-1"), Snapshot("data", "snap-2")])
assert shell.exec.call_count == 2
shell.exec.assert_has_calls([
call(["zfs", "destroy", "data@snap-1%snap-2"]),
call(["zfs", "destroy", "data/work@snap-1"])
], True)
| [
"themylogin@gmail.com"
] | themylogin@gmail.com |
6d380ab57dfe8678b97f28537d7c1bbc282c3a62 | ec56f317503b2bce82ff3154e4796654e43afb0b | /hardware/management/commands/retrieve_hardware_data.py | e5501fd616ff44cbc701d19f4147b8c012c977aa | [
"MIT"
] | permissive | timevortexproject/timevortex | e977a68c8d1da76c262354bac5620930548a4f13 | 3b4323c7bc4672009f94f06ad66d447ef38fac01 | refs/heads/master | 2021-01-19T02:56:14.191410 | 2016-09-20T12:52:16 | 2016-09-20T12:52:16 | 54,743,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | #!/usr/bin/python
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""Hardware command"""
# import psutil
from timevortex.utils.commands import AbstractCommand
class Command(AbstractCommand):
"""Command class
"""
help = "Retireve hardware data from system commands"
name = "Hardware crawler"
def handle(self, *args, **options):
"""Main function
"""
# LOGGER.info("Command %s started", self.name)
# print(psutil.cpu_times())
# print(psutil.cpu_count())
# print(psutil.cpu_count(logical=False))
# print(psutil.cpu_percent(interval=1, percpu=True))
# LOGGER.info("Command %s stopped", self.name)
pass
| [
"pierreleray64@gmail.com"
] | pierreleray64@gmail.com |
5e44569573fdd9f885721843380c33d0416aa9f1 | f21bf9e70a32de28a159473c6752fab7159f4e88 | /ms_deisotope/feature_map/feature_fit.py | 1aa3bf8e53cc8f46a0826f2ff6477ecbd0a90d55 | [
"Apache-2.0"
] | permissive | mstim/ms_deisotope | c1d9cd8b5a3ab36c28c53d8988803cd268b240c5 | 29f4f466e92e66b65a2d21eca714aa627caa21db | refs/heads/master | 2023-03-20T05:02:09.088420 | 2021-03-04T21:44:35 | 2021-03-04T21:44:35 | 261,802,498 | 0 | 0 | Apache-2.0 | 2020-05-06T15:32:03 | 2020-05-06T15:32:02 | null | UTF-8 | Python | false | false | 12,644 | py | from collections import namedtuple
import numpy as np
# from brainpy import neutral_mass as calc_neutral_mass
from ms_peak_picker import FittedPeak
from ms_deisotope.averagine import glycan
from ms_deisotope.scoring import g_test_scaled
from .shape_fitter import AdaptiveMultimodalChromatogramShapeFitter
from .lcms_feature import (
EmptyFeature,
LCMSFeature,
LCMSFeatureTreeNode,
RunningWeightedAverage,
NodeFeatureSetIterator)
class map_coord(namedtuple("map_coord", ("mz", 'time'))):
def __repr__(self):
return "(%0.3f, %0.3f)" % self
class LCMSFeatureSetFit(object):
def __init__(self, features, theoretical, score, charge,
missing_features=0, supporters=None, data=None,
neutral_mass=None, scores=None, times=None):
if supporters is None:
supporters = []
if scores is None:
scores = np.array([])
if times is None:
times = np.array([])
self.features = features
self.theoretical = theoretical
self.score = score
self.charge = charge
self.data = data
self.missing_features = missing_features
self.monoisotopic_feature = features[0]
self.supporters = supporters
self.mz = theoretical.monoisotopic_mz
if neutral_mass is None:
neutral_mass = neutral_mass(self.mz, self.charge)
self.neutral_mass = neutral_mass
self.scores = scores
self.times = times
def count_null_features(self):
n_null = 0
for feature in self.features:
if feature is None or isinstance(feature, EmptyFeature):
n_null += 1
return n_null
def has_multiple_real_features(self):
return len(self) - self.count_null_features() > 1
def clone(self):
return self.__class__(
self.features, self.theoretical, self.score, self.charge,
self.missing_features, self.supporters, self.data,
self.neutral_mass, self.scores, self.times)
def __reduce__(self):
return self.__class__, (
self.features, self.theoretical, self.score, self.charge,
self.missing_features, self.supporters, self.data, self.neutral_mass,
self.scores, self.times)
def __eq__(self, other):
val = (self.score == other.score and
self.charge == other.charge and
self.features == other.features and
self.theoretical == other.theoretical)
if self.data is not None or other.data is not None:
val = val and (self.data == other.data)
return val
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.score < other.score
def __gt__(self, other):
return self.score > other.score
def __hash__(self):
return hash((self.monoisotopic_feature.mz, self.charge))
def __iter__(self):
return iter(self.features)
def __len__(self):
return len(self.features)
@property
def npeaks(self):
return len(self)
def __repr__(self):
return "LCMSFeatureSetFit(score=%0.5f, charge=%d, size=%d, monoisotopic_mz=%0.5f, %0.2f-%0.2f)" % (
self.score, self.charge, len(self), self.monoisotopic_feature.mz,
self.start.time, self.end.time)
@property
def start(self):
first = self.features[0]
if first is None:
raise Exception()
return map_coord(first.mz, first.start_time)
@property
def end(self):
for last in reversed(self.features):
if last is None:
continue
return map_coord(last.mz, last.end_time)
class DeconvolutedLCMSFeatureTreeNode(LCMSFeatureTreeNode):
__slots__ = ["_neutral_mass", "charge", "precursor_information"]
def __init__(self, time=None, members=None, precursor_information=None):
if precursor_information is None:
precursor_information = []
self._neutral_mass = 0
self.charge = 0
super(DeconvolutedLCMSFeatureTreeNode, self).__init__(time, members)
self.precursor_information = precursor_information
def _recalculate(self):
self._calculate_most_abundant_member()
self._mz = self._most_abundant_member.mz
self._neutral_mass = self._most_abundant_member.neutral_mass
self.charge = self._most_abundant_member.charge
@property
def neutral_mass(self):
if self._neutral_mass == 0:
if self._most_abundant_member is not None:
self._neutral_mass = self._most_abundant_member.neutral_mass
return self._neutral_mass
class DeconvolutedLCMSFeature(LCMSFeature):
    """An LC-MS feature whose members are deconvoluted (charge-assigned) peaks.

    Adds a charge state, a deconvolution score, and lazily-computed
    neutral mass / precursor information caches on top of LCMSFeature.
    """

    def __init__(self, nodes=None, charge=None, adducts=None, used_as_adduct=None, score=0.0,
                 n_features=0, feature_id=None, supporters=None):
        if supporters is None:
            supporters = []
        self.charge = charge
        self.score = score
        # Caches invalidated by _invalidate(); recomputed on demand.
        self._neutral_mass = None
        self._last_neutral_mass = None
        self._precursor_information = None
        self.n_features = n_features
        self.supporters = supporters
        super(DeconvolutedLCMSFeature, self).__init__(nodes, adducts, used_as_adduct, feature_id=feature_id)

    @property
    def precursor_information(self):
        """Tuple of precursor-information records gathered from all nodes (cached)."""
        if self._precursor_information is None:
            pinfo = []
            for node in self:
                pinfo.extend(node.precursor_information)
            self._precursor_information = tuple(pinfo)
        return self._precursor_information

    def clone(self, deep=False, cls=None):
        """Return a copy of this feature; *cls* overrides the constructed type."""
        if cls is None:
            cls = self.__class__
        return cls(
            self.nodes.clone(deep=deep), self.charge, self.adducts, self.used_as_adduct, self.score,
            self.n_features, self.feature_id, list(self.supporters))

    def _invalidate(self, reaverage=False):
        # Remember the previous mass before dropping the caches.
        self._last_neutral_mass = self._neutral_mass if self._neutral_mass is not None else 0.
        self._neutral_mass = None
        self._precursor_information = None
        super(DeconvolutedLCMSFeature, self)._invalidate(reaverage)

    @property
    def neutral_mass(self):
        """Intensity-weighted average neutral mass over all member peaks (cached)."""
        if self._neutral_mass is None:
            avger = DeconvolutedRunningWeightedAverage()
            for node in self.nodes:
                avger.update(node.members)
            self._neutral_mass = self._last_neutral_mass = avger.current_mean
        return self._neutral_mass

    def _copy_chunk(self, nodes, *args, **kwargs):
        # Build a sibling feature over a subset of nodes (used when splitting).
        x = self.__class__(
            nodes, self.charge, list(self.adducts), list(self.used_as_adduct),
            self.score, self.n_features, None, list(self.supporters))
        return x

    def sum(self, other):
        """Merge *other* into this feature in place and return self.

        Overlapping nodes have their intensities and precursor information
        combined; nodes present only in *other* are inserted as new nodes.
        """
        missing = []
        feat_iter = NodeFeatureSetIterator([self, other])
        for nodes in feat_iter:
            base = nodes[0]
            new = nodes[1]
            if base is None:
                missing.append(new)
            elif new is not None:
                base.members[0].intensity += new.members[0].intensity
                base.precursor_information.extend(new.precursor_information)
        if missing:
            for node in missing:
                self.insert_node(DeconvolutedLCMSFeatureTreeNode(
                    node.time, list(node.members), list(node.precursor_information)))
        self.supporters.extend(other.supporters)
        return self

    def __repr__(self):
        return "%s(%0.4f, %d, %0.2f, %0.2f, %0.2f)" % (
            self.__class__.__name__, self.neutral_mass,
            self.charge, self.score,
            self.start_time, self.end_time)
class DeconvolutedRunningWeightedAverage(RunningWeightedAverage):
    """Running intensity-weighted mean over peak *neutral mass* values."""

    def add(self, peak):
        """Fold *peak* into the running mean; returns self, or None if the
        zero-intensity peak was discarded."""
        if peak.intensity == 0:
            # A zero-intensity peak may only seed a pristine accumulator;
            # otherwise it carries no weight and is ignored.
            if self.current_mean != 0 or self.total_weight != 0:
                return
            self.current_mean = peak.neutral_mass
            self.total_weight = 1
        self.accumulator.append(peak)
        weighted_sum = (self.total_weight * self.current_mean) + \
            (peak.neutral_mass * peak.intensity)
        self.total_weight += peak.intensity
        self.current_mean = weighted_sum / self.total_weight
        self.current_count += 1
        return self

    def recompute(self):
        """Recompute the weighted mean from scratch over the accumulator."""
        total_weight = sum(peak.intensity for peak in self.accumulator)
        weighted_total = sum(peak.intensity * peak.neutral_mass for peak in self.accumulator)
        return weighted_total / total_weight
class DriftTimeRunningWeightedAverage(RunningWeightedAverage):
    """Running intensity-weighted mean over peak *drift time* values."""

    def add(self, peak):
        """Fold *peak* into the running mean; returns self, or None if the
        zero-intensity peak was discarded."""
        if peak.intensity == 0:
            # Only a zero-intensity peak arriving on a pristine accumulator
            # may seed the mean; otherwise it is ignored.
            if self.current_mean != 0 or self.total_weight != 0:
                return
            self.current_mean = peak.drift_time
            self.total_weight = 1
        self.accumulator.append(peak)
        weighted_sum = (self.total_weight * self.current_mean) + \
            (peak.drift_time * peak.intensity)
        self.total_weight += peak.intensity
        self.current_mean = weighted_sum / self.total_weight
        self.current_count += 1
        return self

    def recompute(self):
        """Recompute the weighted mean from scratch over the accumulator."""
        total_weight = sum(peak.intensity for peak in self.accumulator)
        weighted_total = sum(peak.intensity * peak.drift_time for peak in self.accumulator)
        return weighted_total / total_weight
class IonMobilityDeconvolutedLCMSFeature(DeconvolutedLCMSFeature):
    """A deconvoluted feature that additionally tracks an averaged drift time."""

    def __init__(self, nodes=None, charge=None, adducts=None, used_as_adduct=None, score=0.0,
                 n_features=0, feature_id=None, supporters=None):
        # Drift-time cache mirrors the neutral-mass cache of the base class.
        self._drift_time = None
        self._last_drift_time = None
        super(IonMobilityDeconvolutedLCMSFeature, self).__init__(
            nodes=nodes, charge=charge, adducts=adducts, used_as_adduct=used_as_adduct, score=score,
            n_features=n_features, feature_id=feature_id, supporters=supporters)

    def _invalidate(self, reaverage=False):
        if self._drift_time is not None:
            self._last_drift_time = self._drift_time
        else:
            self._last_drift_time = 0.
        self._drift_time = None
        return super(IonMobilityDeconvolutedLCMSFeature, self)._invalidate(reaverage=reaverage)

    @property
    def drift_time(self):
        """Intensity-weighted average drift time over all member peaks (cached)."""
        if self._drift_time is None:
            averager = DriftTimeRunningWeightedAverage()
            for node in self.nodes:
                averager.update(node.members)
            self._drift_time = self._last_drift_time = averager.current_mean
        return self._drift_time

    def __repr__(self):
        return "%s(%0.4f, %0.4f, %d, %0.2f, %0.2f, %0.2f)" % (
            self.__class__.__name__, self.neutral_mass, self.drift_time,
            self.charge, self.score,
            self.start_time, self.end_time)
def envelope_to_peak_list(envelope):
    """Convert (mz, intensity) pairs into FittedPeak stubs with all other
    attributes zeroed."""
    peaks = []
    for point in envelope:
        peaks.append(FittedPeak(point[0], point[1], 0, 0, 0, 0, 0, 0, 0))
    return peaks
def scale_theoretical_isotopic_pattern(eid, tid):
    """Scale the theoretical pattern *tid* in place so its intensities are
    expressed relative to the total observed intensity in *eid*."""
    observed_total = sum(peak.intensity for peak in eid)
    for theoretical_peak in tid:
        theoretical_peak.intensity *= observed_total
def isotopic_consistency(eic, averagine=glycan, truncate_after=0.95):
    """Score how well each member peak's observed envelope matches the
    theoretical *averagine* isotopic pattern.

    Returns 1 minus the abundance-weighted mean G-test statistic, clamped
    to at least 1e-4 so downstream products never hit exactly zero.
    """
    peak_scores = []
    peak_abundances = []
    for node in eic:
        for peak in node.members:
            # Observed envelope vs. theoretical cluster at the peak's m/z/charge.
            eid = envelope_to_peak_list(peak.envelope)
            tid = averagine.isotopic_cluster(peak.mz, peak.charge, truncate_after=truncate_after)
            tid.scale(eid)
            peak_scores.append(abs(g_test_scaled(None, eid, tid.truncated_tid)))
            peak_abundances.append(peak.intensity)
    return max(1 - np.average(peak_scores, weights=peak_abundances), 1e-4)
def spacing_fit(eic):
    """Score the regularity of retention-time spacing along *eic*.

    The mean time delta is weighted by the accompanying intensity change;
    the result is clamped to at least 1e-4.
    """
    times, intensities = eic.as_arrays()
    rt_deltas = [b - a for a, b in zip(times, times[1:])]
    intensity_deltas = [abs(a - b) for a, b in zip(intensities, intensities[1:])]
    return max(1 - np.average(rt_deltas, weights=intensity_deltas) * 2, 1e-4)
def shape_fit(eic, smooth=0.15):
    """Score the chromatographic peak shape of *eic* via the adaptive
    multimodal shape fitter's line test (clamped to at least 1e-4)."""
    fitter = AdaptiveMultimodalChromatogramShapeFitter(eic, smooth=smooth)
    return max(1 - fitter.line_test, 1e-4)
def profile_qc(eic, smooth=0.15, averagine=glycan, truncate_after=0.95):
    """Combined quality score for *eic*: the product of its isotopic
    consistency, spacing regularity, and shape fit (each in (0, 1])."""
    return (isotopic_consistency(eic, averagine, truncate_after)
            * spacing_fit(eic)
            * shape_fit(eic, smooth))
# Prefer the optimized C implementations when the compiled extension is
# available; fall back to the pure-Python classes defined above otherwise.
# The originals are kept under underscore aliases for introspection/debugging.
try:
    has_c = True
    _map_coord = map_coord
    _LCMSFeatureSetFit = LCMSFeatureSetFit
    from ms_deisotope._c.feature_map.feature_fit import (LCMSFeatureSetFit, map_coord)
except ImportError as e:
    # Extension not built/installed: report and continue with pure Python.
    print(e)
    has_c = False
| [
"mobiusklein@gmail.com"
] | mobiusklein@gmail.com |
1c46bc10f1a06deedf4269d8d176dbb6a6b1b673 | b3a971d9bed56b7a2a6f4d705cae115b4af4545c | /AES_with_timerecord/dragonfly_cipher.py | 66997185bd41cbff928a6593e86af5fe78dd544f | [] | no_license | powderfool000/ambitioushomo | f09162fd43cb706fd73824b05a2b9c0233c11cfb | 745f6a19898bbe7f559b19802abc87380217d19e | refs/heads/master | 2022-02-24T12:18:26.405681 | 2019-09-04T07:01:31 | 2019-09-04T07:01:31 | 181,815,102 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,624 | py | #!/usr/bin/env python3
#"""
#Implements the Dragonfly (SAE) handshake.
#Instead of using a client (STA) and an access point (AP), we
#just programmatically create a peer-to-peer network of two participants.
#Either party may initiate the SAE protocol; either party can be the client or the server.
#In a mesh scenario, where two APs (two equals) are trying to establish a connection
#between each other, each one could take the role of supplicant or authenticator.
#SAE is built upon the Dragonfly Key Exchange, which is described in https://tools.ietf.org/html/rfc7664.
#https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
#"""
import time
import hashlib
import random
import logging
import socket
import re, uuid
import base64
import os, random, struct
import subprocess
from collections import namedtuple
from Cryptodome.Cipher import AES
from Cryptodome import Random
from Cryptodome.Hash import SHA256
from optparse import *
from socket import error as SocketError
import errno
# --- Module-level network setup (runs at import time) ---
# Create the TCP/IP listening socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Retrieve the local hostname.
local_hostname = socket.gethostname()
# Fully qualified hostname.
local_fqdn = socket.getfqdn()
# IP address corresponding to the local hostname.
ip_address = socket.gethostbyname(local_hostname)
# Report hostname, domain name and IP address.
print ("Working on %s (%s) with %s" % (local_hostname, local_fqdn, ip_address))
# Bind the socket to a fixed address/port.
# NOTE(review): the address is hard-coded; deployment on another host
# requires editing this tuple.
server_address = ('192.168.0.101', 65432)
print ("Starting up on %s port %s" % server_address)
sock.bind(server_address)

# --- Logging setup: console + file, both at DEBUG, logger at INFO ---
logger = logging.getLogger('dragonfly')
logger.setLevel(logging.INFO)
# File handler captures even debug messages.
fh = logging.FileHandler('dragonfly.log')
fh.setLevel(logging.DEBUG)
# Console handler.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# Shared formatter for both handlers.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)
# Attach the handlers to the logger.
logger.addHandler(ch)
logger.addHandler(fh)

# An elliptic-curve point as an (x, y) pair.
Point = namedtuple("Point", "x y")

# The point at infinity (origin for the group law).
O = 'Origin'
def lsb(x):
    """Return the first character of x's binary representation (no '0b' prefix).

    NOTE: despite the name, this is the *most* significant bit, which is
    always '1' for any positive integer; for 0 it is '0'.

    The previous implementation used ``bin(x).lstrip('0b')``, which strips a
    *character set* (every leading '0', 'b', and then any further '0'/'b'),
    not the literal prefix, and raised IndexError for x == 0 because the
    stripped string became empty. Slicing off the two prefix characters is
    both correct and crash-free for non-negative inputs.
    """
    binary = bin(x)[2:]  # drop the literal '0b' prefix only
    return binary[0]
def legendre(a, p):
    """Euler's criterion: a^((p-1)/2) mod p.

    Evaluates to 1 when *a* is a quadratic residue modulo the odd prime *p*,
    and to p - 1 when it is a non-residue.
    """
    exponent = (p - 1) // 2
    return pow(a, exponent, p)
def tonelli_shanks(n, p):
    """Return a square root of *n* modulo the odd prime *p*.

    Requires that *n* actually be a quadratic residue mod *p* (asserted via
    the Legendre symbol). Adapted from
    https://rosettacode.org/wiki/Tonelli-Shanks_algorithm#Python
    """
    assert legendre(n, p) == 1, "not a square (mod p)"
    # Factor p - 1 as q * 2^s with q odd.
    q = p - 1
    s = 0
    while q % 2 == 0:
        q //= 2
        s += 1
    # Shortcut for p ≡ 3 (mod 4).
    if s == 1:
        return pow(n, (p + 1) // 4, p)
    # Find a quadratic non-residue z (legendre(z, p) == p - 1).
    for z in range(2, p):
        if p - 1 == legendre(z, p):
            break
    c = pow(z, q, p)
    r = pow(n, (q + 1) // 2, p)
    t = pow(n, q, p)
    m = s
    t2 = 0
    # Iteratively reduce the order of t until t ≡ 1 (mod p);
    # r then holds the square root.
    while (t - 1) % p != 0:
        t2 = (t * t) % p
        for i in range(1, m):
            if (t2 - 1) % p == 0:
                break
            t2 = (t2 * t2) % p
        b = pow(c, 1 << (m - i - 1), p)
        r = (r * b) % p
        c = (b * b) % p
        t = (t * c) % p
        m = i
    return r
class Curve():
    """
    Mathematical operations on an elliptic curve y^2 = x^3 + a*x + b over GF(p).

    A lot of code taken from:
    https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
    """
    def __init__(self, a, b, p):
        # Curve coefficients and the prime modulus of the underlying field.
        self.a = a
        self.b = b
        self.p = p

    def curve_equation(self, x):
        """Evaluate the right-hand side x^3 + a*x + b (mod p)."""
        return (pow(x, 3) + (self.a * x) + self.b) % self.p

    def is_quadratic_residue(self, x):
        """Euler's criterion (https://en.wikipedia.org/wiki/Euler%27s_criterion):
        True if *x* has a square root modulo p."""
        return pow(x, (self.p-1) // 2, self.p) == 1

    def valid(self, P):
        """
        Determine whether we have a valid representation of a point
        on our curve. We assume that the x and y coordinates
        are always reduced modulo p, so that we can compare
        two points for equality with a simple ==.
        """
        if P == O:
            return True
        else:
            return (
                (P.y**2 - (P.x**3 + self.a*P.x + self.b)) % self.p == 0 and
                0 <= P.x < self.p and 0 <= P.y < self.p)

    def inv_mod_p(self, x):
        """
        Compute an inverse for x modulo p (via Fermat's little theorem),
        assuming that x is not divisible by p.
        """
        if x % self.p == 0:
            raise ZeroDivisionError("Impossible inverse")
        return pow(x, self.p-2, self.p)

    def ec_inv(self, P):
        """
        Inverse (negation) of the point P on the elliptic curve.
        """
        if P == O:
            return P
        return Point(P.x, (-P.y) % self.p)

    def ec_add(self, P, Q):
        """
        Sum of the points P and Q under the elliptic-curve group law.
        https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
        """
        if not (self.valid(P) and self.valid(Q)):
            raise ValueError("Invalid inputs")

        # Deal with the special cases where either P, Q, or P + Q is
        # the origin.
        if P == O:
            result = Q
        elif Q == O:
            result = P
        elif Q == self.ec_inv(P):
            result = O
        else:
            # Cases not involving the origin: chord (P != Q) or tangent (P == Q).
            if P == Q:
                dydx = (3 * P.x**2 + self.a) * self.inv_mod_p(2 * P.y)
            else:
                dydx = (Q.y - P.y) * self.inv_mod_p(Q.x - P.x)
            x = (dydx**2 - P.x - Q.x) % self.p
            y = (dydx * (P.x - x) - P.y) % self.p
            result = Point(x, y)

        # The above computations *should* have given us another point
        # on the curve.
        assert self.valid(result)
        return result

    def double_add_algorithm(self, scalar, P):
        """
        Double-and-Add Algorithm for Point Multiplication
        Input: A scalar in the range 0-p and a point on the elliptic curve P
        https://stackoverflow.com/questions/31074172/elliptic-curve-point-addition-over-a-finite-field-in-python
        """
        assert self.valid(P)

        # NOTE(review): lstrip('0b') strips a character *set*, not the prefix.
        # It works here only because the binary form of a positive scalar
        # always begins with '1'; scalar == 0 would yield an empty string.
        b = bin(scalar).lstrip('0b')
        T = P
        for i in b[1:]:
            T = self.ec_add(T, T)
            if i == '1':
                T = self.ec_add(T, P)

        assert self.valid(T)
        return T
class Peer:
    """
    One participant of the Dragonfly (SAE) handshake.

    Implements https://wlan1nde.wordpress.com/2018/09/14/wpa3-improving-your-wlan-security/
    Take a ECC curve from here: https://safecurves.cr.yp.to/

    Example: NIST P-384
    y^2 = x^3-3x+27580193559959705877849011840389048093056905856361568521428707301988689241309860865136260764883745107765439761230575
    modulo p = 2^384 - 2^128 - 2^96 + 2^32 - 1
    2000 NIST; also in SEC 2 and NSA Suite B

    See here: https://www.rfc-editor.org/rfc/rfc5639.txt
    Curve-ID: brainpoolP256r1
    p = A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377
    A = 7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9
    B = 26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6
    x = 8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262
    y = 547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997
    q = A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7
    h = 1
    """

    def __init__(self, password, mac_address, name):
        self.name = name
        self.password = password
        self.mac_address = mac_address

        # Domain parameters (Curve-ID: brainpoolP256t1; see class docstring).
        self.p = int('A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377', 16)
        self.a = int('7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9', 16)
        self.b = int('26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6', 16)
        self.q = int('A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7', 16)
        self.curve = Curve(self.a, self.b, self.p)

        # A toy curve (kept for debugging with small numbers):
        # self.a, self.b, self.p = 2, 2, 17
        # self.q = 19
        # self.curve = Curve(self.a, self.b, self.p)

    def initiate(self, other_mac, k=40):
        """
        Derive the Password Element (PE) by "hunting and pecking".

        See algorithm in https://tools.ietf.org/html/rfc7664
        in section 3.2.1
        """
        self.other_mac = other_mac
        found = 0
        num_valid_points = 0
        counter = 1
        n = self.p.bit_length() + 64
        # Always run the full k iterations (constant work against timing
        # side channels); remember up to the first 5 valid x candidates.
        while counter <= k:
            base = self.compute_hashed_password(counter)
            temp = self.key_derivation_function(n, base, 'Dragonfly Hunting And Pecking')
            seed = (temp % (self.p - 1)) + 1
            val = self.curve.curve_equation(seed)
            if self.curve.is_quadratic_residue(val):
                if num_valid_points < 5:
                    x = seed
                    # NOTE(review): `save` is written but never read.
                    save = base
                    found = 1
                    num_valid_points += 1
                    logger.debug('Got point after {} iterations'.format(counter))

            counter = counter + 1

        if found == 0:
            logger.error('No valid point found after {} iterations'.format(k))
        elif found == 1:
            # Recover y from y^2 = f(x) mod p.
            # https://crypto.stackexchange.com/questions/6777/how-to-calculate-y-value-from-yy-mod-prime-efficiently
            # https://rosettacode.org/wiki/Tonelli-Shanks_algorithm
            y = tonelli_shanks(self.curve.curve_equation(x), self.p)

            PE = Point(x, y)

            # check valid point
            assert self.curve.curve_equation(x) == pow(y, 2, self.p)

            logger.info('[{}] Using {}-th valid Point={}'.format(self.name, num_valid_points, PE))
            logger.info('[{}] Point is on curve: {}'.format(self.name, self.curve.valid(PE)))

            self.PE = PE

            assert self.curve.valid(self.PE)

    def commit_exchange(self):
        """
        This is basically Diffie Hellman Key Exchange (or in our case ECCDH)

        In the Commit Exchange, both sides commit to a single guess of the
        password. The peers generate a scalar and an element, exchange them
        with each other, and process the other's scalar and element to
        generate a common and shared secret.

        Returns (scalar, element) to be sent to the peer.
        """
        # Seed the PRNG from OS entropy / current time.
        # NOTE(review): the `random` module is not a CSPRNG; `secrets` or
        # `os.urandom` should back `private` and `mask` in production.
        random.seed()
        # Each party chooses two random numbers: private and mask.
        self.private = random.randrange(1, self.p)
        self.mask = random.randrange(1, self.p)
        logger.debug('[{}] private={}'.format(self.name, self.private))
        logger.debug('[{}] mask={}'.format(self.name, self.mask))

        # scalar = (private + mask) mod q, where q is the (prime) order of
        # the group generated by the Password Element.
        self.scalar = (self.private + self.mask) % self.q

        # If the scalar is less than two (2), the private and mask MUST be
        # thrown away and new values generated. Once a valid scalar and
        # Element are generated, the mask is no longer needed and MUST be
        # irretrievably destroyed.
        if self.scalar < 2:
            raise ValueError('Scalar is {}, regenerating...'.format(self.scalar))

        # element = inverse(mask * PE); the inverse of (x_p, y_p) is
        # (x_p, p - y_p).
        P = self.curve.double_add_algorithm(self.mask, self.PE)
        self.element = self.curve.ec_inv(P)

        assert self.curve.valid(self.element)

        # The peers exchange their scalar and Element and check the peer's
        # scalar and Element. If the peer has sent an identical scalar and
        # Element it is a sign of a reflection attack and the exchange MUST
        # be aborted (checked by the caller).
        logger.info('[{}] Sending scalar and element to the Peer!'.format(self.name))
        logger.info('[{}] Scalar={}'.format(self.name, self.scalar))
        logger.info('[{}] Element={}'.format(self.name, self.element))

        return self.scalar, self.element

    def compute_shared_secret(self, peer_element, peer_scalar, peer_mac):
        """
        Derive the shared secret and this side's confirmation token.

        ss = F(scalar-op(private,
                         element-op(peer-Element,
                                    scalar-op(peer-scalar, PE))))

        Both sides arrive at the same point:
            K = private_A * (peer_scalar_B * PE + element_B)
              = private_A * private_B * PE
        """
        self.peer_element = peer_element
        self.peer_scalar = peer_scalar
        self.peer_mac = peer_mac

        assert self.curve.valid(self.peer_element)

        # K = private * (peer_scalar * PE + peer_element); ss is K's x coordinate.
        Z = self.curve.double_add_algorithm(self.peer_scalar, self.PE)
        ZZ = self.curve.ec_add(self.peer_element, Z)
        K = self.curve.double_add_algorithm(self.private, ZZ)

        self.k = K[0]

        logger.info('[{}] Shared Secret ss={}'.format(self.name, self.k))

        # Confirmation token binds ss, both scalars, both element x-coords
        # and our MAC address.
        own_message = '{}{}{}{}{}{}'.format(self.k , self.scalar , self.peer_scalar , self.element[0] , self.peer_element[0] , self.mac_address).encode()

        H = hashlib.sha256()
        H.update(own_message)
        self.token = H.hexdigest()

        return self.token

    def confirm_exchange(self, peer_token):
        """
        In the Confirm Exchange, both sides confirm that they derived the
        same secret, and therefore, are in possession of the same password.

        Returns the Pairwise Master Key (PMK).

        NOTE(review): the recomputed peer token is only logged — it is never
        actually compared against *peer_token*, so authentication is not
        enforced here.
        """
        peer_message = '{}{}{}{}{}{}'.format(self.k , self.peer_scalar , self.scalar , self.peer_element[0] , self.element[0] , self.peer_mac).encode()
        H = hashlib.sha256()
        H.update(peer_message)
        self.peer_token_computed = H.hexdigest()

        logger.info('[{}] Computed Token from Peer={}'.format(self.name, self.peer_token_computed))
        logger.info('[{}] Received Token from Peer={}'.format(self.name, peer_token))

        # Pairwise Master Key (PMK):
        # PMK = H(k | (scal(AP1) + scal(AP2)) mod q)
        pmk_message = '{}{}'.format(self.k, (self.scalar + self.peer_scalar) % self.q).encode()
        #H = hashlib.sha256()
        #H.update(pmk_message)
        self.PMK = hashlib.sha256(pmk_message).digest()

        logger.info('[{}] Pairwise Master Key(PMK)={}'.format(self.name, self.PMK))

        return self.PMK

    def key_derivation_function(self, n, base, seed):
        """
        B.5.1 Per-Message Secret Number Generation Using Extra Random Bits

        Key derivation function from Section B.5.1 of [FIPS186-4]: produce a
        bitstream of length n (prime length + 64) seeded from base||seed.

        NOTE(review): re-seeding `random` with the inputs makes this fully
        deterministic (required so both peers agree) but `random` is not a
        cryptographic RBG. Also, the place value below uses 2**(n-i) where a
        strict binary conversion would use 2**(n-1-i); since both peers run
        identical code the handshake still agrees.
        """
        combined_seed = '{}{}'.format(base, seed).encode()

        # base and seed concatenated are the input to the RBG.
        random.seed(combined_seed)

        # Obtain a string of n returned bits from the RBG.
        randbits = random.getrandbits(n)
        binary_repr = format(randbits, '0{}b'.format(n))

        assert len(binary_repr) == n

        logger.debug('Rand={}'.format(binary_repr))

        # Convert returned_bits to the non-negative integer C (Appendix C.2.1).
        C = 0
        for i in range(n):
            if int(binary_repr[i]) == 1:
                C += pow(2, n-i)

        logger.debug('C={}'.format(C))

        #k = (C % (n - 1)) + 1
        k = C

        logger.debug('k={}'.format(k))

        return k

    def compute_hashed_password(self, counter):
        """SHA-256 over max(MAC) || min(MAC) || password || counter.

        Ordering the MAC addresses makes the digest symmetric, so both
        peers derive the same base value.
        """
        maxm = max(self.mac_address, self.other_mac)
        minm = min(self.mac_address, self.other_mac)
        message = '{}{}{}{}'.format(maxm, minm, self.password, counter).encode()
        logger.debug('Message to hash is: {}'.format(message))
        H = hashlib.sha256()
        H.update(message)
        digest = H.digest()
        return digest
def encrypting(key, filename):
    """Encrypt *filename* with AES-CBC into '<filename>.hacklab'.

    Output layout: 16-byte zero-padded ASCII original file size, then the
    16-byte IV, then ciphertext of the content (chunks space-padded up to a
    16-byte multiple). Returns the output file name.
    """
    chunk_size = 64 * 1024
    out_path = filename + ".hacklab"
    original_size = str(os.path.getsize(filename)).zfill(16)
    iv = Random.new().read(16)

    cipher = AES.new(key, AES.MODE_CBC, iv)

    with open(filename, 'rb') as src:
        with open(out_path, 'wb') as dst:
            # Header: plaintext size (so the decryptor can strip padding) + IV.
            dst.write(original_size.encode('utf-8'))
            dst.write(iv)

            while True:
                chunk = src.read(chunk_size)
                if not chunk:
                    break
                remainder = len(chunk) % 16
                if remainder:
                    # Pad the final short block with spaces to a 16-byte boundary.
                    chunk += b' ' * (16 - remainder)
                dst.write(cipher.encrypt(chunk))
    return out_path
def handshake():
    """Run the server (AP) side of the SAE handshake over the module socket,
    then exchange the homomorphic-computation files with the peer.

    Flow: swap MAC addresses -> derive PE -> commit exchange -> shared
    secret + token -> confirm exchange (PMK) -> send cloud.data, receive
    answer.data, verify, and record the end timestamp.
    """
    # Local MAC address formatted as aa:bb:cc:dd:ee:ff.
    own_mac = (':'.join(re.findall('..', '%012x' % uuid.getnode())))
    print (own_mac)
    ap = Peer('abc1238', own_mac, 'AP')
    logger.info('Starting hunting and pecking to derive PE...\n')
    sock.listen(1)
    connection, client_address = sock.accept()
    with connection:
        print ("Connecting from", client_address)
        # Exchange MAC addresses (peer's first, then ours).
        other_mac = connection.recv(1024).decode()
        print (other_mac)
        connection.send(own_mac.encode())
        ap.initiate(other_mac)
        print()
        logger.info('Starting dragonfly commit exchange...\n')
        scalar_ap, element_ap = ap.commit_exchange()
        # Send our scalar and element as two newline-separated strings.
        connection.sendall(str.encode("\n".join([str(scalar_ap), str(element_ap)])))
        print()
        logger.info('Computing shared secret...\n')
        # Receive the peer's scalar and element.
        scalar_element_ap = connection.recv(1024).decode()
        data = scalar_element_ap.split('\n')
        print (data[0])
        print (data[1])
        scalar_sta = data[0]
        element_sta = data[1]
        print (scalar_sta)
        print (element_sta)
        print ()
        print ()
        # NOTE(review): eval() on data received from the network is a code
        # injection hazard; the element should be parsed, not evaluated.
        namedtuple_element_sta = eval(element_sta)
        print(namedtuple_element_sta.y, namedtuple_element_sta.x)
        print ()
        print ()
        ap_token = ap.compute_shared_secret(namedtuple_element_sta, int(scalar_sta), other_mac)
        connection.send(ap_token.encode())
        print()
        logger.info('Confirm Exchange...\n')
        sta_token = connection.recv(1024).decode()
        PMK_Key = ap.confirm_exchange(sta_token)
        #print (PMK_Key)
        # Run the C++ client to produce the ciphertext file.
        print ("Getting ciphertext...\n")
        subprocess.call("./testadd_client")
        print("Printing ciphertext...\n")
        cloud_data = "cloud.data"
        print("This file ", cloud_data, "is our ciphertext\n")
        """
        # Open and read the contents in the cloud data
        f = open(cloud_data, "rb")
        content = f.read(8192)
        print (content)
        """
        fsize = os.path.getsize(cloud_data)
        # Announce the payload size, then stream the ciphertext in chunks.
        connection.send(str(fsize).encode('utf-8'))
        BUFFER_SIZE = 1024
        with open(cloud_data, 'rb') as f:
            content = f.read(BUFFER_SIZE)
            while content:
                connection.send(content)
                print ("Sent", repr(content))
                content = f.read(BUFFER_SIZE)
        # Redundant: the with-block already closed the file (harmless no-op).
        f.close()
        print('Original file size: ', os.path.getsize(cloud_data))
        print('Please wait while the stupid server take some time...\n')
        indication = connection.recv(1024)
        print(indication.decode('utf-8'))
        # Receive the computed answer: first its size, then the data chunks.
        with open('answer.data', 'wb') as a:
            print ('File opened...\n')
            msg = connection.recv(BUFFER_SIZE)
            ans_size = int(msg.decode('utf-8'))
            gans_size = 0
            while True:
                print ('Receiving data...\n')
                answer_data = connection.recv(BUFFER_SIZE)
                print ('Data: ', answer_data)
                gans_size = gans_size + len(answer_data)
                a.write(answer_data)
                if gans_size >= ans_size:
                    print('Breaking from file write')
                    break
        print('Original file size: ', os.path.getsize(cloud_data))
        print ('Answer data file size: ', os.path.getsize('answer.data'))
        # Verify the homomorphic computation result.
        # NOTE(review): secret_key / answer_data locals are never used.
        secret_key = 'secret.key'
        answer_data = 'answer.data'
        subprocess.call('./testadd_verify_client')
        # Record the completion timestamp for timing measurements.
        end = time.time()
        f= open("endtime.txt","w+")
        f.write(str(end))
        f.close()
        connection.close()
        # Print MD5 checksums of both files for manual comparison.
        os.system("md5sum cloud.data")
        os.system("md5sum answer.data")
def tests():
    """
    Sanity checks for the Curve class arithmetic on a small toy curve
    (y^2 = x^3 + 2x + 2 over GF(17), group order 19).
    See Understanding Cryptography, ECC section.
    """
    a, b, p = 2, 2, 17
    curve = Curve(a, b, p)
    P = Point(5, 1)
    # 19 * P must be the point at infinity (19 is the group order).
    assert curve.double_add_algorithm(19, P) == O

    # Repeated addition must agree with double-and-add.
    T = P
    for i in range(p+1):
        T = curve.ec_add(T, P)

    assert curve.double_add_algorithm(19, P) == T
if __name__ == '__main__':
    # Uncomment to run the toy-curve sanity checks instead of the handshake.
    #tests()
    handshake()
"noreply@github.com"
] | powderfool000.noreply@github.com |
d320ad024d2500692c7bbcdd0ce07d080cd72d0c | f6b2dd8d27f9141f04a071765de444ab68808c18 | /libled/util/color.py | 7222ab44a0b23c1be7c70c5453d122267c3c22a2 | [] | no_license | tatsuo98se/3d_led_cube2 | ecf9eba3e226cb4bdb225c144b3ea3512009cb12 | ea1c607c64c688db0f0fb3ed2829206875c8b521 | refs/heads/master | 2018-10-22T09:27:49.820357 | 2018-07-31T15:03:24 | 2018-07-31T15:03:24 | 104,359,509 | 2 | 3 | null | 2018-07-31T15:03:25 | 2017-09-21T14:35:48 | Python | UTF-8 | Python | false | false | 3,933 | py | import numpy as np
class Color:
    """RGBA color with channels stored as floats clamped to [0.0, 1.0].

    Construction accepts normalized floats; the *255 helpers convert from
    8-bit channel values. ``int(color)`` packs the alpha-premultiplied color
    into a 0xRRGGBB integer.

    Fix: the original defined only ``__div__`` (the Python 2 division hook),
    so ``color / x`` raised TypeError under Python 3. ``__truediv__`` is now
    the real implementation and ``__div__`` is kept as an alias for
    backward compatibility.
    """

    def __init__(self, r=0, g=0, b=0, a=1.0):
        self.r = float(r)
        self.g = float(g)
        self.b = float(b)
        self.a = float(a)
        self.normalize()

    @staticmethod
    def int_to_color(rgb):
        """Build a Color from a packed 0xRRGGBB integer (alpha defaults to 1)."""
        return Color(((rgb & 0xff0000) >> 16) / 255.0,
                     ((rgb & 0x00ff00) >> 8) / 255.0,
                     (rgb & 0x0000ff) / 255.0)

    @staticmethod
    def rgbtapple_to_color(rgb, a=1.0):
        """Build a Color from a 3-tuple of normalized floats."""
        return Color(rgb[0],
                     rgb[1],
                     rgb[2],
                     a)

    @staticmethod
    def rgbatapple_to_color(rgba):
        """Build a Color from a 4-tuple of normalized floats."""
        return Color(rgba[0],
                     rgba[1],
                     rgba[2],
                     rgba[3])

    @staticmethod
    def rgbtapple255_to_color(rgb, a=1.0):
        """Build a Color from a 3-sequence of 0-255 channel values."""
        return Color(rgb[0] / 255.0,
                     rgb[1] / 255.0,
                     rgb[2] / 255.0,
                     a)

    @staticmethod
    def rgbatapple255_to_color(rgba):
        """Build a Color from a 4-sequence of 0-255 channel values."""
        return Color(rgba[0] / 255.0,
                     rgba[1] / 255.0,
                     rgba[2] / 255.0,
                     rgba[3])

    @staticmethod
    def object_to_color(color):
        """Coerce *color* to a Color.

        Accepts Color (returned as-is), packed int/float, tuples of
        normalized floats, or numpy arrays of 0-255 channel values.
        Raises TypeError for anything else.
        """
        if isinstance(color, Color):
            return color
        elif isinstance(color, int):
            return Color.int_to_color(color)
        elif isinstance(color, float):
            return Color.int_to_color(int(color))
        elif isinstance(color, tuple):
            if len(color) == 3:
                return Color.rgbtapple_to_color(color)
            else:
                return Color.rgbatapple_to_color(color)
        elif isinstance(color, np.ndarray):
            # ndarrays are assumed to carry 0-255 channel values.
            if len(color) == 3:
                return Color.rgbtapple255_to_color(color)
            else:
                return Color.rgbatapple255_to_color(color)
        else:
            print("Unknown Type:" + str(type(color)))
            raise TypeError

    def normalize(self):
        """Clamp every channel into [0.0, 1.0] in place."""
        self.r = max(0.0, min(self.r, 1.0))
        self.g = max(0.0, min(self.g, 1.0))
        self.b = max(0.0, min(self.b, 1.0))
        self.a = max(0.0, min(self.a, 1.0))

    def __mul__(self, other):
        # Scale RGB by a scalar; NOTE: alpha resets to the default 1.0
        # (preserved from the original behavior).
        return Color(
            self.r * other,
            self.g * other,
            self.b * other)

    def __truediv__(self, other):
        # Divide RGB by a scalar; alpha resets to the default 1.0.
        return Color(
            self.r / other,
            self.g / other,
            self.b / other)

    # Python 2 name kept for backward compatibility with any existing callers.
    __div__ = __truediv__

    def __or__(self, other):
        # Bitwise OR of the 8-bit channel values.
        return Color.rgbtapple255_to_color(
            (
                int(self.r * 255) | int(other.r * 255),
                int(self.g * 255) | int(other.g * 255),
                int(self.b * 255) | int(other.b * 255))
        )

    def __and__(self, other):
        # Bitwise AND of the 8-bit channel values.
        return Color.rgbtapple255_to_color(
            (
                int(self.r * 255) & int(other.r * 255),
                int(self.g * 255) & int(other.g * 255),
                int(self.b * 255) & int(other.b * 255))
        )

    def __sub__(self, other):
        # Channel-wise subtraction of a Color, or scalar subtraction.
        if (isinstance(other, Color)):
            return Color(
                self.r - other.r,
                self.g - other.g,
                self.b - other.b)
        else:
            return Color(
                self.r - other,
                self.g - other,
                self.b - other)

    def __int__(self):
        # Pack alpha-premultiplied channels into 0xRRGGBB.
        return (int(round(self.r * self.a * 255)) << 16) + \
               (int(round(self.g * self.a * 255)) << 8) + \
               int(round(self.b * self.a * 255))

    def is_black(self):
        """True when the color renders as black (fully transparent or all
        RGB channels zero)."""
        if self.a == 0:
            return True
        return self.r == 0.0 and self.g == 0.0 and self.b == 0.0

    def to_rgba255(self):
        """Return (r, g, b, a) as rounded 0-255 ints."""
        return (int(round(self.r * 255)),
                int(round(self.g * 255)),
                int(round(self.b * 255)),
                int(round(self.a * 255)))

    def to_rgb255(self):
        """Return (r, g, b) as rounded 0-255 ints."""
        return (int(round(self.r * 255)),
                int(round(self.g * 255)),
                int(round(self.b * 255)))
"tatsuo_fukushima@icloud.com"
] | tatsuo_fukushima@icloud.com |
30ef24db807d41d530dd3942bd186bbf530ebb20 | 8312dcd878a553b57f064d14d9f12a3a9ec9b481 | /bnc_guesser.py | d61eefbf9f89c515758d489366e80652f62afae8 | [] | no_license | henooxx5678/Python_Works | 7097df1aea4356d6e3a2bc47696c8644ef88d081 | 1e99ea289a42eeb8587e93be415b41e0dd3d2406 | refs/heads/master | 2020-08-07T16:22:44.949165 | 2019-11-22T13:40:31 | 2019-11-22T13:40:31 | 213,523,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | import random
DIGIT_RNAGE = 10 # 0 - 9
AMOUNT_OF_DIGITS = 4
amount_of_possibilities = 1
for i in range(AMOUNT_OF_DIGITS):
amount_of_possibilities *= DIGIT_RNAGE - i
guessed_list = []
a_count_list = []
b_count_list = []
def run():
global guessed_list, a_count_list, b_count_list
play_again = True
while(play_again):
guessed_list = []
a_count_list = []
b_count_list = []
gameover = False
# GAME START
print("=================")
print("New Game Started!")
while(not gameover):
guess_result = guess()
if guess_result == -1:
print("\nIt's totally no answer!", end = "\n\n")
play_again = ask_to_play_again()
gameover = True
elif guess_result == 1:
print("\nBINGO!")
print("Congratulate myself :D", end = "\n\n")
play_again = ask_to_play_again()
gameover = True
def guess():
    """Make one guess and collect the player's A/B feedback.

    Picks a random starting index and walks the candidate space (wrapping
    around) until a secret consistent with all prior feedback is found.
    Returns 1 when the player reports a full match, -1 when no consistent
    candidate exists (contradictory feedback), and 0 otherwise.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt)
    is narrowed to ValueError — the only error int() raises on bad input —
    and the magic number 4 now uses AMOUNT_OF_DIGITS for consistency with
    the module constants.
    """
    first_guessed_index = random.randint(0, amount_of_possibilities - 1)
    guessed_index = first_guessed_index
    guessed_nums = get_nums(guessed_index)

    # Advance until the candidate matches every recorded A/B report;
    # a full cycle back to the start means the feedback is contradictory.
    while not is_possible(guessed_nums):
        guessed_index = (guessed_index + 1) % amount_of_possibilities
        guessed_nums = get_nums(guessed_index)
        if guessed_index == first_guessed_index:
            return -1

    print("\nGuess: " + nums_to_str(guessed_nums))

    while True:
        print("A:", end=" ")
        input_a_str = input()
        print("B:", end=" ")
        input_b_str = input()
        try:
            input_a = int(input_a_str)
            input_b = int(input_b_str)
        except ValueError:
            print("Bad input!")
            continue
        # A + B can never exceed the number of digits in the secret.
        if input_a + input_b > AMOUNT_OF_DIGITS:
            print("Wrong input!")
            continue
        break

    if input_a == AMOUNT_OF_DIGITS:
        return 1
    # Record the guess and its feedback for future consistency checks.
    guessed_list.append(guessed_nums)
    a_count_list.append(input_a)
    b_count_list.append(input_b)
    return 0
def get_nums(index):
    """Decode *index* (0 .. amount_of_possibilities-1) into a list of
    AMOUNT_OF_DIGITS pairwise-distinct digits via mixed-radix decoding."""
    digit_ranks = []
    for n in range(AMOUNT_OF_DIGITS)[::-1]:
        radix = DIGIT_RNAGE - n
        digit_ranks.append(index % radix)
        index //= radix
    digit_ranks.reverse()

    result = []
    for rank in digit_ranks:
        value = rank
        # Shift past digits already used so the output stays distinct.
        for used in sorted(result):
            if value >= used:
                value += 1
        result.append(value)
    return result
def is_possible(nums):
    """Return True when *nums* is consistent with every recorded guess and
    the A/B counts the player reported for it.

    (The three history lists always grow in lockstep, so zipping them
    pairs each guess with its own feedback.)
    """
    for past_guess, expected_a, expected_b in zip(guessed_list, a_count_list, b_count_list):
        hits_in_place = 0
        hits_misplaced = 0
        for j in range(AMOUNT_OF_DIGITS):
            for k in range(AMOUNT_OF_DIGITS):
                if nums[j] == past_guess[k]:
                    if j == k:
                        hits_in_place += 1
                    else:
                        hits_misplaced += 1
        if hits_in_place != expected_a or hits_misplaced != expected_b:
            return False
    return True
def nums_to_str(nums):
    """Concatenate the digits in *nums* into a display string."""
    return "".join(str(num) for num in nums)
def ask_to_play_again():
    """Prompt until the player answers Y/y (returns True) or N/n (False)."""
    while True:
        print("Play again? Y/N:", end=" ")
        answer = input()
        if answer in ("y", "Y"):
            return True
        if answer in ("n", "N"):
            return False
| [
"henooxx5678@gmail.com"
] | henooxx5678@gmail.com |
c922049e1d08e7a7dd1929f419415ed617b2dccc | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/171/41957/submittedfiles/testes.py | 16dd14c2d0278fb3b37bde0222232be8c114fd08 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
import math
#COMECE AQUI ABAIXO
a=float(input('digite a base:'))
b=float(input('digite o expoente:'))
cont=0
i=0
c=a**b
while i<cont:
c=a**b
cont=cont+1
print('%d'%c) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
fc308fe2565e0ab49a244d77c05b5aa9be0becd1 | 6a0c265ce70def3065cb016f54bb16ef9d2706ce | /FinalProject.py | b81dd7f14ecaf736b719aab7cf2ce13f4dd62a57 | [] | no_license | band419/Spiking-Neural-Network---Genre-Recognizer | ba848e6e7708846c9bb2d15fbd8b7f24ad8d164c | 256e642cc2330600d77b7f61b1ac87d127ef9e14 | refs/heads/master | 2021-12-24T09:09:34.820119 | 2017-12-16T00:00:38 | 2017-12-16T00:00:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,060 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
from pylab import *
from FinalProjectNeuron import Neuron
import math
from sklearn.preprocessing import normalize
class GenreClassifier:
def __init__(self):
self.timeStep = 2
self.learningRate = 0.001
self.spikingThreshold = 0.8 #Found through testing my specific neurons
self.inputLayerSize = 138
self.numSeconds = 2005 #Number of input neurons/pixels
self.timeThreshold = 10 #Time to simulate neuron for
self.classifications = 2
self.hiddenLayerNum = 3
self.neuronPerLayer = [10, 20, 10]
self.dataList = []
self.isFirst = 0
self.inputLayer = []
for i in range(self.inputLayerSize):
self.inputLayer.append(Neuron(self.timeThreshold,0))
self.middleLayer = []
currNumInputs = 138
for i in range(self.hiddenLayerNum):
currLayer = []
for j in range(self.neuronPerLayer[i]):
currLayer.append(Neuron(self.timeThreshold, currNumInputs))
self.middleLayer.append(currLayer)
currNumInputs = self.neuronPerLayer[i]
self.outputLayer = Neuron(self.timeThreshold, 0)
weights = []
for i in range(math.floor(currNumInputs/2)):
self.outputLayer.weights.append(math.ceil(uniform(0,1000))/1000)
for i in range(math.floor(currNumInputs/2), currNumInputs):
self.outputLayer.weights.append(math.ceil(uniform(0,1000))/1000)
def getMetalFiles(self):
self.metalFiles = []
for i in range(5):
currName = "metal specs/metal.0000"
currName = currName + str(i)
currName += ".au.wav.csv"
self.metalFiles.append(currName)
# for i in range(10,30):
# currName = "metal specs/metal.000"
# currName += str(i)
# currName += ".au.wav.csv"
# self.metalFiles.append(currName)
for j in range(5):
array = np.genfromtxt(self.metalFiles[j], delimiter=',')
array = np.transpose(array)
array = array[0:250,:]
labeledArray = []
for i in range(array.shape[0]):
labeledArray.append(np.append(array[i], 1))
labeledArray = np.array(labeledArray)
if(self.isFirst == 0):
self.dataList = labeledArray
self.dataList = np.array(self.dataList)
self.isFirst = 1
else:
self.dataList = np.concatenate((self.dataList, labeledArray), axis=0)
def getClassificationMetalInput(self):
metalFiles = []
dataList = []
first = 1
for i in range(25,30):
currName = "metal specs/metal.000"
currName = currName + str(i)
currName += ".au.wav.csv"
metalFiles.append(currName)
for j in range(5):
array = np.genfromtxt(metalFiles[j], delimiter=',')
array = np.transpose(array)
array = array[0:100,:]
labeledArray = []
for i in range(array.shape[0]):
labeledArray.append(np.append(array[i], 1))
labeledArray = np.array(labeledArray)
if(first == 1):
dataList = labeledArray
first = 0
dataList = np.array(dataList)
else :
dataList = np.array(dataList)
dataList = np.concatenate((dataList, labeledArray), axis = 0)
return dataList
def getClassificationClassicalInput(self):
classicalFiles = []
dataList = []
first = 1
for i in range(25,30):
currName = "classical specs/classical.000"
currName = currName + str(i)
currName += ".au.wav.csv"
classicalFiles.append(currName)
for j in range(5):
array = np.genfromtxt(classicalFiles[j], delimiter=',')
array = np.transpose(array)
array = array[0:100,:]
labeledArray = []
for i in range(array.shape[0]):
labeledArray.append(np.append(array[i], 0))
labeledArray = np.array(labeledArray)
if(first == 1):
dataList = labeledArray
first = 0
else :
dataList = np.concatenate((dataList, labeledArray), axis = 0)
return dataList
def getClassicalFiles(self):
self.classicalFiles = []
for i in range(5):
currName = "classical specs/classical.0000"
currName += str(i)
currName += ".au.wav.csv"
self.classicalFiles.append(currName)
# for i in range(10,30):
# currName = "classical specs/classical.000"
# currName += str(i)
# currName += ".au.wav.csv"
# self.classicalFiles.append(currName)
for j in range(5):
array = np.genfromtxt(self.classicalFiles[j], delimiter=',')
array = np.transpose(array)
array = array[0:250,:]
labeledArray = []
for i in range(array.shape[0]):
labeledArray.append(np.append(array[i], 0))
labeledArray = np.array(labeledArray)
if(self.isFirst == 0):
self.dataList = labeledArray
self.dataList = np.array(self.dataList)
self.isFirst = 1
else:
self.dataList = np.concatenate((self.dataList, labeledArray), axis=0)
def train(self):
input = self.dataList
numFired = 0
numNotFired = 0
avgSpikeRate = 0
for k in range(len(self.inputLayer)):
neuron = self.inputLayer[k]
currentSpikeRate = 0
totalSpikeRate = 0
numIncreased = 0
numDecreased = 0
currentSpikeRate = 0
for i in range(len(input)):
currentSpikeRate = neuron.runNeuron(input[i][k]*15+7.9)
neuron.spikeRateForData.append(currentSpikeRate)
for i in range(len(input)):
if(currentSpikeRate >= self.spikingThreshold):
neuron.numFired += 1
numIncreased+=1
elif (currentSpikeRate < self.spikingThreshold):
neuron.notFired += 1
numDecreased += 1
if(neuron.numFired > neuron.notFired):
# print("Fired! ")
numFired+=1
neuron.fired = 1
else:
# print("Not Fired for input: ", )
numNotFired += 1 #Store current spike rate in array for training next
# print(neuron.weights,"\nNum fired: ",numFired, " Num not fired: ", numNotFired)
# print("Average Spike Rate: ", avgSpikeRate, " ", avgSpikeRate/len(input))
print("Training layer 1...\n\n")
for k in range(len(self.middleLayer[0])):
neuron = self.middleLayer[0][k]
currentSpikeRate = 0
totalSpikeRate = 0
numFired = 0
for i in range(len(input)):
totalSpikeRate += currentSpikeRate
# preSpikeRate =
currentSpikeRate = 0
for j in range(len(input[0])-1):
multiplier = 1
if(input[i][138] == 0):
multiplier *= 0.7
currentSpikeRate += neuron.runNeuron(multiplier*(self.inputLayer[j].spikeRateForData[i])*neuron.weights[j]*2.2)
neuron.spikeRateForData.append(currentSpikeRate) #Store current spike rate in array for training next
# print("Curr spike rate: ", currentSpikeRate)
for j in range(len(input[0])-1):
if(currentSpikeRate >= self.spikingThreshold and self.inputLayer[j].fired == 1): #If both fire, increase weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW <= 1 and currWeight+deltaW>-1):
neuron.weights[j] += deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == 1):
neuron.weights[j] = 1.000
# print("increased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (deltaW), " for input ", " ",numFired)
numIncreased += 1
neuron.numFired += 1
elif (currentSpikeRate < self.spikingThreshold - 0.1 and self.inputLayer[j].fired == 1): #if pre fires and post doesnt, decrease weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW >= -1):
neuron.weights[j] -= deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == -1):
neuron.weights[j] = -1.000
neuron.notFired += 1
# print("decreased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (-1*deltaW), " for input ", " ",numFired)
numDecreased += 1
if(neuron.numFired > neuron.notFired):
neuron.fired = 1
# print("Fired numFired:", neuron.numFired, " notFired: ", neuron.notFired)
neuron.spikeRateForData.append(currentSpikeRate) #Store current spike rate in array for training next
# print(i,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased)
# for j in range(len(input[0])):
# print(neuron.weights,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased, " for input ")
neuron.totalSpikeRate = totalSpikeRate/4
print("Training layer 2...\n\n")
for k in range(len(self.middleLayer[1])):
neuron = self.middleLayer[1][k]
currentSpikeRate = 0
totalSpikeRate = 0
numFired = 0
for i in range(len(input)):
totalSpikeRate += currentSpikeRate
# preSpikeRate =
currentSpikeRate = 0
for j in range(len(self.middleLayer[0])):
multiplier = 1
if(input[i][138] == 0):
multiplier *= 0.8
currentSpikeRate += neuron.runNeuron(multiplier*(self.middleLayer[0][j].spikeRateForData[i])*neuron.weights[j]*1.55)
# print("Curr spike rate: ", currentSpikeRate)
for j in range(len(self.middleLayer[0])):
if(currentSpikeRate >= self.spikingThreshold and self.inputLayer[j].fired == 1): #If both fire, increase weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW <= 1 and currWeight+deltaW>-1):
neuron.weights[j] += deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == 1):
neuron.weights[j] = 1.000
# print("increased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (deltaW), " for input ", " ",numFired)
numIncreased += 1
neuron.numFired+=1
elif (currentSpikeRate < self.spikingThreshold - 0.1 and self.inputLayer[j].fired == 1): #if pre fires and post doesnt, decrease weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW >= -1):
neuron.weights[j] -= deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == -1):
neuron.weights[j] = -1.000
neuron.notFired += 1
# print("decreased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (-1*deltaW), " for input ", " ",numFired)
numDecreased += 1
if(neuron.numFired > neuron.notFired):
neuron.fired = 1
# print("Fired numFired:", neuron.numFired, " notFired: ", neuron.notFired)
neuron.spikeRateForData.append(currentSpikeRate) #Store current spike rate in array for training next
# print(i,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased)
# for j in range(len(input[0])):
# print(neuron.weights,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased, " for input ")
neuron.totalSpikeRate = totalSpikeRate/4
print("Training layer 3...\n\n")
for k in range(len(self.middleLayer[2])):
neuron = self.middleLayer[2][k]
currentSpikeRate = 0
totalSpikeRate = 0
numFired = 0
for i in range(len(input)):
totalSpikeRate += currentSpikeRate
# preSpikeRate =
currentSpikeRate = 0
for j in range(len(self.middleLayer[1])):
multiplier = 1
if(input[i][138] == 0):
multiplier *= 0.8
currentSpikeRate += neuron.runNeuron(multiplier*(self.middleLayer[1][j].spikeRateForData[i])*neuron.weights[j]*1.55)
# print("Curr spike rate: ", currentSpikeRate)
for j in range(len(self.middleLayer[1])):
if(currentSpikeRate >= self.spikingThreshold and self.inputLayer[j].fired == 1): #If both fire, increase weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW <= 1 and currWeight+deltaW>-1):
neuron.weights[j] += deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == 1):
neuron.weights[j] = 1.000
# print("increased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (deltaW), " for input ", " ",numFired)
numIncreased += 1
neuron.numFired+=1
elif (currentSpikeRate < self.spikingThreshold - 0.1 and self.inputLayer[j].fired == 1): #if pre fires and post doesnt, decrease weight
currWeight = neuron.weights[j]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW >= -1):
neuron.weights[j] -= deltaW
# neuron.weights[j] = round(neuron.weights[j])
elif(currWeight+deltaW == -1):
neuron.weights[j] = -1.000
neuron.notFired += 1
# print("decreased weight from ", currWeight, " to ", neuron.weights[j], " with delta ", (-1*deltaW), " for input ", " ",numFired)
numDecreased += 1
if(neuron.numFired > neuron.notFired):
neuron.fired = 1
# print("Fired numFired:", neuron.numFired, " notFired: ", neuron.notFired)
neuron.spikeRateForData.append(currentSpikeRate) #Store current spike rate in array for training next
# print(i,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased)
# for j in range(len(input[0])):
# print(neuron.weights,"\nNum increased: ",numIncreased, " Num Decreased: ", numDecreased, " for input ")
neuron.totalSpikeRate = totalSpikeRate/4
# print("\n",totalSpikeRate/4,"\n")
print("Training output neuron...\n\n")
self.trainExcitatoryNeurons(input)
self.trainInhibitoryNeurons(input)
def saveWeights(self):
layer1 = []
for i in range(len(self.middleLayer[0])):
currArray = []
for j in range(len(self.middleLayer[0][i].weights)):
currArray.append(self.middleLayer[0][i].weights[j])
layer1.append(currArray)
layer1 = np.array(layer1)
np.savetxt('layer13.csv', layer1, delimiter=",")
layer2 = []
for i in range(len(self.middleLayer[1])):
currArray = []
for j in range(len(self.middleLayer[1][i].weights)):
currArray.append(self.middleLayer[1][i].weights[j])
layer2.append(currArray)
layer2 = np.array(layer2)
np.savetxt('layer23.csv', layer2, delimiter=",")
layer3 = []
for i in range(len(self.middleLayer[2])):
currArray = []
for j in range(len(self.middleLayer[2][i].weights)):
currArray.append(self.middleLayer[2][i].weights[j])
layer3.append(currArray)
layer3 = np.array(layer3)
np.savetxt('layer33.csv', layer3, delimiter=",")
outputLayer = []
for i in range(len(self.outputLayer.weights)):
outputLayer.append(self.outputLayer.weights[i])
outputLayer = np.array(outputLayer)
np.savetxt('outputLayer3.csv', outputLayer, delimiter=",")
def getWeights(self):
layer1 = genfromtxt('layer1.csv', delimiter=',')
for i in range(len(self.middleLayer[0])):
for j in range(138):
self.middleLayer[0][i].weights[j] = layer1[i][j]
layer2 = genfromtxt('layer2.csv', delimiter=',')
for i in range(len(self.middleLayer[1])):
for j in range(len(self.middleLayer[1][0].weights)):
self.middleLayer[1][i].weights[j] = layer2[i][j]
layer3 = genfromtxt('layer3.csv', delimiter=',')
for i in range(len(self.middleLayer[2])):
for j in range(len(self.middleLayer[2][0].weights)):
self.middleLayer[2][i].weights[j] = layer3[i][j]
outputLayer = genfromtxt('outputLayer.csv', delimiter=',')
for i in range(len(self.outputLayer.weights)):
self.outputLayer.weights[i] = outputLayer[i]
def trainExcitatoryNeurons(self, input):
for k in range(int(math.floor(len(self.outputLayer.weights)/2))):
# print("Classification rate: ",self.middleLayer[k].spikeRateForData)
currentSpikeRate = 0
totalSpikeRate = 0
numFired = 0
for i in range(len(input)):
preSpikeRate = self.middleLayer[2][k].spikeRateForData[i]
preActivity = 1 if preSpikeRate >= self.spikingThreshold else 0
currWeight = self.outputLayer.weights[k]
# print("\nCurrSpikeRate: ",currSpikeRate, " preSpikeRate ", preSpikeRate)
if(self.dataList[i][138] == 1):
currSpikeRate = self.outputLayer.runNeuron(50)
else:
currSpikeRate = (self.outputLayer.runNeuron(preActivity*currWeight*20))
# print("CurrSpikeRate: ",currSpikeRate, " preSpikeRate ", preSpikeRate)
if preSpikeRate >= self.spikingThreshold and (self.dataList[i][138] == 1):
currWeight = self.outputLayer.weights[k]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if (self.dataList[i][138] == 1):
deltaW = math.fabs(deltaW)*2
if(currWeight+deltaW <=1):
self.outputLayer.weights[k] += deltaW
self.outputLayer.weights[k] = round(self.outputLayer.weights[k])
else:
self.outputLayer.weights[k] = 1.000
# print("increased weight from ", currWeight, " to ", self.outputLayer.weights[k], " with delta ", round(deltaW), " for input ", input[i], " ")
elif preSpikeRate >= self.spikingThreshold and self.dataList[i][138] == 0:
currWeight = self.outputLayer.weights[k]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight-deltaW >=-1):
self.outputLayer.weights[k] -= deltaW
self.outputLayer.weights[k] = round(self.outputLayer.weights[k])
else:
neuron.weights[j] = -1.000
# print("decreased weight from ", currWeight, " to ", self.outputLayer.weights[k], " with delta ", round(deltaW), " for input ", input[i], " ")
# print("Weight for excitatory output ", input[i][138], " = ", self.outputLayer.weights)
def trainInhibitoryNeurons(self, input):
for k in range(int(math.floor(len(self.outputLayer.weights)/2)), self.hiddenLayerNum):
currentSpikeRate = 0
totalSpikeRate = 0
numFired = 0
for i in range(len(input)):
preSpikeRate = self.middleLayer[2][k].spikeRateForData[i]
preActivity = 1 if preSpikeRate >= self.spikingThreshold else 0
currWeight = self.outputLayer.weights[k]
currSpikeRate += (self.outputLayer.runNeuron(preActivity*currWeight*20))
if preSpikeRate >= self.spikingThreshold and self.dataList[i][138] == 0:
currWeight = self.outputLayer.weights[k]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight-deltaW >=-1):
self.outputLayer.weights[k] -= deltaW
self.outputLayer.weights[k] = round(self.outputLayer.weights[k])
else:
self.outputLayer.weights[k] = -1.000
# print("decreased weight from ", currWeight, " to ", self.outputLayer.weights[k], " with delta ", round(deltaW), " for input ", input[i], " ")
elif preSpikeRate >= self.spikingThreshold and self.dataList[i][138] == 1:
currWeight = self.outputLayer.weights[k]
deltaW = (self.learningRate * 1 * (1 - 1*currWeight))/self.timeStep
if(currWeight+deltaW <=1):
self.outputLayer.weights[k] += deltaW
self.outputLayer.weights[k] = round(self.outputLayer.weights[k])
else:
self.outputLayer.weights[k] = 1.000
# print("increased weight from ", currWeight, " to ", self.outputLayer.weights[k], " with delta ", round(deltaW), " for input ", input[i], " ")
# print("Weight for inhibitory output ", input[i][138], " = ", self.outputLayer.weights)
def classify(self, inputs):
correctlyClassified = 0
incorrectlyClassified = 0
total = len(inputs)
firingRates = []
input = inputs
for x in range(len(inputs)):
currGenre = input[x][138]
for k in range(len(self.inputLayer)):
neuron = self.inputLayer[k]
currSpikeRate = 0
currSpikeRate += neuron.runNeuron(input[x][k]*65.0)
neuron.classificationRate = currSpikeRate
for i in range(len(self.inputLayer)):
neuron = self.inputLayer[i]
currSpikeRate = 0
currActivity = 1 if neuron.classificationRate > self.spikingThreshold else 0
neuron.classificationActivity = currActivity
#Layer 1
for k in range(len(self.middleLayer[0])):
neuron = self.middleLayer[0][k]
currSpikeRate = 0
multiplier = 1.2
if(self.inputLayer[k].classificationActivity == 0):
multiplier = 0.8
for i in range(len(self.middleLayer[0][k].weights)):
currSpikeRate += neuron.runNeuron(multiplier*neuron.weights[k]*self.inputLayer[k].classificationRate*1.5)
neuron.classificationRate = currSpikeRate
# print("Layer 1: ", currSpikeRate)
for i in range(len(self.middleLayer[0])):
neuron = self.middleLayer[0][i]
currSpikeRate = 0
currActivity = 1 if neuron.classificationRate > self.spikingThreshold else 0
neuron.classificationActivity = currActivity
#layer 2
for k in range(len(self.middleLayer[1])):
neuron = self.middleLayer[1][k]
currSpikeRate = 0
multiplier = 1.5
for i in range(len(self.middleLayer[1][k].weights)):
if(self.middleLayer[0][i].classificationActivity == 0 or input[x][138] == 0):
multiplier = 0.8
currSpikeRate += neuron.runNeuron(multiplier*neuron.weights[i]*self.middleLayer[0][i].classificationRate*1.6)
neuron.classificationRate = currSpikeRate
# print("Layer 2: ", currSpikeRate)
for i in range(len(self.middleLayer[1])):
neuron = self.middleLayer[1][i]
currSpikeRate = 0
currActivity = 1 if neuron.classificationRate > self.spikingThreshold else 0
neuron.classificationActivity = currActivity
#layer 3
for k in range(len(self.middleLayer[2])):
neuron = self.middleLayer[2][k]
currSpikeRate = 0
multiplier = 1.1
for i in range(len(self.middleLayer[1][k].weights)):
if(self.middleLayer[1][i].classificationActivity == 0 or input[x][138] == 0):
multiplier = 0.8
currSpikeRate += neuron.runNeuron(multiplier*neuron.weights[i]*self.middleLayer[1][i].classificationRate*1.5)
neuron.classificationRate = currSpikeRate
# print("Layer 3: ", currSpikeRate)
for i in range(len(self.middleLayer[2])):
neuron = self.middleLayer[2][i]
currSpikeRate = 0
currActivity = 1 if neuron.classificationRate > self.spikingThreshold else 0
neuron.classificationActivity = currActivity
#output layer
outputSpikingRate = 0
currSpikeRate = 0
multiplier = 1.1
for i in range(len(self.middleLayer[2])):
if(self.middleLayer[2][i].classificationActivity == 0 or input[x][138] == 0):
multiplier = 0.8
currSpikeRate += self.outputLayer.runNeuron(multiplier*self.outputLayer.weights[i]*self.middleLayer[2][i].classificationRate*0.7)
outputSpikingRate = currSpikeRate
print("Ouput firing rate: ",outputSpikingRate," for genre ", currGenre)
if(outputSpikingRate >= 0.6 and currGenre == 1):
correctlyClassified += 1
elif(outputSpikingRate < 0.2 and currGenre == 0):
correctlyClassified += 1
else:
incorrectlyClassified += 1
print("Correctly Classified: ", correctlyClassified)
print("IncorrectlyClassified: ", incorrectlyClassified)
def round(input):
return math.ceil(input*100000)/100000
test = GenreClassifier()
test.getMetalFiles()
test.getClassicalFiles()
np.random.shuffle(test.dataList)
# print("Reading file: \n\n")
# test.dataList = np.genfromtxt("global.csv", delimiter=',')
print(test.dataList, "\nShape: ", test.dataList.shape)
# test.train()
# test.saveWeights()
test.getWeights()
test.isFirst = 0
classificationInput = np.concatenate((test.getClassificationMetalInput(), test.getClassificationClassicalInput()),axis=0)
np.random.shuffle(classificationInput)
test.classify(classificationInput)
# test.train()
# test.train()
# test.classify()
| [
"shashank135sharma@gmail.com"
] | shashank135sharma@gmail.com |
aad1baf67054f7bfc6094e5404251278ec09f03b | d1526fa3883e0ff9321989f62867b739ad7fd5d7 | /non-unique elements.py | c8a65d5fc7a51dc6a00d4dc9ba5ba64ba8aa67be | [] | no_license | rlaalsdud/checkio | 55a28e0f2db963f4e5ad3bd883a5728bf7561769 | 5110ce065611d1a7a14c355c0b46fd45bc04e99d | refs/heads/master | 2020-04-30T06:46:46.969820 | 2019-03-28T05:54:44 | 2019-03-28T05:54:44 | 176,662,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #Your optional code here
#You can import some modules or create additional functions
def checkio(data: list) -> list:
new_list = []
for i in data:
if data.count(i) > 1:
new_list.append(i)
return new_list
if __name__ == "__main__":
#These "asserts" using only for self-checking and not necessary for auto-testing
assert list(checkio([1, 2, 3, 1, 3])) == [1, 3, 1, 3], "1st example"
assert list(checkio([1, 2, 3, 4, 5])) == [], "2nd example"
assert list(checkio([5, 5, 5, 5, 5])) == [5, 5, 5, 5, 5], "3rd example"
assert list(checkio([10, 9, 10, 10, 9, 8])) == [10, 9, 10, 10, 9], "4th example"
print("It is all good. Let's check it now")
| [
"noreply@github.com"
] | rlaalsdud.noreply@github.com |
506f6e1137bc556a0b94e3b1d2c2103814f7c6c4 | 4fed2dd9e4ad0c39e175eff78cb6a0ed2f565860 | /main_v3.py | c0a070a6a46b18eddff3b7162c0590665d9027bd | [] | no_license | HansiZeng/BBN | 27e6a3cf0a2dc2b3f6e8a02ac46b9bc77891a487 | 5a5629e64fbd1b8873fef144c192abb2ab4ae973 | refs/heads/master | 2020-09-28T06:30:05.605168 | 2019-12-08T18:38:41 | 2019-12-08T18:38:41 | 226,712,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,987 | py | from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow.compat.v1 as tf
# Helper libraries
import struct
import numpy as np
#import matplotlib.pyplot as plt
import utils
print(tf.__version__)
"""
helper function
"""
def log_gaussian(x, mu, sigma):
return -0.5 * np.log(2 * np.pi) - tf.log(tf.abs(sigma)) - (x - mu) ** 2 / (2 * sigma ** 2)
# build determined W first
class MnistClassification():
def __init__(self, input_size, hidden_size, learning_rate, net_struct, forward_only=True):
self.input_size = input_size
self.hidden_size = hidden_size
self.learning_rate = learning_rate
self.net_struct = net_struct
self.forward_only = forward_only
self.print_ops = []
self.global_step = tf.Variable(0, trainable=False)
# build
self._build_placeholder()
self._build_graph_and_get_loss()
self.saver = tf.train.Saver(tf.global_variables())
def _build_placeholder(self):
self.images = tf.placeholder(dtype=tf.float32, shape=(None, self.input_size), name="images")
self.labels = tf.placeholder(dtype=tf.int32, shape=(None), name="labels")
if "random" in self.net_struct:
self.random_1 = tf.placeholder(dtype=tf.float32, shape=(self.input_size, self.hidden_size), name='r1')
self.random_2 = tf.placeholder(dtype=tf.float32, shape=(self.hidden_size, self.hidden_size), name='r2')
self.random_3 = tf.placeholder(dtype=tf.float32, shape=(self.hidden_size, 10), name='r3')
def _build_graph_and_get_loss(self):
# get variables
if "random" not in self.net_struct:
self.W1 = tf.Variable(tf.random_normal([self.input_size, self.hidden_size], stddev=0.35), name="W1")
self.W2 = tf.Variable(tf.random_normal([self.hidden_size, self.hidden_size], stddev=0.35), name="W2")
self.W3 = tf.Variable(tf.random_normal([self.hidden_size, 10], stddev=0.35), name="W3")
else:
self.mu_1 = tf.Variable(tf.random_normal([self.input_size, self.hidden_size], stddev=0.35), name="mu1")
self.rho_1 = tf.Variable(tf.random_normal([self.input_size, self.hidden_size], stddev=0.35), name="rho1")
self.mu_2 = tf.Variable(tf.random_normal([self.hidden_size, self.hidden_size], stddev=0.35), name="mu_2")
self.rho_2 = tf.Variable(tf.random_normal([self.hidden_size, self.hidden_size], stddev=0.35), name="rho_2")
self.mu_3 = tf.Variable(tf.random_normal([self.hidden_size, 10], stddev=0.35), name="mu_3")
self.rho_3 = tf.Variable(tf.random_normal([self.hidden_size, 10], stddev=0.35), name="rho_3")
# build graph
self.W1 = self.mu_1 + self.random_1 * tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_1)))
self.W2 = self.mu_2 + self.random_2 * tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_2)))
self.W3 = self.mu_3 + self.random_3 * tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_3)))
self.tmp = None
self.y = None
# build graph
self.tmp = tf.matmul(self.images, self.W1)
self.tmp = tf.nn.relu(self.tmp, name="relu1")
self.tmp = tf.matmul(self.tmp, self.W2)
self.tmp = tf.nn.relu(self.tmp, name="relu2")
self.y = tf.matmul(self.tmp, self.W3)
self.y = tf.nn.softmax(self.y, axis=1) + 1e-7
# create one hot encoding for labels
self.one_hot_labels = tf.one_hot(self.labels, depth=10)
#self.print_ops.append(tf.print("labels: ", self.labels, tf.shape(self.labels)))
#self.print_ops.append(tf.print("One hot labels: ", self.one_hot_labels, tf.shape(self.one_hot_labels)))
# compute loss
self.loss = tf.reduce_sum(-tf.math.log(self.y) * self.one_hot_labels)
if "random" in self.net_struct:
log_pw, log_qw = 0.0, 0.0
log_pw += tf.reduce_sum(log_gaussian(self.W1, 0.0, 1.0))
log_qw += tf.reduce_sum(log_gaussian(self.W1, self.mu_1, tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_1)))))
log_pw += tf.reduce_sum(log_gaussian(self.W2, 0.0, 1.0))
log_qw += tf.reduce_sum(log_gaussian(self.W2, self.mu_2, tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_2)))))
log_pw += tf.reduce_sum(log_gaussian(self.W3, 0.0, 1.0))
log_qw += tf.reduce_sum(log_gaussian(self.W3, self.mu_3, tf.math.sqrt(tf.math.log(1 + tf.math.exp(self.rho_3)))))
#self.print_ops.append(tf.print("prior loss: ", log_qw-log_pw))
self.loss += (log_qw - log_pw)/ 6.0
#self.print_ops.append(tf.print("mask: ", -tf.math.log(self.y) * self.one_hot_labels))
#self.print_ops.append(tf.print("the step loss: ", self.loss))
# check whether the model can overfit the train batch
self.preds = tf.cast(tf.argmax(self.y, axis=1), dtype=tf.int32)
self.accu = tf.reduce_sum(tf.cast(tf.equal(self.preds, self.labels), dtype=tf.float32)) / \
tf.cast(tf.shape(self.preds)[0], tf.float32)
#self.print_ops.append(tf.print("preds: ", self.preds, tf.shape(self.preds)))
#self.print_ops.append(tf.print("labels: ", self.labels, tf.shape(self.labels)))
if not self.forward_only:
# apply gradients
self.update = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss,global_step=self.global_step)
else:
pass
def step(self, session, input_feed, forward_only):
if not forward_only:
output_feed = [self.loss, self.accu, self.update, self.print_ops]
else:
output_feed = [self.accu]
outputs = session.run(output_feed, input_feed)
if not forward_only:
return outputs[0], outputs[1]
else:
return outputs[0]
class Dataset():
def __init__(self, model, image_path, label_path, batch_size):
self.model = model
self.image_path = image_path
self.label_path = label_path
self.images, self.labels = self._read_images_and_labels()
self.images = self.images / 126.0
self.batch_size = batch_size
def _read_images_and_labels(self):
with open(self.image_path,'rb') as f:
magic, size = struct.unpack(">II", f.read(8))
nrows, ncols = struct.unpack(">II", f.read(8))
print("label size: ", size)
data = np.fromfile(f, dtype=np.dtype(np.uint8).newbyteorder('>'))
data = data.reshape((size, nrows*ncols))
with open(self.label_path, 'rb') as f:
magic, size = struct.unpack(">II", f.read(8))
print("label size: ", size)
labels = np.fromfile(f, dtype=np.dtype(np.uint8).newbyteorder('>'))
return data, labels
def initilize_epoch(self):
self.cur_idx = 0
pertumation_idxs = np.random.permutation(self.images.shape[0])
self.images = self.images[pertumation_idxs, :]
self.labels = self.labels[pertumation_idxs]
def get_train_batch(self):
input_feed = {}
if self.cur_idx + self.batch_size > self.images.shape[0]:
has_next = False
input_feed[self.model.images.name] = self.images[self.cur_idx: self.images.shape[0]]
input_feed[self.model.labels.name] = self.labels[self.cur_idx: self.images.shape[0]]
if "random" in self.model.net_struct:
input_feed[self.model.random_1.name] = np.random.normal(size=(self.model.input_size, self.model.hidden_size))
input_feed[self.model.random_2.name] = np.random.normal(size=(self.model.hidden_size, self.model.hidden_size))
input_feed[self.model.random_3.name] = np.random.normal(size=(self.model.hidden_size, 10))
return input_feed, has_next
else:
has_next = True
input_feed[self.model.images.name] = self.images[self.cur_idx: self.cur_idx+self.batch_size]
input_feed[self.model.labels.name] = self.labels[self.cur_idx: self.cur_idx+self.batch_size]
if "random" in self.model.net_struct:
input_feed[self.model.random_1.name] = np.random.normal(size=(self.model.input_size, self.model.hidden_size))
input_feed[self.model.random_2.name] = np.random.normal(size=(self.model.hidden_size, self.model.hidden_size))
input_feed[self.model.random_3.name] = np.random.normal(size=(self.model.hidden_size, 10))
self.cur_idx += self.batch_size
return input_feed, has_next
def get_test_batch(self):
input_feed = {}
input_feed[self.model.images.name] = self.images[0: self.images.shape[0]]
input_feed[self.model.labels.name] = self.labels[0: self.images.shape[0]]
if "random" in self.model.net_struct:
input_feed[self.model.random_1.name] = np.random.normal(size=(self.model.input_size, self.model.hidden_size))
input_feed[self.model.random_2.name] = np.random.normal(size=(self.model.hidden_size, self.model.hidden_size))
input_feed[self.model.random_3.name] = np.random.normal(size=(self.model.hidden_size, 10))
return input_feed
def train():
    """Train the MNIST classifier for a fixed number of epochs, then
    evaluate it on the held-out test files and print the accuracy."""
    # Hyper-parameters and data locations.
    image_path = "train-images-idx3-ubyte"
    label_path = "train-labels-idx1-ubyte"
    ckpt_file = ""  # checkpointing currently disabled (see commented code below)
    epochs = 1000
    learning_rate = 1e-3
    batch_size = 1000
    input_size = 28 * 28
    hidden_size = 200
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = MnistClassification(input_size, hidden_size, learning_rate,
                                    net_struct="random_factor", forward_only=False)
        dataset = Dataset(model, image_path, label_path, batch_size)
        sess.run(tf.initialize_all_variables())
        for epoch in range(epochs):
            print("In epoch: ", epoch)
            has_next = True
            step_count = 0
            dataset.initilize_epoch()
            while has_next:
                step_count += 1
                input_feed, has_next = dataset.get_train_batch()
                loss, accu = model.step(sess, input_feed, forward_only=False)
                if step_count % 10 == 0:
                    print("loss: %.3f\t accuracy: %.3f " % (loss / batch_size, accu))
            # ckpt_path = "./" + "mnist_det_weight.ckpt"
            # model.saver.save(sess, ckpt_path, global_step=model.global_step)
        # Evaluate on the test files with the trained weights.
        image_path = "t10k-images-idx3-ubyte"
        label_path = "t10k-labels-idx1-ubyte"
        dataset = Dataset(model, image_path, label_path, batch_size)
        input_feed = dataset.get_test_batch()
        accu = model.step(sess, input_feed, forward_only=True)
        print("accuracy: ", accu)

if __name__ == "__main__":
    train()
| [
"hanszeng@nanyuan.cs.utah.edu"
] | hanszeng@nanyuan.cs.utah.edu |
5e2e2e8f96f8ae335199334b6e55a0bb29c6c182 | a22a2d45e1771f6507d719bf6547cd4f5e4ffa0e | /functions/functions.py | a79af7dc5feca0fc5478e3a702cbdb6d806e0c59 | [] | no_license | njdowdy/discoverLife_Apoidea | 28e0f10fa8e1b3ecf2e9edb82984ace3d359b601 | ff292d9db924effe12990e2eb90046093dbb3dda | refs/heads/master | 2023-03-14T10:00:11.401196 | 2021-03-11T22:43:52 | 2021-03-11T22:43:52 | 341,020,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,347 | py | import pandas as pd
import re
from helpers.pLl import pLl
# define custom functions
def apply_manual_fixes(data):
    """Patch known-bad records in the raw scrape before parsing.

    Overwrites row 1401 of column 0 with a hand-corrected entry (the source
    row had a mangled author/emendation string). Mutates *data* in place and
    returns the same object.
    """
    corrected_entry = (
        '\xa0\xa0\xa0Andrena takachihoi\xa0Hirashima, 1964, emend.'
        '\xa0--\xa0Andrena (Euandrena) takachihsi\xa0'
        'Hirashima, 1964, incorrect original spelling in species heading'
    )
    data[0][1401] = corrected_entry
    return data
def encoding_fix(author_in):
    """Repair author surnames whose accented characters were mangled.

    Each pattern uses '.' where an accented character was lost during
    scraping, so the substitution also matches the already-correct spelling
    (making the fix idempotent).
    """
    repairs = (
        (r'Reb.lo', 'Rebêlo'),
        (r'Sep.lveda', 'Sepúlveda'),
        (r'Qui.onez', 'Quiñonez'),
        (r'J.nior', 'Júnior'),
        (r'Y..ez', 'Yáñez'),
        (r'Ord..ez', 'Ordóñez'),
    )
    author_out = author_in
    for pattern, replacement in repairs:
        author_out = re.sub(pattern, replacement, author_out)
    return author_out
def read_data(names_file):
    """Load *names_file* (path or file-like) as a headerless CSV DataFrame."""
    return pd.read_csv(names_file, header=None)
def write_data(data, output_file):
    """Write *data* to *output_file* as CSV, UTF-8 with BOM (Excel-friendly)."""
    frame = pd.DataFrame(data)
    frame.to_csv(output_file, encoding='utf-8-sig')
def flatten(mylist):
    """Return a flat list containing every non-list element of the
    (arbitrarily nested) list *mylist*, in depth-first order."""
    flat = []
    pending = list(mylist)
    while pending:
        element = pending.pop(0)
        if isinstance(element, list):
            # Re-queue the sublist's items ahead of the remaining work so
            # the depth-first order of the recursive version is preserved.
            pending = element + pending
        else:
            flat.append(element)
    return flat
def unicode_name_fix(line_in, parent_id_in):
    """Apply hand-curated string repairs to one scraped name line.

    Returns (fixed_line, log_dict): the repaired line with NBSPs turned into
    plain spaces, plus a change-log dict (empty-valued when nothing changed)
    keyed by 'parent_id', 'original_text' and 'altered_text'.
    """
# Ordered, literal one-off repairs for known-bad records.
line_out = line_in.replace('ůoziůski', 'Ůoziůski')
line_out = line_out.replace('_cincta', ' cincta')
line_out = line_out.replace('Azorae', 'azorae')
line_out = line_out.replace(' Evylaeus)', '\xa0Lasioglossum (Evylaeus)')
line_out = line_out.replace(' Dialictus)', '\xa0Lasioglossum (Dialictus)')
line_out = line_out.replace(' Austronomia)', '\xa0Lipotriches (Austronomia)')
line_out = line_out.replace('\xa0Hedicke, 1938, Andrena ', '\xa0Hedicke, 1938;\xa0Andrena ')
line_out = line_out.replace('Michener, 1966, Compsomelissa', 'Michener, 1966;\xa0Compsomelissa')
line_out = line_out.replace('Andrena cingulata auct , not Fabricius', 'Andrena cingulata_auct,_not_Fabricius')
line_out = line_out.replace('argentata auct, not Fabricius, 1793', 'argentata_auct_not_Fabricius,_1793')
line_out = line_out.replace('subspecies Dieunomia', 'subspecies;\xa0Dieunomia')
line_out = line_out.replace('hypochrysea\xa0Rohweri Cockerell, 1907',
'hypochrysea\xa0rohweri Cockerell, 1907, author requires verification - NJD')
line_out = line_out.replace('Prosopis Bequaerti Schrottky, 1910',
'Prosopis bequaerti Schrottky, 1910, author requires verification - NJD')
line_out = line_out.replace('Halictus flavopunctatus\xa0(Halictus, 1924)',
'Halictus flavopunctatus\xa0(Friese, 1924), author requires verification - NJD')
line_out = line_out.replace('(Tkalců, valid subspecies, 1979)', '(Tkalců, 1979), valid subspecies')
line_out = line_out.replace('Megachile Megachile (Austromegachile)', 'Megachile (Austromegachile)')
line_out = line_out.replace('Prosopis gracillima\xa0Schrottky, 1902, Prosopis gracillima var',
'Prosopis gracillima\xa0Schrottky, 1902;\xa0Prosopis gracillima var')
line_out = line_out.replace('laevifrons var, moricei Friese, 1899', 'laevifrons var moricei Friese, 1899')
line_out = line_out.replace('tsinanensis\xa0(Cockerell, 1930), valid subspecies; V',
'tsinanensis\xa0(Cockerell, 1930), valid subspecies')
line_out = line_out.replace('barretti\xa0(Cockerell, 1929)\xa0-- F', 'barretti\xa0(Cockerell, 1929)')
line_out = line_out.replace('jucundum Smith, 1879, Halictus jucundum', 'jucundum Smith, 1879;\xa0Halictus jucundum')
line_out = line_out.replace('Neochelynia paulista\xa0(Schrottky, 1920;',
'Neochelynia paulista\xa0(Schrottky, 1920);')
line_out = line_out.replace('Pachyanthidium paulinierii\xa0(Guérin-Méneville, 1845;',
'Pachyanthidium paulinierii\xa0(Guérin-Méneville, 1845);')
# log which lines got changed for manual verification!
# Note: the log captures the text *before* the NBSP normalization below,
# so 'altered_text' may still contain \xa0 characters.
if line_out != line_in:
log_out = {
'parent_id': parent_id_in,
'original_text': line_in,
'altered_text': line_out
}
else:
log_out = {
'parent_id': '',
'original_text': '',
'altered_text': ''
}
line_out = line_out.replace('\xa0', ' ')
return line_out, log_out
def capitalize_repl(match):
    """re.sub callback: capitalize group 1, preserving one leading space."""
    text = match.group(1)
    if text[0] == ' ':
        return ' ' + text[1:].capitalize()
    return text.capitalize()
def upper_repl(match):
    """re.sub callback: return capture group 1 upper-cased."""
    group_text = match.group(1)
    return group_text.upper()
def lower_repl(match):
    """re.sub callback: return capture group 1 lower-cased."""
    group_text = match.group(1)
    return group_text.lower()
def genus_extractor(record):
    """Return the leading word of *record* as the genus.

    The genus is the text before the first space that is followed by at
    least one more character. A leading '(' means the record starts with a
    subgenus rather than a genus, so '' is returned; single-word records
    (no qualifying space) also yield ''.
    """
    head = re.search(r'^(.*?)(?= .)', record)
    if not head:
        return ''
    genus_out = head[0]
    if genus_out[0] == '(':
        return ''
    return genus_out
def species_extractor(record):
    """Return the species epithet from *record*: the first token after the
    genus that starts with a lowercase letter (or looks like 'X-...' /
    '9-...'), excluding any parenthesized subgenus text; '' if none found.
    """
# NOTE(review): .replace(' ', ' ') below looks like a no-op; it was likely
# .replace('  ', ' ') (collapse double spaces) before whitespace was lost
# in this copy of the file -- confirm against the original source.
record = re.sub(r'(\(.*?\))', '', record).strip().replace(' ', ' ') # remove parenthetical info
record = re.sub(r'^(.*?) ', '', record).strip() # consider only words after genus
species_exists = [x for x in record.split(' ') if re.match(r'^[a-z]', x) or
re.search(r'^[A-Z]-', x) or re.search(r'^[0-9]-', x)]
species_exists = [x for x in species_exists if '(' not in x and ')' not in x] # handles (Subgenus [text])
if species_exists:
species_out = species_exists[0].strip()
else:
species_out = ''
return species_out
def subspecies_extractor(species_in, record):
    """Return the subspecies text from *record*.

    With a known species, takes the text between the species epithet and the
    next capitalized token (usually the author). Without one, takes the
    lowercase tail after the genus/subgenus, trimmed at the publication.
    Returns '' when nothing qualifies.
    """
# NOTE(review): .replace(' ', ' ') below is likely a collapsed '  ' -> ' '
# (see species_extractor) -- confirm against the original source.
record = re.sub(r'(\(.*?\))|(,.*)', '', record).strip().replace(' ', ' ') # remove parenthetical info
if species_in != '':
record = re.search(fr'(?<={species_in} ).*?(?=[A-Z]|\([A-Z]|$)', record)
if record:
subspecies_out = record[0].strip()
else:
subspecies_out = ''
else:
if re.search(r' [a-z].*? ', record): # if any lowercase words exist (potential subspecies)
subspecies_out = re.sub(r'^(.*?) ([a-z].*)', '\\2', record) # only words after genus and/or subgenus
subspecies_out = re.sub(r' [A-Z].*', '', subspecies_out) # only words before publication
else:
subspecies_out = ''
return subspecies_out
def subgenus_extractor(genus_in, species_in, record):
    """Return the subgenus found between genus and species (or any
    parenthetical that is not a publication year), with 'sensu lato' and
    'vel' annotations normalized to '_sl' / '_vel_...' notes; '' if none.
    """
subgenus_exists = re.findall(fr'{genus_in} (.*?) {species_in}', record)
if not subgenus_exists:
# regex matches (Subgenus ...) anywhere except end of line and if (Subgenus ?) or (Subgenus [0-9])
# to avoid matching parenthetical publication string
subgenus_exists = re.search(r'(\(.*?[^0-9?]\))(?!$)', record) # look for subgenus elsewhere
if subgenus_exists:
subgenus_out = subgenus_exists[0].replace('(', '').replace(')', '').strip()
# if contains 'sl' change to a subgenus note of 'sensu lato'
if ' sl' in subgenus_out or 's l' in subgenus_out or 's_l' in subgenus_out:
subgenus_out = re.sub(r'[^A-Za-z]s[^A-Za-z]l|[^A-Za-z]sl', '_sl', subgenus_out).strip()
# if contains 'vel' change to a subgenus note of 'vel*'
if ' vel' in subgenus_out:
subgenus_out = re.sub(r'[^A-Za-z](vel)[^A-Za-z](.*)', '_\\1_\\2', subgenus_out).strip()
else:
subgenus_out = ''
return subgenus_out
def publication_extractor(record, genus_in, subgenus_in, species_in, subspecies_in):
    """Strip the taxon-name portion from *record*, leaving the publication
    (author/year/notes) text; '' when nothing remains. The name parts are
    removed from most-specific (subspecies) to least-specific (genus).
    """
# Earlier prefix-based implementation kept for reference:
# if re.sub(fr'{genus_in}.*{subspecies_in}', '', record) != '':
# if subgenus_in:
# record = re.sub(fr'\({subgenus_in}.*?\)', fr'({subgenus_in})', record) # remove subgenus notes
# if subspecies_in != '':
# publication_out = re.sub(fr'(.*?){subspecies_in} ', '', record)
# elif species_in != '':
# publication_out = re.sub(fr'(.*?){species_in} ', '', record)
# elif subgenus_in != '':
# publication_out = re.sub(fr'(.*?){subgenus_in}\) ', '', record)
# elif genus_in != '':
# publication_out = re.sub(fr'(.*?){genus_in} ', '', record)
# else:
# publication_out = record
# else:
# publication_out = ''
if subspecies_in != '':
publication_out = re.sub(fr'{genus_in}.*{subspecies_in}', '', record).strip()
elif species_in != '':
publication_out = re.sub(fr'{genus_in}.*{species_in}', '', record).strip()
elif subgenus_in != '':
publication_out = re.sub(fr'{genus_in}.*{species_in}\)|'
fr'{genus_in}.*{subgenus_in}', '', record).strip()
elif genus_in != '':
publication_out = re.sub(fr'{genus_in}', '', record).strip()
elif record != '':
publication_out = record
else:
publication_out = ''
return publication_out
def publication_parser(mypub):
    """Parse a raw publication string into structured citation parts.

    Returns a 6-tuple:
      original_pub          -- the input string, unmodified
      author_list_out       -- list of normalized author names ([''] if none)
      year_out              -- 4-digit year string, or '' if unknown
      citation_out          -- formatted citation, e.g. 'A, B, and C, 1999',
                               parenthesized if the input was parenthesized
      publication_notes_out -- '; '-joined trailing notes
      bracketed_date_out    -- True when the year appeared as '[yyyy]'

    Handles 'auct.'/'Auctorum' markers, 'sensu'/'in' author qualifiers,
    'et al', generational suffixes (Jr/Sr/roman numerals), and MLA-, AMA-,
    ASA- and APA-style author orderings.
    """
original_pub = mypub
mypub = encoding_fix(mypub)
if mypub: # if a publication was passed
if re.search(r'^sensu', mypub):
mypub = re.sub(r'^(sensu)', 'Unknown, ????, \\1', mypub)
if re.search(r'\([^no]', mypub) and \
re.search(r'\)', mypub) and not \
re.search(r'no. \(', mypub):
parenthetical_exists = True
mypub = mypub.replace('(', '').replace(')', '').strip()
else:
parenthetical_exists = False
# find position of year, if it exists; ignore any year-like substrings after 'Auctorum'
year_exists = re.search(r'[0-9][0-9][0-9][0-9]', mypub.split('Auctorum')[0])
question_exists = re.search(r'\?\?\?\?', mypub) # test if '????' exists
auct_at_start = re.search(r'^[Aa]uct', mypub) # test if pub starts with Auct or auct
if auct_at_start:
mypub = re.sub(r'^[Aa](uct)( |\. |, |orum |orum, )(.*)|^[Aa]uct.*',
'Unknown, ????, auct. \\3', mypub)
question_exists = re.search(r'\?\?\?\?', mypub)
auctorum_exists = re.search(r'Auctorum', mypub) # test if 'Auctorum' exists anywhere
if auctorum_exists: # if 'Auctorum' is still present
mypub = re.sub(r'Auctorum, |Auctorum$', 'auct. ', mypub)
# --- Year and trailing-notes extraction ---
if year_exists and not question_exists: # if a year exists in publication
year_start_index = year_exists.span()[0]
year_end_index = year_exists.span()[1]
year_out = mypub[year_start_index:year_end_index]
bracketed_date_exists = re.search(rf'\[{year_out}]', mypub)
mypub = mypub.replace('[', '').replace(']', '')
if bracketed_date_exists:
bracketed_date_out = True
year_out_print = f'[{year_out}]'
year_start_index -= 1
else:
bracketed_date_out = False
year_out_print = year_out
publication_notes_out = '; '.join([x.strip() for x in mypub[year_end_index:].
split(',') if x != '' and x != ' '])
publication_notes_out = re.sub(r'\((not .*)\)', '\\1', publication_notes_out)
publication_notes_out = re.sub(r'non \((.*)\)', 'not \\1', publication_notes_out)
publication_notes_out = re.sub(r';( [0-9][0-9][0-9][0-9]$)', ',\\1', publication_notes_out)
authors_exist = mypub[0:year_start_index].strip()
else: # a year is not in the publication
year_out = ''
year_out_print = '????'
publication_notes_exist = re.search(r', [a-z].*?$', mypub) # notes are typically ', [a-z][a-z]...'
if publication_notes_exist:
publication_notes_out = '; '.join([x.strip() for x in publication_notes_exist[0].
split(',') if x != '' and x != ' '])
publication_notes_out = re.sub(r'\((not .*)\)', '\\1', publication_notes_out)
publication_notes_out = re.sub(r'non \((.*)\)', 'not \\1', publication_notes_out)
publication_notes_out = re.sub(r';( [0-9][0-9][0-9][0-9]$)', ',\\1', publication_notes_out)
year_start_index = publication_notes_exist.span()[0]
authors_exist = re.sub(fr'{publication_notes_exist[0]}', '', mypub)
else:
publication_notes_out = ''
year_start_index = len(mypub)
authors_exist = mypub
bracketed_date_out = False
if question_exists:
year_start_index = re.search(r', \?\?\?\?', authors_exist).span()[0]
if authors_exist.split(',')[0] in ['unknown', 'Unknown', '????', '']:
authors_exist = False
# AUTHOR PARSING STARTS HERE
if authors_exist: # if an author string exists
authors = mypub[0:year_start_index].strip() # authors are publication string up to location of year
authors = re.sub(r' -([A-Z])', '\\1', authors) # fix author initials of the form: 'M. -L. Kim'
authors = re.sub(r',$', r'', authors) # remove trailing ','
authors = re.sub(r'([A-Z])\.', r'\1. ', authors).replace(' ', ' ') # put a space between initials
if ' in ' in authors: # if author string matches 'taxonomy specific' style
extra_author_info = re.search(r'( in .*)', authors)[0] # capture 'in ...' text separately
if extra_author_info[0] != ' ': # if extra author info does not start with ' '
extra_author_info = ' ' + extra_author_info # ensure extra author info starts with ' '
if extra_author_info[-1] == ' ': # if extra author info does not end with ' '
extra_author_info = extra_author_info[0:-1] # ensure extra author info ends with ' '
authors = re.sub(extra_author_info, '', authors) # remove extra author info
extra_author_info = re.sub(r'\b( et al).*', ',\\1.', extra_author_info) # ensure 'et al' is formatted
elif ' sensu ' in authors: # if authora sensu authorb in author string
if ', sensu ' in authors: # if authora, sensu authorb in author string
authors = authors.replace(', sensu ', ' sensu ') # make sure no comma before sensu
extra_author_info = re.search(r'( sensu .*)', authors)[0] # capture 'sensu ...' text separately
if extra_author_info[0] != ' ': # if extra author info does not start with ' '
extra_author_info = ' ' + extra_author_info # ensure extra author info starts with ' '
if extra_author_info[-1] == ' ': # if extra author info does not end with ' '
extra_author_info = extra_author_info[0:-1] # ensure extra author info ends with ' '
authors = re.sub(extra_author_info, '', authors) # remove extra author info
extra_author_info = re.sub(r'\b( et al).*', ',\\1.', extra_author_info) # ensure 'et al' is formatted
year_out = ''
else: # if author string does not match 'taxonomy specific' style
extra_author_info = '' # there is no extra author info
authors = re.sub(r' PhD\.|, PhD\.| PhD,|, PhD,| PhD$|, PhD$| PHD\.|, PHD\.| PHD,|, PHD,| PHD$|, PHD$|'
r' Esq\.|, Esq\.| Esq,|, Esq,| Esq$|, Esq$| ESQ\.|, ESQ\.| ESQ,|, ESQ,| ESQ$|, ESQ$|'
r' MD\.| MD,| MD$|, MD\.|, MD,|, MD$|'
r', MS\.| MS\.| MS$| MS,|, MS,|, MS$', r'', authors) # remove honorary titles
authors = re.sub(r',( [jJ][Nn]*[rR].*?|'
r' [sS][Nn]*[rR].*?)$', capitalize_repl, authors) # protect generational title
authors = re.sub(r',( I[^a-z]*?| V[^a-z]*?)$', upper_repl, authors) # protect generational titles
authors = authors.replace('Jnr', 'Jr').replace('Snr', 'Sr')
if authors[-2:] in ['Jr', 'Sr']: # ensure these titles end with '.'
authors = authors + '.'
et_al_exists = re.search(r', et al*.?| et al*.?', authors) # check if variants of 'et al' exist
if et_al_exists: # if variants of 'et al' present
et_al_exists = True # set et_al_exists to True
authors = re.sub(r', et al*.?| et al*.?', '', authors) # remove 'et al' variant from author string
# --- Detect citation style (MLA vs non-MLA vs ambiguous) ---
# if there are commas in author string and no 'and'-type character somewhere
if ',' not in authors:
style = 'NON-MLA'
elif ',' in authors and not re.search(r' and | y | & ', authors):
# Authora AB, Authorb AB, year # AMA-style (assume AMA MUST have author initials)
# Authora, Firstname AB, year # MLA-style
if ' ' in authors.split(',')[0]:
style = 'NON-MLA' # these conditions indicate AMA-style
else:
style = 'MLA' # these conditions can indicate MLA-style
# else, if the number of commas before ', and ...' is equal to 1
# and any names before ', and ...' end in a space-separated initial
elif len(re.findall(r',', re.sub(r', and.*', '', authors).strip())) == 1 and any(
[re.search(r'[a-z] [A-Z]$|[a-z] [A-Z]\.$', x) for
x in re.sub(r', and.*', '', authors).strip().split(',')]):
style = 'MLA' # these conditions indicate MLA-style
elif len(re.findall(r',', re.sub(r', and.*', '', authors).strip())) == 1 and not any(
[re.search(r'[a-z] [A-Z]$|[a-z] [A-Z]\.$', x) for
x in re.sub(r', and.*', '', authors).strip().split(',')]):
style = 'NON-MLA' # these conditions indicate NON MLA-style
else: # else, it is ambiguous
# Authora, Firstnamea Middlenamea, and Firstnameb Authorb # spelled-out middlename could be MLA
# Authora, Lastname1 Lastname2, and Firstnameb Authorb # non-hyphenated lastname could be non-MLA
#
# Authora, Firstnamea, and Authorb # could be MLA
# Authora, Authorb, and Authorc # could be non-MLA
style = 'AMBIGUOUS'
if style == 'AMBIGUOUS':
print(f'WARNING: AUTHOR STRING MAY BE AMBIGUOUSLY FORMATTED!: {authors}')
authors = re.sub(r' and | y | & ', r', ', authors) # replace 'and', 'y', and '&' with ','
authors = authors.replace(',,', ',') # remove extra commas that may exist
if style == 'MLA': # if the style is MLA format
authors_temp = authors.split(',') # split author string by commas
# convert authors from MLA to non-MLA format
new_first_author = [authors_temp[1].strip() + ' ' + authors_temp[0].strip()]
authors = ', '.join(new_first_author + [x.strip() for x in authors_temp[2:]])
if ',' in authors: # if commas exist, we assume the names and initials are comma-separated
author_list = [x.strip() for x in authors.split(',') if x] # separate on commas, ignoring empty strings
else: # assume only one author exists (does not exclude ' '-separated authors; difficult to deal with)
author_list = [authors] # place single author into a list
# --- Repair names split across list entries (ASA / APA / AMA orderings) ---
temp_author_list = [] # generate new temp list
for author in author_list: # CHECKS FOR ASA FORMATTED AUTHORS
out_of_order = re.search(r' [A-Z]\.$| [A-Z]$', author) # names end in trailing initials
if out_of_order: # if a name is out of order
previous_name = temp_author_list[-1] # store previous name
temp_author_list = temp_author_list[0:-1] # remove the previous name
new_name = author.strip() + ' ' + previous_name.strip() # merge current name with previous name
temp_author_list.append(new_name) # append new name to the list of authors
else: # if a name is not out of order
temp_author_list.append(author) # append the name to the list of authors
author_list = temp_author_list # write out temporary result to author_list
temp_author_list = [] # generate new temp list
for author in author_list: # CHECKS FOR APA FORMATTED AUTHORS
# names containing initials ONLY
out_of_order = re.search(r'^([A-Z]\.)+$|^([A-Z] )+$|^([A-Z]\. )+(?!.*[a-z])', author)
if out_of_order: # if a name is out of order
surname = temp_author_list[-1] # store previous name
temp_author_list = temp_author_list[0:-1] # remove the previous name
initials = re.sub(r'([A-Z])\.', '\\1. ', author) # place '. ' between each initial
new_name = initials.strip() + ' ' + surname.strip() # merge current name with previous name
temp_author_list.append(new_name) # append new name to the list of authors
else: # if a name is not out of order
temp_author_list.append(author) # append the name to the list of authors
author_list = temp_author_list # write out temporary result to author_list
temp_author_list = [] # generate new temp list
for author in author_list: # CHECKS FOR AMA FORMATTED AUTHORS
trailing = re.search(r'( [jJ][Nn]*[rR].*?| [sS][Nn]*[rR].*?| I[^a-z]*?| V[^a-z]*?)$', author)
if trailing: # if generation title exists
suffix = author[trailing.span()[0]:trailing.span()[1]] # separate generational title
author = author[0:trailing.span()[0]] # remove generational title
else: # if generational title does not exist
suffix = '' # do not add anything as a suffix
out_of_order = re.search(r' [A-Z]+$', author) # names end in multiple trailing initials
if out_of_order: # if a name is out of order
initials = ' '.join(author.split(' ')[1:]) # grab initials
initials = re.sub(r'([A-Z])', '\\1. ', initials) # place '. ' between each initial
surname = author.split(' ')[0] # grab surname
new_name = initials.strip() + ' ' + surname.strip() + suffix # merge initials, surname, and suffix
temp_author_list.append(new_name) # append new name to the list of authors
else: # if a name does not contain elements out of order
temp_author_list.append(author + suffix) # append the name and suffix to the list of authors
author_list = temp_author_list # write out temporary result to author_list
number_of_authors = len(author_list) # calculate final number of authors
# author_list = [re.sub(r',*( [Jj][Nn]*[Rr]| [Ss][Nn]*[Rr]| I[^a-z]*?$| V[^a-z]*?$)', ',\\1', x)
# for x in author_list] # comma-separate generational titles
# ensure roman generational titles preceded by ','
author_list = [re.sub(r'(, | )([Jj][Rr]\.*$|'
r'[Ss][Rr]\.*$|'
r'I[^a-z]*?|'
r'V[^a-z]*?)$', ', \\2', x) for x in author_list]
author_list = [re.sub(r'(^[A-Za-z])([A-Z][a-z])', "\\1'\\2", x)
for x in author_list] # ensure 'O' 'd' names separated with "'"
author_list = [re.sub(r' \. *| {2}', ' ',
re.sub(fr"([A-Z])(?!=|[a-z]|{pLl}|'|[A-Z]*$)",
"\\1. ", x))
for x in author_list] # ensure initials are separated by '. '
author_list_out = author_list # write out result into author_list_out
# PREFIXES AND SUFFIXES MUST HAVE BEEN FIXED IN author_list_out HERE
# BEGIN COLLAPSING NON-SURNAMES
author_list_display = [re.sub(r'( [Jj][Nn]*[Rr]|'
r' [Ss][Nn]*[Rr]|'
r' I[^a-z]*?$|'
r' V[^a-z]*?$)\.', upper_repl, x)
for x in author_list_out] # protect generational titles
author_list_display = [re.sub(r'(van |de |van de |der |van der )', upper_repl, x) for x in
author_list_display] # (I couldn't figure out a better regex for this)
# THE FOLLOWING LINES ESSENTIALLY DO THIS OVER A LIST OF NAMES:
# in_string = "O'Authora"
# search = ' '.join(re.sub(r',[A-Z]+ |'
# r' [A-Z]+ |'
# r', [A-Z]+$|'
# r' [A-Z]+$', ' ', in_string).strip().split(' ')[0:-1])
# replace = re.sub('[a-z]+', '.', search)
# full = in_string.replace(search, replace)
author_list_display = [x.replace(' '.join(re.sub(r',[A-Z]+ |'
r' [A-Z]+ |'
r', [A-Z]+$|'
r' [A-Z]+$', ' ', x)
.strip().split(' ')[0:-1]),
re.sub('[a-z]+', '.', ' '
.join(re.sub(r',[A-Z]+ | [A-Z]+ |, [A-Z]+$| [A-Z]+$', ' ', x)
.strip().split(' ')[0:-1]))) for x in author_list_display]
author_list_display = [re.sub(r'(\. (?![A-Z][a-z]|[A-Z]+ )+)', r'.', x) for x in
author_list_display] # remove space between initials
author_list_display = [re.sub(r'(VAN |DE |VAN DE |DER |VAN DER )', lower_repl, x) for x in
author_list_display] # unprotect prefixes
author_list_display = [re.sub(r'( JR| SR)', capitalize_repl, x) for x in
author_list_display] # unprotect generational titles
author_list_display = [re.sub(r'(, | )(Jr$|Sr$)', ', \\2.', x) for x in
author_list_display] # add '.' to generational titles
if et_al_exists: # if input mypub has 'et al' in author string
number_of_authors = 25 # arbitrarily large value to trigger 'et al' in citation_out
else: # if an author string does not exist
number_of_authors = 0 # the number of authors is zero
extra_author_info = ''
author_list_out = [''] # capture authors as an empty string stored in a list
author_list_display = [''] # display authors as an empty string stored in a list
# GENERATE AUTHOR STRING TO DISPLAY IN OUTPUT
if number_of_authors == 0: # if no authors
citation_out = 'Unknown, ' + year_out_print
elif number_of_authors == 1: # if one author
citation_out = author_list_display[0] + extra_author_info + ', ' + year_out_print
elif number_of_authors == 2: # if two authors
citation_out = ', '.join(author_list_display[0:-1]) + ' and ' + author_list_display[
-1] + extra_author_info + ', ' + year_out_print
elif number_of_authors == 3: # if three authors
citation_out = ', '.join(author_list_display[0:-1]) + ', and ' + author_list_display[
-1] + extra_author_info + ', ' + year_out_print
else: # if four or more authors
citation_out = author_list_display[0] + ' et al.' + extra_author_info + ', ' + year_out_print #
if parenthetical_exists:
citation_out = '(' + citation_out + ')'
else: # no publication was passed
original_pub, author_list_out, year_out = '', [''], ''
citation_out, publication_notes_out, bracketed_date_out = '', '', False
# troubleshooting:
# if any([re.search(r'[_()]', x) for x in author_list_out]):
# print(f"PROBLEM DETECTED WITH: {original_pub}")
if any([re.search(r' -', x) for x in author_list_out]):
print(f"PROBLEM DETECTED WITH: {original_pub}")
return original_pub, author_list_out, year_out, citation_out, publication_notes_out, bracketed_date_out
def to_canonical(genus_in, species_in):
    """Build the canonical 'Genus species' string.

    Returns '' when the genus is missing, the genus alone when only the
    species is missing, and the stripped, space-joined pair otherwise.
    """
    if not genus_in:
        return ''
    if not species_in:
        return genus_in
    return f"{genus_in.strip()} {species_in.strip()}"
def name_note_extractor(name_in):
    """Split a name token into (complete_name, bare_name, notes).

    Leading space-separated words become notes (e.g. 'var subspecies'), and
    '_'-suffixed annotations are expanded into readable note text
    ('auct' -> 'auctorum', 'sl' -> 'sensu lato', 'misdet' ->
    'misidentification', ...). complete_name is always the unmodified input.
    """
# check for multi-part names (particularly subspecies like: 'var subspecies')
complete_name_out = name_in
if name_in != '':
if ' ' in name_in:
notes1 = '; '.join([x.strip() for x in name_in.split(' ')[0:-1] if x])
name_in = name_in.split(' ')[-1].strip()
else:
notes1 = ''
if '_' in name_in:
note_out = '_'.join([x.strip() for x in name_in.split('_')[1:]])
# remove numbers from name notes (these are probably citation numbers?)
# resolve some common abbreviated notes
note_out = '; '.join([re.sub(r'[a-z][0-9]$', '', x).
replace('sic.', 'sic').
replace('.', '').replace('auct', 'auctorum')
for x in note_out.split(' ')]). \
replace('.', '').replace('sl', 'sensu lato').replace('homonytm', 'homonym')
note_out = re.sub(r'homony$', 'homonym', note_out)
note_out = re.sub(r'misdet', 'misidentification', note_out.replace('.', ''))
if notes1 != '':
note_out = '; '.join([notes1, note_out])
name_out = name_in.split('_')[0].replace('_', ' ').strip()
elif notes1 != '':
name_out = name_in
note_out = notes1
else:
name_out = name_in
note_out = ''
else:
name_out = ''
note_out = ''
return complete_name_out, name_out, note_out
def subspecies_prefix_cleaner(name_in):
    """Expand abbreviated infra-specific rank prefixes to full words.

    Strips '.' and ',' first, then expands 'var'/'v' -> variety,
    'm'/'morpha' -> morph, 'f' -> form, 'ab'/'aber'/'aberr' -> abberration,
    'r'/'rasse' -> race, and 'mut' -> mutant. Replacement order matters and
    mirrors the historical behavior exactly.
    Unhandled abbreviations sometimes seen in the data: 'mod', 'seu',
    'vel', 'sive' (meanings unconfirmed).
    """
    replacements = (
        ('.', ''),
        (',', ''),
        ('var ', 'variety '),
        ('v ', 'variety '),
        ('m ', 'morph '),
        ('morpha ', 'morph '),
        ('f ', 'form '),
        ('ab ', 'abberration '),
        ('aber ', 'abberration '),
        ('aberr ', 'abberration '),
        ('r ', 'race '),
        ('rasse ', 'race '),
        ('mut ', 'mutant'),
    )
    name_out = name_in
    for old, new in replacements:
        name_out = name_out.replace(old, new)
    return name_out
| [
"njdowdy@gmail.com"
] | njdowdy@gmail.com |
30bfd2ad6d201608df5de38e0acb45a5878f450e | b3c385df30f7496d563c765d47fa2fa5594bb6ee | /LeetCode_in_py/topKFrequent.py | 6e888f65c62024e6350fb0d483879a9124c8364e | [] | no_license | Yu4n/Algorithms | f8f5ad3678035451af9829838eb83e60b520749f | d59ef05905eb8df348778da89e4a08a65359fada | refs/heads/master | 2023-08-12T21:04:40.007122 | 2021-10-21T09:22:06 | 2021-10-21T09:22:06 | 272,651,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import collections
class Solution:
    def topKFrequent(self, words: list, k: int) -> list:
        """Return the k most frequent words, most frequent first.

        Ties are broken by ascending lexicographic order. Uses
        collections.Counter instead of a defaultdict(lambda: 0), and fixes
        the original `[str]` annotations, which evaluate to a one-element
        list and are meaningless to type checkers.
        """
        counts = collections.Counter(words)
        # Sort by descending frequency, then alphabetically for ties.
        return sorted(counts, key=lambda w: (-counts[w], w))[:k]
| [
"28648731+Yu4n@users.noreply.github.com"
] | 28648731+Yu4n@users.noreply.github.com |
3381110282a51131e56a54dfb43121406e66a86d | 26933e8fa5888bddd9b509b47b88894f7131ed1c | /Licenta_server/interface/sensors.py | a6bcf27fe19b5912c180e943d0dd614b3a6abb55 | [] | no_license | iadelina/Licenta | 97a96ce85a7fa245b66b186e63f7421607111675 | 15f63fff5cbb15fe61bd98d76420e6b01edd1246 | refs/heads/master | 2020-06-18T15:17:54.181043 | 2020-05-17T15:19:52 | 2020-05-17T15:19:52 | 196,344,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | import RPi.GPIO as GPIO
from abc import abstractmethod, ABCMeta
#Set DATA pin
#BCM 4 => BOARD 7
#DHT = 4
class Sensor(metaclass=ABCMeta):
    """Abstract base class for a GPIO-attached sensor.

    Bug fix: the original declared `_metaclass_ = ABCMeta`, which is a no-op
    in Python 3 (the Python 2 spelling was `__metaclass__`, and Python 3
    requires the `metaclass=` keyword), so @abstractmethod was never
    enforced and Sensor could be instantiated directly.
    """

    def __init__(self, pin, mode):
        # pin: data pin the sensor is wired to.
        # mode: pin-numbering scheme, e.g. GPIO.BCM or GPIO.BOARD.
        self.pin = pin
        self.mode = mode

    def import_libraries(self):
        """Hook for subclasses that need extra hardware libraries; no-op by default."""
        pass

    @abstractmethod
    def read_sensor_value(self):
        """Read the sensor and store its current value(s) on the instance."""
        pass
class TemperatureSensor(Sensor):
    """DHT22 temperature/humidity sensor driven through Adafruit_DHT."""
def __init__(self, pin, mode):
Sensor.__init__(self, pin, mode)
# Last values read from the DHT22; 0.0 until the first successful read.
self.temperature = 0.0
self.humidity = 0.0
def read_sensor_value(self):
    """Read humidity and temperature from the DHT22 and cache them."""
# Reset any previous GPIO state before configuring the pin.
GPIO.cleanup()
import Adafruit_DHT
GPIO.setmode(GPIO.BCM)
self.humidity, self.temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, self.pin)
GPIO.cleanup()
def display_sensor_value(self):
    """Read the sensor and return the temperature as a '12.3*C' string.

    NOTE(review): this duplicates read_sensor_value() (without setmode)
    instead of calling it -- consider delegating; confirm whether skipping
    GPIO.setmode here is intentional.
    """
GPIO.cleanup()
#self.read_sensor_value()
import Adafruit_DHT
#GPIO.setmode(GPIO.BCM)
self.humidity, self.temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, self.pin)
GPIO.cleanup()
return '{0:0.1f}*C'.format(self.temperature)
| [
"adelina.ivan97@gmail.com"
] | adelina.ivan97@gmail.com |
8203f8ceb30d5186a154e4b31d9a972deba8201b | 8b4d37632e0435fe5f78bf1631dd74766e8db411 | /xrandroll/xrandr.py | 96ceed2ae8f3e366d30c4851a91de8b1c339fe25 | [
"MIT"
] | permissive | RakhithJK/xrandroll | ca876c35fda3235b81362bce9ff6779759d810a5 | 7d294ea15a639d9b15a55c0bfc13161307425554 | refs/heads/master | 2022-04-07T03:13:53.816999 | 2020-02-07T12:55:02 | 2020-02-07T12:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | """Read/Write system display state using xrandr."""
import subprocess
from .monitor import Monitor, _split_by_lines_matching
def is_replica_of(a, b):
    """Return True if monitor a is a replica of b.

    Two monitors are replicas when they share the same position and
    resolution and the reference monitor *b* is enabled.
    """
    same_geometry = (
        (a.pos_x, a.pos_y, a.res_x, a.res_y)
        == (b.pos_x, b.pos_y, b.res_x, b.res_y)
    )
    return same_geometry and b.enabled
class Screen:
"""A Screen is a collection of monitors.

Built from the `xrandr --verbose` output of a single X screen; keeps one
Monitor per output name and tracks which outputs mirror each other.
"""
def __init__(self, data):
self.monitors = {}
# Each block starting at a non-indented line describes one output.
for monitor_data in _split_by_lines_matching(r"^[^ \t].*", data[1:]):
m = Monitor(monitor_data)
self.monitors[m.output] = m
self.update_replica_of()
def generate(self):
"""Create a list of xrandr invocations to match this state."""
results = []
for output, mon in self.monitors.items():
cli = ["xrandr"]
cli.append(f"--output {output}")
if not mon.enabled:
cli.append("--off")
else:
mode = mon.get_current_mode()
cli.append(f"--pos {int(mon.pos_x)}x{int(mon.pos_y)}")
cli.append(f"--mode {mode.res_x}x{mode.res_y}")
# Swap mode dimensions for rotated outputs so the scale factor
# relates the desired resolution to the rotated mode.
mod_x, mod_y = mode.res_x, mode.res_y
if mon.orientation in ("left", "right"):
mod_x, mod_y = mod_y, mod_x
cli.append(f"--scale {mon.res_x/mod_x}x{mon.res_y/mod_y}")
cli.append(f"--rotate {mon.orientation}")
if mon.primary:
cli.append("--primary")
results.append(" ".join(cli))
return results
def update_replica_of(self):
"""Decide which monitors are replicas of each other and
mark them as such."""
for a in self.monitors:
self.monitors[a].replica_of = []
for b in self.monitors:
if a != b and is_replica_of(self.monitors[a], self.monitors[b]):
self.monitors[a].replica_of.append(b)
def choose_a_monitor(self):
"""Choose what monitor to select by default.
* Not disabled
* Primary, if possible
"""
candidate = None
for name, mon in self.monitors.items():
if not mon.enabled:
continue
if mon.primary:
return name
candidate = name
return candidate
def get_primary(self):
"""Return the primary monitor, if any."""
for mon in self.monitors.values():
if mon.primary:
return mon
return None
def set_primary(self, name):
"""Mark the output called *name* as primary and all others as not."""
for mon in self.monitors.values():
mon.primary = name == mon.output
mon.primary = name == mon.output
def read_data():
    """Return the output of `xrandr --verbose` as a list of text lines."""
    raw = subprocess.check_output(["xrandr", "--verbose"], encoding="utf-8")
    return raw.splitlines()
def parse_data(data):
    """Parse `xrandr --verbose` output lines into a single Screen."""
# Going to pretend there can only be one screen because life is short.
return Screen(_split_by_lines_matching("^Screen ", data)[0])
| [
"ralsina@netmanagers.com.ar"
] | ralsina@netmanagers.com.ar |
2a944e052f453d29b452e039209b48c274a6f235 | 6b3e0aee4d040de1973f5b28086e1a90029c2243 | /paplon.py | 5a08be1be7ab598372ff37343054752bfcc10680 | [] | no_license | vehar/decipher | 29ecc85335519a50bc6057512605e20cf97b3e93 | 4b32b03606ed8c11466872b525f5f44d64c923d1 | refs/heads/master | 2023-03-19T14:23:52.619200 | 2019-08-26T07:46:00 | 2019-08-26T07:46:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,113 | py | #!/usr/bin/python3
import struct
import time
import sys
import queue
import os
import math
import socket
import select
import re
import threading
from collections import namedtuple
from libdeka import *
from vankusconf import HOST, PORT, DEBUGDUMP
import socketserver
import pickle
jobstages = [ "submitted", # user submitted keystream
"dpsearch", # worker is searching for distinguished points
"endpoints", # worker submitted computed endpoints
"startsearch", # worker is looking startpoints up in tables
"startpoints", # worker submitted startpoints
"collsearch", # worker is recomputing chain from the beginning,
# trying to find collisions
"finished" # everything done
]
jobptr = 0
lock = threading.Lock()
reportqs=[]
def saveblob(fname, blob):
    '''
    Pickle *blob* to bursts/<fname>.pkl for offline debugging.

    Does nothing unless the DEBUGDUMP config flag (from vankusconf) is set.
    '''
    if not DEBUGDUMP:
        return
    # 'with' guarantees the descriptor is closed even if pickle.dump()
    # raises; the original open()/close() pair leaked it on error.
    with open("bursts/%s.pkl" % fname, 'wb') as out:
        pickle.dump(blob, out, pickle.HIGHEST_PROTOCOL)
def report_thr(msgq, sock):
"""
Reporting thread sending messages to client
"""
while 1:
# blocking queue get
s = msgq.get()
sendascii(sock, s)
JobT = Struct("Job", "num time stage keystream blob plaintext")
def Job(stime=None, stage="submitted", keystream="", blob=bytes(), plaintext=None):
    """Allocate a new JobT record with a unique, monotonically rising number.

    *stime* is accepted for backward compatibility but is always overwritten
    with the current time, exactly as the original code did (its old default
    ``time.time()`` was evaluated once at import and then discarded anyway).
    *plaintext* now defaults to a fresh list per call; the old
    ``plaintext=[]`` default was one shared mutable list that every job
    created without an explicit argument would have aliased.
    """
    global jobptr
    if plaintext is None:
        plaintext = []
    # Context manager releases the lock even if something below raises;
    # the bare acquire()/release() pair did not.
    with lock:
        jobptr += 1
        myjob = jobptr
        stime = time.time()
    return JobT(myjob, stime, stage, keystream, blob, plaintext)
jobs = {}
def getfjob(stage):
    """
    Return the number of the first job currently in *stage*, or None.

    "First" means dict insertion order, i.e. the oldest surviving job.
    """
    # Iterate items() once instead of keys() plus a lookup per key;
    # 'global jobs' was unnecessary for a read-only access and is dropped.
    for jobnum, job in jobs.items():
        if job.stage == stage:
            return jobnum
    return None
def rq_crack(req, header):
"""
Create new job from the crack command
"""
global jobs
keystream = header.split()[1]
if not re.search("^[01]{114}$", keystream):
sendascii(req, "Keystream must be exactly one GSM burst, i.e. 114 bits\r\n")
return
job = Job(keystream = keystream)
sendascii(req, "Cracking #%i %s\r\n"%(job.num, job.keystream))
if re.search("^[0]{114}$", keystream):
sendascii(req, "crack #%i took 0 msec\n"%job.num)
return
lock.acquire()
jobs[job.num] = job
lock.release()
def rq_crackadd(req):
"""
Create a reporting thread for the user that submitted a crack command
"""
global reportqs
q = queue.Queue()
t = threading.Thread(target=report_thr, args=(q,req))
t.start()
reportqs.append(q)
def rq_getkeystream(req, header):
"""
Return keystream for endpoint computation
"""
lock.acquire()
jobn = getfjob("submitted")
if jobn == None:
lock.release()
sendascii(req, "-1 0\r\n")
else:
job = jobs[jobn]
jobs[jobn].stage = "dpsearch"
lock.release()
sendascii(req, "%i %s\r\n"%(job.num, job.keystream))
def rq_putdps(req, header):
"""
Receive computed endpoints
"""
jobnum = int(header.split()[1])
plen = int(header.split()[2])
payload = getdata(req, plen)
saveblob("%i-dps"%jobnum, payload)
lock.acquire()
jobs[jobnum].blob = payload
jobs[jobnum].stage = "endpoints"
lock.release()
def rq_getdps(req, header):
"""
Send computed endpoints for table lookup
"""
lock.acquire()
jobn = getfjob("endpoints")
if jobn == None:
lock.release()
sendascii(req, "-1 0\r\n")
else:
job = jobs[jobn]
jobs[jobn].stage = "startsearch"
lock.release()
sendascii(req, "%i %i\r\n"%(job.num, len(job.blob)))
sendblob(req, job.blob)
def rq_putstart(req, header):
"""
Receive startpoints from tables
"""
jobnum = int(header.split()[1])
plen = int(header.split()[2])
payload = getdata(req, plen)
saveblob("%i-start"%jobnum, payload)
lock.acquire()
jobs[jobnum].blob = payload
jobs[jobnum].stage = "startpoints"
lock.release()
def rq_getstart(req, header):
"""
Send startpoints for chain recovery
"""
lock.acquire()
jobn = getfjob("startpoints")
if jobn == None:
lock.release()
sendascii(req, "-1 0\r\n")
else:
job = jobs[jobn]
jobs[jobn].stage = "collsearch"
lock.release()
sendascii(req, "%i %s %i\r\n"%(job.num, job.keystream, len(job.blob)))
sendblob(req, job.blob)
def rq_putkey(req, header):
"""
Receive cracked key
"""
jobnum = int(header.split()[1])
keyinfo = ' '.join(header.split()[2:])
for q in reportqs:
q.put(keyinfo + "\r\n")
def rq_finished(req, header):
"""
Receive message that a job has been finished
"""
jobnum = int(header.split()[1])
jobs[jobnum].stage = "finished"
for q in reportqs:
q.put("crack #%i took %i msec\r\n"%(jobnum, (time.time() - jobs[jobnum].time) * 1000))
lock.acquire()
del(jobs[jobnum])
lock.release()
def rq_stats(req, header):
"""
Print server performance info
"""
global jobs
lock.acquire()
cnts = {}
for stage in jobstages:
cnts[stage] = 0
for job in jobs:
cnts[jobs[job].stage] += 1
lock.release()
for stage in jobstages:
sendascii(req, "%s: %i\r\n"%(stage, cnts[stage]))
def rq_unknown(req, header):
    """
    Reply that the client's request line matched no known command.
    """
    parts = header.split()
    cmd = parts[0] if parts else ""
    sendascii(req, "Unknown command %s\r\n" % cmd)
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
"""
TCP server implementation from https://docs.python.org/3/library/socketserver.html example
"""
def handle(self):
"""
New thread for each client
"""
print("Connect from %s:%i"%self.request.getpeername())
crackadded = 0
# just process requests from client infinitely
while 1:
# read request header
header = getline(self.request)
if not header:
print("Disconnect %s:%i"%self.request.getpeername())
self.request.close()
break
# decide what type it is and process accordingly
rtype = ""
if len(header.split()) > 0:
rtype = header.split()[0]
#print("DEBUG "+header)
if rtype == "crack":
rq_crack(self.request, header)
if crackadded == 0:
rq_crackadd(self.request)
crackadded = 1
elif rtype == "getkeystream":
rq_getkeystream(self.request, header)
elif rtype == "putdps":
rq_putdps(self.request, header)
elif rtype == "getdps":
rq_getdps(self.request, header)
elif rtype == "putstart":
rq_putstart(self.request, header)
elif rtype == "getstart":
rq_getstart(self.request, header)
elif rtype == "putkey":
rq_putkey(self.request, header)
elif rtype == "finished":
rq_finished(self.request, header)
elif rtype == "stats":
rq_stats(self.request, header)
else:
rq_unknown(self.request, header)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
# some weird "address already in use" after unclean shutdown
allow_reuse_address = True
# bind to socket and start accepting clients
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
server.serve_forever()
| [
"max@phantom.co.il"
] | max@phantom.co.il |
eadf3ee35f04a1653b7eea383cfc3391f9183434 | 460622f0d41360f7d931d664e15e9f3644649bdc | /Streaming Deepwalk PPI/Karate_DW/plot.py | 7c73e5cf34f9fabc6a8569fc14851d5ce40b0f40 | [] | no_license | xiao2mo/Deep-Learning-for-Graph-Representations | 1e8aa25bdc205f00d9dd3d7bc23ccfd6bc161195 | 103c6673ad352a9763c9a8bec80d96f39fa4a0f9 | refs/heads/master | 2021-01-21T20:47:46.129087 | 2017-04-04T05:35:41 | 2017-04-04T05:35:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | from matplotlib import pyplot as plt
import numpy as np
import os
from sklearn.decomposition import PCA
#file_name='karate.embeddings'
file_name='output'
def get_embeddings(file_name):
    """Load a word2vec/deepwalk-style embedding file.

    The first line is a header and is skipped; every following line is
    "<node-id> <float> <float> ...".

    Returns (labels, matrix): a list of int node ids and an (n, d) numpy
    array of the embedding vectors, in file order.
    """
    # 'with' closes the handle (the original leaked it) and behaves the
    # same under Python 2 and Python 3.
    with open(file_name) as fh:
        rows = [line.strip().split(" ") for line in fh]
    body = rows[1:]
    labels = [int(row[0]) for row in body]
    vectors = [[float(value) for value in row[1:]] for row in body]
    matrix = np.array(vectors)
    # Parenthesised print of a single value is valid Py2 and Py3 alike
    # (the rest of this file still uses Py2 print statements).
    print(matrix.shape)
    return labels, matrix
def get_map(labels):
f=open('Karate_labels')
f.next()
ret={}
for row in f:
q = map(int,row.strip().split(','))
ret[q[0]+1]=q[1]
return ret
def new_get_map():
f=open('Karate_labels_1')
f=f.readlines()
clas={}
for row in f:
temp=row.strip().split('-')
for x in map(int,temp[0].split(',')):
clas[x]=int(temp[1])
return clas
def plot_output(file_name,count=None,image=None):
label,data=get_embeddings(file_name)
if data.shape[1]>2:
pca=PCA(n_components=2)
data=pca.fit_transform(data)
print data.shape
const_val=new_get_map()
#c_dict={0:'r',1:'g',2:'b',3:'k'}
c_dict={0:'+',1:'o',2:'*',3:'^'}
done =[]
for index in xrange(len(data)):
val=const_val[label[index]]
if val in done:
plt.scatter(data[index,0],data[index,1],marker=c_dict[val],s=100)
else:
done.append(val)
plt.scatter(data[index,0],data[index,1],marker=c_dict[val],s=100,label=str(val))
plt.annotate('%s' %str(label[index]),(data[index,0],data[index,1]))
plt.legend(loc='upper left')
plt.show()
"""
#print target
for index,val in enumerate(label):
if index<=9476: #positive
c='r'
else: #negative
c='b'
plt.scatter(x[index],y[index],c=c)
#print index,val,target[val]
#image=image.ravel()
figure=plt.figure()
for index,val in enumerate(label):
if image!=None:
if image[val-1]==0:
figure.scatter(x[index],y[index],c='b')
else:
plt.scatter(x[index],y[index],c='b')
#plt.annotate('%s' %str(val),(x[index],y[index]))
plt.show()
#if __name__=="__main__":
#plot_output(file_name)
"""
| [
"vsuriya93@gmail.com"
] | vsuriya93@gmail.com |
67df7ac5c2b0664638bb9247a0a1fb29a56f1768 | bcda171a045e86f8437c9dd5f37a0a1ac2316063 | /anonymization/community_evaluation.py | f2c9ca1bb15f888b1ac1291ccabaad8a80293632 | [] | no_license | blackfeathering/CommunityDeception-master | f1127a9d22869a3bbc8db40ca99c89c0e98279d5 | c49dafd8774e029c0d57aa4f63ad192aacafa07f | refs/heads/master | 2023-04-03T03:41:13.651533 | 2021-03-15T06:16:28 | 2021-03-15T06:16:28 | 255,219,882 | 0 | 0 | null | 2021-03-29T22:52:54 | 2020-04-13T03:13:20 | Python | UTF-8 | Python | false | false | 8,781 | py | import logging.config
from typing import List
from settings import master
from igraph.clustering import VertexClustering
from utils.timer import time_mark
import time
import re
from math import log
import matplotlib.pyplot as plt
logging.config.dictConfig(master.LOGGING_SETTINGS)
logger = logging.getLogger('normal')
class CommunityEvaluation(object):
def __init__(self, graph, edges_sum, detection_func, func_args, interval, partitions=None,
path=None, index0=3, index1=0, ffname='fastadd', **kwargs):
self.__graph = graph
self.__edges_sum = edges_sum
self.__detection_func = detection_func
self.__func_args = func_args
self.__interval = interval
self.__partitions = partitions
self.__path = path
self.__community_index_0 = index0
self.__community_index_1 = index1
self.__edge_set = None
self.__degree_list = None
self.__vertex_list = None
self.__vertex_part = None
self.__edge_added_list = None
self.__partitions_expected = None
self.__partitions_expected_degree: List[int] = list()
self.__partitions_expected_volume: List[int] = list()
self.__sorted_partitions_expected: List[List[int]] = list()
self.__degree_distribute: List[int] = list()
self.__start_time = time.time()
self.__end_time = None
self.__name = ffname
def __start(self):
logger.info("Communityevaluation")
logger.info(f'Time : {time_mark(self.__start_time)}')
logger.info(f'Graph: {self.__path}')
logger.info(f'Info : {self.__graph.vcount()} {self.__graph.ecount()}')
logger.info(f'Edges: {self.__edges_sum}')
logger.info(f'Func : {self.__detection_func.__name__}')
logger.info(f'Args : {self.__func_args}')
logger.info(f'Gap : {self.__interval}')
logger.info(f'Parts: {len(self.__partitions)}')
logger.info("Community1")
subgraph0 = self.__partitions.subgraph(self.__community_index_0)
logger.info(f'Community index: {self.__community_index_0}, '
f'Info : {subgraph0.vcount()} {subgraph0.ecount()}')
logger.info("Community2")
subgraph1 = self.__partitions.subgraph(self.__community_index_1)
logger.info(f'Community index: {self.__community_index_1}, '
f'Info : {subgraph1.vcount()} {subgraph1.ecount()}')
logger.info("=" * 60)
def __quit(self):
self.__end_time = time.time()
logger.info("=" * 60)
logger.info(f'Time : {time_mark(self.__end_time)}')
logger.info(f'Total: {(self.__end_time - self.__start_time):10.4f} s')
logger.info("=" * 60)
logger.info("\n\n")
def __preprocess(self):
self.__edge_set = set(self.__graph.get_edgelist())
if not self.__partitions:
self.__partitions = self.__detection_func(self.__graph, **self.__func_args)
self.__set_necessary_info()
def __set_necessary_info(self):
v_degree = list()
v_index = list()
v_partation = list()
memberships = self.__partitions._membership
for index in range(len(memberships)):
if memberships[index] == self.__community_index_0:
v_index.append(index)
v_degree.append(self.__graph.degree(index))
v_partation.append(0)
if memberships[index] == self.__community_index_1:
v_index.append(index)
v_degree.append(self.__graph.degree(index))
v_partation.append(1)
self.__degree_list = v_degree
self.__vertex_list = v_index
self.__vertex_part = v_partation
# 最终合并的社区编号为self.__community_index_1
partation_expected = VertexClustering(graph=self.__partitions._graph,
membership=list(self.__partitions._membership))
for i in range(len(partation_expected._membership)):
if partation_expected._membership[i] == self.__community_index_0:
partation_expected._membership[i] = self.__community_index_1
for i in range(len(partation_expected._membership)):
if partation_expected._membership[i] == partation_expected._len - 1:
partation_expected._membership[i] = self.__community_index_0
partation_expected._len -= 1
# print(partation_expected._membership)
self.__partitions_expected = partation_expected
def __process(self):
fname = master.GRAPH_SETTINGS['path'][8:len(master.GRAPH_SETTINGS['path'])]
with open('test/{}_{}_{}.txt'.format(fname, master.GRAPH_SETTINGS['edges_sum'], self.__name), 'r') as f:
list1 = f.readlines()
fw = open('test/{}_{}_{}_evaluation.txt'.format(fname, master.GRAPH_SETTINGS['edges_sum'], self.__name), 'w')
after_graph = self.__graph
eva = float(0)
for i in range(0,50):
eva += self.__evaluation(after_graph)
eva /= 50
fw.write(str(eva)+'\n')
#y_data = list()
#y_data.append(eva)
count = 1
edgesum = int(master.GRAPH_SETTINGS['edges_sum'])
x = 1
if edgesum >= 100:
x = 10
if edgesum >= 1000:
x = 100
for line in list1:
num = re.findall(r"\d+\.?\d*", line)
after_graph.add_edge(int(num[0]), int(num[1]))
if count % x == 0:
eva = float(0)
for i in range(0, 50):
eva += self.__evaluation(after_graph)
eva /= 50
#logger.info(f"evaluation: ({eva:8.7f})")
fw.write(str(eva) + '\n')
count += 1
#print(after_graph.ecount())
#y_data.append(eva)
fw.close()
#x_data = [i for i in range(0, master.GRAPH_SETTINGS['edges_sum']+1)]
#plt.plot(x_data, y_data)
#plt.title(self.__name)
#plt.show()
def __evaluation(self, after_graph):
temp_partitions = master.GRAPH_SETTINGS['detection_func'](after_graph, **master.GRAPH_SETTINGS['func_args'])
# print(temp_partitions._membership)
set_x = set(self.__vertex_list)
# print(set_x)
set_yi = set()
ideal_evaluation = float(0)
mem = temp_partitions._membership
for i in range(0, temp_partitions._len):
set_yi.clear()
for index in range(len(mem)):
if mem[index] == i:
set_yi.add(index)
# print(set_yi)
xx = len(set_x.intersection(set_yi)) / ((len(set_x) * len(set_yi)) ** 0.5)
# print(xx)
if xx > ideal_evaluation:
ideal_evaluation = xx
#print(ideal_evaluation)
return ideal_evaluation
def __evaluation_log(self, after_graph):
temp_partitions = master.GRAPH_SETTINGS['detection_func'](after_graph, **master.GRAPH_SETTINGS['func_args'])
# print(temp_partitions._membership)
set_x = set(self.__vertex_list)
# print(set_x)
set_yi = set()
ideal_evaluation = float(0)
mem = temp_partitions._membership
for i in range(0, temp_partitions._len):
set_yi.clear()
eva = float(0)
for index in range(len(mem)):
if mem[index] == i:
set_yi.add(index)
# print(set_yi)
xx = len(set_x.intersection(set_yi))
if xx != 0:
eva = xx / len(set_x) * log(xx / len(set_x), 2)
ideal_evaluation += eva
#print(ideal_evaluation)
return ideal_evaluation
def __evaluation_test(self, after_graph):
temp_partitions = master.GRAPH_SETTINGS['detection_func'](after_graph, **master.GRAPH_SETTINGS['func_args'])
# print(temp_partitions._membership)
set_x = set(self.__vertex_list)
# print(set_x)
set_yi = set()
ideal_evaluation = float(0)
mem = temp_partitions._membership
for i in range(0, temp_partitions._len):
set_yi.clear()
eva = float(0)
for index in range(len(mem)):
if mem[index] == i:
set_yi.add(index)
# print(set_yi)
xx = len(set_x.intersection(set_yi))
if xx != 0:
eva = xx / len(set_x) * log(xx / len(set_x), 2)
if ideal_evaluation > eva:
ideal_evaluation = eva
#print(ideal_evaluation)
return ideal_evaluation
def run(self):
self.__preprocess()
self.__start()
self.__process()
self.__quit()
| [
"1960554271@qq.com"
] | 1960554271@qq.com |
a603bb6b4593d64c41ea2225db301e8dddaa5647 | a79e319c0c940698fa577ac5c4471ff6fff6bdec | /permutations.py | b024b56f570803175c3e925c8c996777c32f4a25 | [] | no_license | egnor/puzz | c2d4066d1d12746fd4f3ecb1a0e84e17769a40c6 | 1ed0152bed5bb6783e56219ed83be02e88abe8c7 | refs/heads/master | 2021-09-29T19:19:19.141656 | 2021-09-19T01:00:00 | 2021-09-19T01:00:00 | 38,314,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | """
Generation and inspection of the ways a sequence of items can be permuted.
TODO: Make these faster.
"""
def count(items):
    """
    Return the number of permutations of items (a count, or a sequence).

    >>> count(5)
    120
    >>> count([1,2,3,4,5])
    120
    >>> count("HELLO")
    120
    """
    import math  # local import: this module has no top-level import block
    if type(items) is not int:
        # A sequence was given: the answer depends only on its length.
        return count(len(items))
    if items <= 0:
        # Preserve the original convention: zero or negative -> 1.
        return 1
    # math.factorial is exact big-int C code: no recursion-depth limit,
    # and it answers the original TODO about handling big numbers.
    return math.factorial(items)
def all(items):
    """
    Yield every permutation of a sequence, one list/string/tuple at a
    time, preserving the input's sequence type.

    >>> [p for p in all([1,2,3])]
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    >>> [p for p in all("ZP")]
    ['ZP', 'PZ']
    """
    n = len(items)
    if n <= 1:
        yield items
    else:
        for pos in range(n):
            head = items[pos:pos + 1]
            rest = items[:pos] + items[pos + 1:]
            for tail in all(rest):
                yield head + tail
def unique(items):
    """
    Yield each distinct permutation of a sequence exactly once, even when
    the input contains duplicate elements.

    >>> [p for p in unique((1,2,1))]
    [(1, 2, 1), (1, 1, 2), (2, 1, 1)]
    """
    if len(items) <= 1:
        yield items
        return
    seen = set()
    for pos, element in enumerate(items):
        if element in seen:
            continue
        seen.add(element)
        for tail in unique(items[:pos] + items[pos + 1:]):
            yield items[pos:pos + 1] + tail
def get(items, n):
    """
    Return the n-th permutation of a sequence.
    Equivalent to list(all(items))[n], but much faster.

    >>> get([1,2,3], 0)
    [1, 2, 3]
    >>> get([1,2,3], 1)
    [1, 3, 2]
    """
    import math  # local import: this module has no top-level import block
    if not items:
        return items[:0]
    # Number of permutations of the remaining elements; same value the
    # module's count(len(items) - 1) computes, but standalone.
    block = math.factorial(len(items) - 1)
    # Floor division: the original used '/', which is float division on
    # Python 3 and made the slice indices below raise TypeError.
    i = n // block
    return items[i:i + 1] + get(items[:i] + items[i + 1:], n % block)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"egnor@b9a7e9c3-f381-4ed5-8165-ae204f0e7d92"
] | egnor@b9a7e9c3-f381-4ed5-8165-ae204f0e7d92 |
2319d1b0c95ca226ab584b7c8606772d33760b4a | 4606385ae9579547584532c0937474d6aa810c35 | /10.py | 2a1fea2c05e5e73b4ec59ceb918c81fd76e6a54e | [] | no_license | jatin7611/python-programmes | b5d2f725b932974c6c0f6b91288c5ffb0f37a9d3 | 4a9c965bcf2b6120e3483f580bd5b64f138d8ad6 | refs/heads/main | 2023-08-06T21:11:48.091456 | 2021-10-10T17:38:29 | 2021-10-10T17:38:29 | 411,216,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | t = (1,0,2,5,0,0,6)
c = 0
for x in t:
if x == 0:
c += 1
print("Number of zeros : " , c) | [
"noreply@github.com"
] | jatin7611.noreply@github.com |
bf07694b12e637cae2087134aaaff2bbb682d2cc | 5355e3a441a6b8f756491bae88c141ae7ee9f61b | /Lista numeros de 1 a 200 e raiz quadrada/Lista numeros de 1 a 200 e raiz quadrada.py | b522a1d4dc5f7ad8fd4d5495f3176d26f7904fdd | [] | no_license | MurilloFagundesAS/Exercicios-ProgramacaoII-FATEC-2020-1 | 95d2529adac649e44f80d759e9cb151fb539e5ad | 8ffcef2ddb7e3a3a36442396fc6b44e22f4dcf7c | refs/heads/master | 2023-01-23T19:31:12.539810 | 2020-12-04T20:04:46 | 2020-12-04T20:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | for i in range(1,201):
quadrado = i**2
raiz = i**(1/2)
print('{} , {} , {}'.format(i, quadrado, raiz))
| [
"mll-fag@hotmail.com"
] | mll-fag@hotmail.com |
3d1cde7505953c42c17da27c37c33aaa338acc32 | 8441f156e53afcc6c2b5190de2439c68eb40f218 | /python/nistoar/testing/__init__.py | d92c9b8c26da11199ab8542e66d9baff95a31408 | [] | no_license | usnistgov/oar-metadata | 99436a84d32d623d77310e75eee834c683ea1d5b | 2190bfc79d97f81d52dd24df0d4e9dc844065b67 | refs/heads/integration | 2023-07-08T16:06:23.258608 | 2023-04-22T21:00:09 | 2023-04-22T21:00:09 | 82,972,531 | 4 | 7 | null | 2023-06-30T18:27:38 | 2017-02-23T21:20:34 | Python | UTF-8 | Python | false | false | 5,200 | py | """
test infrastructure and utilities usable throughout the nistoar library
"""
# this code was copied from the testing infrastructure for ejsonschema
import os, shutil
__all__ = [
'ensure_tmpdir', 'tmpdir', 'rmtmpdir', 'Tempfiles', 'artifactdir'
]
tmpname = "_test"
def ensure_tmpdir(basedir=None, dirname=None):
    """
    ensure the existence of a directory where temporary inputs and outputs
    can be placed, creating it if needed.  This directory is not cleaned up
    after use.

    :argument str basedir: the desired path to tmp directory's parent directory.
                           if not provided, the directory will be placed in the
                           current working directory.
    :argument str dirname: the desired name for the directory; defaults to the
                           per-process name computed by tmpdir().
    :return str: the path to the temporary directory
    """
    tdir = tmpdir(basedir, dirname)
    # os.mkdir (not makedirs): the parent directory must already exist.
    if not os.path.isdir(tdir):
        os.mkdir(tdir)
    return tdir
def tmpdir(basedir=None, dirname=None):
    """
    Return the path of a temporary directory for test inputs and outputs
    without creating it.

    :argument str basedir: the desired path to tmp directory's parent directory.
                           if not provided, the current working directory is used.
    :argument str dirname: the desired name for the directory; defaults to the
                           module prefix plus the current process id.
    :return str: the path to the temporary directory
    """
    name = dirname or (tmpname + str(os.getpid()))
    parent = basedir or os.getcwd()
    return os.path.join(parent, name)
def rmdir(dirpath):
    """Recursively delete *dirpath* and everything beneath it."""
    shutil.rmtree(dirpath)
def rmtmpdir(basedir=None, dirname=None):
    """
    remove the temporary directory (and all its contents) named by the
    same rules tmpdir() uses, if it exists.

    :argument str basedir: the path to tmp directory's parent directory.
                           if not provided, the current working directory will
                           be assumed.
    :argument str dirname: the name for the directory
    :return: None (the original docstring claimed the removed path was
             returned, but no value is actually returned)
    """
    tdir = tmpdir(basedir, dirname)
    if os.path.exists(tdir):
        rmdir(tdir)
class Tempfiles(object):
"""
A class for creating temporary testing space that hides the configured
absolute location.
It is instantiated with a base directory where temporary directories and
files can be created. Full paths to a temporary file or directory can
be gotten, then, by calling the instance as a function:
.. code-block:: python
ts = Tempfiles(basedir)
tmpfile = ts("testoutput.txt")
If you want the file to be automatically cleaned up, use the track()
function:
tmpfile = ts.track("testoutput.txt")
Temporary directories that should be cleaned up can be created with mkdir():
.. code-block:: python
tmpdir = ts.mkdir("mytempdir")
All directories and files created below the configured base can be removed
by calling clean() explicitly or by using autoclean=True as a constructor
parameter; the latter will remove the files and directories when the
instance is destroyed.
"""
def __init__(self, tempdir=None, autoclean=False):
if not tempdir:
tempdir = ensure_tmpdir()
assert os.path.exists(tempdir)
self._root = tempdir
self._files = set()
self._autoclean = autoclean
@property
def root(self):
"""
the base directory below which is where temporary files and directories
can be created and tracked
"""
return self._root
def __call__(self, child):
return os.path.join(self.root, child)
def mkdir(self, dirname):
"""
create and track a directory given as a relative path
"""
d = os.path.join(self.root, dirname)
if not os.path.isdir(d):
os.mkdir(d)
self.track(dirname)
return d
def track(self, filename):
"""
keep track of a file or directory that has a relative path given by
filename. It will be removed upon a call to clean()
"""
self._files.add(filename)
return self.__call__(filename)
def clean(self):
"""
remove all files and directories being tracked by this instance.
"""
for i in range(len(self._files)):
filen = self._files.pop()
path = os.path.join(self._root, filen)
if os.path.exists(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
finally:
if os.path.exists(path):
self._files.add(filen)
def __del__(self):
if self._autoclean:
self.clean()
def artifactdir(mod=None):
    """
    Return a directory where test artifacts for *mod* can be written.

    If the OAR_TEST_ARTIFACT_DIR environment variable names an existing
    directory, a per-module subdirectory of it is created (when *mod*
    resolves to a name) and returned; otherwise the default temp-dir
    path from tmpdir() is returned.

    :param mod: a module name (str) or an object with a __name__ (e.g. a
                module); when None or unusable, the base directory itself
                is returned.
    :return str: the directory path
    """
    out = os.environ.get('OAR_TEST_ARTIFACT_DIR')
    # Fall back to the process-local temp path when the env var is unset
    # or does not point at an existing directory.
    if not out or not os.path.isdir(out):
        return tmpdir()
    # Accept a module object in place of its name.
    if not isinstance(mod, str) and hasattr(mod, '__name__'):
        mod = mod.__name__
    if not isinstance(mod, str):
        return out
    out = os.path.join(out, mod)
    if not os.path.exists(out):
        os.mkdir(out)
    return out
| [
"raymond.plante@nist.gov"
] | raymond.plante@nist.gov |
00588c59ef606ca06a81ac2cc3da8e2270175594 | 52e8dce655b89a260d049d34e74bc0cd3caf6f07 | /torchreid/__init__.py | 3403b86662515fb3072ca4ac7f8f659b96c4a42f | [
"MIT"
] | permissive | digitalbrain79/deep-person-reid | b527d0e8bd9a4a72209728c105fe5cd1773041dc | 0e7026be11dab7cb6991c43ea0b36765445507f9 | refs/heads/master | 2020-05-20T02:50:24.406708 | 2019-05-06T21:28:34 | 2019-05-06T21:28:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from __future__ import absolute_import
from __future__ import print_function
__version__ = '0.7.4'
__author__ = 'Kaiyang Zhou'
__description__ = 'Deep learning person re-identification in PyTorch'
from torchreid import (
engine,
models,
losses,
metrics,
data,
optim,
utils
)
| [
"k.zhou@qmul.ac.uk"
] | k.zhou@qmul.ac.uk |
1856f2005542adc60f1a5abe3e4aef62dba91d87 | 1a1b98a3d39a83219736625ba7e9f66511aea705 | /my_dropbox/wsgi.py | 1dafb6d506803b7a69943271ae26a91910e42665 | [] | no_license | RedYaafte/my-dropbox | fad668c92e24914c5f77d0cbb07e55b75c1f896b | 4dc44db235cdc081b84539af60cab48efea0963b | refs/heads/master | 2022-12-03T21:36:37.898786 | 2020-01-23T23:15:30 | 2020-01-23T23:15:30 | 214,294,879 | 0 | 0 | null | 2022-11-22T04:34:30 | 2019-10-10T22:13:04 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for my_dropbox project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_dropbox.settings')
application = get_wsgi_application()
| [
"redyafte@gmail.com"
] | redyafte@gmail.com |
ba3dcd0976017d74d3520dfbc089a894520c8ad2 | 47a351d9a80b884451b4c072dc19827c6d69b2f2 | /soft7/netcdf.py | dfce551382cc5a8b4c7571996b03e4899da6643e | [
"MIT"
] | permissive | jesper-friis/soft7 | 09e94244e17798e3d6b20fb767f4914aa6e61c23 | 1b572a3710bf84dec1b97b2ee3ef931a37f7edc8 | refs/heads/main | 2023-04-10T22:06:38.272443 | 2021-02-15T11:12:44 | 2021-02-15T11:12:44 | 339,229,126 | 0 | 0 | MIT | 2021-02-15T22:53:06 | 2021-02-15T22:53:06 | null | UTF-8 | Python | false | false | 1,539 | py | import netCDF4 as nc
def nc_dumps(file, unique_id):
with nc.Dataset(file) as ds:
o = {
"uri": unique_id
}
dims = {}
for dim in ds.dimensions.values():
dims[dim.name] = dim.name
o["dimensions"] = dims
o["properties"] = {}
for prop in ds.variables.values():
props = {}
###
try:
props["type"] = str(prop.datatype)
except:
pass
###
try:
props["unit"] = prop.units
except:
pass
###
try:
if list(prop.dimensions):
props["shape"] = list(prop.dimensions)
except:
pass
###
try:
if 'standard_name' in prop.ncattrs():
props["label"] = getattr(prop, 'standard_name')
else:
props["label"] = prop.name
except:
pass
###
try:
if 'long_name' in prop.ncattrs():
props["description"] = getattr(prop, 'long_name')
except:
pass
o["properties"][prop.name] = props
return o
| [
"thomas.f.hagelien@sintef.no"
] | thomas.f.hagelien@sintef.no |
583979b4a898e582480cb2aba76840538d04675e | 15e39659c3f892631843ad03afb1991d5bf18a91 | /secondtask.py | 12c855cf87152556472245164bd9e9a9e76d6d3a | [] | no_license | rollingshow/cv_firsttask | ddaefae4e1c113afb6ae8727ec33ed68e60366f1 | c2552e65d09648f4a4ced9d4dba1ee874996594f | refs/heads/main | 2022-12-27T21:25:10.289596 | 2020-10-06T00:57:19 | 2020-10-06T00:57:19 | 301,576,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | import matplotlib.pyplot as plt
import numpy as np
def get_coords(filename):
    """Return the bounding box of the nonzero region of a text image.

    File layout: one size line, one ignored line, then a space-separated
    0/1 pixel matrix (read with skip_header=2).

    Returns a dict with first_r/last_r (first and last nonzero column
    index) and first_c/last_c (first and last nonzero row index).  For an
    all-zero image the original sentinels 10000 / -10000 are preserved.
    NOTE(review): the r/c naming is swapped relative to the usual
    row/column convention — kept as-is for backward compatibility.
    """
    # Считываем реальный размер (read but unused, as in the original).
    size = np.genfromtxt(filename, max_rows=1, deletechars="\n")
    # Считаываем массив
    array = np.genfromtxt(filename, skip_header=2, delimiter=" ",
                          deletechars="\n", dtype="uint8")
    # Which columns / rows contain at least one nonzero pixel.
    col_has = np.any(array, axis=0)
    row_has = np.any(array, axis=1)
    first_r, last_r = 10000, -10000
    first_c, last_c = 10000, -10000
    # np.flatnonzero replaces the manual min/max scan over enumerate().
    cols = np.flatnonzero(col_has)
    if cols.size:
        first_r, last_r = int(cols[0]), int(cols[-1])
    rows = np.flatnonzero(row_has)
    if rows.size:
        first_c, last_c = int(rows[0]), int(rows[-1])
    return {"first_r": first_r, "last_r": last_r,
            "first_c": first_c, "last_c": last_c}
i1 = get_coords("img1.txt")
i2 = get_coords("img2.txt")
print(i2["first_r"] - i1["first_r"], " ", i2["first_c"] - i1["first_c"]) | [
"noreply@github.com"
] | rollingshow.noreply@github.com |
7b757ab58131facf2c7d0ebfc57d796e29f390dd | 555e36a5f1a0e062983bfdaec8facf030bb41d5c | /app.py | 11c8d3d432806666d84b871f5b5549a9ce298dbd | [] | no_license | shivamverma333/Crop-Yield-Prediction | b6214b25f30d10fd07d03a65fbb64fdbeb6deca1 | 68e96ce0b553f6576c75705236913e3024473519 | refs/heads/master | 2022-07-10T00:42:26.689703 | 2020-05-16T12:20:10 | 2020-05-16T12:20:10 | 264,428,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,237 | py | import flask
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.externals import joblib
app=flask.Flask(__name__,template_folder='templates')
with open(f'models/knn_regressor_save.sav','rb') as file:
knn_regressor=joblib.load(file)
with open(f'models/scaler_save.sav','rb') as file2:
sc=joblib.load(file2)
@app.route('/',methods=['GET', 'POST'])
def main():
if flask.request.method=='GET':
return(flask.render_template('main.html'))
if flask.request.method=='POST':
apparentTemperatureMax=flask.request.form['apparentTemperatureMax']
apparentTemperatureMin=flask.request.form['apparentTemperatureMin']
cloudCover=flask.request.form['cloudCover']
dewPoint=flask.request.form['dewPoint']
humidity=flask.request.form['humidity']
precipIntensity=flask.request.form['precipIntensity']
precipIntensityMax=flask.request.form['precipIntensityMax']
precipProbability=flask.request.form['precipProbability']
precipAccumulation=flask.request.form['precipAccumulation']
precipTypeIsRain=flask.request.form['precipTypeIsRain']
precipTypeIsSnow=flask.request.form['precipTypeIsSnow']
pressure=flask.request.form['pressure']
temperatureMax=flask.request.form['temperatureMax']
temperatureMin=flask.request.form['temperatureMin']
visibility=flask.request.form['visibility']
windBearing=flask.request.form['windBearing']
windSpeed=flask.request.form['windSpeed']
NDVI=flask.request.form['NDVI']
DayInSeason=flask.request.form['DayInSeason']
dataToPredict=[[float(apparentTemperatureMax), float(apparentTemperatureMin), float(cloudCover), float(dewPoint), float(humidity), float(precipIntensity),float(precipIntensityMax),float(precipProbability), float(precipAccumulation), float(precipTypeIsRain),float(precipTypeIsSnow), float(pressure), float(temperatureMax),
float(temperatureMin), float(visibility), float(windBearing), float(windSpeed), float(NDVI), float(DayInSeason)]]
data_temp=pd.DataFrame(dataToPredict)
data_removed_cols=data_temp.iloc[:,9:11]
data_temp=data_temp.drop(data_temp.columns[[9,10]],axis=1)
data_temp=sc.transform(data_temp)
data_temp=pd.DataFrame(data_temp)
scaled_data=pd.concat([data_temp,data_removed_cols],axis=1)
scaled_data.columns=range(scaled_data.shape[1])
results=knn_regressor.predict(scaled_data)
return(flask.render_template('main.html',inputs={'apparentTemperatureMax':apparentTemperatureMax,'apparentTemperatureMin':apparentTemperatureMin,'cloudCover':cloudCover,'dewPoint':dewPoint,'humidity':humidity,'precipIntensity':precipIntensity,'precipIntensityMax':precipIntensityMax,'precipProbability':precipProbability,'precipAccumulation':precipAccumulation,'precipTypeIsRain':precipTypeIsRain,'precipTypeIsSnow':precipTypeIsSnow,'pressure':pressure,'temperatureMax':temperatureMax,'temperatureMin':temperatureMin,'visibility':visibility,'windBearing':windBearing,'windSpeed':windSpeed,'NDVI':NDVI,'DayInSeason':DayInSeason},result=results))
if __name__=='__main__':
app.run(host='0.0.0.0') | [
"noreply@github.com"
] | shivamverma333.noreply@github.com |
7f7dd2fb11a62858cb9182c5d8f8f58b8dab8c9b | 56a3d2ca4a7fd2bc5c0329b34922ca667a7cfdea | /blog/migrations/0001_initial.py | 81aaf966d8e1726e20ef13fcf36110c2aa545483 | [] | no_license | snehalkarale/BlogProject | 1f12e24de934dd2fdb1ec75447d3491ead68151b | 6395ef4cdb89ed2e861ef309b0db1ed68c7f2ca2 | refs/heads/master | 2022-04-26T22:06:51.918195 | 2020-04-30T10:44:05 | 2020-04-30T10:44:05 | 258,164,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # Generated by Django 2.2.3 on 2020-04-23 09:02
from django.conf import settings
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('slug', models.SlugField(max_length=250, unique_for_date='publish')),
('body', models.TextField()),
('publish', models.DateTimeField(default=django.utils.timezone.now)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=50)),
('author', models.ForeignKey(on_delete='None', related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-publish',),
},
),
]
| [
"snehalkarale21@gmail.com"
] | snehalkarale21@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.