content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Snow Animation
from tkinter import Tk, Canvas, TclError
from time import sleep
from random import randint
window=Tk()
window.title("Snow")
cvs=Canvas(window, height=800, width=1200, bg="blue")
cvs.pack()
ball=cvs.create_oval(50,50,100,100,outline="red", fill="yellow")
snow = []
r = 5
for i in range(600):
x = randint(0,1200)
y = randint(0,800)
new_snow = cvs.create_oval(x-r, y-r, x+r, y+r, outline = "white", fill = "white")
snow.append(new_snow)
cvs.bind_all('<Key>', key_press)
cvs.bind_all('<Button-1>', left_click)
while True:
sleep(0.005)
try:
window.update()
except TclError:
break | [
2,
7967,
23535,
198,
198,
6738,
256,
74,
3849,
1330,
309,
74,
11,
1680,
11017,
11,
309,
565,
12331,
198,
6738,
640,
1330,
3993,
198,
6738,
4738,
1330,
43720,
600,
198,
198,
17497,
28,
51,
74,
3419,
198,
198,
17497,
13,
7839,
7203,
... | 2.244755 | 286 |
#!/usr/bin/python3
import os
import sys
import urllib.request
from orderedset import OrderedSet
from shell import Shell
from log import Log
if __name__ == '__main__':
start=''
end=''
if len(sys.argv) > 1:
start=sys.argv[1]
if len(sys.argv) > 2:
end=sys.argv[2]
gitLog = GitLog(start, end)
GeneratorLog().saveLog(gitLog)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
6149,
2617,
1330,
14230,
1068,
7248,
198,
6738,
7582,
1330,
17537,
198,
6738,
2604,
1330,
5972,
198,
... | 2.297468 | 158 |
from django.db import models
from django.utils import timezone
# Create your models here. | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
2,
13610,
534,
4981,
994,
13
] | 3.75 | 24 |
""""textElements": [ {
"endIndex": 224,
"paragraphMarker": { "style": {} }
}, {
"endIndex": 130,
"textRun": { "content": "Li lingues differe in li grammatica e li vocabules. Omnicos directe al desirabilite de un nov ", "style": {} }
}, {
"endIndex": 143,
"startIndex": 130,
"textRun": { "content": "lingua franca", "style": { "italic": True } }
}, {
"endIndex": 224,
"startIndex": 143,
"textRun": { "content": ": solmen va esser necessi far:\n", "style": {} }
}, {
"endIndex": 243,
"startIndex": 224,
"paragraphMarker": {
"style": { "indentStart": { "magnitude": 36, "unit": "PT" }, "direction": "LEFT_TO_RIGHT", "indentFirstLine": { "magnitude": 18, "unit": "PT" }, "spacingMode": "COLLAPSE_LISTS" },
"bullet": { "listId": "foo123", "glyph": "\u25cf" }
}
}, {
"endIndex": 243,
"startIndex": 224,
"textRun": { "content": "uniform grammatica\n", "style": {} }
}, {
"endIndex": 257,
"startIndex": 243,
"paragraphMarker": {
"style": { "indentStart": { "magnitude": 36, "unit": "PT" }, "direction": "LEFT_TO_RIGHT", "indentFirstLine": { "magnitude": 18, "unit": "PT" }, "spacingMode": "COLLAPSE_LISTS" },
"bullet": { "listId": "foo123", "glyph": "\u25cf" }
}
}, {
"endIndex": 257,
"startIndex": 243,
"textRun": { "content": "Pronunciation\n", "style": {} }
}, {
"endIndex": 277,
"startIndex": 257,
"paragraphMarker": {
"style": { "indentStart": { "magnitude": 36, "unit": "PT" }, "indentFirstLine": { "magnitude": 18, "unit": "PT" }, "spacingMode": "COLLAPSE_LISTS" },
"bullet": { "listId": "foo123", "glyph": "\u25cf" }
}
}, {
"endIndex": 277,
"startIndex": 257,
"textRun": { "content": "plu sommun paroles.\n", "style": {} }
}, {
"endIndex": 500,
"startIndex": 277,
"paragraphMarker": { "style": {} }
}, {
"endIndex": 500,
"startIndex": 277,
"textRun": { "content": "Ka swu thefognay, tay waddeant varpa u inzo.\n", "style": {} }
}]"""
| [
628,
198,
198,
15931,
15931,
5239,
36,
3639,
1298,
685,
1391,
198,
220,
220,
220,
366,
437,
15732,
1298,
26063,
11,
198,
220,
220,
220,
366,
20360,
9704,
263,
1298,
1391,
366,
7635,
1298,
23884,
1782,
198,
220,
8964,
1391,
198,
220,
... | 2.276477 | 897 |
# pylint: disable= too-many-arguments, too-many-instance-attributes
from datetime import datetime, timezone
from typing import Dict, List, Optional
from urllib.parse import urlparse
from .exceptions import InvalidEventError
class AnalyticsEvent:
"""
A discrete event, representing a user action within a fides tool.
"""
def __init__(
self,
event: str,
event_created_at: datetime,
command: Optional[str] = None,
docker: bool = False,
endpoint: Optional[str] = None,
error: Optional[str] = None,
extra_data: Optional[Dict] = None,
flags: Optional[List[str]] = None,
local_host: bool = False,
resource_counts: Optional[Dict[str, int]] = None,
status_code: Optional[int] = None,
) -> None:
"""
Define a new analytics event to send to the fideslog server.
:param event: The name/type of this event.
:param event_created_at: The UTC timestamp when the event occurred, in [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) format. Must include the UTC timezone, and represent a datetime in the past.
:param command: For events submitted as a result of running CLI commands, the name of the command that was submitted. May include the subcommand name(s).
:param docker: `True` if the command was submitted within a Docker container. Default: `False`.
:param endpoint: For events submitted as a result of making API server requests, the API endpoint path that was requested. If a fully-qualified URL is provided, only the URL path will be persisted.
:param error: For events submitted as a result of running CLI commands that exit with a non-0 status code, or events submitted as a result of API server requests that respond with a non-2xx status code, the error type, without specific error details.
:param extra_data: Any additional key/value pairs that should be associated with this event.
:param flags: For events submitted as a result of running CLI commands, the flags in use when the command was submitted. Omits flag values (when they exist) by persisting only the portion of each string in this list that come before `=` or `space` characters.
:param local_host: For events submitted as a result of making API server requests, `True` if the API server is running on the user's local host. Default: `False`.
:param resource_counts: Should contain the counts of dataset, policy, and system manifests in use when this event was submitted. Include all three keys, even if one or more of their values are `0`. Ex: `{ "datasets": 7, "policies": 26, "systems": 9 }`.
:param status_code: For events submitted as a result of making API server requests, the HTTP status code included in the response.
"""
try:
assert len(event) > 0, "event (name or type) is required"
self.event = event
assert (
event_created_at.tzinfo is not None
and event_created_at.tzinfo == timezone.utc
), "event_created_at must use the UTC timezone"
assert event_created_at < datetime.now(
timezone.utc
), "event_created_at must be in the past"
self.event_created_at = event_created_at
self.resource_counts = None
if resource_counts is not None:
for key in ["datasets", "policies", "systems"]:
val = resource_counts.get(key)
assert (
val is not None
), f'resource_counts must include a "{key}" key'
assert isinstance(
val, int
), f'The value of resource_counts["{key}"] must be an integer'
self.resource_counts = resource_counts
self.endpoint = None
if endpoint is not None:
assert urlparse(endpoint).path != "", "endpoint must include a URL path"
self.endpoint = endpoint
self.command = command
self.docker = docker
self.error = error
self.extra_data = extra_data or {}
self.flags = flags
self.local_host = local_host
self.status_code = status_code
if self.command is not None or self.endpoint is not None:
assert self.status_code is not None, "status_code must be provided"
if self.error is not None:
assert (
self.status_code is not None
), "An error was provided, but status_code is empty"
assert self.status_code > 0 and (
self.status_code < 200 or self.status_code > 299
), "An error was provided, but the provided status_code indicates success"
except AssertionError as err:
raise InvalidEventError(str(err)) from None
| [
2,
279,
2645,
600,
25,
15560,
28,
1165,
12,
21834,
12,
853,
2886,
11,
1165,
12,
21834,
12,
39098,
12,
1078,
7657,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
32233,
... | 2.550947 | 1,953 |
import re
keyspace = {
"gog": [r"^[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}$"],
"steam": [
r"^[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}$",
r"^[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}-[a-z,A-Z,0-9]{5}$",
],
"playstation": [r"^[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}$"],
"origin": [
r"^[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}$"
],
"uplay": [
r"^[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}$",
r"^[a-z,A-Z,0-9]{3}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}-[a-z,A-Z,0-9]{4}$",
],
"url": [r"^http"],
}
_compiled = {k: [re.compile(r) for r in v] for k, v in keyspace.items()}
| [
11748,
302,
198,
198,
13083,
10223,
796,
1391,
198,
220,
220,
220,
366,
70,
519,
1298,
685,
81,
1,
61,
58,
64,
12,
89,
11,
32,
12,
57,
11,
15,
12,
24,
60,
90,
20,
92,
49146,
64,
12,
89,
11,
32,
12,
57,
11,
15,
12,
24,
60... | 1.221374 | 655 |
# Advent of Code 2016
#
# From https://adventofcode.com/2016/day/6
import numpy as np
msgs = np.array([list(row.strip()) for row in open('../inputs/Advent2016_06.txt', 'r')], dtype=str)
part1 = part2 = ''
for y in range(msgs.shape[1]):
unique, counts = np.unique(msgs[:, y], return_counts=True)
part1 += unique[np.argmax(counts)]
part2 += unique[np.argmin(counts)]
print(f"AoC 2016 Day 6, Part 1 answer is {part1}")
print(f"AoC 2016 Day 6, Part 2 answer is {part2}") | [
2,
33732,
286,
6127,
1584,
198,
2,
198,
2,
3574,
3740,
1378,
324,
1151,
1659,
8189,
13,
785,
14,
5304,
14,
820,
14,
21,
198,
11748,
299,
32152,
355,
45941,
198,
198,
907,
14542,
796,
45941,
13,
18747,
26933,
4868,
7,
808,
13,
3631... | 2.454082 | 196 |
import sys
import time
import os.path
import argparse
try:
from flickrSync import FlickrAPI
import_error = False
except ImportError:
import_error = True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--authorize", action="store_true", help="run the authorization setup")
parser.add_argument("-f", "--folder", action="store", help="folder to synchronize")
parser.add_argument("-d", "--deletion", action="store_true", help="allow deletion on Flickr while synchronizing")
parser.add_argument("-s", "--saving", action="store_true", help="allow download from Flickr while synchronizing")
parser.add_argument("-u", "--upload", action="store_true", help="allow upload to Flickr while synchronizing")
parser.add_argument("-m", "--max", action="store", help="maximal number of downloads / uploads at once (0 means unlimited)")
parser.add_argument("-w", "--wait", action="store", help="time to wait between downloads / uploads")
args = parser.parse_args()
LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
if not args.wait:
args.wait = 0;
else:
args.wait = float(args.wait)
if not args.max or float(args.max) == 0:
args.max = False;
else:
args.max = int(float(args.max))
if (args.folder or args.authorize) and not import_error:
INSTANCE = FlickrAPI(LOCAL_DIR)
authorized = INSTANCE.CheckTokens()
if args.authorize and not authorized:
authorized = INSTANCE.OAuthSingIn()
if authorized and args.folder:
INSTANCE.SyncPhotos(args.folder, args.saving, args.deletion, args.upload, args.max, args.wait)
elif import_error:
print("app.py script error @ " + time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(time.time())))
print("ERROR: flickrSync module cannot be imported.")
else:
parser.print_help()
| [
11748,
25064,
198,
11748,
640,
198,
11748,
28686,
13,
6978,
198,
11748,
1822,
29572,
198,
28311,
25,
198,
197,
6738,
781,
18994,
28985,
1330,
27085,
17614,
198,
197,
11748,
62,
18224,
796,
10352,
198,
16341,
17267,
12331,
25,
198,
197,
... | 2.988275 | 597 |
import unittest
from pynars.NARS.DataStructures import Task
from pynars.NAL.MetaLevelInference.VariableSubstitution import *
from pynars.NARS.RuleMap import RuleMap
import Tests.utils_for_test as utils_for_test
from Tests.utils_for_test import *
from pynars.utils.Print import PrintType, out_print
# utils_for_test.rule_map = RuleMap_v2()
class SubstituteVar:
''''''
@property
@property
@property
@property
find_var_with_pos: Callable = lambda pos_search, variables, positions: [var for var, pos in zip(variables, positions) if pos[:len(pos_search)] == pos_search]
def unification_variable(term1: Term, term2: Term, pos_common1: List[int], pos_common2: List[int]):
''''''
# 1. find the variables in the first common position
ivar1 = find_var_with_pos(pos_common1, term1._index_var.var_independent, term1._index_var.positions_ivar)
dvar1 = find_var_with_pos(pos_common1, term1._index_var.var_dependent, term1._index_var.positions_dvar)
qvar1 = find_var_with_pos(pos_common1, term1._index_var.var_query, term1._index_var.positions_qvar)
# 2. find the variables in the second common position
ivar2 = find_var_with_pos(pos_common2, term2._index_var.var_independent, term2._index_var.positions_ivar)
dvar2 = find_var_with_pos(pos_common2, term2._index_var.var_dependent, term2._index_var.positions_dvar)
qvar2 = find_var_with_pos(pos_common2, term2._index_var.var_query, term2._index_var.positions_qvar)
# 3. build the mapping
mapping_ivar = _build_mapping(term1._index_var.var_independent, term2._index_var.var_independent, ivar1, ivar2)
mapping_dvar = _build_mapping(term1._index_var.var_dependent, term2._index_var.var_dependent, dvar1, dvar2)
mapping_qvar = _build_mapping(term1._index_var.var_query, term2._index_var.var_query, qvar1, qvar2)
return SubstituteVar(mapping_ivar, mapping_dvar, mapping_qvar)
class TEST_NAL6(unittest.TestCase):
''''''
def test_substition_var_to_var(self):
'''
<(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>.
<<$x-->F>==><$x-->H>>.
|-
<(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$x-->H>>.
'''
term1 = Narsese.parse("<<$x-->F>==><$x-->H>>.").term
term2 = Narsese.parse("<(&&, <#x-->A>, <#x-->B>, <<$y-->C>==><$y-->D>>, <$z-->E>) ==> <$z-->F>>.").term
subst_var = unification_variable(term1, term2, [0], [1]) # to find possible replacement.
subst_var.apply(term1, term2)
# subst_var.apply()
term3 = Statement.Implication(term1[0], term2[1])
# term_substitution = substitution(compound, Term("A"), Term("D"))
# self.assertEqual(term_substitution, term_new)
pass
def test_unification_0(self):
'''
'Variable unification
'If something is a bird, then it is a flyer.
<<$x --> bird> ==> <$x --> flyer>>. %1.00;0.90%
<bird-->filyer>
'If something is a bird, then it is not a flyer.
<<$y --> bird> ==> <$y --> flyer>>. %0.00;0.70%
1
'If something is a bird, then usually, it is a flyer.
''outputMustContain('<<$1 --> bird> ==> <$1 --> flyer>>. %0.79;0.92%')
'''
tasks_derived = memory_accept_revision(
'<<$x --> bird> ==> <$x --> flyer>>. %1.00;0.90%',
'<<$y --> bird> ==> <$y --> flyer>>. %0.00;0.70%'
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> bird> ==> <$1 --> flyer>>. %0.79;0.92%')
)
pass
def test_unification_1(self):
'''
'Variable unification
'If something is a bird, then it is a animal.
<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%
'If something is a robin, then it is a bird.
<<$y --> robin> ==> <$y --> bird>>. %1.00;0.90%
3
'If something is a robin, then it is a animal.
''outputMustContain('<<$1 --> robin> ==> <$1 --> animal>>. %1.00;0.81%')
'I guess that if something is a animal, then it is a robin.
''outputMustContain('<<$1 --> animal> ==> <$1 --> robin>>. %1.00;0.45%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%',
'<<$y --> robin> ==> <$y --> bird>>. %1.00;0.90%',
'<$x --> bird>.', index_task=(0,), index_belief=(1,)
)
self.assertNotEqual(rules, None)
subst_var = unification_variable(task.term, belief.term, [0], [1]) # to find possible replacement.
subst_var.apply(task.term, belief.term)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
repr(tasks_derived[0].term)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> robin> ==> <$1 --> animal>>. %1.00;0.81%')
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> animal> ==> <$1 --> robin>>. %1.00;0.45%')
)
self.assertTrue(
not output_contains(tasks_derived, '<<$1 --> animal> ==> <$2 --> robin>>. %1.00;0.45%')
)
pass
def test_unification_2(self):
'''
'Variable unification
'If something is a swan, then it is a bird.
<<$x --> swan> ==> <$x --> bird>>. %1.00;0.80%
'If something is a swan, then it is a swimmer.
<<$y --> swan> ==> <$y --> swimmer>>. %0.80;0.90%
3
'I believe that if something is a swan, then it is a bird or a swimmer.
''outputMustContain('<<$1 --> swan> ==> (||,<$1 --> bird>,<$1 --> swimmer>)>. %1.00;0.72%')
'I believe that if something is a swan, then usually, it is both a bird and a swimmer.
''outputMustContain('<<$1 --> swan> ==> (&&,<$1 --> bird>,<$1 --> swimmer>)>. %0.80;0.72%')
'I guess if something is a swimmer, then it is a bird.
''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.37%')
'I guess if something is a bird, then it is a swimmer.
''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.42%')
'I guess something is a bird, if and only if it is a swimmer.
''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.42%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<<$x --> swan> ==> <$x --> bird>>. %1.00;0.80% ',
'<<$y --> swan> ==> <$y --> swimmer>>. %0.80;0.90%',
'<$x --> swan>.'
)
self.assertNotEqual(rules, None)
subst_var = unification_variable(task.term, belief.term, [0], [0]) # to find possible replacement.
subst_var.apply(task.term, belief.term)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> swan> ==> (||,<$1 --> bird>,<$1 --> swimmer>)>. %1.00;0.72%')
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> swan> ==> (&&,<$1 --> bird>,<$1 --> swimmer>)>. %0.80;0.72%')
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.37%')
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.42%')
)
self.assertTrue(
output_contains(tasks_derived, '<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.42%')
)
pass
def test_unification_3(self):
'''
'Variable unification
'What can be said about bird can also be said about robin.
<<bird --> $x> ==> <robin --> $x>>. %1.00;0.90%
'What can be said about swimmer usually can also be said about robin.
<<swimmer --> $y> ==> <robin --> $y>>. %0.70;0.90%
3
'What can be said about bird and swimmer can also be said about robin.
''outputMustContain('<(&&,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %1.00;0.81%')
'What can be said about bird or swimmer can also be said about robin.
''outputMustContain('<(||,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %0.70;0.81%')
'I guess what can be said about bird can also be said about swimmer.
''outputMustContain('<<bird --> $1> ==> <swimmer --> $1>>. %1.00;0.36%')
'I guess what can be said about swimmer can also be said about bird.
''outputMustContain('<<swimmer --> $1> ==> <bird --> $1>>. %0.70;0.45%')
'I guess bird and swimmer share most properties.
''outputMustContain('<<bird --> $1> <=> <swimmer --> $1>>. %0.70;0.45%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<<bird --> $x> ==> <robin --> $x>>. %1.00;0.90%',
'<<swimmer --> $y> ==> <robin --> $y>>. %0.70;0.90%',
'<robin --> $x>.'
)
self.assertNotEqual(rules, None)
subst_var = unification_variable(task.term, belief.term, [1], [1]) # to find possible replacement.
subst_var.apply(task.term, belief.term)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
self.assertTrue(
output_contains(tasks_derived, '<(&&,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %1.00;0.81%')
)
self.assertTrue(
output_contains(tasks_derived, '<(||,<bird --> $1>,<swimmer --> $1>) ==> <robin --> $1>>. %0.70;0.81%')
)
self.assertTrue(
output_contains(tasks_derived, '<<bird --> $1> ==> <swimmer --> $1>>. %1.00;0.36%')
)
self.assertTrue(
output_contains(tasks_derived, '<<swimmer --> $1> ==> <bird --> $1>>. %0.70;0.45%')
)
self.assertTrue(
output_contains(tasks_derived, '<<bird --> $1> <=> <swimmer --> $1>>. %0.70;0.45%')
)
def test_unification_4(self):
'''
'Variable unification
'If something can fly and chirp, then it is a bird.
<(&&,<$x --> flyer>,<$x --> [chirping]>) ==> <$x --> bird>>. %1.00;0.90%
'If something has wings, then it can fly.
<<$y --> [with_wings]> ==> <$y --> flyer>>. %1.00;0.90%
8
'If something can chirp and has wings, then it is a bird.
''outputMustContain('<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>. %1.00;0.81%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<(&&,<$x --> flyer>,<$x --> [chirping]>) ==> <$x --> bird>>. %1.00;0.90%',
'<<$y --> [with_wings]> ==> <$y --> flyer>>. %1.00;0.90%',
'<$y --> flyer>.'
)
self.assertNotEqual(rules, None)
subst_var = unification_variable(task.term, belief.term, [0,0], [1]) # to find possible replacement.
subst_var.apply(task.term, belief.term)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
self.assertTrue(
output_contains(tasks_derived, '<(&&,<$1 --> [chirping]>,<$1 --> [with_wings]>) ==> <$1 --> bird>>. %1.00;0.81%')
)
pass
def test_unification_5(self):
'''
'Variable unification
'If something can fly, chirp, and eats worms, then it is a bird.
<(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>.
'If something can chirp and has wings, then it is a bird.
<(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>.
''//6
12
'If something can fly and eats worms, then I guess it has wings.
''outputMustContain('<(&&,<$1 --> flyer>,<(*,$1,worms) --> food>) ==> <$1 --> [with_wings]>>. %1.00;0.45%')
'I guess if something has wings, then it can fly and eats worms.
''outputMustContain('<<$1 --> [with_wings]> ==> (&&,<$1 --> flyer>,<(*,$1,worms) --> food>)>. %1.00;0.45%')
'''
pass
def test_unification_6(self):
'''
'Variable unification
'If something can fly and eats worms, then it is a bird.
<(&&,<$x --> flyer>,<(*,$x,worms) --> food>) ==> <$x --> bird>>.
'If something can fly, then it has wings.
<<$y --> flyer> ==> <$y --> [with_wings]>>.
// 4 originally
13
'If something has wings and eats worms, then I guess it is a bird.
''outputMustContain('<(&&,<$1 --> [with_wings]>,<(*,$1,worms) --> food>) ==> <$1 --> bird>>. %1.00;0.45%')
'''
pass
def test_elimination_0(self):
'''
'Variable elimination
'If something is a bird, then it is an animal.
<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%
'A robin is a bird.
<robin --> bird>. %1.00;0.90%
3
'A robin is an animal.
''outputMustContain('<robin --> animal>. %1.00;0.81%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<<$x --> bird> ==> <$x --> animal>>. %1.00;0.90%',
'<robin --> bird>. %1.00;0.90%',
'bird.'
)
self.assertNotEqual(rules, None)
subst_var = unification_variable(task.term, belief.term, [0], [0]) # to find possible replacement.
subst_var.apply(task.term, belief.term)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
self.assertTrue(
output_contains(tasks_derived, '<robin --> animal>. %1.00;0.81%')
)
pass
def test_elimination_1(self):
'''
'Variable elimination
'If something is a bird, then it is an animal.
<<$x --> bird> ==> <$x --> animal>>.
'A tiger is an animal.
<tiger --> animal>.
10
'I guess that a tiger is a bird.
''outputMustContain('<tiger --> bird>. %1.00;0.45%')
'''
pass
def test_elimination_2(self):
'''
'Variable elimination
'Something is a animal if and only if it is a bird.
<<$x --> animal> <=> <$x --> bird>>.
'A robin is a bird.
<robin --> bird>.
3
'A robin is a animal.
''outputMustContain('<robin --> animal>. %1.00;0.81%')
'''
pass
def test_elimination_3(self):
'''
'Variable elimination
'Some bird can swim.
(&&,<#x --> bird>,<#x --> swimmer>).
'Swan is a type of bird.
<swan --> bird>. %0.90%
3
'I guess swan can swim.
''outputMustContain('<swan --> swimmer>. %0.90;0.43%')
'''
pass
def test_elimination_4(self):
'''
'Variable elimination
'Tweety has wings.
<{Tweety} --> [with_wings]>.
'If something can chirp and has wings, then it is a bird.
<(&&,<$x --> [chirping]>,<$x --> [with_wings]>) ==> <$x --> bird>>.
23
'If Tweety can chirp, then it is a bird.
''outputMustContain('<<{Tweety} --> [chirping]> ==> <{Tweety} --> bird>>. %1.00;0.81%')
'''
pass
def test_elimination_5(self):
'''
'Variable elimination
'If something can fly, chirp, and eats worms, then it is a bird.
<(&&,<$x --> flyer>,<$x --> [chirping]>, <(*, $x, worms) --> food>) ==> <$x --> bird>>.
'Tweety can fly.
<{Tweety} --> flyer>.
7
'If Tweety can chirp and eats worms, then it is a bird.
''outputMustContain('<(&&,<(*,{Tweety},worms) --> food>,<{Tweety} --> [chirping]>) ==> <{Tweety} --> bird>>. %1.00;0.81%')
'''
pass
def test_elimination_6(self):
'''
'Variable elimination
'Every lock can be opened by every key.
<(&&,<$x --> key>,<$y --> lock>) ==> <$y --> (/,open,$x,_)>>.
'Lock-1 is a lock.
<{lock1} --> lock>.
20
'Lock-1 can be opened by every key.
''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.81%')
'''
pass
def test_multiple_variable_elimination_0(self):
'''
'Multiple variable elimination
'Every lock can be opened by some key.
<<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90%
'Lock-1 is a lock.
<{lock1} --> lock>. %1.00;0.90%
9
'Some key can open Lock-1.
''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.81%')
'''
rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
'<<$x --> lock> ==> (&&,<#y --> key>,<$x --> (/,open,#y,_)>)>. %1.00;0.90%',
'<{lock1} --> lock>. %1.00;0.90%',
'lock.'
)
self.assertNotEqual(rules, None)
tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
self.assertTrue(
output_contains(tasks_derived, '(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.81%')
)
pass
def test_multiple_variable_elimination_1(self):
'''
'Multiple variable elimination
'There is a lock that can be opened by every key.
(&&,<#x --> lock>,<<$y --> key> ==> <#x --> (/,open,$y,_)>>).
'Lock-1 is a lock.
<{lock1} --> lock>.
9
'I guess Lock-1 can be opened by every key.
''outputMustContain('<<$1 --> key> ==> <{lock1} --> (/,open,$1,_)>>. %1.00;0.43%')
'''
pass
def test_multiple_variable_elimination_2(self):
'''
'Multiple variable elimination
'There is a key that can open some lock.
(&&,<#x --> (/,open,#y,_)>,<#x --> lock>,<#y --> key>).
'Lock-1 is a lock.
<{lock1} --> lock>.
18
'I guess there is a key that can open Lock-1.
''outputMustContain('(&&,<#1 --> key>,<{lock1} --> (/,open,#1,_)>). %1.00;0.43%')
'''
pass
def test_introduction_0(self):
'''
'Introduction
'A swan is a bird.
<swan --> bird>.
'A swan is usually a swimmer.
<swan --> swimmer>. %0.80%
3
'I guess a bird is usually a swimmer.
''outputMustContain('<<$1 --> bird> ==> <$1 --> swimmer>>. %0.80;0.45%')
'I guess a swimmer is a bird.
''outputMustContain('<<$1 --> swimmer> ==> <$1 --> bird>>. %1.00;0.39%')
'I guess a bird is usually a swimmer, and the other way around.
''outputMustContain('<<$1 --> bird> <=> <$1 --> swimmer>>. %0.80;0.45%')
'Some bird can swim.
''outputMustContain('(&&,<#1 --> bird>,<#1 --> swimmer>). %0.80;0.81%')
'''
pass
def test_introduction_1(self):
'''
'Introduction
'A gull is a swimmer.
<gull --> swimmer>.
'Usually, a swan is a swimmer.
<swan --> swimmer>. %0.80%
3
'I guess what can be said about gull usually can also be said about swan.
''outputMustContain('<<gull --> $1> ==> <swan --> $1>>. %0.80;0.45%')
'I guess what can be said about swan can also be said about gull.
''outputMustContain('<<swan --> $1> ==> <gull --> $1>>. %1.00;0.39%')
'I guess gull and swan share most properties.
''outputMustContain('<<gull --> $1> <=> <swan --> $1>>. %0.80;0.45%')
'Gull and swan have some common property.
''outputMustContain('(&&,<gull --> #1>,<swan --> #1>). %0.80;0.81%')
'''
pass
def test_introduction_2(self):
'''
'Introduction
'Key-1 opens Lock-1.
<{key1} --> (/,open,_,{lock1})>.
'Key-1 is a key.
<{key1} --> key>.
45
'I guess every key can open Lock-1.
''outputMustContain('<<$1 --> key> ==> <$1 --> (/,open,_,{lock1})>>. %1.00;0.45%')
'Some key can open Lock-1.
''//outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.81%') //reversed
'' outputMustContain('(&&,<#1 --> (/,open,_,{lock1})>,<#1 --> key>). %1.00;0.25%')
'''
pass
def test_multiple_variables_introduction_0(self):
'''
'Multiple variables introduction
'Lock-1 can be opened by every key.
<<$x --> key> ==> <{lock1} --> (/,open,$x,_)>>.
'Lock-1 is a lock.
<{lock1} --> lock>.
166
'There is a lock that can be opened by every key.
''outputMustContain('(&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.81%')
'I guess every lock can be opened by every key.
''outputMustContain('<(&&,<$1 --> key>,<$2 --> lock>) ==> <$2 --> (/,open,$1,_)>>. %1.00;0.45%')
'''
pass
def test_multiple_variables_introduction_1(self):
'''
'Multiple variables introduction
'Lock-1 can be opened by some key.
(&&,<#x --> key>,<{lock1} --> (/,open,#x,_)>).
'Lock-1 is a lock.
<{lock1} --> lock>.
17
'There is a key that can open some lock.
''outputMustContain('(&&,<#1 --> key>,<#2 --> (/,open,#1,_)>,<#2 --> lock>). %1.00;0.81%')
'I guess every lock can be opened by some key.
''outputMustContain('<<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.45%')
'''
pass
def test_recursion(self):
'''
'Recursion
'0 is a number
<0 --> num>. %1.00;0.90%
'If n is a number, n+1 is also a number
<<$1 --> num> ==> <(*,$1) --> num>>. %1.00;0.90%
'3 is a number?
<(*,(*,(*,0))) --> num>?
70000
'I guess 3 is a number
''outputMustContain('<(*,(*,(*,0))) --> num>. %1.00;0.66%')
'''
pass
def test_second_level_variable_unification_0(self):
'''
'Second level variable unification
'there is a lock which is opened by all keys
(&&,<#1 --> lock>,<<$2 --> key> ==> <#1 --> (/,open,$2,_)>>). %1.00;0.90%
'key1 is a key
<{key1} --> key>. %1.00;0.90%
5
'there is a lock which is opened by key1
''outputMustContain('(&&,<#1 --> (/,open,{key1},_)>,<#1 --> lock>). %1.00;0.81%')
'''
pass
def test_second_level_variable_unification_1(self):
'''
'Second level variable unification
'all locks are opened by some key
<<$1 --> lock> ==> (&&,<#2 --> key>,<$1 --> (/,open,#2,_)>)>. %1.00;0.90%
'key1 is a key
<{key1} --> key>. %1.00;0.90%
5
'maybe all locks are opened by key1
''outputMustContain('')
//''outputMustContain('<<$1 --> lock> ==> <$1 --> (/,open,{key1},_)>>. %1.00;0.43%')
'''
pass
def test_second_variable_introduction_induction(self):
'''
'Second variable introduction (induction)
'if something opens lock1, it is a key
<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>.
'lock1 is a key
<lock1 --> lock>.
7
'there is a lock with the property that when opened by something, this something is a key (induction)
''outputMustContain('<(&&,<#1 --> (/,open,$2,_)>,<#1 --> lock>) ==> <$2 --> key>>. %1.00;0.45%')
'''
pass
def test_variable_elimination_deduction(self):
'''
'Second variable introduction (induction)
'lock1 is a lock
<lock1 --> lock>. %1.00;0.90%
'there is a lock with the property that when opened by something, this something is a key
<(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90%
4
'whatever opens lock1 is a key
''outputMustContain('<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.81%')
'''
pass
def test_abduction_with_variable_elimination_abduction(self):
'''
'Abduction with variable elimination (abduction)
'whatever opens lock1 is a key
<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>. %1.00;0.90%
'there is a lock with the property that when opened by something, this something is a key
<(&&,<#1 --> lock>,<#1 --> (/,open,$2,_)>) ==> <$2 --> key>>. %1.00;0.90%
10
'lock1 is a lock
''outputMustContain('<lock1 --> lock>. %1.00;0.45%')
'''
pass
    def test_birdClaimedByBob(self):
        '''
        'from https://code.google.com/archive/p/open-nars/issues/7
        <(&,<{Tweety} --> bird>,<bird --> fly>) --> claimedByBob>.
        <<(&,<#1 --> $2>,<$3 --> #1>) --> claimedByBob> ==> <<$3 --> $2> --> claimedByBob>>.
        <?x --> claimedByBob>?
        100
        ''outputMustContain('<<{Tweety} --> fly> --> claimedByBob>. %1.00;0.81%')
        '''
        # Placeholder: the Narsese script in the docstring above (taken
        # verbatim from the linked OpenNARS issue) is the specification;
        # the executable port of this test is not implemented yet.
        pass
def test_can_of_worms(self):
'''
<0 --> num>. %1.00;0.90%
<0 --> (/,num,_)>. %1.00;0.90%
20
''outputMustContain('<<$1 --> num> ==> <$1 --> (/,num,_)>>. %1.00;0.45%')
'''
pass
def test_nlp1(self):
'''
<(\,REPRESENT,_,CAT) --> cat>. %1.00;0.90%
<(\,(\,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish) --> cat>.
5
''outputMustContain('<<(\,REPRESENT,_,$1) --> $2> ==> <(\,(\,REPRESENT,_,<(*,$1,FISH) --> FOOD>),_,eat,fish) --> $2>>. %1.00;0.40%')
'''
pass
def test_nlp2(self):
'''
<cat --> (/,(/,REPRESENT,_,<(*,CAT,FISH) --> FOOD>),_,eat,fish)>.
<cat --> CAT>. %1.00;0.90%
300
''outputMustContain('<<$1 --> $2> ==> <$1 --> (/,(/,REPRESENT,_,<(*,$2,FISH) --> FOOD>),_,eat,fish)>>. %1.00;0.40%')
'''
pass
def test_redundant(self):
'''
<<lock1 --> (/,open,$1,_)> ==> <$1 --> key>>.
100
''outputMustNotContain('<(&&,<lock1 --> (/,open,$1,_)>,<(*,$1,lock1) --> open>) ==> <$1 --> key>>. %1.00;0.81%')
''outputMustNotContain('<<(*,$1,lock1) --> open> ==> <lock1 --> (/,open,$1,_)>>. %1.00;0.45%')
'''
pass
def test_symmetry(self):
'''
<(*,a,b) --> like>. %1.00;0.90%
<(*,b,a) --> like>. %1.00;0.90%
<<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>?
20
''outputMustContain('<<(*,$1,$2) --> like> <=> <(*,$2,$1) --> like>>. %1.00;0.40%')
'''
pass
def test_uncle(self):
'''
<tim --> (/,uncle,_,tom)>. %1.00;0.90%
<tim --> (/,uncle,tom,_)>. %0.00;0.90%
10
''outputMustContain('<<$1 --> (/,uncle,_,$2)> ==> <$1 --> (/,uncle,$2,_)>>. %0.00;0.40%')
'would be a strange variable introduction when it would be allowed to use ImageExt and not just looking at <SUB --> PRED>
'this is a strange example I added..
'''
pass
    def test_unification_a1(self):
        '''
        'Variable unification
        'If something is a bird, then it is a animal.
        <<#x-->A> ==> (&&, <#y-->B>, <#x-->C>)>. %1.00;0.90%
        'If something is a robin, then it is a bird.
        <(&&, <#x-->B>, <#y-->C>) ==> <#x --> D>>. %1.00;0.90%
        3
        'If something is a robin, then it is a animal.
        ''outputMustContain('<<#1 --> A> ==> <#2 --> D>>. %1.00;0.81%')
        'I guess that if something is a animal, then it is a robin.
        ''outputMustContain('<<#1 --> D> ==> <#2 --> A>>. %1.00;0.45%')
        '''
        # Map the two Narsese premises (plus the shared conjunction sub-term)
        # to the applicable inference rules and parsed task/belief structures.
        # NOTE(review): concept, result1 and result2 are unpacked but unused.
        rules, task, belief, concept, task_link, term_link, result1, result2 = rule_map_two_premises(
            '<<#x-->A> ==> (&&, <#y-->B>, <#x-->C>)>. %1.00;0.90%',
            '<(&&, <#x-->B>, <#y-->C>) ==> <#x --> D>>. %1.00;0.90% ',
            '(&&, <#y-->B>, <#x-->C>).'
        )
        self.assertNotEqual(rules, None)
        # Unify the dependent variables between task and belief terms, then
        # apply the resulting substitution in place on both terms.
        subst_var = unification_variable(task.term, belief.term, [1], [0]) # to find possible replacement.
        subst_var.apply(task.term, belief.term)
        # Run every matched rule on the premise pair to derive conclusions.
        tasks_derived = [rule(task, belief, task_link, term_link) for rule in rules]
        # Expected forward conclusion (deduction) at %1.00;0.81%.
        self.assertTrue(
            output_contains(tasks_derived, '<<#1 --> A> ==> <#2 --> D>>. %1.00;0.81%')
        )
        # Expected reversed conclusion (abduction) at lower confidence.
        self.assertTrue(
            output_contains(tasks_derived, '<<#1 --> D> ==> <#2 --> A>>. %1.00;0.45%')
        )
        # The independent-variable ($1) form must NOT be derived here.
        self.assertTrue(
            not output_contains(tasks_derived, '<<$1 --> D> ==> <$1 --> A>>. %1.00;0.45%')
        )
        # Debug output: echo the premises and every derived task.
        print("")
        out_print(PrintType.IN, task.sentence.repr, *task.budget)
        out_print(PrintType.IN, belief.sentence.repr, *belief.budget)
        for task in tasks_derived:
            task: Task
            out_print(PrintType.OUT, task.sentence.repr, *task.budget)
        pass
if __name__ == '__main__':
    # Test-case classes to execute when this module is run directly.
    test_classes_to_run = [
        TEST_NAL6
    ]
    loader = unittest.TestLoader()
    # Aggregate every listed TestCase class into a single suite.
    suite = unittest.TestSuite(
        loader.loadTestsFromTestCase(test_class)
        for test_class in test_classes_to_run
    )
    results = unittest.TextTestRunner().run(suite)
11748,
555,
715,
395,
198,
198,
6738,
279,
2047,
945,
13,
45,
27415,
13,
6601,
44909,
942,
1330,
15941,
198,
6738,
279,
2047,
945,
13,
45,
1847,
13,
48526,
4971,
818,
4288,
13,
43015,
7004,
301,
2738,
1330,
1635,
198,
6738,
279,
204... | 2.024651 | 14,523 |
# Copyright 2021 iiPython
# Modules
import os
import json
from pyhttpfs import pyhttpfs
# Initialization
_ICONS_FILE = os.path.join(pyhttpfs.assets_dir, "icons.json")
_CAN_LOAD = os.path.isfile(_ICONS_FILE)
if not _CAN_LOAD:
    pyhttpfs.log("[yellow]No `icons.json` file present, icons will be disabled.")

# Parse the icon metadata once at import time; icons stay disabled ({}) when
# the file is absent. json.load on a context-managed handle fixes the file
# handle leaked by the previous open(...).read() expression, and an explicit
# encoding keeps JSON decoding platform-independent.
if _CAN_LOAD:
    with open(_ICONS_FILE, "r", encoding="utf-8") as _icons_fh:
        _ICON_DATA = json.load(_icons_fh)
else:
    _ICON_DATA = {}
# Icon loader
| [
2,
15069,
33448,
21065,
37906,
198,
198,
2,
3401,
5028,
198,
11748,
28686,
198,
11748,
33918,
198,
6738,
12972,
4023,
9501,
1330,
12972,
4023,
9501,
198,
198,
2,
20768,
1634,
198,
62,
2149,
19213,
62,
25664,
796,
28686,
13,
6978,
13,
... | 2.746575 | 146 |
"""
GraphQL queries in string form used by SmashGGScraper.
"""
# TODO: This is lifted from the old project. Confirm these still work (will need some rework).
from datetime import datetime
from ranking_scraper.gql_query import GraphQLQuery, StringWithoutQuotes
TOURNAMENTS_BY_COUNTRY_PAGING = """
query TournamentsByCountryPaging($countryCode: String!, $afterDate: Timestamp!, $perPage: Int!) {
tournaments(query: {
perPage: $perPage
filter: {
countryCode: $countryCode
videogameIds: [1386]
upcoming: false
hasOnlineEvents: false
afterDate: $afterDate
}
}) {
pageInfo {
totalPages
perPage
}
}
}
""".strip()
TOURNAMENTS_BY_COUNTRY = """
query TournamentsByCountry($countryCode: String!, $afterDate: Timestamp!,
$page: Int!, $perPage: Int!) {
tournaments(query: {
page: $page
perPage: $perPage
filter: {
countryCode: $countryCode
videogameIds: [1386]
upcoming: false
hasOnlineEvents: false
afterDate: $afterDate
}
}) {
nodes {
id
name
countryCode
endAt
events {
id
name
isOnline
numEntrants
state
type
videogame {
id
}
}
}
}
}
""".strip()
TOURNAMENTS_ALL_PAGING = """
query TournamentsPaging($afterDate: Timestamp!, $beforeDate: Timestamp!,
$perPage: Int!) {
tournaments(query: {
perPage: $perPage
filter: {
videogameIds: [1386]
upcoming: false
hasOnlineEvents: false
afterDate: $afterDate
beforeDate: $beforeDate
}
}) {
pageInfo {
totalPages
perPage
}
}
}
""".strip()
TOURNAMENTS_ALL = """
query TournamentsAll($afterDate: Timestamp!, $beforeDate: Timestamp!,
$page: Int!, $perPage: Int!) {
tournaments(query: {
page: $page
perPage: $perPage
filter: {
videogameIds: [1386]
upcoming: false
hasOnlineEvents: false
afterDate: $afterDate
beforeDate: $beforeDate
},
sort: startAt
}) {
nodes {
id
name
countryCode
endAt
events {
id
name
isOnline
numEntrants
state
type
videogame {
id
}
}
}
}
}
""".strip()
EVENT_PHASES = """
query EventPhases($eventId: ID!) {
event(id: $eventId) {
phases {
id
name
numSeeds
bracketType
}
}
}
""".strip()
PHASE_SETS_PAGING = """
query PhaseSetsPaging($phaseId: ID!, $perPage: Int!) {
phase(id: $phaseId) {
id
name
sets(
perPage: $perPage
sortType: CALL_ORDER
) {
pageInfo {
totalPages
}
}
}
}
""".strip()
PHASE_SETS = """
query PhaseSets($phaseId: ID!, $page: Int!, $perPage: Int!) {
phase(id: $phaseId) {
sets(
page: $page
perPage: $perPage
sortType: CALL_ORDER
) {
nodes {
id
slots {
standing {
placement
stats {
score {
value
}
}
}
entrant {
participants {
gamerTag
user {
id
location {
country
}
}
verified
}
}
}
}
}
}
}
""".strip()
# TODO: Implement get player details query
| [
37811,
198,
37065,
9711,
20743,
287,
4731,
1296,
973,
416,
18214,
11190,
3351,
38545,
13,
198,
37811,
198,
198,
2,
16926,
46,
25,
770,
318,
13663,
422,
262,
1468,
1628,
13,
7326,
2533,
777,
991,
670,
357,
10594,
761,
617,
302,
1818,
... | 1.977452 | 1,774 |
#!/usr/bin/python
import sys
import getopt
import os
if __name__ == "__main__":
if len(sys.argv) < 2:
usage()
sys.exit()
main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
651,
8738,
198,
11748,
28686,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
361,
18896,
7,
17597,
13,
853,
85,
8,
1279,
36... | 2.188406 | 69 |
import datetime
import pytest
from copy import deepcopy
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.gis.geos import Point
from django.utils import timezone
from freezegun import freeze_time
from guardian.shortcuts import assign_perm, remove_perm
from resources.models.resource import Resource
from ..enums import UnitAuthorizationLevel, UnitGroupAuthorizationLevel
from resources.models import (Day, Equipment, Period, Reservation, ReservationMetadataSet, ResourceEquipment,
ResourceType, Unit, UnitAuthorization, UnitGroup)
from .utils import assert_response_objects, check_only_safe_methods_allowed, is_partial_dict_in_list, MAX_QUERIES
@pytest.fixture
@pytest.mark.django_db
@pytest.fixture
@pytest.mark.django_db
@pytest.fixture
def _check_permissions_dict(api_client, resource, is_admin, is_manager, is_viewer, can_make_reservations,
                            can_ignore_opening_hours, can_bypass_payment):
    """
    Check that the user permissions returned from the resource endpoint
    contain the expected values for the given user and resource.

    api_client must already have the user authenticated; each boolean
    argument is the expected value of the matching key in the response's
    ``user_permissions`` dict.
    """
    url = reverse('resource-detail', kwargs={'pk': resource.pk})
    response = api_client.get(url)
    # (removed stray debug print(response.data) that spammed pytest output)
    assert response.status_code == 200
    permissions = response.data['user_permissions']
    # The endpoint must return exactly these six permission flags, no more.
    assert len(permissions) == 6
    assert permissions['is_admin'] == is_admin
    assert permissions['is_manager'] == is_manager
    assert permissions['is_viewer'] == is_viewer
    assert permissions['can_make_reservations'] == can_make_reservations
    assert permissions['can_ignore_opening_hours'] == can_ignore_opening_hours
    assert permissions['can_bypass_payment'] == can_bypass_payment
@pytest.mark.django_db
def test_disallowed_methods(all_user_types_api_client, list_url, detail_url):
"""
Tests that only safe methods are allowed to unit list and detail endpoints.
"""
check_only_safe_methods_allowed(all_user_types_api_client, (list_url, detail_url))
@pytest.mark.django_db
def test_user_permissions_in_resource_endpoint(api_client, resource_in_unit, user, group):
"""
Tests that resource endpoint returns a permissions dict with correct values.
"""
api_client.force_authenticate(user=user)
# normal user, reservable = True
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=False,
can_bypass_payment=False)
# normal user, reservable = False
resource_in_unit.reservable = False
resource_in_unit.save()
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=False, can_ignore_opening_hours=False,
can_bypass_payment=False)
# admin, reservable = False
user.is_general_admin = True
user.save()
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=True, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=True,
can_bypass_payment=True)
user.is_general_admin = False
user.save()
# user has explicit permission to make reservation
user.groups.add(group)
assign_perm('unit:can_make_reservations', group, resource_in_unit.unit)
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=False,
can_bypass_payment=False)
remove_perm('unit:can_make_reservations', group, resource_in_unit.unit)
resource_group = resource_in_unit.groups.create(name='rg1')
assign_perm('group:can_make_reservations', group, resource_group)
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=False,
can_bypass_payment=False)
assign_perm('unit:can_ignore_opening_hours', group, resource_in_unit.unit)
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=True,
can_bypass_payment=False)
remove_perm('unit:can_ignore_opening_hours', group, resource_in_unit.unit)
# user has explicit permission to bypass payment
assign_perm('unit:can_bypass_payment', group, resource_in_unit.unit)
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=False,
can_bypass_payment=True)
remove_perm('unit:can_bypass_payment', group, resource_in_unit.unit)
# unit admins can ignore opening hours
user.is_general_admin = False
user.save()
user.unit_authorizations.create(
authorized=user,
level=UnitAuthorizationLevel.admin,
subject=resource_in_unit.unit
)
user.save()
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=True, is_manager=False,
is_viewer=False, can_make_reservations=True,can_ignore_opening_hours=True,
can_bypass_payment=True)
user.unit_authorizations.all().delete()
# unit managers can ignore opening hours
user.unit_authorizations.create(
authorized=user,
level=UnitAuthorizationLevel.manager,
subject=resource_in_unit.unit
)
user.save()
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=True,
is_viewer=False, can_make_reservations=True, can_ignore_opening_hours=True,
can_bypass_payment=True)
user.unit_authorizations.all().delete()
# unit viewer
user.unit_authorizations.create(
authorized=user,
level=UnitAuthorizationLevel.viewer,
subject=resource_in_unit.unit
)
user.save()
api_client.force_authenticate(user=user)
_check_permissions_dict(api_client, resource_in_unit, is_admin=False, is_manager=False,
is_viewer=True, can_make_reservations=True, can_ignore_opening_hours=False,
can_bypass_payment=False)
@pytest.mark.django_db
def test_non_public_resource_visibility(api_client, resource_in_unit, user, staff_user):
"""
Tests that non-public resources are not returned for non-staff.
"""
resource_in_unit.public = False
resource_in_unit.save()
url = reverse('resource-detail', kwargs={'pk': resource_in_unit.pk})
response = api_client.get(url)
assert response.status_code == 404
# Unauthenticated
url = reverse('resource-list')
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 0
# Authenticated as non-staff
api_client.force_authenticate(user=user)
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 0
# Authenticated as non-admin staff
user.is_staff = True
user.save()
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 0
# Authenticated as admin
user.is_general_admin = True
user.save()
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 1
url = reverse('resource-detail', kwargs={'pk': resource_in_unit.pk})
response = api_client.get(url)
assert response.status_code == 200
# Authenticated as unit manager
user.is_general_admin = False
user.save()
user.unit_authorizations.create(
authorized=staff_user,
level=UnitAuthorizationLevel.manager,
subject=resource_in_unit.unit
)
user.save()
url = reverse('resource-list')
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 1
assert Unit.objects.managed_by(user).values_list('id', flat=True)[0] == response.data['results'][0]['unit']
# Authenticated as unit admin
user.unit_authorizations.create(
authorized=staff_user,
level=UnitAuthorizationLevel.admin,
subject=resource_in_unit.unit
)
user.save()
url = reverse('resource-list')
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 1
assert Unit.objects.managed_by(user).values_list('id', flat=True)[0] == response.data['results'][0]['unit']
# Authenticated as unit group admin
user.unit_authorizations.all().delete()
unit_group = UnitGroup.objects.create(name='foo')
unit_group.members.add(resource_in_unit.unit)
user.unit_group_authorizations.create(
authorized=staff_user,
level=UnitGroupAuthorizationLevel.admin,
subject=unit_group
)
user.save()
url = reverse('resource-list')
response = api_client.get(url)
assert response.status_code == 200
assert response.data['count'] == 1
assert Unit.objects.managed_by(user).values_list('id', flat=True)[0] == response.data['results'][0]['unit']
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
def test_api_resource_reservation_feedback_url_get(api_client, resource_in_unit, detail_url):
"""Tests that reservation feedback url is included in get response"""
test_feedback_url = 'https://test.fi'
resource_in_unit.reservation_feedback_url = test_feedback_url
resource_in_unit.save()
response = api_client.get(detail_url)
assert response.status_code == 200
reservation_feedback_url = response.data['reservation_feedback_url']
assert reservation_feedback_url == test_feedback_url
@pytest.mark.django_db
def test_api_resource_reservation_feedback_url_update(api_client, resource_in_unit,
user_with_permissions):
"""Tests that reservation feedback url can be updated via api by authorized user"""
test_feedback_url = 'https://test.fi'
resource_data = {'reservation_feedback_url': test_feedback_url}
api_client.force_authenticate(user=user_with_permissions)
response = api_client.patch(get_update_url(resource_in_unit), data=resource_data)
assert response.status_code == 200
reservation_feedback_url = response.data['reservation_feedback_url']
assert reservation_feedback_url == test_feedback_url
@pytest.mark.django_db
def test_api_resource_reservation_feedback_url_update_invalid(api_client, resource_in_unit,
user_with_permissions):
"""Tests that reservation feedback url cannot be updated with invalid url"""
test_feedback_url = 'not-url'
resource_data = {'reservation_feedback_url': test_feedback_url}
api_client.force_authenticate(user=user_with_permissions)
response = api_client.patch(get_update_url(resource_in_unit), data=resource_data)
assert response.status_code == 400
@pytest.mark.django_db
@pytest.mark.django_db
@freeze_time('2016-10-25')
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.parametrize('filtering, expected_resource_indexes', (
({}, [0, 1]),
({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T10:00:00+02:00'}, [0, 1]),
({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T10:00:01+02:00'}, [1]),
({'available_between': '2115-04-08T10:59:59+02:00,2115-04-08T12:00:00+02:00'}, [1]),
({'available_between': '2115-04-08T10:59:59+02:00,2115-04-08T12:00:01+02:00'}, []),
({'available_between': '2115-04-08T13:00:00+02:00,2115-04-08T18:00:00+02:00'}, [0, 1]),
))
@pytest.mark.django_db
@pytest.mark.parametrize('filtering, expected_resource_indexes', (
({}, [0, 1]),
({'available_between': '2115-04-08T06:00:00+02:00,2115-04-08T07:00:00+02:00'}, []),
({'available_between': '2115-04-08T07:59:59+02:00,2115-04-08T16:00:00+02:00'}, []),
({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:00+02:00'}, [0]),
({'available_between': '2115-04-08T08:00:00+02:00,2115-04-08T16:00:01+02:00'}, []),
({'available_between': '2115-04-08T12:00:00+02:00,2115-04-08T14:00:00+02:00'}, [0, 1]),
({'available_between': '2115-04-14T12:00:00+02:00,2115-04-14T14:00:00+02:00'}, [0]),
))
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.parametrize('start, end, period, expected', (
('00:00', '00:30', 60, []),
('06:00', '06:30', 60, []),
('06:00', '06:30', 30, [1]),
('06:00', '08:30', 60, [1]),
('06:00', '08:30', 30, [0, 1]),
('09:00', '11:00', 60, [0, 1]),
('09:00', '11:00', 120, [1]),
('10:00', '12:00', 60, [0, 1]),
('10:00', '12:00', 120, [1]),
('10:00', '12:00', 180, []),
('10:00', '14:00', 120, [1]),
('10:00', '15:00', 120, [0, 1]),
('12:00', '17:00', 120, [0, 1]),
('12:00', '17:00', 180, [0, 1]),
('15:00', '17:00', 60, [0, 1]),
('15:00', '17:00', 120, [1]),
('17:00', '18:00', 60, [1]),
('17:00', '18:00', 120, []),
('00:00', '23:00', 180, [0, 1]),
('00:00', '23:00', 240, [1]),
))
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
@pytest.mark.django_db
def test_order_by_accessibility_no_viewpoint(list_url, api_client, resource_with_accessibility_data,
resource_with_accessibility_data2):
""" Ordering by accessibility requires that the desired accessibility viewpoint for ordering
is provided
"""
response = api_client.get('%s?order_by=accessibility' % list_url)
assert response.status_code == 400
@pytest.mark.django_db
def test_order_by_accessibility(list_url, api_client, resource_with_accessibility_data,
resource_with_accessibility_data2, accessibility_viewpoint_wheelchair,
accessibility_viewpoint_hearing):
""" resource_with_accessibility_data should rank higher in wheelchair accessibility
resource_with_accessibility_data2 should rank higher in hearing accessibility
"""
response = api_client.get('{}?order_by=-accessibility&accessibility_viewpoint={}'.format(
list_url, accessibility_viewpoint_wheelchair.id
))
assert response.status_code == 200
assert_response_objects(response, [resource_with_accessibility_data, resource_with_accessibility_data2])
assert response.data['results'][0]['name']['fi'] == resource_with_accessibility_data.name_fi
assert response.data['results'][1]['name']['fi'] == resource_with_accessibility_data2.name_fi
response = api_client.get('{}?order_by=-accessibility&accessibility_viewpoint={}'.format(
list_url, accessibility_viewpoint_hearing.id
))
assert response.status_code == 200
assert_response_objects(response, [resource_with_accessibility_data, resource_with_accessibility_data2])
assert response.data['results'][0]['name']['fi'] == resource_with_accessibility_data2.name_fi
assert response.data['results'][1]['name']['fi'] == resource_with_accessibility_data.name_fi
@pytest.mark.django_db
def test_order_by_accessibility_inaccessible_unit(list_url, api_client, resource_with_accessibility_data,
resource_with_accessibility_data3,
accessibility_viewpoint_wheelchair):
""" resource_with_accessibility_data should rank higher in wheelchair accessibility.
resource_with_accessibility_data3 is wheelchair accessible, but
resource_with_accessibility_data3.unit is not wheelchair accessible
"""
response = api_client.get('{}?order_by=-accessibility&accessibility_viewpoint={}'.format(
list_url, accessibility_viewpoint_wheelchair.id
))
assert response.status_code == 200
assert_response_objects(response, [resource_with_accessibility_data, resource_with_accessibility_data3])
assert response.data['results'][0]['name']['fi'] == resource_with_accessibility_data.name_fi
assert response.data['results'][1]['name']['fi'] == resource_with_accessibility_data3.name_fi
@pytest.mark.django_db
def test_resource_with_accessibility_data_no_include(api_client, resource_with_accessibility_data, detail_url):
""" Resource endpoint should not include accessibility data when not explicitly included """
response = api_client.get(detail_url)
assert response.status_code == 200
assert 'accessibility_summaries' not in response.data
@pytest.mark.django_db
def test_resource_with_accessibility_data(api_client, resource_with_accessibility_data, detail_url):
""" Resource endpoint should include accessibility data when include-parameter is used """
url = "{}?include=accessibility_summaries".format(detail_url)
response = api_client.get(url)
assert response.status_code == 200
assert 'accessibility_summaries' in response.data
for acc in resource_with_accessibility_data.accessibility_summaries.all():
assert is_partial_dict_in_list(
{'value': acc.value.value, 'viewpoint_id': acc.viewpoint_id},
response.data['accessibility_summaries'])
@pytest.mark.django_db
def test_query_counts(user_api_client, staff_api_client, list_url, django_assert_max_num_queries):
"""
Test that DB query count is less than allowed
"""
with django_assert_max_num_queries(MAX_QUERIES):
user_api_client.get(list_url)
with django_assert_max_num_queries(MAX_QUERIES):
staff_api_client.get(list_url)
| [
11748,
4818,
8079,
198,
11748,
12972,
9288,
198,
6738,
4866,
1330,
2769,
30073,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
1420... | 2.509327 | 7,398 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
import json
import struct
import platform
import subprocess
from time import sleep
from copy import deepcopy
from functools import namedtuple
from datetime import datetime
from collections import OrderedDict
import netifaces
import numpy as np
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
# from scapy.all import srp, Ether, ARP, conf
# Capture packet manager
# from docutils.nodes import section
from src.capturePkt.roughPacket import RoughPacket
# Thread workers
from src.threads.queryThread import QueryThread
from src.threads.termsThread import TermsThread
from src.threads.scanThread import ScanThread
from src.threads.trafficThread import TrafficThread
from src.threads.poisonThread import PoisonThread
from src.threads.captureThread import CaptureThread
from src.threads.openThread import OpenThread
from src.threads.saveThread import SaveThread
from src.threads.searchThread import SearchThread
from src.threads.parseThread import ParseThread
# Menu open dialogs
from src.dialogs.shineDialog import ui_FilterDialog
from src.dialogs.shineDialog import Ui_NodeDialog
from src.dialogs.shineDialog import Ui_LoadDialog
from src.dialogs.shineDialog import Ui_StatisticDialog
from src.windows.shineMainWindow import ShineMainWindow
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
2878,
198,
11748,
3859,
198,
11748,
850,
... | 3.5175 | 400 |
# coding:utf-8
import time
import pytest
from lib.session import Session
from lib.errors import SessionExpiredError
pytest_plugins = ["errbot.backends.test"]
extra_plugin_dir = "."
def test_session():
    """
    Exercise the Session lifecycle: expiry check, seal/unseal, TTL get/set,
    secret matching, and the SessionExpiredError raised once the TTL passes.
    """
    user_id = "test_id"
    secret = "test_secret"
    session = Session(user_id, secret)
    # A freshly created session must not be expired.
    assert False is session.is_expired()
    # Sessions start sealed; unseal() clears the sealed state.
    assert True is session.is_sealed()
    session.unseal()
    assert False is session.is_sealed()
    # ttl() with no argument reads the time-to-live (default 3600 s);
    # ttl(n) sets it.
    assert 3600 == session.ttl()
    session.ttl(300)
    assert 300 == session.ttl()
    # The stored secret must match what the session was created with.
    assert True is session.match_secret(secret)
    # Shrink the TTL to 1 s and wait past it so the session expires.
    # NOTE(review): the real 2 s sleep slows the suite; consider faking time.
    session.ttl(1)
    time.sleep(2)
    with pytest.raises(SessionExpiredError):
        session.is_expired()
if __name__ == "__main__":
    # This module only contains pytest tests; refuse direct execution.
    print("Run with pytest")
    # `exit()` is injected by the `site` module and may be absent (e.g. under
    # python -S); raising SystemExit is the reliable way to set exit status 1.
    raise SystemExit(1)
| [
2,
19617,
25,
40477,
12,
23,
198,
11748,
640,
198,
11748,
12972,
9288,
198,
6738,
9195,
13,
29891,
1330,
23575,
198,
6738,
9195,
13,
48277,
1330,
23575,
3109,
6474,
12331,
198,
198,
9078,
9288,
62,
37390,
796,
14631,
8056,
13645,
13,
... | 2.595376 | 346 |
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pytest
import torch
import poptorch
import helpers
@pytest.mark.parametrize("bias", [True, False])
@pytest.mark.parametrize("batch_first", [True, False])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
12131,
29681,
7295,
12052,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
12972,
9288,
198,
11748,
28034,
198,
11748,
1461,
13165,
354,
198,
11748,
49385,
628,
1... | 2.939759 | 83 |
"""This module contains BIP39 helper functions"""
import hashlib
from ecc import PrivateKey
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def get_seed(mnemonic_bytes, passphrase=None):
    """Derive the BIP-39 binary seed from a UTF-8 encoded mnemonic.

    Per BIP-39 the salt is the literal string "mnemonic" with the optional
    passphrase appended, and the 64-byte seed is produced by 2048 rounds of
    PBKDF2-HMAC-SHA512 over the mnemonic bytes.
    """
    suffix = passphrase if passphrase is not None else ""
    salt = ("mnemonic" + suffix).encode("utf8")
    # 2048 iterations, as mandated by the BIP-39 specification.
    return hashlib.pbkdf2_hmac('sha512', mnemonic_bytes, salt, 2048)
def get_pubkey_sec(private_key_bytes):
    """Return the compressed SEC-encoded public key for a raw private key.

    *private_key_bytes* is the big-endian byte encoding of the secret scalar.
    """
    secret_scalar = int.from_bytes(private_key_bytes, "big")
    point = PrivateKey(secret_scalar).point
    # Compressed SEC format: 0x02/0x03 prefix byte plus the x coordinate.
    return point.sec(compressed=True)
def derivation_path_string(path, private=True):
    """Format a BIP-32 derivation *path* as a human-readable string.

    *path* is a sequence of ``(index, hardened)`` pairs; hardened indices
    get an apostrophe suffix. The root segment is ``m`` for private
    derivation and ``M`` for public derivation.
    """
    root = "m" if private else "M"
    segments = [
        "%d'" % index if hardened else str(index)
        for index, hardened in path
    ]
    return "/".join([root] + segments)
def decode_base58(base58_string):
    """Decode *base58_string* into its 82-byte big-endian representation.

    82 bytes is the width of a serialized BIP-32 extended key (78 payload
    bytes plus a 4-byte checksum), which is what this codec is used for.
    """
    value = 0
    for symbol in base58_string:
        # Horner's rule: shift the accumulator by one base-58 digit.
        value = value * 58 + BASE58_ALPHABET.index(symbol)
    return value.to_bytes(82, byteorder='big')
def encode_base58(data):
    """Encode *data* (bytes) as a Base58 string.

    Leading zero bytes are represented by leading ``'1'`` characters, as in
    Bitcoin's Base58 convention.
    """
    # Count leading NUL bytes; each one maps to a literal '1' in the output.
    zeros = len(data) - len(data.lstrip(b"\x00"))
    value = int.from_bytes(data, 'big')
    digits = []
    while value > 0:
        value, remainder = divmod(value, 58)
        digits.append(BASE58_ALPHABET[remainder])
    # Digits were produced least-significant first, so reverse them.
    return '1' * zeros + ''.join(reversed(digits))
def encode_base58_checksum(data):
    """Encode *data* in Base58Check form: payload plus 4-byte hash256 checksum."""
    checksum = hash256(data)[:4]
    return encode_base58(data + checksum)
def hash160(data):
    """Bitcoin HASH160: RIPEMD-160 of the SHA-256 of *data* (20 bytes)."""
    sha = hashlib.sha256(data).digest()
    return hashlib.new('ripemd160', sha).digest()
def hash256(data):
    """Bitcoin's double SHA-256: ``sha256(sha256(data))`` (32 bytes)."""
    first_pass = hashlib.sha256(data).digest()
    return hashlib.sha256(first_pass).digest()
def sha256(data):
    """Return the single-round SHA-256 digest of *data* as 32 raw bytes."""
    return hashlib.sha256(data).digest()
| [
37811,
1212,
8265,
4909,
347,
4061,
2670,
31904,
5499,
37811,
198,
198,
11748,
12234,
8019,
198,
6738,
21399,
1330,
15348,
9218,
198,
198,
33,
11159,
3365,
62,
1847,
11909,
6242,
2767,
796,
705,
10163,
2231,
3134,
4531,
24694,
32988,
1751... | 2.402062 | 1,067 |
# fish_base.common demo
# 2017.3.15 create by Leo
# 2018.2.11 edit by David Yi
from fishbase.fish_common import *
from fishbase.fish_file import get_abs_filename_with_sub_path
# 2018.2.12 common 中 config 文件处理相关,#11013
# 2018.5.10
# 2018.5.15
# 2018.5.19
# 2018.5.26
# 2018.5.30
if __name__ == '__main__':
    # Define the object to be serialized (class Foo is declared above)
    # Serialize the object
    result = serialize_instance(Foo)
    print(result)
    # Define the lists to be checked
    # A legitimate list (no empty/None elements)
    test_legitimate_list = ['Hello World', 1]
    # An illegal list (contains None)
    test_illegal_list = ['Hello World', None]
    # Check whether each list contains illegal (empty/None) elements
    result = if_any_elements_is_space(test_legitimate_list)
    print(result)
    result = if_any_elements_is_space(test_illegal_list)
    print(result)
    demo_common_config()
    demo_common_md5()
    demo_json_contain()
    dic1 = {'key1': 'value1', 'key2': 'value2'}
    print(splice_url_params(dic1))
    demo_singleton()
    demo_uuid()
    demo_dict()
| [
2,
5916,
62,
8692,
13,
11321,
13605,
198,
2,
2177,
13,
18,
13,
1314,
2251,
416,
19632,
198,
2,
2864,
13,
17,
13,
1157,
4370,
416,
3271,
26463,
198,
198,
6738,
5916,
8692,
13,
11084,
62,
11321,
1330,
1635,
198,
6738,
5916,
8692,
13... | 1.899177 | 486 |
from alnitak.tests import setup
from alnitak import prog
from alnitak import parser as Parser
from alnitak import exceptions as Except
from pathlib import Path
from subprocess import Popen, PIPE
# Build the shared test fixture: an initialized environment and a program
# state object derived from it (setup comes from alnitak.tests.setup).
s = setup.Init(keep=True)
prog = setup.create_state_obj(s)
# Flags taking no argument.
a_flag = Parser.Flag(Parser.FlagType.bare, '-a', '--aflag')
b_flag = Parser.Flag(Parser.FlagType.bare, '-b', '--bflag')
c_flag = Parser.Flag(Parser.FlagType.bare, '-c', '--cflag')
# Flags taking an optional argument.
x_flag = Parser.Flag(Parser.FlagType.option, '-x', '--xflag')
y_flag = Parser.Flag(Parser.FlagType.option, '-y', '--yflag')
z_flag = Parser.Flag(Parser.FlagType.option, '-z', '--zflag')
# Flags whose argument is mandatory.
m_flag = Parser.Flag(Parser.FlagType.mandatory, '-m', '--mflag')
n_flag = Parser.Flag(Parser.FlagType.mandatory, '-n', '--nflag')
o_flag = Parser.Flag(Parser.FlagType.mandatory, '-o', '--oflag')
| [
198,
6738,
435,
48825,
461,
13,
41989,
1330,
9058,
198,
6738,
435,
48825,
461,
1330,
1172,
198,
6738,
435,
48825,
461,
1330,
30751,
355,
23042,
263,
198,
6738,
435,
48825,
461,
1330,
13269,
355,
18181,
198,
198,
6738,
3108,
8019,
1330,
... | 2.562112 | 322 |
if __name__ == "__main__":
    # First line: number of test cases; then one integer per line.
    # Idiom fix: iterate a fixed number of times with range() instead of the
    # manual `while N > 0 ... N = N - 1` countdown (same behavior, clearer).
    for _ in range(int(input())):
        x = int(input())
        print(fib(x))
| [
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
399,
796,
493,
7,
15414,
28955,
198,
220,
220,
220,
981,
399,
1875,
657,
25,
198,
220,
220,
220,
220,
220,
220,
220,
2124,
796,
493,
7,
15414,
... | 1.783784 | 74 |
from distutils.core import setup
# Package metadata for dribbble_palettes.
PACKAGES = ['dribbble_palettes']
REQUIREMENTS = [
    "Pillow==2.7.0",
    "Scrapy==1.0.3",
]
CONSOLE_SCRIPTS = [
    'palette_from_color = dribbble_palettes.palette_from_color:cli',
]

setup(
    name='dribbble_palettes',
    packages=PACKAGES,
    install_requires=REQUIREMENTS,
    entry_points={'console_scripts': CONSOLE_SCRIPTS},
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
67,
822,
65,
903,
62,
18596,
23014,
3256,
198,
220,
220,
220,
10392,
28,
17816,
67,
822,
65,
903,
62,
18596,
23014,
6,
4357,
198,
220,
2... | 1.976048 | 167 |
# Tested on Arduino NanoRP2040 Connect
from machine import I2C, Pin
import time
import LCDDisplay10
# Create an I2C object out of our SDA and SCL pin objects
sda_pin = Pin(12) # GPIO12, A4, D18, STEMMA QT - blue wire (for Arduino NanoRP2040 Connect)
scl_pin = Pin(13) # GPIO13, A5, D19, STEMMA QT - yellow wire (for Arduino NanoRP2040 Connect)

# BUG FIX: the original called machine.I2C(...), but only `from machine import
# I2C, Pin` is in scope, so the name `machine` is undefined and the script
# raised NameError. Use the imported I2C class directly.
i2c = I2C(id=0, scl=scl_pin, sda=sda_pin, freq=10000)
# i2c = machine.SoftI2C(scl=scl_pin, sda=sda_pin, freq=400_000)

display = LCDDisplay10.LCDDisplay10(i2c)

# setup()/loop() are expected to be defined elsewhere in this sketch.
setup()
while True:
    loop()
    time.sleep_ms(1000)
| [
2,
6208,
276,
319,
27634,
33504,
20031,
1238,
1821,
8113,
201,
198,
201,
198,
6738,
4572,
1330,
314,
17,
34,
11,
13727,
201,
198,
11748,
640,
201,
198,
201,
198,
11748,
23598,
23114,
940,
201,
198,
201,
198,
2,
13610,
281,
314,
17,
... | 2.334646 | 254 |
#!/usr/bin/env python
# -*- coding=utf-8 -*-
########################################################################
# File Name: prototools.py
#
# Author: aceway
# Mail: aceway@qq.com
# Created Time: 2014年08月25日 星期一 21时55分33秒
# Description: ...
#
########################################################################
import json
try:
from google.protobuf.internal.containers import RepeatedCompositeFieldContainer, RepeatedScalarFieldContainer
except:
print "请为python安装google的protobuf模块"
def trans_data_from_proto_to_json(protoObj, attrs, jsonObj):
    u'''
    *Extract data from a protobuf object and convert it into JSON-style data.*
    - Args:
        - protoObj: the source protobuf object;
        - attrs: a tuple listing attribute names of protoObj. A one-element
          list marks a repeated field; a one-entry dict {k: v} marks a nested
          message whose sub-fields are described by v.
        - jsonObj: the dict the converted result is stored into (mutated in place)
    - Returns:
        - None
    '''
    if isinstance(attrs, tuple) and len(attrs) > 0 :
        for attr in attrs:
            if isinstance(attr, basestring):
                # plain scalar field, e.g. ('a', 'b', 'c')
                val = getattr(protoObj, attr)
                jsonObj[attr] = val
            elif isinstance(attr, dict) and len(attr.items()) == 1:
                # one nested message, e.g. ({'a': ('x', 'y', 'z')}, )
                k,v = attr.items()[0]
                if isinstance(v, tuple):
                    val = getattr(protoObj, k)
                    jsonObj[k] = {}
                    trans_data_from_proto_to_json(val, v, jsonObj[k])
            elif isinstance(attr, list) and len(attr) == 1:
                # repeated scalar/message, e.g. ([ ], )
                info = attr[0]
                if isinstance(info, basestring) and len(info) > 0:
                    # the repeated element is a plain scalar, e.g. (['a'], )
                    val = getattr(protoObj, info)
                    jsonObj[info] = [ v for v in val ]
                elif isinstance(info, dict) and len(info.items()) == 1:
                    # the repeated element is itself a protobuf message, e.g. ( [ { } ],)
                    k, v = info.items()[0]
                    val = getattr(protoObj, k)
                    jsonObj[k] = []
                    if isinstance(v, basestring): # e.g. ([{'a':"xxx"}],)
                        jsonObj[k] = []
                        for dt in val:
                            tmpJson = {}
                            trans_data_from_proto_to_json(dt, (v,), tmpJson )
                            jsonObj[k].append(tmpJson)
                    elif isinstance(v, tuple): # e.g. ([{'a':('x', 'y', 'z') }],)
                        for dt in val:
                            tmpJson = {}
                            trans_data_from_proto_to_json(dt, v, tmpJson)
                            jsonObj[k].append(tmpJson)
def trans_data_from_json_to_proto(protoObj, attrsIn, jsonObj):
    u'''
    *Pack data into a protobuf object according to the given field spec ---
    mainly for data that is already plain JSON.*
    - Args:
        - protoObj: the protobuf object; data parsed out of jsonObj is packed into it;
        - attrsIn: must be a tuple listing attribute names of protoObj. A
          one-element list marks a repeated field; a one-entry dict {k:v} marks
          a nested message.
        - Example:
            - ( 'msg_id', 'name')
            - ( {'roles':('uid', 'reg_time', 'channel_id', 'server_id', 'name', 'role_type', 'lv', 'last_login_tm') }, )
            - ( ['task_ids'], )
            - ( [{'roles':('uid', 'reg_time', 'channel_id', 'server_id', 'name', 'role_type', 'lv', 'last_login_tm') }], )
            - ( [{ 'prises':( 'prise_name', [{'data': ('time', 'count') }] ) }], )
    - Returns:
        - the populated proto object
    '''
    # NOTE(review): nested-message branches delegate to
    # trans_data_from_dict_to_proto rather than recursing into this function;
    # presumably intentional since sub-objects are plain dicts — confirm.
    if isinstance(attrsIn, tuple):
        for attr in attrsIn:
            if isinstance(attr, basestring) and len(attr)>0 and hasattr(protoObj, attr) and attr in jsonObj:
                # plain scalar field (char/int/long/...)
                v = jsonObj.get(attr, None)
                if v is not None:
                    # coerce to the type of the field's current value
                    attr_type = type( getattr(protoObj, attr) )
                    setattr( protoObj, attr, attr_type(v) )
            elif isinstance(attr, dict) and len(attr.items()) == 1:
                # one nested (non-repeated) protobuf message
                pattr, subattr = attr.items()[0]
                if isinstance(pattr, basestring) and len(pattr)>0 and hasattr(protoObj, pattr) and pattr in jsonObj:
                    if isinstance(subattr, tuple):
                        # nested message, not repeated
                        trans_data_from_dict_to_proto(getattr(protoObj, pattr), subattr, jsonObj.get(pattr, None))
                    else:
                        pass# malformed attribute spec
                else:
                    pass # malformed attribute spec
            elif isinstance(attr, list) and len(attr) == 1: #and hasattr(protoObj, attr[0]):
                # repeated field
                if isinstance(attr[0], basestring) and len(attr[0]) > 1 and hasattr(protoObj, attr[0][1:]):
                    # repeated scalar field (char/int/long/...).
                    # RepeatedScalarFieldContainer gives no way to query its element
                    # type, so as a workaround the first character of the field name
                    # encodes it: s,i,l,f,b -> string,int,long,float,bool
                    values = jsonObj.get(attr[0], None)
                    type_dict = {'b':bool, 's':str, 'i':int, 'l':long, 'f':float, }
                    type_char = attr[0][0]
                    attr_type = type_dict.get(type_char, str)
                    attr = attr[0][1:]
                    att = getattr(protoObj, attr)
                    if att is not None and type(att) == RepeatedScalarFieldContainer and isinstance(values, list):
                        for v in values:
                            att.append( attr_type(v) )
                    else:
                        pass # malformed data
                elif isinstance(attr[0], dict) and len(attr[0].items()) == 1:
                    # repeated message field
                    pattr, subattr = attr[0].items()[0]
                    if isinstance(pattr, basestring) and len(pattr)>0 and hasattr(protoObj, pattr) and pattr in jsonObj:
                        if isinstance(subattr, tuple):
                            # each repeated member is a protobuf message
                            values = jsonObj.get(pattr, None)
                            if len(values) > 0 and type(getattr(protoObj, pattr)) == RepeatedCompositeFieldContainer :
                                for value in values:
                                    adder = getattr(protoObj, pattr).add()
                                    trans_data_from_dict_to_proto(adder, subattr, value)
                            else:
                                pass # malformed data
                        else:
                            pass # malformed data
                    else:
                        pass # malformed data
    return protoObj
def trans_data_from_dict_to_proto(protoObj, attrsIn, dictObj):
    u'''
    *Pack data into a protobuf object according to the given field spec ---
    mainly for pulling values straight out of an HTTP GET/POST mapping.*
    - Args:
        - protoObj: the protobuf object; data parsed out of dictObj is packed into it;
        - attrsIn: must be a tuple listing attribute names of protoObj. A
          one-element list marks a repeated field; a one-entry dict {k:v} marks
          a nested message.
        - For a repeated scalar of string/int/long/float/bool, the attr name is
          prefixed with s/i/l/f/b respectively, and the value is a
          comma-separated string.
        - Example:
            - ('name', )
            - ('msg_id', 'errcode')
            - (['stask_ids'], )   # the leading s on task_ids means string
            - ( [{'suser_ids'}], )
            - ([{ 'prises':( 'prise_name', [{'data': ('time', 'count') }] ) }], )
            - ( [{'roles':('uid', 'reg_time', 'channel_id', 'server_id', 'name', 'role_type', 'lv', 'last_login_tm') }], )
        - Note: RepeatedScalarFieldContainer offers no way to discover the
          element type dynamically, so the type letter is carried in the first
          character of the configured field name — mind this when configuring.
        - dictObj: a mapping holding the request data to be packed into
          protoObj; request.GET / request.POST can be passed directly.
    - Returns:
        - the populated proto object
    '''
    if isinstance(attrsIn, tuple):
        for attr in attrsIn:
            if isinstance(attr, basestring) and len(attr)>0 and hasattr(protoObj, attr):
                # plain scalar field (char/int/long/...)
                v = dictObj.get(attr, None)
                if v is not None:
                    # coerce to the type of the field's current value
                    attr_type = type( getattr(protoObj, attr) )
                    setattr( protoObj, attr, attr_type(v) )
            elif isinstance(attr, dict) and len(attr.items()) == 1:
                # one nested protobuf message
                pattr, subattr = attr.items()[0]
                if isinstance(pattr, basestring) and len(pattr)>0 and hasattr(protoObj, pattr) and pattr in dictObj:
                    if isinstance(subattr, tuple):
                        # nested message, not repeated
                        trans_data_from_dict_to_proto(getattr(protoObj, pattr), subattr, dictObj.get(pattr, None))
                    elif isinstance(subattr, list) and len(subattr)==1 and isinstance(subattr[0], tuple):
                        # nested message, repeated; value arrives as a JSON string
                        values = json.loads( dictObj.get(pattr) )
                        if len(values) > 0 and type(getattr(protoObj, pattr)) == RepeatedCompositeFieldContainer :
                            for value in values:
                                adder = getattr(protoObj, pattr).add()
                                trans_data_from_dict_to_proto(adder, subattr, value)
                    else:
                        pass# malformed attribute spec
            elif isinstance(attr, list) and len(attr) == 1: #and hasattr(protoObj, attr[0]):
                # repeated field
                if isinstance(attr[0], basestring) and len(attr[0]) > 1 and hasattr(protoObj, attr[0][1:]):
                    # repeated scalar field (char/int/long/...); the first character
                    # of the configured name encodes the element type (see docstring)
                    values = dictObj.get(attr[0], None)
                    type_dict = {'b':bool, 's':str, 'i':int, 'l':long, 'f':float}
                    type_char = attr[0][0]
                    attr_type = type_dict[type_char]
                    attr = attr[0][1:]
                    att = getattr(protoObj, attr)
                    if att is not None and type(att) == RepeatedScalarFieldContainer and isinstance(values, basestring):
                        # comma-separated string -> typed elements
                        vlist = values.strip(', ').split(',')
                        for v in vlist:
                            att.append( attr_type(v) )
                    else:
                        pass # malformed data
                elif isinstance(attr[0], dict) and len(attr[0].items()) == 1:
                    # repeated message field
                    pattr, subattr = attr[0].items()[0]
                    if isinstance(pattr, basestring) and len(pattr)>0 and hasattr(protoObj, pattr) and pattr in dictObj:
                        if isinstance(subattr, tuple):
                            # each repeated member is a protobuf message; the value may
                            # arrive either as a JSON string or already decoded
                            dt = dictObj.get(pattr)
                            if isinstance(dt ,basestring):
                                values = json.loads( dt )
                            else:
                                values = dt
                            if len(values) > 0 and type(getattr(protoObj, pattr)) == RepeatedCompositeFieldContainer :
                                for value in values:
                                    adder = getattr(protoObj, pattr).add()
                                    trans_data_from_dict_to_proto(adder, subattr, value)
                        else:
                            pass # malformed data
                    else:
                        pass # malformed data
                else:
                    pass # malformed data
    return protoObj
def get_proto_file_lines(proto_file, proto_lines_data=None, verbose=False):
    u"""
    Flatten a .proto file into a single line with comments stripped, so a
    particular proto message definition can be searched/matched easily.
    If proto_lines_data is already given, it is returned unchanged (cache).
    """
    if proto_lines_data is None:
        import re
        # collapse runs of whitespace into a single space at the end
        re_rpl = re.compile(r"\s+")
        if os.path.isfile( proto_file ):
            with open(proto_file) as pf:
                bContinue = False  # True while inside a /* ... */ block comment
                lines = []
                for line in pf:
                    line = line.strip(' \t\r\n')
                    if len(line) == 0: continue
                    # one-line /* ... */ comment
                    if line.startswith('/*') and line.endswith('*/'): continue
                    if line.startswith('/*'):
                        bContinue = True
                        continue
                    if line.endswith('*/'):
                        bContinue = False
                        continue
                    if bContinue: continue
                    if line.startswith('//'): continue
                    # drop trailing // comment
                    line = line.split("//")[0]
                    if len( line.strip() ) > 0:
                        lines.append(line.strip(' \t\r\n').replace("\t", " "))
                lines_data = "".join(lines)
                proto_lines_data, number = re_rpl.subn(" ", lines_data)
                if verbose: print "re match times: ", number
                return proto_lines_data
        else:
            return None
    else:
        return proto_lines_data
def extrace_proto_define_from_line(line_proto_data, verbose=False):
    u"""
    Extract every ``message xxxx_in`` definition from the flattened proto
    text produced by get_proto_file_lines. Returns: [ (name, body), ... ]
    """
    proto_list = []
    max_pos = len(line_proto_data)
    # a request message opens as "..._in{"; the previous message closes as "}message"
    end_tag ="_in{"
    begin_tag ="}message"
    end_pos = line_proto_data.find(end_tag)
    begin_pos = line_proto_data.rfind(begin_tag, 0, end_pos)
    while begin_pos >= 0 and end_pos >= 0:
        # the message name sits between "}message" and "_in{"
        msg_name = line_proto_data[ begin_pos+len(begin_tag) : end_pos ]
        if verbose: print msg_name
        idx = end_pos
        flags = 0  # brace-nesting depth; scan until the opening "{" is balanced
        while idx < max_pos:
            if line_proto_data[idx] == "{":
                flags += 1
            elif line_proto_data[idx] == "}":
                flags -= 1
                if flags == 0: break;
            idx += 1
        msg_define = line_proto_data[ end_pos + len(end_tag) -1 : idx +1 ]
        proto_list.append( ( msg_name.strip(), msg_define.strip() ) )
        # continue searching in the remainder of the text
        line_proto_data = line_proto_data[ end_pos + len(end_tag) : ]
        end_pos = line_proto_data.find(end_tag)
        begin_pos = line_proto_data.rfind(begin_tag, 0, end_pos)
    return proto_list
def get_message_define_info(line_proto_data, msg_type, verbose=False):
    u"""
    Extract the definition body of the named message from the flattened proto
    text --- used when one message embeds/references another message type.
    Returns the "{...}" body string, or None if the message is not found.
    """
    max_pos = len(line_proto_data)
    find_str = "message " + msg_type + "{"
    start_pos = line_proto_data.find( find_str )
    if start_pos >= 0:
        idx = start_pos + 1
        flags = 0  # brace-nesting depth; scan until braces balance
        while idx < max_pos:
            if line_proto_data[idx] == "{":
                flags += 1
            elif line_proto_data[idx] == "}":
                flags -= 1
                if flags == 0:break;
            idx += 1
        msg_define = line_proto_data[ start_pos + len(find_str) -1 : idx + 1 ]
        if verbose: print "enbedded message define: ", msg_define
        return msg_define
    else:
        return None
def gen_test_data_with_proto_file(proto_file, out_file, verbose=False):
    u"""
    Iterate over the ``_in`` messages of a proto file, look up each message's
    field definitions, synthesize values for them, and write the resulting
    test-data list to out_file. Returns True on success, False otherwise.

    NOTE(review): relies on module-level names not visible in this chunk
    (datetime, out_file_header, pack_attr_value_with_proto_define,
    has_out_in_proto_file) — presumably defined/imported elsewhere in the file.
    """
    if verbose: print "Generation info:\n\tproto file:{p}\n\tout file:{o}".format(p=proto_file,o=out_file)
    if os.path.isfile(proto_file):
        # NOTE(review): pfile is opened but never read; proto data is re-read
        # by get_proto_file_lines below.
        with open(proto_file, 'r') as pfile:
            with open(out_file, 'w') as ofile:
                proto_lines = get_proto_file_lines(proto_file, verbose=verbose)
                proto_list = extrace_proto_define_from_line(proto_lines, verbose=verbose)
                file_name = out_file[ out_file.rfind('/') + 1: ]
                dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                ofile.write( out_file_header.format( fname = file_name, tm=dt ) )
                ofile.write( "test_data_list=[\n" )
                for (name, attr) in proto_list:
                    attr = attr.strip("{};")
                    av = pack_attr_value_with_proto_define(attr, proto_lines, verbose=verbose)
                    out = has_out_in_proto_file(proto_lines, name)
                    # one (message name, attr/value spec, has-out flag) tuple per line
                    av_line = "(%r, %r, %r)"%(name, av, out)
                    #print av_line
                    ofile.write( "\t" )
                    ofile.write( av_line )
                    #ofile.write( str((name, av)) )
                    ofile.write( ",\n\n" )
                ofile.write( "]" )
        return True
    else:
        print "It's not a file: ", proto_file
        return False
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
2,
532,
9,
12,
19617,
28,
40477,
12,
23,
532,
9,
12,
220... | 1.540265 | 10,493 |
# -*- coding: utf-8 -*-
"""
melenium.capabilities
~~~~~~~~~~~~~~~~~~~~~
Contains ChromeCapabilities.
"""
__all__ = ['ChromeCapabilities']
import base64
from .presets import PRESETS
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
17694,
47477,
13,
11128,
5738,
198,
27156,
8728,
93,
198,
198,
4264,
1299,
13282,
15610,
5738,
13,
198,
198,
37811,
198,
198,
834,
439,
834,
796,
37250,... | 4.689189 | 74 |
import sys
import traceback
from os.path import abspath, dirname, join, basename
from socket import error
from hashlib import md5
from datetime import datetime
from gevent.pywsgi import WSGIHandler, WSGIServer
from websocket.policyserver import FlashPolicyServer
from websocket import WebSocket
import gevent
assert gevent.version_info >= (0, 13, 2), 'Newer version of gevent is required to run websocket.server'
__all__ = ['WebsocketHandler', 'WebsocketServer']
| [
11748,
25064,
198,
11748,
12854,
1891,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
26672,
3672,
11,
4654,
11,
1615,
12453,
198,
6738,
17802,
1330,
4049,
198,
6738,
12234,
8019,
1330,
45243,
20,
198,
6738,
4818,
8079,
1330,
4818,
8... | 3.448529 | 136 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""ZServer FTP Channel for use the medusa's ftp server.
FTP Service for Zope.
This server allows FTP connections to Zope. In general FTP is used
to manage content. You can:
* Create and delete Folders, Documents, Files, and Images
* Edit the contents of Documents, Files, Images
In the future, FTP may be used to edit object properties.
FTP Protocol
The FTP protocol for Zope gives Zope objects a way to make themselves
available to FTP services. See the 'lib/python/OFS/FTPInterface.py' for
more details.
FTP Permissions
FTP access is controlled by one permission: 'FTP access' if bound to a
role, users of that role will be able to list directories, and cd to
them. Creating and deleting and changing objects are all governed by
existing Zope permissions.
Permissions are to a certain extent reflected in the permission bits
listed in FTP file listings.
FTP Authorization
Zope supports both normal and anonymous logins. It can be difficult
to authorize Zope users since they are defined in distributed user
databases. Normally, all logins will be accepted and then the user must
proceed to 'cd' to a directory in which they are authorized. In this
case for the purpose of FTP limits, the user is considered anonymous
until they cd to an authorized directory.
Optionally, users can login with a special username which indicates
where they are defined. Their login will then be authenticated in
the indicated directory, and they will not be considered anonymous.
The form of the name is '<username>@<path>' where path takes the form
'<folder id>[/<folder id>...]' For example: 'amos@Foo/Bar' This will
authenticate the user 'amos' in the directory '/Foo/Bar'. In addition
the user's FTP session will be rooted in the authenticated directory,
i.e. they will not be able to cd out of the directory.
The main reason to use the rooted FTP login, is to allow non-anonymous
logins. This may be handy, if for example, you disallow anonymous logins,
or if you set the limit for simultaneous anonymous logins very low.
"""
from PubCore import handle
from medusa.ftp_server import ftp_channel, ftp_server, recv_channel
import asyncore, asynchat
from medusa import filesys
from FTPResponse import make_response
from FTPRequest import FTPRequest
from ZServer import requestCloseOnExec
import os
import stat
import time
class zope_ftp_channel(ftp_channel):
    "Passes its commands to Zope, not a filesystem"

    # channel defaults: writable, and anonymous until a non-anonymous login
    read_only=0
    anonymous=1

    # Overriden async_chat methods

    # NOTE(review): `push` is not defined in this excerpt — presumably a
    # module-level helper elsewhere in the file; verify before refactoring.
    push_with_producer=push

    # Overriden ftp_channel methods

    def cmd_nlst (self, line):
        'give name list of files in directory'
        self.get_dir_list(line,0)

    def cmd_list (self, line):
        'give list files in a directory'
        # handles files as well as directories.
        # XXX also should maybe handle globbing, yuck.
        self.get_dir_list(line,1)

    def cmd_cwd (self, line):
        'change working directory'
        # dispatch a CWD request to Zope; cwd_completion finishes asynchronously
        response=make_response(self, self.cwd_completion,
                               self._join_paths(self.path,line[1]))
        request=FTPRequest(line[1],'CWD',self,response)
        handle(self.module,request,response)

    def cwd_completion(self,path,response):
        'cwd completion callback'
        status=response.getStatus()
        if status==200:
            listing=response._marshalledBody()
            # check to see if we are cding to a non-foldoid object
            if type(listing[0])==type(''):
                self.respond('550 No such directory.')
                return
            else:
                self.path=path or '/'
                self.respond('250 CWD command successful.')
                # now that we've sucussfully cd'd perhaps we are no
                # longer anonymous
                if self.anonymous and not self.userid=='anonymous':
                    self.anonymous=None
        elif status in (401, 403):
            self.respond('530 Unauthorized.')
        else:
            self.respond('550 No such directory.')

    def cmd_cdup (self, line):
        'change to parent of current working directory'
        self.cmd_cwd((None,'..'))

    def cmd_pwd (self, line):
        'print the current working directory'
        self.respond (
            '257 "%s" is the current directory.' % (
                self.path
                )
            )

    # XPWD is a synonym for PWD
    cmd_xpwd=cmd_pwd

    def cmd_mdtm(self, line):
        'show last modification time of file'
        if len (line) != 2:
            self.command.not_understood (' '.join(line))
            return
        # mdtm_completion callback is expected to be defined elsewhere in the class
        response=make_response(self, self.mdtm_completion)
        request=FTPRequest(line[1],'MDTM',self,response)
        handle(self.module,request,response)

    def cmd_size(self, line):
        'return size of file'
        if len (line) != 2:
            self.command.not_understood (' '.join(line))
            return
        response=make_response(self, self.size_completion)
        request=FTPRequest(line[1],'SIZE',self,response)
        handle(self.module,request,response)

    def cmd_stor (self, line, mode='wb'):
        'store a file'
        if len (line) < 2:
            self.command_not_understood (' '.join(line))
            return
        elif self.restart_position:
            self.respond ('553 restart on STOR not yet supported')
            return
        # XXX Check for possible problems first?
        # Right now we are limited in the errors we can issue, since
        # we agree to accept the file before checking authorization
        fd = ContentReceiver(self.stor_callback, line[1])
        self.respond (
            '150 Opening %s connection for %s' % (
                self.type_map[self.current_mode],
                line[1]
                )
            )
        self.make_recv_channel(fd)

    def stor_callback(self, path, data, size):
        'callback to do the STOR, after we have the input'
        response = make_response(self, self.stor_completion)
        request = FTPRequest(path, 'STOR', self, response,
                             stdin=data, size=size)
        handle(self.module, request, response)

    def cmd_rnfr (self, line):
        'rename from'
        if len (line) != 2:
            self.command_not_understood (' '.join(line))
        else:
            # remember the source; RNTO (elsewhere) completes the rename
            self.fromfile = line[1]
            pathf,idf=os.path.split(self.fromfile)
            response=make_response(self, self.rnfr_completion)
            request=FTPRequest(pathf,('RNFR',idf),self,response)
            handle(self.module,request,response)

    # XMKD/XRMD are synonyms for MKD/RMD
    # NOTE(review): cmd_mkd/cmd_rmd are not visible in this excerpt —
    # presumably defined in the elided portion of the class.
    cmd_xmkd=cmd_mkd
    cmd_xrmd=cmd_rmd

    def cmd_user(self, line):
        'specify user name'
        if len(line) > 1:
            self.userid = line[1]
            self.respond('331 Password required.')
        else:
            self.command_not_understood (' '.join (line))

    def cmd_pass(self, line):
        'specify password'
        if len(line) < 2:
            pw = ''
        else:
            pw = line[1]
        self.password=pw
        # 'user@folder/path' logins authenticate (and root the session) at that path
        i=self.userid.find('@')
        if i ==-1:
            # plain login: accepted immediately, counted against the anonymous limit
            if self.server.limiter.check_limit(self):
                self.respond ('230 Login successful.')
                self.authorized = 1
                self.anonymous = 1
                self.log_info ('Successful login.')
            else:
                self.respond('421 User limit reached. Closing connection.')
                self.close_when_done()
        else:
            path=self.userid[i+1:]
            self.userid=self.userid[:i]
            self.anonymous=None
            response=make_response(self, self.pass_completion,
                                   self._join_paths('/',path))
            request=FTPRequest(path,'PASS',self,response)
            handle(self.module,request,response)
# Override ftp server receive channel response mechanism
# XXX hack alert, this should probably be redone in a more OO way.
def handle_close (self):
    """response and closure of channel is delayed."""
    # update the per-server transfer counters before tearing down
    s = self.channel.server
    s.total_files_in.increment()
    s.total_bytes_in.increment(self.bytes_in.as_long())
    self.fd.close()
    self.readable=lambda :0 # don't call close again

# monkey-patch medusa's recv_channel with the delayed-close behavior above
recv_channel.handle_close=handle_close
# NOTE(review): the method bodies of this class are not visible in this
# excerpt; only the class statement and its docstring appear here.
class ContentReceiver:
    "Write-only file object used to receive data from FTP"
class FTPLimiter:
    """Rudimentary FTP limits. Helps prevent denial of service
    attacks. It works by limiting the number of simultaneous
    connections by userid. There are three limits, one for anonymous
    connections, and one for authenticated logins. The total number
    of simultaneous anonymous logins my be less than or equal to the
    anonymous limit. Each authenticated user can have up to the user
    limit number of simultaneous connections. The total limit is the
    maximum number of simultaneous connections of any sort. Do *not*
    set the total limit lower than or equal to the anonymous limit."""

    # NOTE(review): anon_limit/user_limit/total_limit are read below but their
    # initializer is not visible in this excerpt (FTPServer calls
    # FTPLimiter(10,1)) — presumably set in an elided __init__.

    def check_limit(self,channel):
        """Check to see if the user has exhausted their limit or not.
        Check for existing channels with the same userid and the same
        ftp server."""
        total=0
        class_total=0
        if channel.anonymous:
            # anonymous login: count all channels on this server, and
            # anonymous ones against the anonymous limit
            for existing_channel in asyncore.socket_map.values():
                if (hasattr(existing_channel,'server') and
                    existing_channel.server is channel.server):
                    total=total+1
                    if existing_channel.anonymous:
                        class_total=class_total+1
                        if class_total > self.anon_limit:
                            return None
        else:
            # authenticated login: count channels with the same userid
            # against the per-user limit
            for existing_channel in asyncore.socket_map.values():
                if (hasattr(existing_channel,'server') and
                    existing_channel.server is channel.server):
                    total=total+1
                    if channel.userid==existing_channel.userid:
                        class_total=class_total+1
                        if class_total > self.user_limit:
                            return None
        # returns 1 when allowed, None (falsy) when over the total limit
        if total <= self.total_limit:
            return 1
class FTPServer(ftp_server):
    """FTP server for Zope."""
    # channels created by this server talk to Zope, not the filesystem
    ftp_channel_class = zope_ftp_channel
    # shared limiter: 10 total connections, 1 anonymous
    limiter=FTPLimiter(10,1)
    shutup=0
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789,
11,
198,
2,
10628,
362,
13,
... | 2.488569 | 4,374 |
### script to generate ImageNet-100 dataset
### official split: https://github.com/HobbitLong/CMC/blob/master/imagenet100.txt
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--imagenet_path', type=str, required=True)
parser.add_argument('--imagenet100_path', type=str, required=True)
args = parser.parse_args()

source_folder = args.imagenet_path #'/path/to/imagenet/'
target_folder = args.imagenet100_path #'/path/to/imagenet-100/'

# the 100 WordNet synset ids of the official ImageNet-100 split
imagenet_100_list = ['n02869837', 'n01749939', 'n02488291', 'n02107142', 'n13037406',
                     'n02091831', 'n04517823', 'n04589890', 'n03062245', 'n01773797',
                     'n01735189', 'n07831146', 'n07753275', 'n03085013', 'n04485082',
                     'n02105505', 'n01983481', 'n02788148', 'n03530642', 'n04435653',
                     'n02086910', 'n02859443', 'n13040303', 'n03594734', 'n02085620',
                     'n02099849', 'n01558993', 'n04493381', 'n02109047', 'n04111531',
                     'n02877765', 'n04429376', 'n02009229', 'n01978455', 'n02106550',
                     'n01820546', 'n01692333', 'n07714571', 'n02974003', 'n02114855',
                     'n03785016', 'n03764736', 'n03775546', 'n02087046', 'n07836838',
                     'n04099969', 'n04592741', 'n03891251', 'n02701002', 'n03379051',
                     'n02259212', 'n07715103', 'n03947888', 'n04026417', 'n02326432',
                     'n03637318', 'n01980166', 'n02113799', 'n02086240', 'n03903868',
                     'n02483362', 'n04127249', 'n02089973', 'n03017168', 'n02093428',
                     'n02804414', 'n02396427', 'n04418357', 'n02172182', 'n01729322',
                     'n02113978', 'n03787032', 'n02089867', 'n02119022', 'n03777754',
                     'n04238763', 'n02231487', 'n03032252', 'n02138441', 'n02104029',
                     'n03837869', 'n03494278', 'n04136333', 'n03794056', 'n03492542',
                     'n02018207', 'n04067472', 'n03930630', 'n03584829', 'n02123045',
                     'n04229816', 'n02100583', 'n03642806', 'n04336792', 'n03259280',
                     'n02116738', 'n02108089', 'n03424325', 'n01855672', 'n02090622']

# set membership is O(1) per lookup vs O(n) for the list
imagenet_100_set = set(imagenet_100_list)

os.makedirs(target_folder, exist_ok=True)
for mode in ['train', 'val']:
    source_sub_folder = os.path.join(source_folder, mode)
    target_sub_folder = os.path.join(target_folder, mode)
    os.makedirs(target_sub_folder, exist_ok=True)
    for folder_name in os.listdir(source_sub_folder):
        if folder_name in imagenet_100_set:
            # BUG FIX: the original built the command as 'scp -r %s %s'(src, dst),
            # i.e. it *called* the string object (TypeError) instead of applying
            # the % formatting operator.
            script = 'scp -r %s %s' % (os.path.join(source_sub_folder, folder_name), target_sub_folder)
            os.system(script)
| [
21017,
4226,
284,
7716,
7412,
7934,
12,
3064,
27039,
198,
21017,
1743,
6626,
25,
3740,
1378,
12567,
13,
785,
14,
39,
672,
2545,
14617,
14,
34,
9655,
14,
2436,
672,
14,
9866,
14,
320,
11286,
316,
3064,
13,
14116,
198,
198,
11748,
286... | 1.85408 | 1,446 |
'''
Author: He,Yifan
Date: 2022-02-18 16:06:00
LastEditors: He,Yifan
LastEditTime: 2022-02-18 16:36:42
'''
from typing import Sequence, Union
from collections import deque
import numpy as np
from pgsyn.push.config import PushConfig
from pgsyn.push.type_library import PushTypeLibrary
from pgsyn.push.atoms import CodeBlock
from pgsyn.push.stack import PushStack
from pgsyn.utils import Token
class PushState(dict):
    """A collection of PushStacks used during push program execution."""

    # NOTE(review): __init__ (referenced by from_dict via cls(...)) is not
    # visible in this excerpt — presumably defined in the elided portion.
    __slots__ = ["stdout", "inputs", "untyped", "type_library", "push_config"]

    @classmethod
    def from_dict(cls, d, type_library: PushTypeLibrary, push_config: PushConfig):
        """Set the state to match the given dictionary.
        .. warning::
            This is written to be used in ``pyshgp`` tests, NOT as part of
            push program execution or evolution. There are no checks to confirm
            that the ``d`` can be converted to a valid Push state.
        Parameters
        ----------
        d : dict
            Dict that is converted into a Push state.
        type_library : PushTypeLibrary
            A Push type library.
        push_config : PushConfig
            The configuration of the current program being executed.
        """
        state = cls(type_library, push_config)
        inputs = []
        stdout = ""
        for k, v in d.items():
            if k == 'inputs':
                inputs = v
            elif k == 'stdout':
                stdout += v
            elif k == "untyped":
                for el in v:
                    state.untyped.append(el)
            else:
                # any other key names a typed stack; push its values in order
                for el in v:
                    state[k].push(el)
        state.load_inputs(inputs)
        state.stdout = stdout
        return state

    def load_code(self, program: CodeBlock):
        """Push the given CodeBlock to the execution stack."""
        self["exec"].push(program)

    def load_inputs(self, inputs):
        """Load a list of input values onto the PushState inputs.
        Parameters
        ----------
        inputs : list
            List of input values.
        """
        if not isinstance(inputs, (list, np.ndarray)):
            raise ValueError(
                "Push inputs must be a list, got {t}".format(t=type(inputs)))
        self.inputs = inputs

    def observe_stacks(self, types: Sequence[str]) -> list:
        """Return a list of output values based on the given types indicated.
        Items are take from the tops of each stack. If multiple occurrences of
        the same type are in ``output_types``, the returned values are taken
        from progressively deeper in that stack. Does not pop the values off
        the stacks.
        Parameters
        ----------
        types : list
            List of strings denoting the push types of the returned values.
        """
        values = []
        # counts tracks how many times each type has been requested so far,
        # so repeated requests read progressively deeper stack items
        counts = {}
        for typ in types:
            if typ == "stdout":
                values.append(self.stdout)
            else:
                ndx = counts.get(typ, 0)
                values.append(self[typ].nth(ndx))
                counts[typ] = ndx + 1
        return values

    def pop_from_stacks(self, types: Sequence[str]) -> Union[Sequence, Token]:
        """Pop the top items for each value_type. Return a vector of the values popped from each stack."""
        values = []
        for typ in types:
            val = self[typ].top()
            if val is Token.no_stack_item:
                # any empty stack aborts the whole pop; earlier pops have
                # already happened, caller is expected to revert
                return Token.revert
            else:
                values.append(val)
                self[typ].pop()
        return values

    def push_to_stacks(self, values: list, types: Sequence[str]):
        """Check that all values can be coerced into their expected PushType. Push them onto the correct stack."""
        for ndx in range(len(values)):
            val = values[ndx]
            typ = types[ndx]
            if typ == "stdout":
                self.stdout += str(val)
            elif typ == "untyped":
                self.untyped.append(val)
            else:
                self[typ].push(val)

    def size(self):
        """Return the size of the PushState."""
        return sum([len(s) for s in self.values()]) + len(self.inputs)

    def pretty_print(self):
        """Print the state of all stacks in the PushState."""
        for k, v in self.items():
            print(" ".join([k, ":", str(v)]))
        print("untyped : " + str(self.untyped))
        print("inputs : " + str(self.inputs))
        print("stdout : " + str(self.stdout))
| [
7061,
6,
198,
13838,
25,
679,
11,
56,
361,
272,
198,
10430,
25,
33160,
12,
2999,
12,
1507,
1467,
25,
3312,
25,
405,
198,
5956,
18378,
669,
25,
679,
11,
56,
361,
272,
198,
5956,
18378,
7575,
25,
33160,
12,
2999,
12,
1507,
1467,
2... | 2.235438 | 2,043 |
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
import torch
import transformers
from transformers import AutoModelForSequenceClassification, BertModel, RobertaModel, BertTokenizerFast, RobertaTokenizer
from transformers import PreTrainedModel, BertConfig, RobertaConfig
from transformers import Trainer, TrainingArguments
from transformers.data.data_collator import default_data_collator
from transformers.tokenization_utils_base import BatchEncoding
from transformers import EvalPrediction
from transformers import AutoModelForMaskedLM
from transformers import AdamW
from transformers import HfArgumentParser
from dataclasses import dataclass, field
from transformers.integrations import deepspeed_config, is_deepspeed_zero3_enabled
import deepspeed
from torch.nn import functional as F
import toolz
import time
from functools import partial
import traceback
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
import pandas as pd
import re
import gc
import os
import json
import pandas as pd
import numpy as np
import requests
from tqdm.auto import tqdm
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DD
# --- tokenizers and model paths ---------------------------------------------
seq_model_name = "Rostlab/prot_bert_bfd" # for fine-tuning
# this logic is necessary because online-downloading and caching doesn't seem to work
if os.path.exists('seq_tokenizer'):
    seq_tokenizer = BertTokenizerFast.from_pretrained('seq_tokenizer/', do_lower_case=False)
else:
    seq_tokenizer = BertTokenizerFast.from_pretrained(seq_model_name, do_lower_case=False)
    seq_tokenizer.save_pretrained('seq_tokenizer/')
# pretrained SMILES BERT model + tokenizer (site-specific GPFS paths)
model_directory = '/gpfs/alpine/world-shared/bip214/maskedevolution/models/bert_large_1B/model'
tokenizer_directory = '/gpfs/alpine/world-shared/bip214/maskedevolution/models/bert_large_1B/tokenizer'
tokenizer_config = json.load(open(tokenizer_directory+'/config.json','r'))
smiles_tokenizer = BertTokenizerFast.from_pretrained(tokenizer_directory, **tokenizer_config)
# clamp sequence lengths to the models' positional-embedding capacity
max_smiles_length = min(200,BertConfig.from_pretrained(model_directory).max_position_embeddings)
# Mpro has 306 residues
max_seq_length = min(4096,BertConfig.from_pretrained(seq_model_name).max_position_embeddings)
# use distributed data parallel on a node-local basis for inference
#os.environ['RANK'] = os.environ['OMPI_COMM_WORLD_LOCAL_RANK']
#os.environ['WORLD_SIZE'] = os.environ['OMPI_COMM_WORLD_LOCAL_SIZE']
#os.environ['LOCAL_RANK'] = os.environ['OMPI_COMM_WORLD_LOCAL_RANK']
#os.environ['MASTER_ADDR'] = '127.0.0.1'
#os.environ['MASTER_PORT'] = '29500'
# single-process torch.distributed environment; each MPI rank gets its own port
os.environ['RANK'] = '0'
os.environ['WORLD_SIZE'] = '1'
os.environ['LOCAL_RANK'] = '0'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(29500+int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']))
#torch.cuda.set_device(int(os.environ['LOCAL_RANK']))
@dataclass
#
# parser - used to handle deepspeed case as well
# NOTE(review): InferenceArguments is expected to be the @dataclass whose body
# is not visible in this fragment -- confirm it is defined in the full file.
parser = HfArgumentParser([TrainingArguments,InferenceArguments])
training_args, inference_args = parser.parse_args_into_dataclasses()
if __name__ == "__main__":
    # MPI master/worker pool: rank 0 schedules tasks, other ranks execute them.
    comm = MPI.COMM_WORLD
    with MPICommExecutor(comm, root=0) as executor:
        # executor is None on worker ranks; only the root submits work
        if executor is not None:
            import glob
            fns = glob.glob(inference_args.input_path)
            # skip inputs whose output file already exists (resume support)
            fns = [f for f in fns if not os.path.exists(inference_args.output_path+'/'+os.path.basename(f))]
            # NOTE(review): main() is not defined in this fragment -- confirm
            # it exists elsewhere in the full file.
            executor.map(main, fns)
| [
6738,
285,
14415,
19,
9078,
1330,
4904,
40,
198,
6738,
285,
14415,
19,
9078,
13,
69,
315,
942,
1330,
4904,
2149,
2002,
23002,
38409,
198,
198,
11748,
28034,
198,
198,
11748,
6121,
364,
198,
6738,
6121,
364,
1330,
11160,
17633,
1890,
4... | 2.830645 | 1,240 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019, doudoudzj
# All rights reserved.
#
# InPanel is distributed under the terms of the (new) BSD License.
# The full license can be found in 'LICENSE'.
"""Module for Cron Jobs Management."""
import os
import re
# path of the system-wide crontab file (lines carry an extra user column)
crontab = '/etc/crontab'
# directory holding the per-user crontab spool files
cronspool = '/var/spool/cron/'
# mapping of crontab environment keys to config-field names
cfg_map = {
    'SHELL': 'shell',
    'MAILTO': 'mailto',
    'HOME': 'home',
    'PATH': 'path'
}
def cron_list(level='normal', user=None):
    '''
    parser Cron config to python object (array)
    return a list of cron jobs

    level:
        'normal' - read the per-user spool file cronspool/<user>;
                   `user` is then mandatory.
        'system' - read the system-wide crontab, whose job lines carry an
                   extra user column; if `user` is given only that user's
                   entries are returned.
    Returns None when the spool file is missing (or `user` is absent for
    'normal'), otherwise a list of dicts with keys: id, minute, hour, day,
    month, weekday, command, user.
    '''
    if level == 'normal':
        if user is None:
            return None
        spool = os.path.join(cronspool, user)
    elif level == 'system':
        spool = crontab
    # NOTE(review): any other `level` value leaves `spool` unbound and the
    # os.path.exists() call below raises NameError (not caught by OSError).
    try:
        if not os.path.exists(spool):
            return None
    except OSError:
        return None
    crons = []
    with open(spool) as f:
        i = 0
        for line in f:
            line = line.strip()
            line_user = ''
            # job lines start with a digit, '*' or '-'; comments and
            # SHELL=/PATH=-style settings are skipped
            if re.findall("^\d|^\*|^\-", line):
                if level == 'normal':
                    text = re.split('\s+', line, 5)
                    command = text[5]
                    line_user = user
                elif level == 'system':
                    # this user's list
                    text = re.split('\s+', line, 6)
                    if user and user != text[5]:
                        continue
                    else:
                        line_user = text[5]
                    command = text[6]
                else:
                    continue
                # `id` is the 1-based position among matching job lines
                i += 1
                crons.append({
                    'id': i,
                    'minute': text[0],
                    'hour': text[1],
                    'day': text[2],
                    'month': text[3],
                    'weekday': text[4],
                    'command': command,
                    'user': line_user
                })
    return crons
def cron_add(user, minute, hour, day, month, weekday, command, level):
    '''add normal or system cron

    level 'system' appends to the system-wide crontab (which needs an
    explicit user column, so `user` is mandatory); any other level appends
    to the per-user spool file cronspool/<user> (defaulting to 'root').
    Returns True on success, False on bad input or on an I/O error.
    '''
    if level == 'system':
        # the system crontab format requires a user column
        if not user:
            return False
        spool = crontab
        line = "%s %s %s %s %s %s %s\n" % (minute, hour, day, month, weekday, user, command)
    else:
        user = user or 'root'
        spool = os.path.join(cronspool, user)
        line = "%s %s %s %s %s %s\n" % (minute, hour, day, month, weekday, command)
    # Bug fix: the original trailing `return False` was unreachable -- an I/O
    # failure raised out of open()/write() instead of reporting False.
    try:
        with open(spool, 'a+') as f:
            f.write(line)
        return True
    except OSError:
        return False
def cron_mod(user, id, minute, hour, day, month, weekday, command, level, currlist=''):
    '''modify normal or system cron

    Replaces the job whose 1-based position among matching job lines equals
    `id` (the same numbering produced by cron_list) with a line rebuilt from
    the given fields, then rewrites the whole spool file.
    Returns True once the file is rewritten, False on bad input.
    '''
    if user is None or id is None:
        return False
    if level == 'system':
        spool = crontab
        cron_line = "%s %s %s %s %s %s %s\n" % (minute, hour, day, month, weekday, user, command)
    else:
        spool = os.path.join(cronspool, user)
        cron_line = "%s %s %s %s %s %s\n" % (minute, hour, day, month, weekday, command)
    with open(spool, 'r') as f:
        lines = f.readlines()
    # i counts matching job lines (to locate `id`), j counts file lines
    i, j = 0, 0
    for line in lines:
        j += 1
        if re.findall("^\d|^\*|^\-", line):
            if level == 'normal':
                i += 1
            elif level == 'system':
                # if currlist is this user's list
                if currlist and currlist == user:
                    text = re.split('\s+', line, 6)
                    if user == text[5]:
                        i += 1
                else:
                    i += 1
        else:
            continue
        if str(i) == str(id):
            # replace in-place; j-1 because j was already incremented
            lines[j-1] = cron_line
            break
    with open(spool, 'w+') as f:
        f.writelines(lines)
        return True
    # NOTE(review): unreachable -- an I/O failure raises instead of
    # returning False here (same pattern as cron_add).
    return False
if __name__ == "__main__":
    import json
    # developer test fixtures: point the module at local copies of the
    # crontab and spool directory instead of /etc and /var
    crontab = '/Users/douzhenjiang/test/inpanel/test/crontab'
    cronspool = '/Users/douzhenjiang/test/inpanel/test/var_spool_cron'
    # print crontab
    # os.system("top")
    # print loadconfig(crontab, cfg_map)
    # print raw_loadconfig(crontab)
    # print(load_config())
    # print update_config({'shell': 'shelshelshel', 'home': 'homehomehome', 'path':'abc'})
    # print dict((v, k) for k, v in cfg_map.items())
    config = cron_list(level='system', user='root')
    # config = cron_list(level='system', user='apache')
    # config = cron_list(level='system')
    print(json.dumps(config))
    # print(cron_add('root', '*','*','*','*','*', 'command'))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
532,
13130,
11,
2255,
67,
2778,
89,
73,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
554,
26639,
318,
9387,
739,
262,
2846,
... | 1.901598 | 2,378 |
# -*- encoding: utf-8 -*-
"""
@File : /detectron2/datasets/split_flying_chairs.py
@Time : 2020-11-24 23:58:33
@Author : Facebook, Inc. and its affiliates.
@Last Modified: 2020-11-25 22:23:00
@Modified By : Chen-Jianhu (jhchen.mail@gmail.com)
@License : Copyright(C), USTC
@Desc : This script is used for split Flying Chairs dataset.
Usage:
python split_flying_chairs.py
"""
import os.path as osp
import glob
import json
import numpy as np
import time
def _random_split(pairs, split_ratio: float = 0.972):
"""
Args:
pairs (list): sample list, each item like this:
{
"image1": img1_path,
"image2": img2_path,
"flow_map": flow_map
}
split_ratio (tuple): the training set ratio,
so test set ration is compute by `1 - split_ratio`.
# This default split ratios if refrence the FlowNet paper.
"""
start = time.time()
print(
"Start to randomly divide the samples into training and test sets, "
"training set ratio: {} ...".format(split_ratio)
)
train_set_idxs = np.random.uniform(0, 1, len(pairs)) < split_ratio
train_set = []
test_set = []
for train_set_idx, sample in zip(train_set_idxs, pairs):
if train_set_idx:
train_set.append(sample)
else:
test_set.append(sample)
print("Finished. time: {:.2f}s.".format(time.time() - start))
print("Train: {}, Test: {}.".format(len(train_set), len(test_set)))
return train_set, test_set
if __name__ == "__main__":
    dataset_dir = osp.abspath("FlyingChairs_release")
    print("Dataset root: {}".format(dataset_dir))
    # NOTE(review): _generate_image_flow_map_pairs is not defined in this
    # fragment -- presumably elided; confirm it exists in the full file.
    image_flow_map_pairs = _generate_image_flow_map_pairs(dataset_dir)
    datasets = _random_split(image_flow_map_pairs)
    # Dump to json file
    for s, dataset in zip(["train", "test"], datasets):
        file_name = "flying_chairs_{}.json".format(s)
        file_path = osp.join(dataset_dir, file_name)
        start = time.time()
        print("Start writing {} to {} ...".format(file_name, file_path))
        json_str = json.dumps(dataset, ensure_ascii=False, indent=4)
        with open(file_path, "w") as f:
            f.write(json_str)
        print("Finished. time: {:.2f}s.".format(time.time() - start))
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
8979,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
1220,
15255,
478,
1313,
17,
14,
19608,
292,
1039,
14,
35312,
62,
45928,
62,
49655,
13,
9078,
198,
... | 2.235182 | 1,046 |
""" 本文件中的代码可以通过使用命令 python ex_3_18.py 运行
(#号及其后面内容为注释,可以忽略)
"""
import torch.nn as nn
# How to use a module list (nn.ModuleList)
# How to use a module dict (nn.ModuleDict)
| [
37811,
42164,
105,
23877,
229,
20015,
114,
40792,
21410,
47987,
163,
254,
223,
20998,
107,
20015,
98,
34460,
248,
32573,
229,
45635,
18796,
101,
37772,
121,
20015,
97,
21015,
409,
62,
18,
62,
1507,
13,
9078,
5525,
123,
238,
26193,
234,
... | 0.861314 | 137 |
from django.contrib import admin
from bookmarks import models
# Register the Bookmark model with its custom admin options.
# NOTE(review): BookmarkAdmin is not defined or imported in this fragment --
# presumably elided; confirm it exists in the full module.
admin.site.register(models.Bookmark, BookmarkAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
1492,
14306,
1330,
4981,
628,
198,
28482,
13,
15654,
13,
30238,
7,
27530,
13,
10482,
4102,
11,
4897,
4102,
46787,
8,
198
] | 3.545455 | 33 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developpers.
# =============================================================================
## @file ostap/math/tests/test_math_interpolation.py
# Test module for the file ostap/math/interpolation.py
# =============================================================================
""" Test module for ostap/math/interpolation.py
"""
# =============================================================================
import random,math
import ostap.math.models
from ostap.math.interpolation import ( interpolate , points ,
interpolate_bernstein ,
interpolate_bspline )
from ostap.math.base import doubles
from ostap.core.core import Ostap, SE
from ostap.math.models import f1_draw
from ostap.utils.utils import wait
from ostap.utils.timing import timing
from ostap.plotting.canvas import use_canvas
from ostap.utils.progress_bar import progress_bar
import ostap.logger.table as T
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
## module logger: explicit name when the module is executed as a script
if '__main__' == __name__ : logger = getLogger ( 'ostap.test_math_interpolation' )
else : logger = getLogger ( __name__ )
# =============================================================================
## module-global registry of interpolants built by the helpers below
functions = set ()
## calculate "distance" between two functions
def distance ( fun1 , fun2 , low , high ) :
    """Mean absolute difference of two functions, averaged over [low, high]."""
    from ostap.math.integral import integral
    def abs_diff ( x ) :
        return abs ( fun1 ( x ) - fun2 ( x ) )
    return integral ( abs_diff , low , high ) / ( high - low )
# =============================================================================
## interpolate the function
def run_func_interpolation ( fun , N , low , high , scale = 1.e-5 , logger = logger , name = 'Interpolation') :
    """Interpolate the function

    - build interpolation tables for ``fun`` on several abscissa grids
    - construct a battery of interpolants from every table
    - draw the true function and all interpolants on one canvas
    - report precision (in units of ``scale``) and CPU time per interpolant
    """
    Abscissas = Ostap.Math.Interpolation.Abscissas
    abscissas = ( ( 'Uniform' , Abscissas ( N , low , high, 0 ) ) ,
                ( 'Chebyshev' , Abscissas ( N , low , high, 1 ) ) ,
                ( 'Lobatto' , Abscissas ( N , low , high, 2 ) ) ,
                ## ( 'Random' , Abscissas ( doubles ( random.uniform ( low , high ) for i in range ( N ) ) ) )
                )
    tables = [ ( a[0] , points ( fun , a[1] ) ) for a in abscissas ]
    interpolants = []
    ## build every interpolant type for every abscissa grid
    for i , t in enumerate ( tables ) :
        item = ( 'Bernstein' , t[0] ) , interpolate_bernstein ( t[1] , None , low , high )
        interpolants.append ( item )
        item = ( 'Neville' , t[0] ) , Ostap.Math.Neville ( t[1] )
        interpolants.append ( item )
        item = ( 'Lagrange' , t[0] ) , Ostap.Math.Lagrange ( t[1] )
        interpolants.append ( item )
        item = ( 'Newton' , t[0] ) , Ostap.Math.Newton ( t[1] )
        interpolants.append ( item )
        item = ( 'Berrut1st' , t[0] ) , Ostap.Math.Berrut1st ( t[1] )
        interpolants.append ( item )
        item = ( 'Berrut2nd' , t[0] ) , Ostap.Math.Berrut2nd ( t[1] )
        interpolants.append ( item )
        item = ( 'Barycentric' , t[0] ) , Ostap.Math.Barycentric ( t[1] )
        interpolants.append ( item )
        for d in range ( 0 , 9 ) :
            item = ( 'FloaterHormann%d' % d , t[0] ) , Ostap.Math.FloaterHormann ( t[1] , d )
            interpolants.append ( item )
        for d in range ( 1 , 5 ) :
            item = ( 'BSpline%d' % d , t[0] ) , interpolate_bspline ( t[1] , None , d )
            interpolants.append ( item )
    ## draw the true function and every interpolant
    with wait ( 3 ) , use_canvas ( name ) :
        ff = lambda x : fun ( x )
        f1_draw ( ff , xmin = low , xmax = high , linecolor = 2 , linewidth = 2 )
        for i , item in enumerate ( interpolants ) :
            p , f = item
            n1 , n2 = p
            color = i + 3
            f.draw ( 'same' , linecolor = color )
            if 1 == color : color = 'Black'
            elif 2 == color : color = 'Red'
            elif 3 == color : color = 'Green'
            elif 4 == color : color = 'Blue'
            elif 5 == color : color = 'Yellow'
            elif 6 == color : color = 'Magenta'
            elif 7 == color : color = 'Cyan'
            elif 8 == color : color = 'DarkGreen'
            logger.info ( 'Color %10s for %s:%s' % ( color , n1 , n2 ) )
    ## probe precision on NP random points
    xx = []
    NP = 50000
    for i in range ( NP ) : xx.append ( random.uniform ( low , high ) )
    xx.sort()
    from collections import defaultdict
    counters = defaultdict(SE)
    cpu = {}
    ## loop over all interpolants
    for n , fi in interpolants :
        n1 , n2 = n
        cnt = counters [ ( n1 , n2 , fi ) ]
        with timing ( '' , logger = None ) as t :
            for x in xx :
                v = fun ( x )
                vi = fi ( x )
                cnt += abs ( vi - v ) / scale
        cpu [ (n1,n2) ] = t.delta
    ## summary table: precision per interpolant/grid
    rows = [ ( 'Interpolant' , 'Grid' , 'mean+/-rms' , 'max' , 'distance') ]
    for item in counters :
        n1 , n2 , ff = item
        c = counters[item]
        d = distance ( ff , fun , low , high )
        row = n1 , n2 , '%9.2f +/- %-09.1f' % ( c.mean().value() , c.rms() ) , '%-9.1f' % c.max() , '%.3g' % d
        rows.append ( row )
    title = 'Interpolation precision (%d random points)[x%s]' % ( NP , scale )
    table = T.table ( rows , title = title , prefix = '# ' , alignment = 'lllll' )
    logger.info ( '%s:\n%s' % ( title , table ) )
    ## summary table: CPU cost, fastest first
    lst = []
    for k in cpu :
        item = cpu[k] , k[0], k[1]
        lst.append ( item )
    lst.sort()
    rows = [ ( 'Interpolant' , 'Grid' , 'CPU [s]' ) ]
    for t,k0,k1 in lst :
        row = k0, k1 , '%.4g' % cpu[ (k0,k1) ]
        rows.append ( row )
    title = 'CPU: %d points' % NP
    table = T.table ( rows , title = title , prefix = '# ' , alignment = 'll' )
    logger.info ( '%s:\n%s' % ( title , table ) )
# ==========================================================================================================
## interpolate the grid
def run_grid_interpolation ( tfunc , dct , N , low , high , scale = 1.e-8 , logger = logger , name = 'interpolation' ) :
    """Interpolate the grid

    Build interpolants from a table of (x, value) pairs ``dct``, draw them
    against the true function ``tfunc`` and report precision (in units of
    ``scale``) and CPU time for every interpolant.
    """
    Abscissas = Ostap.Math.Interpolation.Abscissas
    data = points ( dct )
    ## list of interpolants
    interpolants = []
    ## bernstein interpolant
    interpolants.append ( ( 'Bernstein' , interpolate_bernstein ( data , None , low , high ) ) )
    ## neville interpolant
    interpolants.append ( ( 'Neville' , Ostap.Math.Neville ( data ) ) )
    ## largange interpolant
    interpolants.append ( ( 'Lagrange' , Ostap.Math.Lagrange ( data ) ) )
    ## (true) Barycentric interpolant
    interpolants.append ( ( 'Barycentric' , Ostap.Math.Barycentric ( data ) ) )
    ## Newton interpolant
    interpolants.append ( ( 'Newton' , Ostap.Math.Newton ( data ) ) )
    ## 1st Berrut interpolant
    interpolants.append ( ( 'Berrut 1st' , Ostap.Math.Berrut1st ( data ) ) )
    ## 2nd Berrut interpolant
    interpolants.append ( ( 'Berrut 2nd' , Ostap.Math.Berrut2nd ( data ) ) )
    for d in range ( 10 ) :
        interpolants.append ( ( 'FloaterHormann/%d' % d , Ostap.Math.FloaterHormann ( data , d ) ) )
    ## bspline interpolant
    ## bs = Ostap.Math.BSpline ( low , high , len ( data ) - 1 - degree , degree )
    for d in range ( 1 , 5 ) :
        interpolants.append ( ( 'BSpline/%s' % d , interpolate_bspline ( data , None , d ) ) )
    ## keep the interpolants alive in the module-global registry
    for n,t in interpolants :
        functions.add ( ( name , t ) )
    ## draw the true function and every interpolant
    with wait ( 1 ) , use_canvas ( name ) :
        ff = lambda x : tfunc ( x )
        f1_draw ( ff , xmin = low , xmax = high , linecolor = 1 , linewidth = 3 )
        for i , c in enumerate ( interpolants ) :
            n , f = c
            color = i + 2
            f.draw ( 'same' , linecolor = color )
            if 1 == color : color = 'Black'
            elif 2 == color : color = 'Red'
            elif 3 == color : color = 'Green'
            elif 4 == color : color = 'Blue'
            elif 5 == color : color = 'Yellow'
            elif 6 == color : color = 'Magenta'
            elif 7 == color : color = 'Cyan'
            elif 8 == color : color = 'DarkGreen'
            logger.info ( 'Color %10s for %s' % ( color , n ) )
    ## probe precision on NP random points
    xx = []
    NP = 50000
    for i in range ( NP ) : xx.append ( random.uniform ( low , high ) )
    xx.sort()
    from collections import defaultdict
    counters = defaultdict(SE)
    cpu = {}
    ## loop over all interpolants
    for n , fi in interpolants :
        cnt = counters [ ( n , fi ) ]
        with timing ( '' , logger = None ) as t :
            for x in xx :
                v = tfunc ( x )
                vi = fi ( x )
                cnt += abs ( vi - v ) / scale
        cpu [ n ] = t.delta
    ## summary table: precision per interpolant
    rows = [ ( 'Configuration' , 'mean+/-rms' , 'max' , 'distance') ]
    for item in counters :
        n , ff = item
        c = counters[item]
        d = distance ( ff , tfunc , low , high )
        row = n , '%9.2f +/- %-09.1f' % ( c.mean().value() , c.rms() ) , '%-9.1f' % c.max() , '%.3g' % d
        rows.append ( row )
    title = 'Interpolation precision (%d random points)[x%s]' % ( NP , scale )
    table = T.table ( rows , title = title , prefix = '# ' , alignment = 'llll' )
    logger.info ( '%s:\n%s' % ( title , table ) )
    ## summary table: CPU cost, fastest first
    lst = []
    for k in cpu :
        item = cpu[k] , k
        lst.append ( item )
    lst.sort()
    rows = [ ( 'Interpolant' , 'CPU [s]' ) ]
    for t,k in lst :
        row = k , '%.4g' % cpu[k]
        rows.append ( row )
    title = 'CPU: %d points' % NP
    table = T.table ( rows , title = title , prefix = '# ' , alignment = 'll' )
    logger.info ( '%s:\n%s' % ( title , table ) )
# =============================================================================
## interpolate cos function
# =============================================================================
## interpolate |sin| function
# =============================================================================
## interpolate |sin(2x)| function
# =============================================================================
## interpolate the table of values
# =============================================================================
## interpolate the table of values
# =============================================================================
## interpolate the table of values
# =============================================================================
## interpolate the table of values
# =============================================================================
# =============================================================================
## check that everything is serializable
# =============================================================================
# =============================================================================
# NOTE(review): the test_* functions called below are not present in this
# fragment (their definitions appear to have been elided) -- confirm they
# exist in the full file.
if '__main__' == __name__ :
    test_cos ()
    test_abssin ()
    test_abs2sin ()
    test_random_grid_sin ()
    test_random_grid_abssin ()
    test_random_grid_sin2 ()
    test_random_grid_gauss ()
    ## check finally that everything is serializeable:
    test_pickle ()
    with timing ('test_db' , logger ) :
        test_db ()
# =============================================================================
##                                                                      The END
# =============================================================================
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
38093,
25609,
220,
198,
2,
15069,
357,
66,
8,
38919,
499,
390,
626,
37186,
13,
198,
2,
38093,
25609,
198,
2235,
... | 2.358955 | 5,243 |
from .settings_class import * # noqa: F401, F403
from .settings_proxy_class import * # noqa: F401, F403
| [
6738,
764,
33692,
62,
4871,
1330,
1635,
220,
1303,
645,
20402,
25,
376,
21844,
11,
376,
31552,
198,
6738,
764,
33692,
62,
36436,
62,
4871,
1330,
1635,
220,
1303,
645,
20402,
25,
376,
21844,
11,
376,
31552,
198
] | 2.789474 | 38 |
import json
import requests
from .Error import PoorResponse
from .QuandooModel import urljoin, QuandooModel, QuandooDatetime
| [
11748,
33918,
198,
198,
11748,
7007,
198,
198,
6738,
764,
12331,
1330,
23676,
31077,
198,
6738,
764,
4507,
392,
2238,
17633,
1330,
19016,
22179,
11,
2264,
392,
2238,
17633,
11,
2264,
392,
2238,
27354,
8079,
628,
628,
198
] | 3.447368 | 38 |
import logging
from typing import Any, Optional
import attr
import giturlparse
# URL schemes this module accepts; presumably checked against parsed git
# remote URLs (see the giturlparse import above) -- confirm.
SUPPORTED_PROTOCOLS = {"http", "https", "ssh", "git"}
@attr.define
| [
11748,
18931,
198,
6738,
19720,
1330,
4377,
11,
32233,
198,
198,
11748,
708,
81,
198,
11748,
17606,
6371,
29572,
198,
198,
40331,
15490,
1961,
62,
4805,
2394,
4503,
3535,
50,
796,
19779,
4023,
1600,
366,
5450,
1600,
366,
45824,
1600,
36... | 3 | 50 |
from hashlib import md5
class HashSet:
"""
An implementation of a hash set. Supports strings, ints, and any class that implements __hash__.
Note that all sets contain None.
"""
def add(self, elem):
"""
Add an element to the set.
"""
if self.array[self.__find_pos(elem)] != elem:
self.array[self.__find_pos(elem)] = elem
self.__size += 1
if self.__size > (len(self.array) / 2):
self.__rehash()
def add_all(self, elems):
"""
Add all elements from an iterable to the set.
"""
for elem in elems:
self.add(elem)
def contains(self, elem):
"""
Returns true if the element is in the set, false otherwise. All sets are considered to contain None.
"""
return elem is None or self.array[self.__find_pos(elem)] is not None
def size(self):
"""
Returns the number of elements currently in the set.
"""
return self.__size
def __find_pos(self, elem):
"""
An internal method for determining the position for an element in the backing list.
"""
if elem is None:
return 0
offset = 1
if isinstance(elem, int):
position = elem % len(self.array)
elif isinstance(elem, str):
digest = md5(elem.encode('utf-8')).hexdigest()
position = int(digest, 16) % len(self.array)
else:
# Throws an AttributeError if hash isn't implemented, which seems reasonable
position = elem.hash % len(self.array)
while self.array[position] is not None:
if (self.array[position] == elem):
break
else:
position += offset
offset += 1
while position >= len(self.array):
position -= len(self.array)
return position
def __rehash(self):
"""
An internal method for resizing the backing list.
"""
old_elems = self.array
self.array = [None] * (len(old_elems)**2)
self.__size = 0
self.add_all(old_elems)
| [
6738,
12234,
8019,
1330,
45243,
20,
198,
198,
4871,
21059,
7248,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1052,
7822,
286,
257,
12234,
900,
13,
220,
45267,
13042,
11,
493,
82,
11,
290,
597,
1398,
326,
23986,
11593,
17831,
... | 2.126204 | 1,038 |
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2012 Aaron Morton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
entry_points = """
[console_scripts]
cass_snapshot_link = cass_snapshot_link:main
"""
#doubt this is the right way to do this
import sys
major, minor, _, _, _ = sys.version_info
if (major == 2 and minor < 7) or (major == 2 and minor < 2):
install_requires = ["argparse>1.2"]
else:
install_requires = []
setup(
name='cass_snapshot_link',
version='0.1.0',
author='Aaron Morton',
author_email='aaron@thelastpickle.com',
packages = [],
install_requires=install_requires,
entry_points=entry_points
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
2,
15069,
2321,
12139,
35766,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
19... | 3.132626 | 377 |
import numpy as np
from hetu.onnx.X2hetu.handler import BackendHandler, onnx_op
import hetu as ht
@onnx_op('Identity')
@onnx_op('Reshape')
@onnx_op('Transpose')
@onnx_op('Slice')
@onnx_op('Concat')
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
339,
28047,
13,
261,
77,
87,
13,
55,
17,
3202,
84,
13,
30281,
1330,
5157,
437,
25060,
11,
319,
77,
87,
62,
404,
198,
198,
11748,
339,
28047,
355,
289,
83,
628,
198,
31,
261,
77,
87,... | 2.079208 | 101 |
"""
11367. Report Card Time
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 64 ms
해결 날짜: 2020년 9월 18일
"""
# NOTE(review): main() is not defined in this fragment -- presumably elided;
# confirm it exists in the full file.
if __name__ == '__main__':
    main()
| [
37811,
198,
1157,
27824,
13,
6358,
5172,
3862,
198,
198,
168,
252,
239,
168,
226,
109,
168,
252,
238,
25,
2124,
23919,
15,
81,
198,
168,
244,
116,
168,
244,
112,
25,
11361,
513,
198,
168,
8955,
168,
248,
102,
31619,
102,
242,
167,... | 1.313043 | 115 |
import torch
def reparameterize(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    """Draw a differentiable sample from N(mean, exp(logvar)).

    Implements the reparameterization trick (https://arxiv.org/abs/1312.6114)
    for a Gaussian: z = mean + sigma * eps with eps ~ N(0, I), so gradients
    flow through mean and logvar.

    Args:
        mean (torch.Tensor): The mean of the distribution.
        logvar (torch.Tensor): The log-variance of the distribution.

    Returns:
        torch.Tensor: sampled values
    """
    sigma = (0.5 * logvar).exp()
    noise = torch.randn_like(sigma)
    return noise * sigma + mean
11748,
28034,
198,
198,
4299,
1128,
41158,
2357,
1096,
7,
32604,
25,
28034,
13,
51,
22854,
11,
2604,
7785,
25,
28034,
13,
51,
22854,
8,
4613,
28034,
13,
51,
22854,
25,
198,
220,
220,
220,
37227,
383,
1128,
41158,
2357,
1634,
6908,
3... | 2.502564 | 195 |
import random
from random import randint
import time
# Ask for an assignment name, then keep proposing goal statements until the
# user accepts one.
percentg = str(randint(90, 100))
randassign = input("Enter assignment example: The map quiz >>>")
time.sleep(1)
finished = False
while not finished:
    goals = [
        "I will complete most of my homework in class",
        "I will be focused in class " + percentg + "% of the time",
        "I will be resourceful in class.",
        "I will be strategic in class for maximum learning",
        "I will communicate effectively with the teacher to finish " + randassign + ".",
        "I will manage my time to make sure I finish " + randassign + ".",
        "I will be knowledgeable.",
        "I will be motivated " + percentg + "% of the time.",
        "I will listen to the teacher to learn",
    ]
    print(random.choice(goals))
    choices = input("Are you satisfied with your answer? Yes/No ").lower()
    if choices == 'yes':
        finished = True
    else:
        print("generating new result...")
| [
11748,
4738,
198,
6738,
4738,
1330,
43720,
600,
198,
11748,
640,
198,
25067,
70,
796,
43720,
600,
7,
3829,
11,
1802,
8,
198,
25067,
70,
796,
965,
7,
25067,
70,
8,
198,
25192,
562,
570,
796,
5128,
7203,
17469,
16237,
1672,
25,
383,
... | 3.355634 | 284 |
import numpy as np
import matplotlib.pyplot as plt
from .tools import median_mad
from .dataio import DataIO
from .catalogueconstructor import CatalogueConstructor
from .tools import make_color_dict, get_neighborhood
def plot_centroids(arg0, labels=[], alpha=1, neighborhood_radius=None, **kargs):
    """
    Plot the centroid (median waveform) of one or several clusters.

    arg0 can be cataloguecconstructor or catalogue (a dict)
    labels: a cluster label or a list of labels to draw
    neighborhood_radius: if given (single label only), restrict the plot to
    channels within that radius of the cluster's extremum channel.
    Remaining **kargs are forwarded to the waveform-figure helpers.
    """
    # NOTE(review): mutable default ``labels=[]`` is unidiomatic but harmless
    # here -- the list is only rebound, never mutated in place.
    if isinstance(labels, int):
        labels = [labels]
    if isinstance(arg0, CatalogueConstructor):
        cc = arg0
        dataio = cc.dataio
        if not hasattr(cc, 'colors'):
            cc.refresh_colors()
        colors = cc.colors
        chan_grp = cc.chan_grp
        centroids_wfs = cc.centroids_median
        label_inds = []
        for label in labels:
            ind = cc.index_of_label(label)
            label_inds.append(ind)
        ratio_mad = cc.info['peak_detector_params']['relative_threshold']
    elif isinstance(arg0, dict) and 'clusters' in arg0:
        catalogue = arg0
        dataio = kargs['dataio']
        chan_grp = catalogue['chan_grp']
        clusters = catalogue['clusters']
        colors = make_color_dict(clusters)
        centroids_wfs = catalogue['centers0']
        label_inds = []
        for label in labels:
            ind = np.nonzero(clusters['cluster_label']==label)[0][0]
            label_inds.append(ind)
        ratio_mad = catalogue['peak_detector_params']['relative_threshold']
    else:
        raise(Exception('arg0 must a catalogue constructor or a catalogue dict'))
    channels = dataio.channel_groups[chan_grp]['channels']
    geometry =dataio.get_geometry(chan_grp=chan_grp)
    if neighborhood_radius is not None:
        assert len(labels) == 1
        neighborhood = get_neighborhood(geometry, neighborhood_radius)
        # NOTE(review): ``clusters`` is only bound in the dict branch above;
        # reaching this line with a CatalogueConstructor raises NameError.
        extremum_channel = clusters[label_inds[0]]['extremum_channel']
        # keep only channels inside the neighborhood of the extremum channel
        keep = neighborhood[extremum_channel, :]
        centroids_wfs = centroids_wfs[:, :, keep]
        channels = np.array(channels)[keep]
        geometry = geometry[keep, :]
    kargs['ratio_mad'] = ratio_mad
    kargs['waveforms'] = centroids_wfs[label_inds, :, :]
    kargs['channels'] = channels
    kargs['geometry'] = geometry
    # NOTE(review): _prepare_waveform_fig / _plot_wfs / _enhance_waveform_fig
    # are not defined in this fragment -- presumably elided helpers.
    kargs = _prepare_waveform_fig(**kargs)
    for ind, label in zip(label_inds, labels):
        wf = centroids_wfs[ind, :, :]
        kargs['waveforms'] = wf
        kargs['color'] = colors.get(label, 'k')
        _plot_wfs(**kargs)
    _enhance_waveform_fig(**kargs)
def plot_waveforms_histogram(arg0, label=None, ax=None, channels=None,
                bin_min=None, bin_max=None, bin_size=0.1, units='MAD',
                dataio=None,# usefull when arg0 is catalogue
                ):
    """
    Draw a 2-D density histogram of the waveforms of one cluster.

    arg0 can be cataloguecconstructor or catalogue (a dict)
    label: the cluster whose waveforms are collected
    channels: channel indices (within the channel group) to display
    units: 'MAD' (normalized) or 'uV'/'μV' (scaled to microvolts)
    Bin edges default to the observed waveform range.
    """
    if ax is None:
        fig, ax = plt.subplots()
    if isinstance(arg0, CatalogueConstructor):
        cc = arg0
        dataio = cc.dataio
        chan_grp = cc.chan_grp
        # take waveforms
        #~ ind = cc.index_of_label(label)
        spike_labels = cc.all_peaks['cluster_label'][cc.some_peaks_index]
        wf = cc.some_waveforms[spike_labels==label]
        wf = wf[:, :, channels]
        if units in ('uV', 'μV'):
            wf = wf * cc.signals_mads[channels][None, None, :] * dataio.datasource.bit_to_microVolt
        n_left = cc.info['waveform_extractor_params']['n_left']
        n_right = cc.info['waveform_extractor_params']['n_right']
    elif isinstance(arg0, dict) and 'clusters' in arg0:
        catalogue = arg0
        chan_grp = catalogue['chan_grp']
        clusters = catalogue['clusters']
        n_left = catalogue['n_left']
        n_right = catalogue['n_right']
        all_wf = []
        for seg_num in range(dataio.nb_segment):
            # TODO loop over segments
            spikes = dataio.get_spikes(seg_num=seg_num, chan_grp=chan_grp,)
            # take waveforms
            #~ spike_labels = spikes['cluster_label']
            spikes = spikes[spikes['cluster_label'] == label]
            sample_indexes = spikes['index']
            if sample_indexes.size>1000:
                # limit to 1000 spike by segment
                sample_indexes = np.random.choice(sample_indexes, size=1000)
            sample_indexes = sample_indexes[(sample_indexes>-n_left)]
            wf_ = dataio.get_some_waveforms(seg_num=seg_num, chan_grp=chan_grp,
                            sample_indexes=sample_indexes, n_left=n_left, n_right=n_right)
            wf_ = wf_[:, :, channels]
            all_wf.append(wf_)
        wf = np.concatenate(all_wf, axis=0)
        if units in ('uV', 'μV'):
            wf = wf * catalogue['signals_mads'][channels][None, None, :] * dataio.datasource.bit_to_microVolt
    else:
        raise(Exception('arg0 must a catalogue constructor or a catalogue dict'))
    # default bin edges: just beyond the observed waveform range
    if bin_min is None:
        bin_min = np.min(wf) - 1.
    if bin_max is None:
        bin_max = np.max(wf) +1
    if bin_size is None:
        if units=='MAD':
            bin_size = 0.1
        elif units in ('uV', 'μV'):
            bin_size = (bin_max - bin_min) / 500.
    #~ data = wf.swapaxes(1,2).reshape(wf.shape[0], -1)
    #~ bins = np.arange(bin_min, bin_max, bin_size)
    #~ hist2d = np.zeros((data.shape[1], bins.size))
    #~ indexes0 = np.arange(data.shape[1])
    #~ data_bined = np.floor((data-bin_min)/bin_size).astype('int32')
    #~ data_bined = data_bined.clip(0, bins.size-1)
    #~ for d in data_bined:
    #~     hist2d[indexes0, d] += 1
    #~ im = ax.imshow(hist2d.T, interpolation='nearest',
    #~                 origin='lower', aspect='auto', extent=(0, data.shape[1], bin_min, bin_max), cmap='hot')
    # NOTE(review): plot_waveforms_density is not defined in this fragment --
    # presumably imported/defined elsewhere in the full module.
    im = plot_waveforms_density(wf, bin_min, bin_max, bin_size, ax=ax)
    # annotate each channel strip with its absolute channel number
    peak_width = n_right - n_left
    for c, chan in enumerate(channels):
        abs_chan = dataio.channel_groups[chan_grp]['channels'][chan]
        ax.text(c*peak_width-n_left, 0, '{}'.format(abs_chan), size=10, ha='center', color='w')
        if c>0:
            ax.axvline((c) * peak_width, color='w')
    ax.set_xticks([])
def plot_isi(dataio, catalogue=None, chan_grp=None, label=None, ax=None, bin_min=0, bin_max=100, bin_size=1.):
    """Plot the inter-spike-interval (ISI) histogram of one cluster.

    Spikes with ``cluster_label == label`` are collected from every
    segment of *dataio* and the per-segment histograms are summed.
    ``bin_min``/``bin_max``/``bin_size`` are expressed in milliseconds.
    """
    if ax is None:
        _, ax = plt.subplots()
    if catalogue is None:
        catalogue = dataio.load_catalogue(chan_grp=chan_grp)

    sample_rate = dataio.sample_rate
    edges = np.arange(bin_min, bin_max, bin_size)
    total = None
    for seg_index in range(dataio.nb_segment):
        seg_spikes = dataio.get_spikes(seg_num=seg_index, chan_grp=chan_grp,)
        seg_spikes = seg_spikes[seg_spikes['cluster_label'] == label]
        # ISIs in ms: sample-count differences divided by samples-per-ms.
        intervals = np.diff(seg_spikes['index']) / (sample_rate / 1000.)
        seg_count, edges = np.histogram(intervals, bins=edges)
        total = seg_count if total is None else total + seg_count
    ax.plot(edges[:-1], total, color='k')  # TODO color
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
764,
31391,
1330,
14288,
62,
9937,
198,
6738,
764,
7890,
952,
1330,
6060,
9399,
198,
6738,
764,
9246,
30326,
41571,
273,
1330,
... | 2.007529 | 3,586 |
#! python3
print("Task 9.4")
class Restaurant():
    """Model a restaurant that can be opened and can serve customers.

    NOTE(review): the class body appears truncated here -- the
    module-level code below calls ``describe_restaurant()`` and
    ``open_restaurant()`` and assigns ``number_served``, which must be
    defined in the full source.  Confirm before relying on this stub.
    """
restaurant = Restaurant('food house', 'grill bar')
restaurant.describe_restaurant()
restaurant.open_restaurant()
print(f"\nNumber served: {restaurant.number_served}")
restaurant.number_served = 430
print(f"Number served: {restaurant.number_served}")
restaurant.number_served = 4245
print(f"Number served: {restaurant.number_served}")
restaurant.number_served = 8900
print(f"Number served: {restaurant.number_served}")
print("Task 9.5")
class User():
    """A simple user-profile record.

    Name and location fields are normalized to Title Case on creation;
    all other fields are stored verbatim.  ``login_attempts`` starts at
    zero and is managed by methods defined elsewhere.
    """

    def __init__(self, first_name, last_name, password, email, username, hobby, location):
        """Store the profile fields, title-casing names and location."""
        # Human-readable fields are normalized to Title Case.
        self.first_name = first_name.title()
        self.last_name = last_name.title()
        self.location = location.title()
        # Credentials and miscellaneous fields are kept exactly as given.
        self.password = password
        self.email = email
        self.username = username
        self.hobby = hobby
        # Login-attempt counter; starts fresh for every new profile.
        self.login_attempts = 0
user1 = User('adam', 'bears', 'a_bear@g.com', 'ursamajor', 'ursa', 'survival', 'Vancouver')
print(user1.describe_user())
user1.greet_user()
print(f"\nLogin attempts: ")
user1.increment_login_attempts()
user1.increment_login_attempts()
user1.increment_login_attempts()
user1.increment_login_attempts()
user1.increment_login_attempts()
print(f" Login attempts: {user1.login_attempts}")
print(f" Reset login attempts:")
user1.reset_login_attempst()
print(f" Login attempts: {user1.login_attempts}") | [
2,
0,
21015,
18,
198,
198,
4798,
7203,
25714,
860,
13,
19,
4943,
198,
198,
4871,
26078,
33529,
198,
220,
220,
220,
37227,
9487,
546,
4756,
7072,
37811,
198,
198,
2118,
2899,
415,
796,
26078,
10786,
19425,
2156,
3256,
705,
2164,
359,
... | 2.605585 | 573 |
import yaml
from dagster.utils import file_relative_path
from docs_snippets.concepts.configuration.make_values_resource_any import basic_result
from docs_snippets.concepts.configuration.make_values_resource_config_schema import (
different_values_job,
)
| [
11748,
331,
43695,
198,
6738,
48924,
1706,
13,
26791,
1330,
2393,
62,
43762,
62,
6978,
198,
6738,
34165,
62,
16184,
3974,
1039,
13,
43169,
82,
13,
11250,
3924,
13,
15883,
62,
27160,
62,
31092,
62,
1092,
1330,
4096,
62,
20274,
198,
673... | 3.25 | 80 |
from . import raw
from . import sst2
__all__ = ["raw", "sst2"]
| [
6738,
764,
1330,
8246,
198,
6738,
764,
1330,
264,
301,
17,
198,
198,
834,
439,
834,
796,
14631,
1831,
1600,
366,
82,
301,
17,
8973,
198
] | 2.461538 | 26 |
from flask.ext.security import UserMixin
from app.models import db
from app.models.relationships import roles_users, user_skills
class User(db.Model, UserMixin):
    """SQLAlchemy model for an application user.

    Combines Flask-Security's ``UserMixin`` (authentication helpers)
    with many-to-many links to roles and skills through the
    ``roles_users`` and ``user_skills`` association tables.
    """
    __tablename__ = "users"

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Display name.
    name = db.Column(db.Unicode(255))
    # E-mail address; enforced unique at the database level.
    email = db.Column(db.Unicode(255), unique=True)
    # Password field (presumably a hash managed by Flask-Security --
    # confirm against the security configuration).
    password = db.Column(db.Unicode(255))
    # Whether the account is enabled.
    active = db.Column(db.Boolean())
    # Timestamp of account confirmation (None until confirmed).
    confirmed_at = db.Column(db.DateTime())
    # Many-to-many: security roles; reverse access via Role.users.
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    # Many-to-many: skills; reverse access via Skill.users.
    skills = db.relationship("Skill",
                             secondary=user_skills,
                             backref=db.backref("users", lazy='dynamic'))

    @property
    def _user_skills_as_set(self):
        """Return the ids of this user's skills as a set (for set algebra)."""
        return set([skill.id for skill in self.skills])
6738,
42903,
13,
2302,
13,
12961,
1330,
11787,
35608,
259,
198,
198,
6738,
598,
13,
27530,
1330,
20613,
198,
6738,
598,
13,
27530,
13,
39468,
5748,
1330,
9176,
62,
18417,
11,
2836,
62,
8135,
2171,
628,
198,
4871,
11787,
7,
9945,
13,
... | 2.5 | 366 |
import requests
from lxml import html
import shutil
import time
import os
import logging
import re
import random
# logpath = 'results_hero_academia.log'
# if os.path.exists(logpath):
# pass
# logging.basicConfig(filename=logpath)
domain = "http://eatmanga.com"
# time.sleep(3)
# if os.access(incomplete_local_chapter_path, os.W_OK):
# os.rename(incomplete_local_chapter_path, local_chapter_path)
# else:
# print('Could not access folder for renaming. Wait 3 seconds.')
# time.sleep(3)
# if os.access(incomplete_local_chapter_path, os.W_OK):
# os.rename(incomplete_local_chapter_path, local_chapter_path)
# else:
# print('Could not access folder for renaming (2nd try). Skipping renaming')
if __name__ == '__main__':
main() | [
11748,
7007,
198,
6738,
300,
19875,
1330,
27711,
198,
11748,
4423,
346,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
302,
198,
11748,
4738,
198,
198,
2,
2604,
6978,
796,
705,
43420,
62,
11718,
62,
330,
324,
22859,
... | 2.40413 | 339 |
#!/usr/bin/env.python
#_*_ coding: utf-8 _*_
from selenium import webdriver
import os
import time
import Trade
#Get the current time
#Main function
#Run the program
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
13,
29412,
201,
198,
2,
62,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
201,
198,
201,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
11748,
28686,
201,
198,
11748,
640,
201,
... | 1.602041 | 98 |
"""
Different CCA methods
=====================
Exempliefies different CCA methods
"""
# %%
# Import necessary libraries.
import pandas as pd
import numpy as np
from numpy.linalg import svd
from statsmodels.multivariate.cancorr import CanCorr
from sparsecca import cca_ipls
from sparsecca import cca_pmd
from sparsecca import multicca_pmd
from sparsecca import pmd
# %%
# Get toy data example from seaborn
path = "https://raw.githubusercontent.com/mwaskom/seaborn-data/master/penguins.csv"
df = pd.read_csv(path)
df = df.dropna()
X = df[['bill_length_mm', 'bill_depth_mm']]
Z = df[['flipper_length_mm', 'body_mass_g']]
X = ((X - np.mean(X)) / np.std(X)).to_numpy()
Z = ((Z - np.mean(Z)) / np.std(Z)).to_numpy()
# %%
# Define function for printing weights
# %%
# First, let's try CanCorr function from statsmodels package.
stats_cca = CanCorr(Z, X)
print(stats_cca.corr_test().summary())
print_weights('X', stats_cca.x_cancoef)
print_weights('Z', stats_cca.y_cancoef)
# %%
# Next, use CCA algorithm from Witten et al.
U, V, D = cca_pmd(X, Z, penaltyx=1.0, penaltyz=1.0, K=2, standardize=False)
x_weights = U[:, 0]
z_weights = V[:, 0]
corrcoef = np.corrcoef(np.dot(x_weights, X.T), np.dot(z_weights, Z.T))[0, 1]
print("Corrcoef for comp 1: " + str(corrcoef))
print_weights('X', U)
print_weights('Z', V)
# %%
# As the CCA algorithm in Witten et al is faster version of
# computing SVD of X.T @ Z, try that.
U, D, V = svd(X.T @ Z)
x_weights = U[:, 0]
z_weights = V[0, :]
corrcoef = np.corrcoef(np.dot(x_weights, X.T), np.dot(z_weights, Z.T))[0, 1]
print("Corrcoef for comp 1: " + str(corrcoef))
print_weights('X', U)
print_weights('V', V.T)
# %%
# The novelty in Witten et al is developing matrix decomposition similar
# to SVD, but which allows to add convex penalties (here lasso).
# Using that to X.T @ Z without penalty results to same as above.
U, V, D = pmd(X.T @ Z, K=2, penaltyu=1.0, penaltyv=1.0, standardize=False)
x_weights = U[:, 0]
z_weights = V[:, 0]
corrcoef = np.corrcoef(np.dot(x_weights, X.T), np.dot(z_weights, Z.T))[0, 1]
print("Corrcoef for comp 1: " + str(corrcoef))
print_weights('X', U)
print_weights('Z', V)
# %%
# However, when you add penalties, you get a sparse version of CCA.
U, V, D = pmd(X.T @ Z, K=2, penaltyu=0.8, penaltyv=0.9, standardize=False)
x_weights = U[:, 0]
z_weights = V[:, 0]
corrcoef = np.corrcoef(np.dot(x_weights, X.T), np.dot(z_weights, Z.T))[0, 1]
print("Corrcoef for comp 1: " + str(corrcoef))
print_weights('X', U)
print_weights('Z', V)
# %%
# PMD is really fantastically simple and powerful idea, and as seen,
# can be used to implement sparse CCA. However, for SVD(X.T @ Z) to be
# equivalent to CCA, cov(X) and cov(Z) should be diagonal,
# which can sometimes give problems. Another CCA algorithm allowing convex penalties
# that does not require cov(X) and cov(Z) to be diagonal, was presented in
# Mai et al (2019). It is based on iterative least squares formulation, and as it is
# solved with GLM, it allows elastic net -like weighting of L1 and L2 -norms for
# both datasets separately.
X_weights, Z_weights = cca_ipls(X, Z, alpha_lambda=0.0, beta_lambda=0.0, standardize=False,
n_pairs=2, glm_impl='glmnet_python')
x_weights = X_weights[:, 0]
z_weights = Z_weights[:, 0]
corrcoef = np.corrcoef(np.dot(x_weights, X.T), np.dot(z_weights, Z.T))[0, 1]
print("Corrcoef for comp 1: " + str(corrcoef))
print_weights("X", X_weights)
print_weights("Z", Z_weights)
| [
37811,
220,
198,
40341,
327,
8141,
5050,
198,
4770,
1421,
28,
198,
198,
3109,
18856,
2086,
444,
1180,
327,
8141,
5050,
198,
198,
37811,
198,
198,
2,
43313,
198,
2,
17267,
3306,
12782,
13,
198,
198,
11748,
19798,
292,
355,
279,
67,
1... | 2.470588 | 1,411 |
import unittest
import numpy as np
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.faster_rcnn import bbox2loc
from chainercv.links.model.faster_rcnn import loc2bbox
from chainercv.utils import generate_random_bbox
testing.run_module(__name__, __file__)
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6333,
263,
1330,
269,
15339,
198,
6738,
6333,
263,
1330,
4856,
198,
6738,
6333,
263,
13,
33407,
1330,
708,
81,
198,
198,
6738,
6333,
2798,
85,
13,
28751,
... | 3.046729 | 107 |
import mysql.connector
from mysql.connector import Error
from sqlalchemy import create_engine | [
11748,
48761,
13,
8443,
273,
198,
6738,
48761,
13,
8443,
273,
1330,
13047,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392
] | 4.227273 | 22 |
import itertools
import os
from dataclasses import dataclass
from typing import Optional, List, Tuple
import librosa
import numpy as np
import torch
from nemo.collections.asr.parts.preprocessing import AudioSegment
from speech_processing.speech_utils import MAX_16_BIT_PCM
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
TARGET_SAMPLE_RATE = 16_000
@dataclass
@dataclass
@dataclass
@dataclass
if __name__ == "__main__":
# idxs = list((k,list(g)) for k,g in itertools.groupby(list(enumerate("thisss isss a ttteeest")), key=lambda x: x[1]))
# print(idxs)
audio = AudioSegment.from_file(
"/home/tilo/data/asr_data/GERMAN/tuda/raw/german-speechdata-package-v2/dev/2015-02-09-15-18-46_Samson.wav",
target_sr=TARGET_SAMPLE_RATE,
offset=0.0,
trim=False,
)
asr = SpeechToText(
model_name="jonatasgrosman/wav2vec2-large-xlsr-53-german",
).init()
array = audio.samples
print(f"array-shape: {array.shape}")
logits_file = "/tmp/logits.npy"
if not os.path.isfile(logits_file):
assert False
logits = asr._calc_logits(array, audio.sample_rate)
np.save(logits_file, logits)
logits = torch.from_numpy(np.load(logits_file))
print(asr.decode_with_timestamps(logits, len(array)))
# print(asr.transcribe_audio_array(array, audio.sample_rate))
| [
11748,
340,
861,
10141,
198,
11748,
28686,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
32233,
11,
7343,
11,
309,
29291,
198,
198,
11748,
9195,
4951,
64,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28... | 2.296796 | 593 |
import json
import requests
import sys
import time
from argparse import ArgumentParser
from collections import deque
from os.path import isfile
from tabber import Tabber
if __name__ == "__main__":
args = _argparse().parse_args()
assert args.name or args.input, "either seed file or seed string needs to be supplied"
seed = [args.name]
if args.input and isfile(args.input):
seed = read_names(args.input)
main(Connection(User(sys.argv[1]), "http://ws.audioscrobbler.com/2.0/"), seed)
| [
11748,
33918,
198,
11748,
7007,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
28686,
13,
6978,
1330,
318,
7753,
198,
6738,
7400,
527,
1330,
16904,
527,
... | 2.916201 | 179 |
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
"""The Implementation of the common layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import dragon
from ..layer import Layer
class InnerProductLayer(Layer):
"""The implementation of ``InnerProductLayer``.
Parameters
----------
num_output : int
The output dim. Refer `InnerProductParameter.num_output`_.
bias_term : boolean
Whether to use bias. Refer `InnerProductParameter.bias_term`_.
weight_filler : caffe_pb2.FillerParameter
The filler of weight. Refer `InnerProductParameter.weight_filler`_.
bias_filler : caffe_pb2.FillerParameter
The filler of bias. Refer `InnerProductParameter.bias_filler`_.
axis : int
The start axis to calculate. Refer `InnerProductParameter.axis`_.
transpose : boolean
Whether to transpose the weights. Refer `InnerProductParameter.transpose`_.
"""
class AccuracyLayer(Layer):
"""The implementation of ``AccuracyLayer``.
Parameters
----------
top_k : int
The top-k accuracy. Refer `AccuracyParameter.top_k`_.
axis : int
The axis of classes. Refer `AccuracyParameter.axis`_.
ignore_label : int
The label to ignore. Refer `AccuracyParameter.ignore_label`_.
"""
class PythonLayer(Layer):
"""The implementation of ``PythonLayer``.
Parameters
----------
module : str
The module. Refer `PythonParameter.module`_.
layer : str
The class name of layer. Refer `PythonParameter.layer`_.
param_str : str
The str describing parameters. Refer `PythonParameter.param_str`_.
"""
class EltwiseLayer(Layer):
"""The implementation of ``EltwiseLayer``.
Parameters
----------
operation : EltwiseParameter.EltwiseOp
The operation. Refer `EltwiseParameter.operation`_.
coeff : list of float
The coefficients. Refer `EltwiseParameter.coeff`_.
"""
class AddLayer(Layer):
"""The extended implementation of ``EltwiseLayer``."""
class ConcatLayer(Layer):
"""The implementation of ``ConcatLayer``.
Parameters
----------
axis : int
The axis to concatenate. Refer `ConcatParameter.axis`_.
"""
class SliceLayer(Layer):
"""The implementation of ``SliceLayer``.
Parameters
----------
axis : int
The axis to concatenate. Refer ``SliceParameter.axis``.
slice_point : sequence of int
The optional slice points. Refer ``SliceParameter.slice_point``.
"""
class CropLayer(Layer):
"""The implementation of ``CropLayer``.
Parameters
----------
axis : int
The start axis. Refer `CropParameter.axis`_.
offset : list of int
The offsets. Refer `CropParameter.offset`_.
"""
class ReshapeLayer(Layer):
"""The implementation of ``ReshapeLayer``.
Parameters
----------
shape : list of int
The output shape. Refer `ReshapeParameter.shape`_.
"""
class PermuteLayer(Layer):
"""The implementation of ``PermuteLayer``.
Parameters
----------
order : list of int
The permutation. Refer `PermuteParameter.order`_.
"""
class FlattenLayer(Layer):
"""The implementation of ``FlattenLayer``.
Parameters
----------
axis : int
The start axis. Refer `FlattenParameter.axis`_.
end_axis : int
The end axis. Refer `FlattenParameter.end_axis`_.
"""
class GatherLayer(Layer):
"""The extended implementation of ``GatherOp``.
Parameters
----------
axis : int
The axis for gathering. Refer ``GatherParameter.axis``.
"""
class SoftmaxLayer(Layer):
"""The implementation of ``SoftmaxLayer``.
Parameters
----------
axis : int
The axis to perform softmax. Refer `SoftmaxParameter.axis`_.
"""
class ArgMaxLayer(Layer):
"""The implementation of ``ArgMaxLayer``.
Parameters
----------
top_k : int
The top k results to keep. Refer `ArgMaxParameter.top_k`_.
axis : int
The axis to perform argmax. Refer `ArgMaxParameter.axis`_.
"""
class BatchNormLayer(Layer):
"""The implementation of ``BatchNormLayer``.
Parameters
----------
use_global_stats : boolean
Refer `BatchNormParameter.use_global_stats`_.
moving_average_fraction : float
Refer `BatchNormParameter.moving_average_fraction`_.
eps : float
Refer `BatchNormParameter.eps`_.
"""
class GroupNormLayer(Layer):
"""The implementation of ``GroupNormLayer``.
Parameters
----------
group : int
Refer ``GroupNormParameter.group``.
eps : float
Refer ``GroupNormParameter.eps``.
"""
class InstanceNormLayer(Layer):
"""The implementation of ``InstanceNormLayer``.
Introduced by `[Ulyanov et.al, 2016] <https://arxiv.org/abs/1607.08022>`_
Parameters
----------
eps : float
Refer ``InstanceNormParameter.eps``.
"""
class ScaleLayer(Layer):
"""The implementation of ``ScaleLayer``.
Parameters
----------
axis : int
The start axis. Refer `ScaleParameter.axis`_.
num_axes : int
The number of axes. Refer `ScaleParameter.num_axes`_.
filler : FillerParameter
The filler of scale parameter. Refer `ScaleParameter.filler`_.
bias_term : boolean
Whether to use bias. Refer `ScaleParameter.bias_term`_.
bias_filler : FillerParameter
The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.
"""
class BNLayer(Layer):
"""The implementation of ``BNLayer``.
Parameters
----------
use_global_stats : boolean
Refer `BatchNormParameter.use_global_stats`_.
moving_average_fraction : float
Refer `BatchNormParameter.moving_average_fraction`_.
eps : float
Refer `BatchNormParameter.eps`_.
filler : FillerParameter
The filler of scale parameter. Refer `ScaleParameter.filler`_.
bias_filler : FillerParameter
The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.
"""
class GNLayer(Layer):
"""The implementation of ``GNLayer``.
Parameters
----------
group : int
Refer ``GroupNormParameter.group``.
eps : float
Refer ``GroupNormParameter.eps``.
filler : FillerParameter
The filler of scale parameter. Refer `ScaleParameter.filler`_.
bias_filler : FillerParameter
The filler of bias parameter. Refer `ScaleParameter.bias_filler`_.
"""
class NormalizeLayer(Layer):
"""The implementation of ``NormalizeLayer``.
Parameters
----------
across_spatial : boolean
Whether to stat spatially. Refer `NormalizeParameter.across_spatial`_.
scale_filler : FillerParameter
The filler of scale parameter. Refer `NormalizeParameter.scale_filler`_.
channel_shared : boolean
Whether to scale across channels. Refer `NormalizeParameter.channel_shared`_.
eps : float
The eps. Refer `NormalizeParameter.eps`_.
"""
class TileLayer(Layer):
"""The extended implementation of ``TileLayer``.
Parameters
----------
multiples : caffe_pb2.BlobShape
The multiples. Refer `TileParameter.multiples`_.
"""
class ReductionLayer(Layer):
"""The extended implementation of ``ReductionLayer``.
Parameters
----------
operation : caffe_pb2.ReductionOp
The operation. Refer `ReductionParameter.operation`_.
axis : int
The axis to to reduce. Refer `ReductionParameter.axis`_.
"""
class ExpandDimsLayer(Layer):
"""The implementation of ``ExpandDimsLayer``.
Parameters
----------
axis : int
This axis to expand at. Refer `ExpandDimsParameter.axis`_.
"""
class StopGradientLayer(Layer):
"""The implementation of ``StopGradientLayer``."""
class ProposalLayer(Layer):
"""The implementation of ``ProposalLayer``.
Parameters
----------
stride : list of int
The stride of anchors. Refer ``ProposalParameter.stride``.
scale : list of float
The scales of anchors. Refer `ProposalParameter.scale`_.
ratio : list of float
The ratios of anchors. Refer `ProposalParameter.ratio`_.
pre_nms_top_n : int
The num of anchors before nms. Refer `ProposalParameter.pre_nms_topn`_.
post_nms_top_n : int
The num of anchors after nms. Refer `ProposalParameter.post_nms_topn`_.
nms_thresh : float
The threshold of nms. Refer `ProposalParameter.nms_thresh`_.
min_size : int
The min size of anchors. Refer `ProposalParameter.min_size`_.
min_level : int
Finest level of the FPN pyramid. Refer ``ProposalParameter.min_level``.
max_level : int
Coarsest level of the FPN pyramid. Refer ``ProposalParameter.max_level``.
canonical_scale : int
The baseline scale of mapping policy. Refer ``ProposalParameter.canonical_scale``.
canonical_level : int
Heuristic level of the canonical scale. Refer ``ProposalParameter.canonical_level``.
"""
class CastLayer(Layer):
"""The implementation of ``CastLayer``.
Parameters
----------
dtype : str
The stride of anchors. Refer ``CastParameter.dtype``.
""" | [
2,
20368,
1783,
10541,
198,
2,
15069,
357,
66,
8,
2177,
12,
25579,
11,
1001,
17167,
17760,
11,
1766,
1539,
43,
8671,
13,
198,
2,
198,
2,
49962,
739,
262,
347,
10305,
362,
12,
2601,
682,
13789,
13,
198,
2,
921,
815,
423,
2722,
25... | 2.838464 | 3,411 |
from tkinter import *
from time import *
from threading import *
#root = Tk()
#root.title("Win Screen")
#app = Winscreen(root)
#root.mainloop() | [
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
640,
1330,
1635,
198,
6738,
4704,
278,
1330,
1635,
198,
198,
2,
15763,
796,
309,
74,
3419,
198,
2,
15763,
13,
7839,
7203,
16643,
15216,
4943,
198,
2,
1324,
796,
24423,
32060,
7,
15763,
8,
... | 2.88 | 50 |
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
setup(
name='alf',
version='0.0.6',
python_requires='>=3.7.0',
install_requires=[
'atari_py==0.1.7',
'cpplint',
'clang-format==9.0',
'fasteners',
'gin-config@git+https://github.com/HorizonRobotics/gin-config.git',
'gym==0.15.4',
'gym3==0.3.3',
'h5py==3.5.0',
'matplotlib==3.4.1',
'numpy',
'opencv-python',
'pathos==0.2.4',
# with python3.7, the default version of pillow (PIL) is 8.2.0,
# which breaks some pyglet based rendering in gym
'pillow==7.2.0',
'procgen==0.10.4',
'psutil',
'pybullet==2.5.0',
'pyglet==1.3.2', # higher version breaks classic control rendering
'rectangle-packer==2.0.0',
'sphinx==3.0',
'sphinx-autobuild',
'sphinx-autodoc-typehints@git+https://github.com/hnyu/sphinx-autodoc-typehints.git',
'sphinxcontrib-napoleon==0.7',
'sphinx-rtd-theme==0.4.3', # used to build html docs locally
'tensorboard==2.6.0',
'torch==1.8.1',
'torchvision==0.9.1',
'torchtext==0.9.1',
'cnest==1.0.4',
], # And any other dependencies alf needs
package_data={'': ['*.gin']},
packages=find_packages(),
)
| [
2,
15069,
357,
66,
8,
13130,
22776,
47061,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.232719 | 868 |
"""Quadrocopter Controller."""
from RLBench import Policy
from RLBench.spaces import BoundedSpace
from RLBench.envs._quadrocopter import StateVector
import numpy as np
import logging
logger = logging.getLogger(__name__)
__all__ = ('NonLinearQuadrocopterController')
# TODO: Controller: Documentation
class NonLinearQuadrocopterController(Policy):
    """Non-linear quadrocopter controller.

    Maps the current vehicle state to a 4-dimensional command that
    tracks the trajectory supplied by an external reference object.
    """
    def __init__(self, zeta_z=0.7, params=[.7, .7, .7, .5, .707],
                 reference=None):
        """Initialize NonLinearQuadrocopterController.

        Parameters
        ----------
        zeta_z :
            Damping ratio of the vertical (z) velocity loop.
        params :
            Gain vector ``[tau_x, tau_y, tau_z, tau_w, zeta]`` -- time
            constants for the x/y/z and yaw loops plus the lateral
            damping ratio (see ``map``).  NOTE(review): mutable default
            argument; benign here because it is copied by ``np.array``
            and never mutated, but worth cleaning up.
        reference :
            Object exposing a ``reference`` attribute holding the
            desired state; presumably set by the environment at rollout
            start -- confirm against the environment code.
        """
        self._zeta_z = zeta_z
        self._params = np.array(params)
        self.reference = reference
        # NOTE(review): with the default above `params` is never None,
        # so this flag is effectively always True -- confirm the intent.
        if params is not None:
            self.initialized = True
        else:
            self.initialized = False

        # Each of the 5 gains is constrained to the interval [0, 1].
        self._par_space = BoundedSpace(np.array([0., 0., 0., 0., 0.]),
                                       np.array([1., 1., 1., 1., 1.]))

    def map(self, state):
        """Map state to action.

        Depends on a reference object. If the environment has a reference
        object it needs to set the reference at the start of the rollout.

        Parameters
        ----------
        state : array-like
            Element of state space.

        Returns
        -------
        action : ndarray
            Element of action space (from the computation below this
            looks like roll, pitch, z-velocity, yaw-rate -- confirm with
            the environment's action definition).
        """
        ref = self.reference.reference
        state = StateVector(state)

        # Allocate memory for the 4 outputs of the controller.
        action = np.empty((4,), dtype=np.float32)

        # Retrieve the different parameters and make sure the critical ones
        # are non zero.
        tau_x, tau_y, tau_z, tau_w, zeta = self._params
        if tau_x < 1e-3:
            tau_x = 1e-3
            logger.warning('Parameter `tau_x` too small for controller, '
                           + 'has been clipped to 1e-3"')
        if tau_y < 1e-3:
            tau_y = 1e-3
            logger.warning('Parameter `tau_y` too small for controller, '
                           + 'has been clipped to 1e-3"')
        # NOTE(review): tau_z is not clipped like the other gains --
        # possibly an oversight in the original; left unchanged here.
        if tau_w < 1e-3:
            tau_w = 1e-3
            logger.warning('Parameter `tau_w` too small for controller, '
                           + 'has been clipped to 1e-3"')
        if zeta < 1e-3:
            zeta = 1e-3
            logger.warning('Parameter `zeta` too small for controller, '
                           + 'has been clipped to 1e-3"')

        # desired acceleration in x and y (global coordinates, [m/s^2] )
        # PD tracking law: velocity error damped by 2*zeta/tau, position
        # error scaled by 1/tau^2.
        ax = (2. * zeta / tau_x * (ref.vel[0] - state.vel[0])
              + 1. / (tau_x**2) * (ref.pos[0] - state.pos[0]))
        ay = (2. * zeta / tau_y * (ref.vel[1] - state.vel[1])
              + 1. / (tau_y**2) * (ref.pos[1] - state.pos[1]))

        # Normalize by thrust
        thrust = np.linalg.norm(np.array([ax, ay, 9.81 + state.acc[2]]))
        ax /= thrust
        ay /= thrust

        # Rotate desired accelerations into the yaw-rotated inertial frame
        ax_b = ax * np.cos(state.euler[2]) + ay * np.sin(state.euler[2])
        ay_b = -ax * np.sin(state.euler[2]) + ay * np.cos(state.euler[2])

        # Get euler angles from rotation matrix
        action[1] = np.arcsin(-ay_b)
        action[0] = np.arcsin(ax_b / np.cos(action[1]))

        # Z-velocity command m/sec)
        action[2] = (2. * self._zeta_z / tau_z * (ref.vel[2] - state.vel[2])
                     + 1. / (tau_z**2) * (ref.pos[2] - state.pos[2]))

        # Yaw rate command (rad/sec)??
        # Wrap the yaw error into (-pi, pi] before applying the gain.
        yaw_err = (np.mod(ref.euler[2] - state.euler[2] + np.pi, 2 * np.pi)
                   - np.pi)
        action[3] = yaw_err / tau_w + ref.omega_b[2]
        return action

    @property
    def parameters(self):
        """Controller gain vector ``[tau_x, tau_y, tau_z, tau_w, zeta]``."""
        return self._params

    @parameters.setter
    # NOTE(review): the setter body appears to be missing here; as
    # written, `parameters.setter` is stacked onto the `parameter_space`
    # property below, which looks like truncation of the original source
    # -- confirm against the full file.
    @property
    def parameter_space(self):
        """Bounded space [0, 1]^5 for the controller gains."""
        return self._par_space
| [
37811,
4507,
324,
12204,
32563,
22741,
526,
15931,
198,
6738,
45715,
44199,
1330,
7820,
198,
6738,
45715,
44199,
13,
2777,
2114,
1330,
347,
6302,
14106,
198,
6738,
45715,
44199,
13,
268,
14259,
13557,
47003,
12204,
32563,
1330,
1812,
38469,... | 2.046963 | 1,959 |
from django.contrib import admin
from .models import Customer
# Register your models here.
admin.site.register(Customer,CustomerInfo)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
22092,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
220,
220,
220,
220,
198,
28482,
13,
15654,
13,
30238,
7,
44939,
11,
44939,
12360,
8,
198
] | 3.5 | 40 |
import os
import pathlib
import json
from datanode import remove
# check_datanode("/home/akanksha/BD_YAH/NameNode/") | [
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
33918,
198,
6738,
4818,
272,
1098,
1330,
4781,
198,
2,
2198,
62,
19608,
272,
1098,
7203,
14,
11195,
14,
461,
2283,
3099,
14,
14529,
62,
56,
18429,
14,
5376,
19667,
14,
4943
] | 2.9 | 40 |
import argparse
import torch
from run import load, run
if __name__ == '__main__':
main()
| [
11748,
1822,
29572,
198,
11748,
28034,
198,
6738,
1057,
1330,
3440,
11,
1057,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.939394 | 33 |
import numpy as np
import math
from sklearn.preprocessing import KBinsDiscretizer
def KBinsDiscretize(data_x, n_bins=0, alpha=3.322, encode="ordinal", strategy="uniform"):
    """Discretize continuous features with sklearn's ``KBinsDiscretizer``.

    Parameters
    ----------
    data_x : array-like of shape (n_samples, n_features)
        Continuous input data.
    n_bins : int, optional
        Number of bins per feature.  If 0 (default), the count is chosen
        by Sturges' rule, ``1 + alpha * log10(n_samples)``, capped at 256.
    alpha : float, optional
        Multiplier for Sturges' rule (3.322 ~= 1 / log10(2)).
    encode : str, optional
        Encoding of the binned result, forwarded to ``KBinsDiscretizer``.
    strategy : str, optional
        Binning strategy, forwarded to ``KBinsDiscretizer``.

    Returns
    -------
    The discretized copy of ``data_x``.
    """
    # Makes n_bins optional, calculates optimal n_bins by default
    # Sturges Rule - num_bins = 1 + 3.322 * log_10(num_inputs)
    if n_bins == 0:
        # Bug fixes vs. the original:
        #  * min() / math.floor() arguments were nested the wrong way
        #    (``min(math.floor(x, 256))``), which always raised TypeError;
        #  * log10 was applied to ``data_x[0].shape`` (a tuple).
        # len(data_x) is the sample count for both lists and ndarrays.
        n_bins = min(math.floor(1 + alpha * math.log10(len(data_x))), 256)
    # Forward the caller's encode/strategy instead of hard-coding them
    # (the original silently ignored both parameters).
    kbins = KBinsDiscretizer(n_bins, encode=encode, strategy=strategy)
    kbins.fit(data_x)
    binned_x = kbins.transform(data_x)
    return binned_x
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
14204,
1040,
15642,
1186,
7509,
198,
198,
4299,
14204,
1040,
15642,
1186,
1096,
7,
7890,
62,
87,
11,
299,
62,
65,
1040,
28,
15,
11,
17130... | 2.375 | 256 |
#!/usr/bin/python
# author luke
#常量会用大写的来命名
MAXKEY=1000
if __name__ == '__main__':
use_hash()
hash('xiongda') #两次启动不一样,是因为加入了盐值
#Time complexity is O(1); space complexity is not O(1)
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
1772,
300,
4649,
198,
198,
2,
30585,
116,
34932,
237,
27670,
248,
18796,
101,
32014,
37863,
247,
21410,
30266,
98,
37772,
121,
28938,
235,
198,
22921,
20373,
28,
12825,
198,
198,
361,
1159... | 1.185714 | 140 |
'''
Threaded Port Scanner 1.0.0:
A python code to demonstrate demonstrates a Threaded Port scanner built using Python 3.x
We here use threading to speed up the process
Note: Port scanning is dangerous, so you are advised to not to use
this script without permission
'''
__author__ = "Rishit Dagli"
__copyright__ = ""
__credits__ = ["Rishit Dagli"]
__license__ = "Apache License 2.0"
__version__ = "1.0.0"
__maintainer__ = "Rishit Dagli"
__email__ = "rishit.dagli@gmail.com"
__status__ = "Development"
import socket
import time
import threading
from queue import Queue
# set Timeout time
socket.setdefaulttimeout(0.25)
print_lock = threading.Lock()
target = input('Enter the host to be scanned: ')
t_IP = socket.gethostbyname(target)
print ('Starting scan on host: ', t_IP)
def portscan(port):
    '''
    @author = "Rishit Dagli"

    Probe TCP `port` on the global target `t_IP`.

    Prints "<port> is open" (serialized through the global `print_lock`)
    when a connection can be established; closed or filtered ports are
    ignored silently.
    '''
    # The context manager guarantees the socket is closed on every path.
    # The original leaked the descriptor: `s.connect()` returns None, so
    # `con.close()` raised AttributeError into a bare `except: pass` and
    # `s` itself was never closed.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.connect((t_IP, port))
        except OSError:
            # Timeout / refused / unreachable: the port is not open.
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and programming errors.
            return
        with print_lock:
            print(port, 'is open')
def threader():
    '''
    @author = "Rishit Dagli"

    Worker loop: repeatedly pull a port number from the global queue `q`,
    scan it with `portscan`, and mark the task done.  Runs forever, so it
    must only be used in daemon threads -- the process exits once
    `q.join()` returns in the main thread.
    '''
    while True:
        worker = q.get()
        portscan(worker)
        q.task_done()
q = Queue()
startTime = time.time()
for x in range(100):
t = threading.Thread(target = threader)
t.daemon = True
t.start()
for worker in range(1, 500):
q.put(worker)
# Join the results from threads
q.join()
# Print time taken
print('Time taken:', time.time() - startTime)
# print("functions- portscan, threader")
# print(Docs:)
# print(portscan.__doc__)
# print(threader.__doc__)
| [
7061,
6,
201,
198,
16818,
276,
4347,
20937,
1008,
352,
13,
15,
13,
15,
25,
201,
198,
201,
198,
32,
21015,
2438,
284,
10176,
15687,
257,
14122,
276,
4347,
27474,
3170,
1262,
11361,
513,
13,
87,
201,
198,
1135,
994,
779,
4704,
278,
... | 2.389503 | 724 |
#!/usr/bin/python3
import re
import sys
good_text = []
text = sys.stdin.readlines()
for line in text:
line = re.split(r"\W+", line)
line[0] = re.sub(r'(?<!^)(?=[A-Z])', '_', line[0]).upper()
print("#define %-25s {%3s, %3s, %3s, 255}" %
(line[0], line[2], line[3], line[4]))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
302,
198,
11748,
25064,
198,
198,
11274,
62,
5239,
796,
17635,
198,
198,
5239,
796,
25064,
13,
19282,
259,
13,
961,
6615,
3419,
198,
198,
1640,
1627,
287,
2420,
25,
198,
220... | 1.948387 | 155 |
# -*- coding: utf-8 -*-
import math
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.db import transaction
from django.db.models import F, Q
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, _get_queryset
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.generic.edit import ModelFormMixin
from django.views.decorators.csrf import csrf_protect
try:
from django.views import generic
except ImportError:
try:
from cbv import generic
except ImportError:
raise ImportError('If you using django version < 1.3 you should install django-cbv for pybb')
from pure_pagination import Paginator
from pybb.models import Category, Forum, Topic, Post, TopicReadTracker, ForumReadTracker, PollAnswerUser
from pybb.forms import PostForm, AdminPostForm, EditProfileForm, AttachmentFormSet, PollAnswerFormSet, PollForm
from pybb.templatetags.pybb_tags import pybb_editable_by, pybb_topic_poll_not_voted
from pybb.templatetags.pybb_tags import pybb_topic_moderated_by
from pybb import defaults
def filter_hidden(request, queryset_or_model):
    """
    Resolve *queryset_or_model* (a model, manager or queryset) into a
    queryset and, for non-staff users, exclude objects flagged ``hidden``.
    Staff users see everything.
    """
    qs = _get_queryset(queryset_or_model)
    return qs if request.user.is_staff else qs.filter(hidden=False)
@login_required
@login_required
@login_required
@login_required
@login_required
@permission_required('pybb.block_users') | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
10688,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
202... | 2.993641 | 629 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import apisix.runner.http.method as RunnerMethod
from a6pluginproto import Method as A6Method
| [
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383,
... | 3.99095 | 221 |
import sys
import typing
def rule_add(type: typing.Union[int, str] = 'GOAL'):
    '''Add a boid rule to the current boid state.

    :param type: Rule type, one of:
        ``GOAL`` Goal, Go to assigned object or loudest assigned signal source.
        ``AVOID`` Avoid, Get away from assigned object or loudest assigned signal source.
        ``AVOID_COLLISION`` Avoid Collision, Maneuver to avoid collisions with other boids and deflector objects in near future.
        ``SEPARATE`` Separate, Keep from going through other boids.
        ``FLOCK`` Flock, Move to center of neighbors and match their velocity.
        ``FOLLOW_LEADER`` Follow Leader, Follow a boid or assigned object.
        ``AVERAGE_SPEED`` Average Speed, Maintain speed, flight level or wander.
        ``FIGHT`` Fight, Go to closest enemy and attack when in range.
    :type type: typing.Union[int, str]
    '''
    pass
def rule_del():
    '''Delete the current boid rule.
    '''
    pass
def rule_move_down():
    '''Move the boid rule down in the list.
    '''
    pass
def rule_move_up():
    '''Move the boid rule up in the list.
    '''
    pass
def state_add():
    '''Add a boid state to the particle system.
    '''
    pass
def state_del():
    '''Delete the current boid state.
    '''
    pass
def state_move_down():
    '''Move the boid state down in the list.
    '''
    pass
def state_move_up():
    '''Move the boid state up in the list.
    '''
    pass
| [
11748,
25064,
198,
11748,
19720,
628,
198,
4299,
3896,
62,
2860,
7,
4906,
25,
19720,
13,
38176,
58,
600,
11,
965,
60,
796,
705,
11230,
1847,
6,
2599,
198,
220,
220,
220,
705,
7061,
4550,
257,
1489,
312,
3896,
284,
262,
1459,
1489,
... | 2.923414 | 457 |
# -*- coding: utf-8 -*-
"""
/dms/export_dms/help_form.py
.. enthaelt die kompletten Kontext-Hilfetexte fuer die Exportseite
Django content Management System
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 29.11.2007 Beginn der Arbeit
"""
from django.utils.translation import ugettext as _
help_form = {}
# ----------------------------------------------------------------
help_form['content_type'] = {
'title' : _(u'Format der Exportdatei'),
'help' : _(u"""<p>
Hier wird das Export-Format festgelegt.
</p>
""") }
# ----------------------------------------------------------------
help_form['tab_base'] = {
'title' : _(u'Basisdaten'),
'info' : _(u"""<p>
Dieses Objekt wird mit eventuell vorhandenen Unterobjekten im XML-Format exportiert. Diese
Funktion wird insbesondere zur selektiven Datensicherung sowie zum Datenaustausch
zweier Djambala-Systeme benötigt.</p>
<p>
<b>Aus technischen Gründen werden von Datei-Objekten nur die <i>Beschreibungen</i> exportiert,
nicht aber die eigentlichen Dateien!</b>
</p>""") }
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
14,
67,
907,
14,
39344,
62,
67,
907,
14,
16794,
62,
687,
13,
9078,
198,
198,
492,
920,
3099,
2120,
4656,
479,
296,
1154,
32407,
17431,
5239,
12,
39,
346,... | 2.521368 | 468 |
import numpy as np
import os
import glob
import optuna.trial
import pandas as pd
from easyPheno.preprocess import base_dataset
from easyPheno.utils import helper_functions
from easyPheno.model import _model_functions, _base_model
from easyPheno.evaluation import eval_metrics, results_analysis
def post_generate_feature_importances(results_directory_genotype_level: str, data_dir: str):
    """
    Summarize the results for each phenotype and datasplit for all models and save in a file.

    For every study / phenotype directory found below
    ``results_directory_genotype_level``, the dataset is reloaded, the best
    model (per saved hyperparameters) is retrained on train+val data,
    re-evaluated on the test fold, and its feature importances (tree-based
    importances or linear coefficients) are written to
    ``final_model_feature_importances.csv``.

    :param results_directory_genotype_level: Results directory at the level of the name of the genotype matrix
    :param data_dir: directory that contains the genotype/phenotype matrices
        referenced by the results
    """
    genotype_name = results_directory_genotype_level.split('/')[-1] + '.h5'
    for study in os.listdir(results_directory_genotype_level):
        study_name = study + '.csv'
        for phenotype in os.listdir(results_directory_genotype_level + '/' + study):
            print('++++++++++++++ PHENOTYPE ' + phenotype + ' ++++++++++++++')
            current_directory = results_directory_genotype_level + '/' + study + '/' + phenotype + '/'
            dataset = base_dataset.Dataset(
                data_dir=data_dir, genotype_matrix_name=genotype_name, phenotype_matrix_name=study_name,
                phenotype=phenotype, datasplit='cv-test', n_outerfolds=5, n_innerfolds=5,
                test_set_size_percentage=20, val_set_size_percentage=20,
                encoding='012', maf_percentage=10
            )
            # Retrain on full train + val data with best hyperparams and apply on test
            print("## Retrain best model and test ##")
            outerfold_info = dataset.datasplit_indices['outerfold_0']
            X_test, y_test, sample_ids_test = \
                dataset.X_full[outerfold_info['test']], dataset.y_full[outerfold_info['test']], \
                dataset.sample_ids_full[outerfold_info['test']]
            # Everything NOT in the test fold is used for retraining.
            X_retrain, y_retrain, sample_ids_retrain = \
                dataset.X_full[~np.isin(np.arange(len(dataset.X_full)), outerfold_info['test'])], \
                dataset.y_full[~np.isin(np.arange(len(dataset.y_full)), outerfold_info['test'])], \
                dataset.sample_ids_full[~np.isin(np.arange(len(dataset.sample_ids_full)),
                                                 outerfold_info['test'])]
            snp_ids_df = pd.DataFrame(dataset.snp_ids)
            print('Saving SNP ids')
            print(snp_ids_df.shape)
            snp_ids_df.to_csv(
                current_directory + phenotype + '_snp_ids.csv',
                sep=',', decimal='.', float_format='%.10f',
                index=False
            )
            for path in glob.glob(current_directory + '*'):
                # Model names are encoded in the results folder name.
                models = path.split('/')[-1].split('_')[3:-2]
                print('working on ' + path)
                for current_model in models:
                    print('Model: ' + current_model)
                    # NOTE(review): bare except -- any failure (missing file,
                    # parse error, ...) is treated as "no results"; deliberate
                    # best-effort behavior, kept as-is.
                    try:
                        results_file = glob.glob(path + '/*.csv')[0]
                        results = pd.read_csv(results_file)
                        results = results.loc[:, [current_model in col for col in results.columns]]
                        eval_dict_saved = results_analysis.result_string_to_dictionary(
                            result_string=results[current_model + '___eval_metrics'][0]
                        )
                    except:
                        print('No results file')
                        continue
                    if current_model in ['randomforest', 'xgboost', 'linearregression', 'elasticnet']:
                        # NOTE(review): shadows the outer current_directory;
                        # harmless here because the outer loop reassigns it,
                        # but worth renaming if this function is extended.
                        current_directory = path + '/' + current_model + '/'
                        if os.path.exists(current_directory + 'final_model_feature_importances.csv'):
                            print('Already existing')
                            continue
                        try:
                            modelpath = glob.glob(current_directory + '/unfitted_model*')[0].split('/')[-1]
                        except:
                            continue
                        """
                        model = _model_functions.load_retrain_model(
                            path=current_directory, filename=modelpath, X_retrain=X_retrain, y_retrain=y_retrain
                        )
                        """
                        best_params = results_analysis.result_string_to_dictionary(
                            result_string=results[current_model + '___best_params'][0]
                        )
                        # Presence of 'test_rmse' among saved metrics marks a
                        # regression task; otherwise classification.
                        task = 'regression' if 'test_rmse' in eval_dict_saved.keys() else 'classification'
                        trial = optuna.trial.FixedTrial(params=best_params)
                        helper_functions.set_all_seeds()
                        model: _base_model.BaseModel = helper_functions.get_mapping_name_to_class()[
                            current_model](
                            task=task, optuna_trial=trial,
                            n_outputs=len(np.unique(dataset.y_full)) if task == 'classification' else 1,
                            **{}
                        )
                        model.retrain(X_retrain=X_retrain, y_retrain=y_retrain)
                        y_pred_test = model.predict(X_in=X_test)
                        eval_scores = \
                            eval_metrics.get_evaluation_report(y_pred=y_pred_test, y_true=y_test, task=model.task,
                                                               prefix='test_')
                        print('Compare Results from initial testing to refitting')
                        print('New fitting: ')
                        print(eval_scores)
                        print('Old fitting: ')
                        print(eval_dict_saved)
                        # Keep at most the 1000 most important SNPs.
                        top_n = min(len(dataset.snp_ids), 1000)
                        feat_import_df = pd.DataFrame()
                        if current_model in ['randomforest', 'xgboost']:
                            feature_importances = model.model.feature_importances_
                            sorted_idx = feature_importances.argsort()[::-1][:top_n]
                            feat_import_df['snp_ids_standard'] = dataset.snp_ids[sorted_idx]
                            feat_import_df['feat_importance_standard'] = feature_importances[sorted_idx]
                        else:
                            # Linear models: one coefficient vector per output
                            # dimension (coef_ may be 1-D or 2-D).
                            coefs = model.model.coef_
                            dims = coefs.shape[0] if len(coefs.shape) > 1 else 1
                            for dim in range(dims):
                                coef = coefs[dim] if len(coefs.shape) > 1 else coefs
                                sorted_idx = coef.argsort()[::-1][:top_n]
                                feat_import_df['snp_ids_' + str(dim)] = dataset.snp_ids[sorted_idx]
                                feat_import_df['coefficients_' + str(dim)] = coef[sorted_idx]
                        feat_import_df.to_csv(
                            current_directory + 'final_model_feature_importances.csv',
                            sep=',', decimal='.', float_format='%.10f',
                            index=False
                        )
# One-off invocation for the Arabidopsis thaliana Frontiers-paper experiment;
# both paths are hard-coded to the lab's storage layout.
post_generate_feature_importances(
    results_directory_genotype_level=
    '/bit_storage/Workspace/Maura/PhenotypePred/FrontiersPaperExperiments/A_thal/ld_pruned_arabidopsis_2029_maf001',
    data_dir='/bit_storage/Workspace/Maura/PhenotypePred/data/ArabidopsisThaliana/'
)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
15095,
198,
198,
11748,
2172,
9613,
13,
45994,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
2562,
47,
831,
78,
13,
3866,
14681,
1330,
2779,
62,
19608,
292,
316,
19... | 1.832261 | 4,042 |
import os
from os.path import abspath, basename, dirname, join, normpath
from django.conf import global_settings
# Project layout: DJANGO_ROOT is the package containing this settings module,
# SITE_ROOT its parent (the repository root).
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
SITE_ROOT = dirname(DJANGO_ROOT)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Collected static files land in <site>/assets; sources live in <site>/static.
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    normpath(join(SITE_ROOT, 'static')),
)
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
MEDIA_URL = '/media/'
INSTALLED_APPS = (
    # bootstrap_admin is listed before django.contrib.admin, presumably so
    # its templates override the stock admin ones -- confirm against the
    # bootstrap_admin docs.
    'bootstrap_admin',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    # Third Party
    'rest_framework',
    'rest_framework.authtoken',
    'rest_framework_gis',
    'geoposition',
    'corsheaders',
    'versatileimagefield',
    'storages',
    'django_filters',
    'rest_framework_swagger',
    'floppyforms',
    # Apps
    'towns',
    'motels',
    'rooms',
    'core',
    'amenities',
    'comments',
    'restapi',
)
# Request/response middleware stack.
# FIX: 'django.middleware.common.CommonMiddleware' was listed twice (it would
# run twice per request), and 'corsheaders.middleware.CorsMiddleware' came
# after it -- django-cors-headers requires CorsMiddleware to be placed as
# early as possible, before CommonMiddleware, so CORS headers are also added
# to APPEND_SLASH redirects.
MIDDLEWARE_CLASSES = (
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
# CORS settings
CORS_ORIGIN_ALLOW_ALL = False
ROOT_URLCONF = 'motelsAPI.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, '../templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'motelsAPI.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
VERSATILEIMAGEFIELD_SETTINGS = {
# The amount of time, in seconds, that references to created images
# should be stored in the cache. Defaults to `2592000` (30 days)
'cache_length': 2592000,
# The name of the cache you'd like `django-versatileimagefield` to use.
# Defaults to 'versatileimagefield_cache'. If no cache exists with the name
# provided, the 'default' cache will be used instead.
'cache_name': 'versatileimagefield_cache',
# The save quality of modified JPEG images. More info here:
# http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
# Defaults to 70
'jpeg_resize_quality': 70,
# The name of the top-level folder within storage classes to save all
# sized images. Defaults to '__sized__'
'sized_directory_name': '__sized__',
# The name of the directory to save all filtered images within.
# Defaults to '__filtered__':
'filtered_directory_name': '__filtered__',
# The name of the directory to save placeholder images within.
# Defaults to '__placeholder__':
'placeholder_directory_name': '__placeholder__',
# Whether or not to create new images on-the-fly. Set this to `False` for
# speedy performance but don't forget to 'pre-warm' to ensure they're
# created and available at the appropriate URL.
'create_images_on_demand': True
}
VERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {
'common_size': [
('thumbnail_image', 'thumbnail__100x100'),
('small_image', 'thumbnail__160x120'),
('medium_image', 'thumbnail__320x240'),
('large_image', 'thumbnail__640x480'),
('full_image', 'url')
]
}
# django-rest-swagger configuration.
SWAGGER_SETTINGS = {
    'exclude_namespaces': [],
    'api_version': '0.1',
    'api_path': '/',
    'enabled_methods': [
        'get',
        'post',
    ],
    'api_key': '',
    'is_authenticated': True,
    'is_superuser': True,
    'permission_denied_handler': None,
    'info': {
        'contact': '',
        # FIX: this value used to be two adjacent string literals which
        # implicitly concatenated to "...Cinco Letrasapi swagger
        # documentation" (missing separator between the lines).
        'description': 'This is Cinco Letras api swagger documentation',
        'license': '',
        'licenseUrl': '',
        'termsOfServiceUrl': '',
        'title': '5 Letras Api',
    },
    'doc_expansion': 'list',
}
# django-geoposition widget: allowed zoom range of the picker map.
GEOPOSITION_MAP_OPTIONS = {
    'minZoom': 3,
    'maxZoom': 15,
}
GEOPOSITION_MARKER_OPTIONS = {
    'cursor': 'move'
}
# S3 storage backends are currently disabled (commented out below).
# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_PRELOAD_METADATA = True
# Serve S3 URLs without signed query strings.
AWS_QUERYSTRING_AUTH = False
# Credentials were read from the environment via django-configurations'
# values.Value; left commented out here.
# AWS_ACCESS_KEY_ID = values.Value(environ_prefix=None)
# AWS_SECRET_ACCESS_KEY = values.Value(environ_prefix=None)
# AWS_STORAGE_BUCKET_NAME = values.Value(environ_prefix=None)
| [
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
2352,
6978,
11,
1615,
12453,
11,
26672,
3672,
11,
4654,
11,
2593,
6978,
198,
6738,
42625,
14208,
13,
10414,
1330,
3298,
62,
33692,
198,
198,
35028,
1565,
11230,
62,
13252,
2394,
796,
26672... | 2.349136 | 2,257 |
"""Test numpy AoS v SoA for H layout"""
#todo: Clean this up - very quickly knocked up.
import numpy as np
import time
def numpy_SoA_v_AoS(L=10000):
    """
    Compare reading and writing from/to numpy dataset using Array of Structures v Strucure of Arrays

    Writes and reads the ``sim_id`` field of a structured array (AoS) and of
    a plain per-field array (SoA), timing each and printing the speedups.

    :param L: number of rows/elements in the arrays being benchmarked
    """
    # Field layout of the libEnsemble-style history array.
    libE_fields = [('sim_id',int),
                   ('given',bool),
                   ('given_time',float),
                   ('lead_rank',int),
                   ('returned',bool),
                   ('paused',bool),
                   ]
    H = np.zeros(L, dtype=libE_fields)
    print(__doc__)
    print("Array length = %s \n" % L)
    print("Writing numpy array values:")
    start = time.time()
    # Compute AoS as numpy array ---------------------------
    H['sim_id'][:len(H)] = 1
    #H['given'][:len(H)] = 1
    #H['given_time'][:len(H)] = 1.5
    #H['paused'][:len(H)] = 1
    end = time.time()
    time_AoS = end-start
    print("Write Time AoS = %.8f" % time_AoS)
    #Check
    #print(H)
    # Compute SoA as sep. numpy arrays ---------------------
    #First sep structures
    SoA_sim_id=np.zeros(L, dtype=int)
    SoA_given=np.zeros(L, dtype=bool)
    SoA_given_time=np.zeros(L, dtype=float)
    SoA_lead_rank=np.zeros(L, dtype=int)
    SoA_returned=np.zeros(L, dtype=bool)
    SoA_paused=np.zeros(L, dtype=bool)
    start = time.time()
    SoA_sim_id[:L]=1
    #SoA_given[:L]=1
    #SoA_given_time[:L]=1.5
    #SoA_paused[:L]=1
    end = time.time()
    time_SoA = end - start
    print("Write Time SoA = %.8f" % time_SoA)
    #Check
    #print(SoA_sim_id)
    #print(SoA_given)
    #print(SoA_given_time)
    #print(SoA_lead_rank)
    #print(SoA_returned)
    #print(SoA_paused)
    print("Write Time Speedup = %.2f" % (time_AoS/time_SoA) )
    # Repeat ---------------------------------------------------------------
    print('\nRepeat to account for potential paging issues.....')
    print("Writing numpy array values 2:")
    start = time.time()
    H['sim_id'][:len(H)] = 2
    end = time.time()
    time_AoS = end-start
    print("Write Time AoS 2 = %.8f" % time_AoS)
    start = time.time()
    SoA_sim_id[:L]=2
    end = time.time()
    time_SoA = end - start
    print("Write Time SoA 2 = %.8f" % time_SoA)
    print("Write Time Speedup 2 = %.2f" % (time_AoS/time_SoA) )
    # Accessing --------------------------------------------------------
    print("\nReading numpy array values:")
    #Time for appending to list might be significant???
    #AoS---------------------------------------------------
    list_AoS = []
    start = time.time()
    for i in range(L):
        list_AoS.append(H['sim_id'][i])
    #print(list_AoS)
    end = time.time()
    time_AoS = end-start
    print("Read sim_id Time AoS = %.8f" % time_AoS)
    #SoA---------------------------------------------------
    list_SoA=[]
    start = time.time()
    for i in range(L):
        list_SoA.append(SoA_sim_id[i])
    #print(list_SoA)
    end = time.time()
    time_SoA = end-start
    print("Read sim_id Time SoA = %.8f" % time_SoA)
    print("Read Time Speedup = %.2f" % (time_AoS/time_SoA) )
    #Try with pre-initialised list [*Update - looks like its the same perf.]
    #AoS---------------------------------------------------
    print("\nReading numpy array values with pre-initialised list:")
    plist_AoS = list(range(L))
    start = time.time()
    for i in range(L):
        plist_AoS[i]=(H['sim_id'][i])
    #print(plist_AoS)
    end = time.time()
    time_AoS = end-start
    print("Read sim_id Time AoS = %.8f" % time_AoS)
    #SoA---------------------------------------------------
    plist_SoA = list(range(L))
    start = time.time()
    for i in range(L):
        plist_SoA[i]=(SoA_sim_id[i])
    #print(plist_SoA)
    end = time.time()
    time_SoA = end-start
    print("Read sim_id Time SoA = %.8f" % time_SoA)
    print("Read Time Speedup = %.2f" % (time_AoS/time_SoA) )
    print("")
if __name__ == "__main__":
    numpy_SoA_v_AoS()
| [
37811,
14402,
299,
32152,
27378,
50,
410,
1406,
32,
329,
367,
12461,
37811,
198,
198,
2,
83,
24313,
25,
5985,
428,
510,
532,
845,
2952,
13642,
510,
13,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
4299,
299,
3... | 2.414853 | 1,562 |
'''
To rotate geometrical objects.
- Initial contribution by Gary Klindt
'''
from scipy.linalg import norm, det
import numpy as np
def rotateVector(vector, alpha, axis, eps=1e-8):
    """
    Rotate *vector* by the angle *alpha* (radians) about *axis*.

    A degenerate axis (norm below *eps*) leaves the vector unchanged.
    Inputs may be any length-3 sequences; an ``np.ndarray`` is returned.
    """
    vec = np.array(vector)
    ax = np.array(axis)
    length = norm(ax)
    if length < eps:
        return vec
    return np.dot(rotationMatrix(alpha, ax / length), vec)
def rotationMatrix(alpha, axis, eps=1e-8):
    """
    Return the 3x3 rotation matrix for a rotation of *alpha* radians
    about *axis*, built from the quaternion representation.

    Angles with magnitude below *eps* yield the identity. A sanity check
    verifies det(R) == 1 and raises ``RuntimeError`` otherwise.
    - not tested with negative angles. Expected a value from [0,pi] (arccos)
    """
    if abs(alpha) < eps:
        return np.diag([1, 1, 1])
    (a, b, c, d) = angleAxis2Quaternion(alpha, axis)
    res = np.array([
        [-1 + 2 * a * a + 2 * d * d, 2 * (a * b - c * d), 2 * (a * c + b * d)],
        [2 * (a * b + c * d), -1 + 2 * b * b + 2 * d * d, 2 * (b * c - a * d)],
        [2 * (a * c - b * d), 2 * (b * c + a * d), -1 + 2 * c * c + 2 * d * d],
    ])
    if abs(det(res) - 1) > eps:
        raise RuntimeError("Rotation matrix det not equal to 1: det=", det(res))
    return res
def angleAxis2Quaternion(alpha, axis):
    """
    Convert an axis-angle rotation to quaternion components.

    :param alpha: rotation angle in radians
    :param axis: rotation axis, any sequence of 3 numbers (need not be
        normalized)
    :return: tuple ``(q1, q2, q3, q4)`` with scalar part
        ``q4 = cos(alpha / 2)`` and vector part ``sin(alpha / 2)`` times
        the unit axis
    """
    unit = np.array(axis)
    unit = unit / norm(unit)  # normalize so the vector part has magnitude sin(alpha/2)
    half = alpha / 2
    s = np.sin(half)
    return (s * unit[0], s * unit[1], s * unit[2], np.cos(half))
| [
7061,
6,
198,
2514,
23064,
4903,
908,
8143,
5563,
13,
198,
12,
20768,
10156,
416,
10936,
14770,
521,
83,
198,
7061,
6,
198,
198,
6738,
629,
541,
88,
13,
75,
1292,
70,
1330,
2593,
11,
1062,
198,
11748,
299,
32152,
355,
45941,
628,
... | 2.185484 | 744 |
__author__ = 'Devesh Bajpai'
'''
https://codeforces.com/problemset/problem/519/B
Solution: Calculate the sum of each round of errors. The difference of first and second will give the error resolved
by second round. Similarly, the difference of second and third will give the error resolved by third round.
'''
if __name__ == "__main__":
raw_input() # ignoring n
first_errors = map(int, raw_input().split(" "))
second_errors = map(int, raw_input().split(" "))
third_errors = map(int, raw_input().split(" "))
solve(first_errors, second_errors, third_errors)
| [
834,
9800,
834,
796,
705,
5005,
1158,
71,
347,
1228,
49712,
6,
198,
198,
7061,
6,
198,
5450,
1378,
19815,
891,
273,
728,
13,
785,
14,
1676,
22143,
316,
14,
45573,
14,
47785,
14,
33,
198,
198,
46344,
25,
27131,
378,
262,
2160,
286,... | 3.068063 | 191 |
from tkinter import *
from tkinter.ttk import *
from time import strftime
# Build the clock window: a single large label on the root window.
sws = Tk()
sws.title("Clock")
label=Label(sws, font=("",35),background="blue",foreground="pink")
label.pack(anchor='center')
# NOTE(review): time() is not defined in this chunk -- presumably a helper
# (defined above, outside this view) that updates the label via strftime on
# a timer; confirm against the full file.
time()
mainloop()
| [
6738,
256,
74,
3849,
1330,
1635,
201,
198,
6738,
256,
74,
3849,
13,
926,
74,
1330,
1635,
201,
198,
201,
198,
6738,
640,
1330,
965,
31387,
201,
198,
2032,
82,
796,
309,
74,
3419,
201,
198,
2032,
82,
13,
7839,
7203,
44758,
4943,
201... | 2.408163 | 98 |
"""
Definition of PyTorch "Dataset" that iterates through compressed videos
and return compressed representations (I-frames, motion vectors,
or residuals) for training or testing.
"""
import os
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
from coviar import get_num_frames
from coviar import load
from models import transforms
import torchvision
import pdb
import pandas as pd
from tqdm import tqdm, trange
from sklearn.utils import shuffle
GOP_SIZE = 12  # frames per group of pictures (GOP) assumed by the coviar loader -- confirm against the encoding
| [
37811,
198,
36621,
286,
9485,
15884,
354,
366,
27354,
292,
316,
1,
326,
11629,
689,
832,
25388,
5861,
198,
392,
1441,
25388,
24612,
357,
40,
12,
37805,
11,
6268,
30104,
11,
220,
198,
273,
29598,
82,
8,
329,
3047,
393,
4856,
13,
198,... | 3.3875 | 160 |
import pytest
from wordle.heartify import heartify
@pytest.mark.parametrize(
"text, theme, mode, black, yellow, green, expected",
[
(
"""Wordle 235 2/6
🟩⬛⬛🟨⬛
🟩🟩🟩🟩🟩
""",
"heart",
"dark",
None,
None,
None,
"""Wordle 235 2/6
💚🖤🖤💛🖤
💚💚💚💚💚
""",
),
],
)
| [
11748,
12972,
9288,
198,
198,
6738,
1573,
293,
13,
11499,
1958,
1330,
2612,
1958,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
220,
220,
366,
5239,
11,
7505,
11,
4235,
11,
2042,
11,
7872,
11,
4077,
1... | 1.405405 | 259 |
# Copyright 2020 ETH Zurich and University of Bologna.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
#
# Fabian Schuiki <fschuiki@iis.ee.ethz.ch>
# Florian Zaruba <zarubaf@iis.ee.ethz.ch>
import math
import pathlib
from copy import copy
from mako.lookup import TemplateLookup
from . import util
templates = TemplateLookup(directories=[pathlib.Path(__file__).parent],
output_encoding="utf-8")
xbars = list()
code_package = ""
code_module = dict()
# An address map.
# A leaf node in the address map.
# An interconnect node in the address map.
# A route within an address map node.
# Create a new route which unifies two other routes.
# An address range.
# A parameter.
# AXI struct emission.
# AXI-Lite struct emission.
# Register bus struct emission
# An AXI bus.
# An AXI-Lite bus.
# A register bus.
# A crossbar.
# An AXI crossbar.
# An AXI-Lite crossbar.
# Generate the code.
| [
2,
15069,
12131,
35920,
43412,
290,
2059,
286,
347,
928,
2616,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
11,
766,
38559,
24290,
329,
3307,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
... | 2.821727 | 359 |
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import stochastic
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
555,
715,
395,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
5254,
13,
39873,
62,
7890,
1330,
27565,
6601,
198,
6738,
12972,
20259,
1330,
3995,
354,
3477,
628
] | 3.717949 | 39 |
"""Tests for matrix_operations.py."""
import os
import unittest
from util import matrix_operations
# Directory containing this test file. NOTE(review): unused in this chunk --
# presumably consumed by tests defined elsewhere in the file; confirm.
TEST_DIRECTORY = os.path.dirname(__file__)
if __name__ == '__main__':
    # exit=False keeps the process alive so the result object can be inspected.
    res = unittest.main(verbosity=3, exit=False)
| [
37811,
51,
3558,
329,
17593,
62,
3575,
602,
13,
9078,
526,
15931,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
7736,
1330,
17593,
62,
3575,
602,
198,
198,
51,
6465,
62,
17931,
23988,
15513,
796,
28686,
13,
6978,
13,
15908,... | 2.695122 | 82 |
# flake8: noqa
"""
Wrappers around matplotlib
"""
from __future__ import absolute_import, division, print_function
__version__ = '2.1.2'
import utool as ut
ut.noinject(__name__, '[plottool_ibeis.__init__]')
# Hopefully this was imported sooner. TODO remove dependency
#from guitool_ibeis import __PYQT__
#import guitool_ibeis.__PYQT__ as __PYQT__
from plottool_ibeis import __MPL_INIT__
__MPL_INIT__.init_matplotlib()
import matplotlib as mpl
#mpl.use('Qt4Agg')
import matplotlib.pyplot as plt
from plottool_ibeis import plot_helpers as ph
from plottool_ibeis import plot_helpers
from plottool_ibeis import mpl_keypoint
from plottool_ibeis import mpl_keypoint as mpl_kp
from plottool_ibeis import mpl_sift as mpl_sift
from plottool_ibeis import draw_func2
from plottool_ibeis import draw_func2 as df2
from plottool_ibeis import fig_presenter
from plottool_ibeis import custom_constants
from plottool_ibeis import custom_figure
from plottool_ibeis import draw_sv
from plottool_ibeis import viz_featrow
from plottool_ibeis import viz_keypoints
from plottool_ibeis import viz_image2
from plottool_ibeis import plots
from plottool_ibeis import interact_annotations
from plottool_ibeis import interact_keypoints
from plottool_ibeis import interact_multi_image
from plottool_ibeis import interactions
from plottool_ibeis import interact_impaint
from plottool_ibeis import color_funcs
#from plottool_ibeis import abstract_iteraction
# TODO utoolify this
# Submodules handed to utool's util_importer.dynamic_import() below.
# Each entry is (submodule name, second element) -- the second element is
# always None here; presumably an optional member list understood by
# util_importer; confirm against utool.
IMPORT_TUPLES = [
    ('plot_helpers', None),
    ('fig_presenter', None),
    ('custom_constants', None),
    ('custom_figure', None),
    ('plots', None),
    ('draw_func2', None),
    ('interact_impaint', None),
    ('interactions', None),
    ('interact_multi_image', None),
    ('interact_keypoints', None),
    ('interact_matches', None),
    #('abstract_iteraction', None),
    ('nx_helpers', None),
]
# The other module shouldn't exist.
# Functions in it need to be organized
from plottool_ibeis.plots import draw_hist_subbin_maxima
#from plottool_ibeis.draw_func2 import * # NOQA
from plottool_ibeis.mpl_keypoint import draw_keypoints
from plottool_ibeis.mpl_sift import draw_sifts, render_sift_on_patch
from plottool_ibeis import fig_presenter
import utool
#def reload_subs():
# rrr()
# df2.rrr()
# plot_helpers.rrr()
# draw_sv.rrr()
# viz_keypoints.rrr()
# viz_image2.rrr()
# rrr()
#rrrr = reload_subs
import sys
# Dynamic-import switch: enabled unless the process was started with --nodyn.
__DYNAMIC__ = '--nodyn' not in sys.argv
#__DYNAMIC__ = '--dyn' in sys.argv
"""
python -c "import plottool_ibeis" --dump-plottool_ibeis-init
python -c "import plottool_ibeis" --update-plottool_ibeis-init
"""
DOELSE = False
if __DYNAMIC__:
    # TODO: import all utool external prereqs. Then the imports will not import
    # anything that has already in a toplevel namespace
    # COMMENTED OUT FOR FROZEN __INIT__
    # Dynamically import listed util libraries and their members.
    from utool._internal import util_importer
    # FIXME: this might actually work with rrrr, but things arent being
    # reimported because they are already in the modules list
    # Skip generated cython variants when expanding IMPORT_TUPLES.
    ignore_endswith = ['_cyth']
    import_execstr = util_importer.dynamic_import(__name__, IMPORT_TUPLES, ignore_endswith=ignore_endswith)
    exec(import_execstr)
    DOELSE = False
else:
    # Do the nonexec import (can force it to happen no matter what if always set
    # to True)
    DOELSE = True
if DOELSE:
pass
# <AUTOGEN_INIT>
from plottool_ibeis import plot_helpers
from plottool_ibeis import fig_presenter
from plottool_ibeis import custom_constants
from plottool_ibeis import custom_figure
from plottool_ibeis import plots
from plottool_ibeis import draw_func2
from plottool_ibeis import interact_impaint
from plottool_ibeis import interactions
from plottool_ibeis import interact_multi_image
from plottool_ibeis import interact_keypoints
from plottool_ibeis import interact_matches
from plottool_ibeis import nx_helpers
from plottool_ibeis.plot_helpers import (SIFT_OR_VECFIELD, del_plotdat, draw,
ensureqt, get_bbox_centers,
get_plotdat, get_plotdat_dict,
get_square_row_cols, kp_info, qt4ensure,
set_plotdat,)
from plottool_ibeis.fig_presenter import (SLEEP_TIME, VERBOSE,
all_figures_bring_to_front,
all_figures_show,
all_figures_tight_layout,
all_figures_tile, bring_to_front,
close_all_figures, close_figure,
get_all_figures, get_all_qt4_wins,
get_all_windows, get_fig,
get_figure_window, get_geometry,
get_main_win_base, iup, iupdate,
present, register_qt4_win, reset,
set_geometry, show, show_figure,
unregister_qt4_win, update,)
from plottool_ibeis.custom_constants import (BLACK, BLUE, BRIGHT_GREEN,
BRIGHT_PURPLE, DARK_BLUE,
DARK_GREEN, DARK_ORANGE, DARK_RED,
DARK_YELLOW, DEEP_PINK, DPI,
FALSE_RED, FIGSIZE, FIGSIZE_BIGGER,
FIGSIZE_GOLD, FIGSIZE_HUGE,
FIGSIZE_MED, FIGSIZE_SQUARE, FONTS,
FontProp, GRAY, GREEN, LARGE,
LARGER, LIGHTGRAY, LIGHT_BLUE,
LIGHT_GREEN, LIGHT_PINK,
LIGHT_PURPLE, MED, NEUTRAL,
NEUTRAL_BLUE, ORANGE, PHI,
PHI_denom, PHI_numer, PINK, PURPLE,
PURPLE2, RED, SMALL, SMALLER,
SMALLEST, TRUE_BLUE, TRUE_GREEN,
UNKNOWN_PURP, WHITE, YELLOW,
golden_wh, golden_wh2,)
from plottool_ibeis.custom_figure import (FIGTITLE_SIZE, LABEL_SIZE, LEGEND_SIZE,
TITLE_SIZE, cla, clf, customize_figure,
customize_fontprop, figure, gca, gcf,
get_ax, get_image_from_figure,
prepare_figure_for_save,
prepare_figure_fpath, sanitize_img_ext,
sanitize_img_fname, save_figure,
set_figtitle, set_ticks, set_title,
set_xlabel, set_xticks, set_ylabel,
set_yticks, split,)
from plottool_ibeis.plots import (colorline, draw_histogram,
draw_time_distribution, draw_time_histogram,
draw_timedelta_pie, estimate_pdf,
get_good_logyscale_kwargs, interval_line_plot,
interval_stats_plot, is_default_dark_bg,
multi_plot, plot_densities,
plot_multiple_scores, plot_pdf,
plot_probabilities, plot_probs,
plot_rank_cumhist, plot_score_histograms,
plot_search_surface, plot_sorted_scores,
plot_stems, set_logyscale_from_data,
unicode_literals, word_histogram2, wordcloud,
zoom_effect01,)
from plottool_ibeis.draw_func2 import (BASE_FNUM, DARKEN, DEBUG, DF2_DIVIDER_KEY,
FALSE, LEGEND_LOCATION, OffsetImage2,
RenderingContext, SAFE_POS, TAU,
TMP_mevent, TRUE, absolute_lbl, add_alpha,
adjust_subplots, adjust_subplots,
adjust_subplots_safe,
append_phantom_legend_label,
ax_absolute_text, ax_relative_text,
axes_bottom_button_bar,
cartoon_stacked_rects, color_orimag,
color_orimag_colorbar, colorbar,
customize_colormap, dark_background,
distinct_colors, distinct_markers,
draw_bbox, draw_border, draw_boxedX,
draw_keypoint_gradient_orientations,
draw_keypoint_patch, draw_kpts2,
draw_line_segments, draw_line_segments2,
draw_lines2, draw_patches_and_sifts,
draw_stems, draw_text,
draw_text_annotations, draw_vector_field,
ensure_divider, ensure_fnum,
execstr_global, extract_axes_extents,
fig_relative_text, fnum_generator,
get_all_markers, get_axis_bbox,
get_axis_xy_width_height,
get_binary_svm_cmap, get_num_rc,
get_orientation_color, get_pnum_func,
get_save_directions, imshow, imshow_null,
is_texmode, label_to_colors, legend,
lighten_rgb, lowerright_text,
make_axes_locatable, make_bbox,
make_bbox_positioners, make_fnum_nextgen,
make_ori_legend_img, make_pnum_nextgen,
next_fnum, overlay_icon, pad_axes,
param_plot_iterator, parse_fontkw, plot,
plot2, plotWidget, plot_bars,
plot_descriptor_signature, plot_fmatch,
plot_func, plot_hist, plot_histpdf,
plot_sift_signature, plot_surface3d,
pnum_generator, postsetup_axes,
presetup_axes, print_valid_cmaps,
remove_patches, render_figure_to_image,
reverse_colormap, rotate_plot,
scores_to_cmap, scores_to_color,
set_axis_extent, set_axis_limit,
set_figsize, show_chipmatch2,
show_histogram, show_if_requested,
show_kpts, show_phantom_legend_labels,
show_signature, show_was_requested,
small_xticks, small_yticks, space_xticks,
space_yticks, to_base255,
udpate_adjust_subplots, unique_rows,
update_figsize, upperleft_text,
upperright_text, variation_trunctate,
width_from,)
from plottool_ibeis.interact_impaint import (PAINTER_BASE, PaintInteraction,
draw_demo, impaint_mask2,)
from plottool_ibeis.interactions import (ExpandableInteraction, PanEvents,
check_if_subinteract, pan_factory,
zoom_factory,)
from plottool_ibeis.interact_multi_image import (BASE_CLASS, Button,
MultiImageInteraction,)
from plottool_ibeis.interact_keypoints import (KeypointInteraction,
draw_feat_row, ishow_keypoints,
show_keypoints,)
from plottool_ibeis.interact_matches import (MatchInteraction2,
show_keypoint_gradient_orientations,)
from plottool_ibeis.nx_helpers import (GraphVizLayoutConfig, LARGE_GRAPH,
apply_graph_layout_attrs, draw_network2,
dump_nx_ondisk, ensure_nonhex_color,
format_anode_pos, get_explicit_graph,
get_nx_layout, make_agraph,
make_agraph_args,
netx_draw_images_at_positions,
nx_agraph_layout,
parse_aedge_layout_attrs,
parse_anode_layout_attrs, parse_point,
show_nx,)
import utool
# utool-injected module utilities: a module-local print, `rrr` (module
# reload helper used by reload_subs below) and a profiling decorator.
print, rrr, profile = utool.inject2(__name__, '[plottool_ibeis]')
def reassign_submodule_attributes(verbose=True):
    """
    Copy every public attribute of each leaf submodule back onto the
    top-level ``plottool_ibeis`` namespace.

    Reloading the submodules alone does not refresh these flattened names
    (why reloading all the modules doesnt do this I don't know), so this
    redoes the implicit ``from submod import *``-style assignment.
    """
    import sys
    if verbose and '--quiet' not in sys.argv:
        print('dev reimport')
    # Self import
    import plottool_ibeis
    # Implicit reassignment: the first submodule to define a name wins.
    already_assigned = set()
    for entry in IMPORT_TUPLES:
        if entry[2:] and entry[2]:
            # dont import package names
            continue
        modname, _fromimports = entry[0:2]
        submodule = getattr(plottool_ibeis, modname)
        for attr in dir(submodule):
            if attr.startswith('_'):
                continue
            if attr in already_assigned:
                # Holding off on duplicates mimics normal util_import
                # behavior, which is what we want here.
                continue
            already_assigned.add(attr)
            setattr(plottool_ibeis, attr, getattr(submodule, attr))
def reload_subs(verbose=True):
    """ Reloads plottool_ibeis and submodules """
    if verbose:
        print('Reloading submodules')
    # Reload this module, then each submodule, then this module again so it
    # picks up the refreshed definitions.
    rrr(verbose=verbose)
    submodules = (
        plot_helpers, fig_presenter, custom_constants, custom_figure,
        plots, draw_func2, interact_impaint, interactions,
        interact_multi_image, interact_keypoints, interact_matches,
        nx_helpers,
    )
    for submodule in submodules:
        get_rrr(submodule)(verbose=verbose)
    rrr(verbose=verbose)
    try:
        # hackish way of propogating up the new reloaded submodule attributes
        reassign_submodule_attributes(verbose=verbose)
    except Exception as ex:
        print(ex)
# Short alias kept for interactive use.
rrrr = reload_subs
# </AUTOGEN_INIT>
| [
2,
781,
539,
23,
25,
645,
20402,
198,
37811,
198,
36918,
11799,
1088,
2603,
29487,
8019,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
198,
834,
9641,
834,
796,
705,
17,
13,
16,
... | 1.729194 | 9,132 |
# Legacy Django app-config hook: points Django at this app's AppConfig class.
default_app_config = 'mayan.apps.document_parsing.apps.DocumentParsingApp'
| [
12286,
62,
1324,
62,
11250,
796,
705,
11261,
272,
13,
18211,
13,
22897,
62,
79,
945,
278,
13,
18211,
13,
24941,
47,
945,
278,
4677,
6,
201,
198
] | 2.714286 | 28 |
from mailsync.models.adapter import Adapter | [
6738,
285,
1768,
13361,
13,
27530,
13,
324,
3429,
1330,
43721
] | 3.909091 | 11 |
##########################################################################################
# The drawing function
# --------------------
#
# level    recursion level of the Sierpinski triangle (minimum value = 1)
# ss       screensize (draws on a screen of size ss x ss; default value = 400)
#-----------------------------------------------------------------------------------------
##########################################################################################
# Draw a level-5 Sierpinski triangle on the default 400x400 screen.
# NOTE(review): DrawSierpinskiTriangle is defined elsewhere in this file.
DrawSierpinskiTriangle(5)
| [
29113,
29113,
14468,
7804,
2235,
198,
2,
197,
197,
464,
8263,
2163,
198,
2,
197,
197,
19351,
198,
2,
198,
2,
1241,
197,
197,
5715,
286,
311,
959,
79,
21141,
22950,
357,
39504,
1988,
796,
352,
8,
198,
2,
37786,
197,
197,
1416,
5681... | 5.616279 | 86 |
import numpy as np
import model
import torch.nn as nn
from torch.utils import data
import torch
import torch.optim as optim
from torch.autograd import Variable
from time import gmtime, strftime
use_gpu = 0
import matplotlib.pyplot as plt
import sys
# Location of the precomputed Hi-C diagonal matrices (.npy) loaded below.
# NOTE(review): this is a Python 2 script (bare `print` statements).
path = '/home/zhangyan/triplet_loss'
chrN_start = 18
chrN_end = 18
sample_number = 3000
# Anchor/positive come from the two GM12878 replicates; negative from IMR90.
anchor_raw = np.load(path + '/GM12878_primary_diag_chr'+str(chrN_start)+'_' + str(chrN_end) + '_downtoK562.npy').astype(np.float32)
positive_raw = np.load(path + '/GM12878_replicate_diag_chr'+str(chrN_start)+'_' + str(chrN_end) + '_downtoK562.npy').astype(np.float32)
negative_raw = np.load(path + '/IMR90_diag_chr'+str(chrN_start)+'_' + str(chrN_end) + '_downtoK562.npy').astype(np.float32)
for sample_number in range (0, 3500, 50):
    print sample_number
    anchor = anchor_raw[sample_number]
    positive = positive_raw[sample_number]
    negative = negative_raw[sample_number]
    batch_size = anchor.shape[0]
    anchor = [anchor]
    positive = [positive]
    negative = [negative]
    windows_size = 3
    importance_map_decrease = np.zeros((50, 50))
    # Build occluded variants: zero out a `windows_size`-wide row band, then
    # a column band, at every offset, for each of the three matrices.
    for i in range(0, 51 - windows_size):
        z = np.copy(np.copy(anchor[0][0]))
        z[i:i+windows_size, :] = 0
        anchor.append([z,])
        z = np.copy(np.copy(positive[0][0]))
        z[i:i+windows_size, :] = 0
        positive.append([z,])
        z = np.copy(np.copy(negative[0][0]))
        z[i:i+windows_size, :] = 0
        negative.append([z,])
        # NOTE(review): `z` still holds the negative-derived copy here, so the
        # column-masked sample appended to `anchor` is based on the negative
        # matrix (with its row band also zeroed). A fresh
        # `z = np.copy(anchor[0][0])` was probably intended -- confirm.
        z[:,i:i+windows_size] = 0
        anchor.append([z,])
        z = np.copy(np.copy(positive[0][0]))
        z[:,i:i+windows_size] = 0
        positive.append([z,])
        z = np.copy(np.copy(negative[0][0]))
        z[:,i:i+windows_size] = 0
        negative.append([z,])
    anchor = np.array(anchor)
    positive = np.array(positive)
    negative = np.array(negative)
    print anchor.shape, positive.shape, negative.shape
    #sys.exit()
    batch_size = anchor.shape[0]
    # Wrap each occlusion stack in a DataLoader (labels are dummy zeros);
    # one batch holds the whole stack, in original order.
    training_set = data.TensorDataset(torch.from_numpy(anchor), torch.from_numpy(np.zeros(anchor.shape[0])))
    train_loader_anchor = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=False)
    print "number of batch is ", len(train_loader_anchor)
    tenth_bath = len(train_loader_anchor) / 10
    training_set = data.TensorDataset(torch.from_numpy(positive), torch.from_numpy(np.zeros(positive.shape[0])))
    train_loader_positive = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=False)
    training_set = data.TensorDataset(torch.from_numpy(negative), torch.from_numpy(np.zeros(negative.shape[0])))
    train_loader_negative = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=False)
    train_loader_K562 = train_loader_negative
    Net = model.TripletNetwork()
    if use_gpu:
        Net = Net.cuda()
    optimizer = optim.SGD(Net.parameters(), lr = 0.001, momentum=0.9)
    triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    Net.eval()
    # Inference only with pretrained weights; no training happens below.
    Net.load_state_dict(torch.load(path + '/triplet_maxMargin_chr1_8_epoch_400'))
    running_loss = 0.0
    for i, (v1, v2, v3, v4) in enumerate(zip(train_loader_anchor, train_loader_positive, train_loader_negative, train_loader_K562)):
        print i
        anchorHiC, lab = v1
        positiveHiC, lab = v2
        negativeHiC, lab = v3
        K562HiC, lab = v4
        anchorHiC = Variable(anchorHiC)
        positiveHiC = Variable(positiveHiC)
        negativeHiC = Variable(negativeHiC)
        K562HiC = Variable(K562HiC)
        if use_gpu:
            anchorHiC = anchorHiC.cuda()
            positiveHiC = positiveHiC.cuda()
            negativeHiC = negativeHiC.cuda()
            K562HiC = K562HiC.cuda()
        print "start prediction"
        output_anchor, output_pos, output_neg = Net(anchorHiC, positiveHiC, negativeHiC)
        #output_anchor, output_pos, output_K562 = Net(anchorHiC, positiveHiC, K562HiC)
        print "end prediction"
    # NOTE(review): the embeddings used below leak out of the loop above, so
    # they come from its *last* batch -- confirm this is intended.
    output_anchor = output_anchor.cpu().data.numpy()
    output_pos = output_pos.cpu().data.numpy()
    output_neg = output_neg.cpu().data.numpy()
    # `d` is presumably a distance function defined elsewhere in this file.
    original_loss = d(output_anchor[0], output_neg[0]) - d(output_anchor[0], output_pos[0])
    n = 1
    add_times1 = np.zeros((50, 50))
    add_times2 = np.zeros((50, 50))
    importance_map1 = np.zeros((50, 50))
    importance_map2 = np.zeros((50, 50))
    # Accumulate, per pixel, how much each occlusion reduced the triplet
    # margin vs. the unoccluded loss. Index 0 is the unoccluded sample;
    # occluded samples alternate row-band/column-band starting at n == 1.
    for i in range(0, 51 - windows_size):
        z = np.copy(np.copy(anchor[0][0]))
        #print z.shape
        add_times1[i:i+windows_size, :] += 1
        add_times2[:, i:i+windows_size] += 1
        importance_map1[i:i+windows_size, :] += min(0, d(output_anchor[n], output_neg[n]) - d(output_anchor[n], output_pos[n]) - original_loss)
        n += 1
        importance_map2[:, i:i+windows_size] += min(0, d(output_anchor[n], output_neg[n]) - d(output_anchor[n], output_pos[n]) - original_loss)
        n += 1
    # Average per pixel, flip sign, and combine row/column maps.
    importance_map1 = -importance_map1 / add_times1
    importance_map2 = -importance_map2 / add_times2
    importance_map = importance_map1 * importance_map2
    # Index arrays for the upper triangle (incl. diagonal); zeroed before display.
    zeroindex = [[],[]]
    for i in range(0, 50):
        for j in range(i, 50):
            zeroindex[0].append(i)
            zeroindex[1].append(j)
    from scipy.stats import pearsonr
    from scipy.stats import spearmanr
    print '-------------------'
    print "anchor vs positive", pearsonr(anchor[0].flatten(), positive[0].flatten())[0], spearmanr(anchor[0].flatten()[0], positive[0].flatten())
    print "anchor vs negative", pearsonr(anchor[0].flatten(), negative[0].flatten())[0], spearmanr(anchor[0].flatten()[0], negative[0].flatten())
    print '####################'
    # 2x2 panel: original, positive, negative, and the importance map.
    ax1 = plt.subplot(2, 2, 1)
    ax1.title.set_text("original")
    plt.imshow(anchor[0][0], cmap=plt.cm.jet, interpolation='nearest', origin='lower', vmin = 0, vmax = 100)
    ax1 = plt.subplot(2, 2, 2)
    # NOTE(review): `i` here is left over from the zeroindex loops above
    # (always 49), not a meaningful sample index -- confirm which distance
    # the panel titles were meant to show.
    ax1.title.set_text(str(d(output_anchor[i], output_pos[i])) )
    plt.imshow(positive[0][0], cmap=plt.cm.jet, interpolation='nearest', origin='lower', vmin = 0, vmax = 100)
    ax1 = plt.subplot(2, 2, 3)
    ax1.title.set_text(str(d(output_anchor[i], output_neg[i])) )
    plt.imshow(negative[0][0], cmap=plt.cm.jet, interpolation='nearest', origin='lower', vmin = 0, vmax = 100)
    importance_map[zeroindex] = 0
    ax1 = plt.subplot(2, 2, 4)
    ax1.title.set_text("importance")
    plt.imshow(importance_map, cmap=plt.cm.jet, interpolation='nearest', origin='lower')
    plt.colorbar()
    plt.show()
    plt.close()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2746,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
26791,
1330,
1366,
198,
11748,
28034,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,
13,
2306,
519,
6335,
133... | 2.240415 | 2,895 |
"""
在终端中获取颜色(RGBA),打印描述信息,否则提示颜色不存在
"R" -> "红色"
"G" -> "绿色"
"B" -> "蓝色"
"A" -> "透明度"
"""
dict_color_info = {
"R": "红色",
"G": "绿色",
"B": "蓝色",
"A": "透明度"
}
color = input("请输入颜色(RGBA):")
# print(dict_color_info[color]) # 如果字典不存在当前key,会报错.
if color in dict_color_info:
print(dict_color_info[color])
else:
print("您输入的颜色不存在")
| [
37811,
198,
220,
220,
220,
10263,
250,
101,
163,
119,
230,
44165,
107,
40792,
164,
236,
115,
20998,
244,
165,
95,
250,
164,
231,
110,
7,
48192,
4339,
828,
33699,
241,
39355,
108,
162,
237,
237,
32573,
108,
46479,
94,
162,
223,
107,
... | 1.132716 | 324 |
"""Database tools and helpers."""
from datetime import datetime
from click import command, echo
from flask_sqlalchemy import SQLAlchemy
from flask.cli import with_appcontext
# Shared SQLAlchemy extension instance; bound to the app in init_app() below.
db = SQLAlchemy()
class Post(db.Model):
    """A blog post.

    NOTE(review): ``author_id`` and ``category`` are stored as free text
    rather than foreign keys -- confirm that is intentional.
    """

    id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    author_id = db.Column(db.Text, nullable=False)  # author identifier (free text)
    category = db.Column(db.Text, nullable=False)  # category name (free text)
    created = db.Column(db.DateTime, default=datetime.utcnow)  # creation time (UTC)
    slug = db.Column(db.Text, nullable=False)  # URL-friendly identifier
    title = db.Column(db.Text, nullable=False)
    description = db.Column(db.Text, nullable=True)
    articleUrl = db.Column(db.Text, nullable=True)  # external article link, if any
    imageUrl = db.Column(db.Text, nullable=True)  # image link, if any
    forName = db.Column(db.Text, nullable=True)
class Category(db.Model):
    """A post category."""

    categoryId = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    category = db.Column(db.Text, nullable=False)  # category display name
    created = db.Column(db.DateTime, default=datetime.utcnow)  # creation time (UTC)
@command("init-db")
@with_appcontext
def init_db_command():
"""Initialize the database."""
db.create_all()
echo("Initialized the database.")
def init_app(app):
    """Wire the SQLAlchemy extension and the ``init-db`` command into *app*."""
    # The two registrations are independent; order is not significant.
    app.cli.add_command(init_db_command)
    db.init_app(app)
| [
37811,
38105,
4899,
290,
49385,
526,
15931,
628,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
6738,
3904,
1330,
3141,
11,
9809,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
6738,
42903,
13,
44506,
1330,
... | 2.728448 | 464 |
import json
import os
import re
import time
import ast
import python_minifier
class Visitor(ast.NodeVisitor):
    """Used to list all the modules imported by a script.

    NOTE(review): no ``visit_*`` methods are defined here, so as written the
    class only inherits the default ``ast.NodeVisitor`` traversal -- confirm
    whether the import-collecting visitor methods were lost.
    """
if __name__ == "__main__":
import sys
package_name = sys.argv[1] if len(sys.argv) > 1 else ""
src_dir = sys.argv[2] if len(sys.argv) > 2 else os.getcwd()
make(package_name, src_dir) | [
11748,
33918,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
640,
198,
11748,
6468,
198,
198,
11748,
21015,
62,
1084,
7483,
198,
198,
4871,
6911,
2072,
7,
459,
13,
19667,
15854,
2072,
2599,
198,
220,
220,
220,
37227,
38052,
284,
1351,
... | 2.64539 | 141 |
#
# PyGoWave Server - The Python Google Wave Server
# Copyright 2009 Patrick Schneider <patrick.p2k.schneider@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.template import Library
from django.conf import settings
# Django template-tag library; tags in this module register against it.
register = Library()
@register.simple_tag
| [
198,
2,
198,
2,
9485,
5247,
39709,
9652,
532,
383,
11361,
3012,
17084,
9652,
198,
2,
15069,
3717,
9925,
26039,
1279,
29615,
13,
79,
17,
74,
13,
20601,
710,
1304,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
1378... | 3.71564 | 211 |
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class VectorMathNode(Node, ArmLogicTreeNode):
    '''Vector math node'''
    # Blender node registration metadata.
    bl_idname = 'LNVectorMathNode'
    bl_label = 'Vector Math'
    bl_icon = 'CURVE_PATH'
    # Operation selector shown in the node UI. Each EnumProperty item is an
    # (identifier, label, description) tuple; 'Add' is the default.
    property0: EnumProperty(
        items = [('Add', 'Add', 'Add'),
                 ('Dot Product', 'Dot Product', 'Dot Product'),
                 ('Multiply', 'Multiply', 'Multiply'),
                 ('Normalize', 'Normalize', 'Normalize'),
                 ('Subtract', 'Subtract', 'Subtract'),
                 ('Average', 'Average', 'Average'),
                 ('Cross Product', 'Cross Product', 'Cross Product'),
                 ('Length', 'Length', 'Length'),
                 ],
        name='', default='Add')
# Register this node under the 'Value' category of the add-node menu.
add_node(VectorMathNode, category='Value')
| [
11748,
275,
9078,
198,
6738,
275,
9078,
13,
1676,
862,
1330,
1635,
198,
6738,
275,
9078,
13,
19199,
1330,
19081,
11,
19081,
39105,
198,
6738,
3211,
13,
6404,
291,
17440,
13,
1670,
62,
77,
4147,
1330,
1635,
198,
198,
4871,
20650,
37372... | 2.161125 | 391 |
from datetime import datetime
import pytest
from audit.models import db
@pytest.mark.asyncio
async def test_table_partitioning():
    """
    We can't create logs by using the `client` fixture because of this issue
    https://github.com/encode/starlette/issues/440, so inserting directly
    into the DB instead.

    Verifies that inserting audit rows auto-creates one partition per
    (year, month) and that rows are routed to the correct partition.
    NOTE(review): `get_table_names` is presumably a helper defined elsewhere
    in this test module.
    """
    category = "presigned_url"
    # Timestamp placeholder is filled with 'YYYY_MM_DD' strings below.
    insert_stmt = 'insert into {}("request_url", "status_code", "timestamp", "username", "sub", "guid", "resource_paths", "action") values (\'request_url\', 200, \'{}\', \'user1\', 10, \'guid\', ARRAY[\'/my/resource/path1\', \'/path2\'], \'action\')'
    # initially, we should only have 1 table, no partitions
    assert await get_table_names() == [category]
    # insert a July 1789 entry. It should trigger the creation of a partition
    await db.scalar(db.text(insert_stmt.format(category, "1789_07_14")))
    assert await get_table_names() == [category, f"{category}_1789_07"]
    # insert another July 1789 entry. It should go in the existing partition
    await db.scalar(db.text(insert_stmt.format(category, "1789_07_30")))
    assert await get_table_names() == [category, f"{category}_1789_07"]
    # insert a Jan 2021 entry. It should trigger the creation of a partition
    await db.scalar(db.text(insert_stmt.format(category, "2021_01_05")))
    assert await get_table_names() == [
        category,
        f"{category}_1789_07",
        f"{category}_2021_01",
    ]
    # after inserting the 3 entries, querying the table should return all 3
    data = await db.all(db.text(f"select username, timestamp from {category}"))
    assert data == [
        ("user1", datetime(1789, 7, 14)),
        ("user1", datetime(1789, 7, 30)),
        ("user1", datetime(2021, 1, 5)),
    ]
    # there should be no data in the main table itself. All the data is in
    # the partitions
    data = await db.all(db.text(f"select username, timestamp from only {category}"))
    assert data == []
    # querying the partition tables should only return the entries whose
    # timestamp is in each partition's range
    data = await db.all(db.text(f"select username, timestamp from {category}_1789_07"))
    assert data == [("user1", datetime(1789, 7, 14)), ("user1", datetime(1789, 7, 30))]
    data = await db.all(db.text(f"select username, timestamp from {category}_2021_01"))
    assert data == [("user1", datetime(2021, 1, 5))]
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
12972,
9288,
198,
198,
6738,
14984,
13,
27530,
1330,
20613,
628,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
198,
292,
13361,
825,
1332,
62,
11487,
62,
3911,
653,
278,
33529... | 2.783721 | 860 |
import pytest
from remote_works.task.emails import (
send_task_confirmation, send_payment_confirmation)
@pytest.mark.integration
| [
11748,
12972,
9288,
198,
198,
6738,
6569,
62,
5225,
13,
35943,
13,
368,
1768,
1330,
357,
198,
220,
220,
220,
3758,
62,
35943,
62,
10414,
36241,
11,
3758,
62,
37301,
62,
10414,
36241,
8,
628,
198,
31,
9078,
9288,
13,
4102,
13,
18908,... | 3.022222 | 45 |
from flask import Flask, render_template, request, jsonify, url_for
import atexit
import os
import json
import folium
from botocore.client import Config
import ibm_boto3
import pandas as pd
import ast
from collections import namedtuple
import numpy as np
# ProvinceMap is presumably defined elsewhere in this file; loads the
# province-id mapping used by the views.
province_map = ProvinceMap("province_id_mapping.csv")

app = Flask(__name__, static_url_path='')

db_name = 'mydb'
client = None  # Cloudant/DB client, initialized elsewhere if configured
db = None

# If you want to connect to your COS to obtain results for visualisation, uncomment the lines below and fill in the necessary credentials
#client_to_COS = ibm_boto3.client(service_name='s3',
#                                 ibm_api_key_id="",
#                                 ibm_auth_endpoint="https://iam.ng.bluemix.net/oidc/token",
#                                 config=Config(signature_version='oauth'),
#                                 endpoint_url="")

# On IBM Cloud Cloud Foundry, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8000
port = int(os.getenv('PORT', 8000))
@app.route('/')
@app.route('/map')
@atexit.register
if __name__ == '__main__':
    # Bind on all interfaces so the platform router can reach the app.
    # NOTE(review): debug=True should be disabled outside local development.
    app.run(host='0.0.0.0', port=port, debug=True)
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
33918,
1958,
11,
19016,
62,
1640,
198,
11748,
379,
37023,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
5955,
1505,
198,
6738,
10214,
420,
382,
13,
16366,
1330,
17056,
198... | 2.845953 | 383 |
import torch
import torchvision
import torchvision.models as models
import json
import pandas as pd
import numpy as np
import torch.nn as nn
import scipy.io as io
from torch.utils.data import Dataset
from torch.utils import data
import os
from PIL import Image
from tqdm import tqdm
import h5py
# data_dir = "./test"
data_dir = "../XXison/datasets_for_ma/"
dataset_info = json.load(open("../XXison/dataset.json", "r"))
df = pd.DataFrame.from_dict(dataset_info, orient="index")
df['file_name'] = df.index
df["file_name"] = data_dir + df["file_name"].astype(str)  # build the full image path
orig_df = pd.DataFrame.from_dict(dataset_info, orient="index")  # keep a copy holding only the bare file names (could also just copy df instead of re-loading)
orig_df['file_name'] = orig_df.index
print("Load dataset.json successfully!!!")

transforms = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
])
# Pretrained ResNet101; `layer` is the penultimate pooling layer whose
# activations are used as the feature vector.
model = models.resnet101(pretrained=True)
layer = model._modules.get('avgpool')
model.eval()

if torch.cuda.is_available():
    gpus = '0'
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus
    device_ids = [i for i in range(torch.cuda.device_count())]
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
    if len(device_ids)>1:
        model = nn.DataParallel(model, device_ids = device_ids).cuda()
    else:
        model = model.cuda()
else:
    print("Use CPU only, check your environment!")

feat = {}
feature_list = []
file_path_list = []
label_list = []
files = df["file_name"].tolist()  # walking the tree with os.walk was too slow for ~1M images; reuse the paths from the prebuilt json instead
# files = ['/root/notebooks/nfs/work/yanwei.liu/Phison/test/1.jpg','/root/notebooks/nfs/work/yanwei.liu/Phison/test/2.jpg','/root/notebooks/nfs/work/yanwei.liu/Phison/test/3.jpg']

for file_path in tqdm(files[0:700000]):
    if (is_image(file_path)):  # only process paths that are images; is_image is presumably defined elsewhere in this file
        image = Image.open(file_path)  # open the image file
        pic_vector = get_vector(image)  # extract the image's ResNet101 feature vector; get_vector is presumably defined elsewhere
#         print(pic_vector.is_cuda)  # check whether the tensor is on the GPU
        file_path_list.append(file_path)  # keep the image's absolute path
        feature_list.append(pic_vector.data.tolist())  # keep the extracted features
        # if the current file name matches an entry in the original json, record that entry's class label
        label_list.append(orig_df.loc[orig_df['file_name'] == file_path.split("/")[-1] ]['class'].values[0])

# Map the string labels to integer codes 1..6.
for index, value in enumerate(label_list):
    if value == 'good':
        label_list[index] = 1
    elif value == 'missing':
        label_list[index] = 2
    elif value == 'shift':
        label_list[index] = 3
    elif value == 'stand':
        label_list[index] = 4
    elif value == 'broke':
        label_list[index] = 5
    elif value == 'short':
        label_list[index] = 6

# https://stackoverflow.com/a/11885718/13369757 (transposing a single-dimension array)
# https://stackoverflow.com/a/7464816 (handling char-string rendering issues)
"""
feat['features'] = np.array(feature_list).astype('float32').T #小檔案(2GB)以內,可用這4行註解的,直接產生mat檔案
feat['image_files'] = np.array(file_path_list, dtype=np.object)[None].T
feat['labels'] = np.array(label_list).astype('float32')[None].T
io.savemat('res101.mat',feat)
"""

# scipy cannot write very large matrices into a .mat file, so save HDF5 instead.
# Open in Octave, transpose, merge the h5 files into one, save as a v7 .mat,
# then train with tfvaegan.
feat['features'] = np.array(feature_list).astype('float32')  # for files over 2GB, use this HDF5 path
# feat['image_files'] = np.array(file_path_list, dtype=np.object)  # file paths omitted from the HDF5 to reduce size
feat['labels'] = np.array(label_list).astype('float32')[None].T  # remember to transpose `labels` after opening in Octave
# save_dict_to_hdf5 is presumably defined elsewhere in this file.
save_dict_to_hdf5(feat, 'res101.h5')
| [
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
28034,
10178,
13,
27530,
355,
4981,
198,
11748,
33918,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
... | 1.679254 | 2,198 |
import fire
if __name__ == '__main__':
    # Expose `hello` (defined elsewhere in this file) as a Fire CLI.
    fire.Fire(hello)
| [
11748,
2046,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
2046,
13,
13543,
7,
31373,
8,
198
] | 2.423077 | 26 |
from flask import render_template
import lib.es as es
| [
6738,
42903,
1330,
8543,
62,
28243,
198,
11748,
9195,
13,
274,
355,
1658,
198
] | 3.857143 | 14 |
import numpy as np
import porespy as ps
import scipy.ndimage as spim
from skimage.morphology import square
from edt import edt
# Silence porespy's tqdm progress bars for this run.
ps.settings.tqdm['disable'] = True

if __name__ == '__main__':
    # Ad-hoc runner: instantiate the suite (IBIPTest is presumably defined
    # elsewhere in this file) and invoke every method named test*.
    t = IBIPTest()
    self = t
    t.setup_class()
    for item in t.__dir__():
        if item.startswith('test'):
            print('running test: '+item)
            t.__getattribute__(item)()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
279,
382,
2777,
88,
355,
26692,
198,
11748,
629,
541,
88,
13,
358,
9060,
355,
599,
320,
198,
6738,
1341,
9060,
13,
24503,
1435,
1330,
6616,
198,
6738,
1225,
83,
1330,
1225,
83,
198,
862,
13... | 2.301775 | 169 |
'''
Crud Mixin has been superseded by the fetch_and_carry mixin.

Created on 2 Jul 2014

@author: peterb
'''
from decimal import Decimal
from sqlalchemy.inspection import inspect
import logging
| [
7061,
6,
198,
13916,
549,
15561,
259,
468,
587,
2208,
771,
276,
416,
21207,
62,
392,
62,
34993,
5022,
259,
198,
198,
41972,
319,
362,
5979,
1946,
198,
198,
31,
9800,
25,
279,
2357,
65,
198,
7061,
6,
198,
6738,
25570,
704,
13,
1984... | 2.610169 | 118 |
from typing import Tuple, Union, overload, Optional
from pylsp_rope import typing
from pylsp_rope.typing import LineNumber, CharNumber, Literal
# Sentinel markers accepted in place of a character number.
START_OF_LINE: Literal["^"] = "^"
END_OF_LINE: Literal["$"] = "$"

# Plain ints are accepted interchangeably with the typed line/char numbers.
AutoLineNumber = Union[LineNumber, int]
AutoCharNumber = Union[CharNumber, int]
# A character position, or one of the "^"/"$" markers.
_CharNumberOrMarker = Union[AutoCharNumber, Literal["^", "$"]]
# Either a bare line number, or a (line, optional char-or-marker) pair.
_PrimitiveLineCharNumber = Union[
    AutoLineNumber, Tuple[AutoLineNumber, Optional[_CharNumberOrMarker]]
]
# NOTE(review): four bare `@overload` decorators previously sat directly on
# this implementation. Applying `typing.overload` to the implementation
# itself makes every runtime call raise NotImplementedError, so they were
# removed. If overloaded signatures are wanted, restore them as separate
# `@overload`-decorated stub definitions *before* this implementation.
def Position(
    line: _PrimitiveLineCharNumber,
    character: Optional[_CharNumberOrMarker] = None,
    *,
    _default_character: _CharNumberOrMarker = CharNumber(0),
) -> typing.Position:
    """
    Returns a [Position](https://microsoft.github.io/language-server-protocol/specification#position)
    object for a document.

    `pos` can be:
    - Tuple[LineNumber, CharNumber] are passed directly to the object
    - int selects the start of the line
    - "^" the first non-blank character of the line
    - "$" the end of the line, which is the start of the next line

    Selects the start of line 4

    >>> a = Position(4)
    >>> b = Position(4, 0)
    >>> c = Position((4, 0))
    >>> assert a == b == c

    Selects the end of line 4:

    >>> c = Position(4, "$")
    >>> d = Position(5, 0)
    >>> assert c == d
    """
    if isinstance(line, tuple):
        # assert (
        #     character is None
        # ), "If `line` is a tuple, then `character` must not be supplied"
        lineno, character = line
    else:
        lineno = line
    if character is None:
        character = _default_character
    if character == "$":
        # "$" means the start of the next line in LSP coordinates.
        lineno = LineNumber(lineno + 1)
        character = CharNumber(0)
    assert character != "^", "not implemented yet"
    return {
        "line": lineno,
        "character": character,
    }
def Range(
    start: _PrimitiveLineCharNumber,
    end: Optional[_PrimitiveLineCharNumber] = None,
) -> typing.Range:
    """
    Build a [Range](https://microsoft.github.io/language-server-protocol/specification#range)
    object for a document.

    Both endpoints accept the same forms as `Position`. An int endpoint
    covers the whole line, so omitting `end` makes the range span all of
    `start`'s line, including its line ending.

    >>> a = Range(4)
    >>> b = Range(4, 4)
    >>> c = Range((4, 0), (5, 0))
    >>> assert a == b == c

    >>> d = Range(4, 6)
    >>> e = Range((4, 0), (7, 0))
    >>> assert d == e
    """
    last = start if end is None else end
    # The start endpoint defaults to column 0 and the end endpoint to the
    # end of its line, so int arguments select whole lines.
    return {
        "start": Position(start, _default_character=CharNumber(0)),
        "end": Position(last, _default_character=END_OF_LINE),
    }
| [
6738,
19720,
1330,
309,
29291,
11,
4479,
11,
31754,
11,
32233,
198,
198,
6738,
279,
2645,
2777,
62,
305,
431,
1330,
19720,
198,
6738,
279,
2645,
2777,
62,
305,
431,
13,
774,
13886,
1330,
6910,
15057,
11,
3178,
15057,
11,
25659,
1691,
... | 2.563419 | 1,088 |