blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fa28c117231d16bbe5db5e5da355985a37a13b78 | eb604a447e028af5351349ae4fc25aa475846be1 | /ABC51~100/ABC065/a.py | 7dd4c9593c7acac3674725efc75993feb41fe00b | [] | no_license | RuRey0310/Competitive_Programming | 2fe494410aa49c4d44e85f99530664afeb0e1d96 | e230cc4034ef3b2fe984b583add7a9e3449aa7f5 | refs/heads/master | 2023-04-17T22:16:25.130530 | 2020-06-14T13:43:33 | 2020-06-14T13:43:33 | 246,257,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | x, a, b = map(int, input().split())
# Decide the verdict from how the best-by date relates to the eaten date.
if a >= b:
    verdict = "delicious"
elif a + x >= b:
    verdict = "safe"
else:
    verdict = "dangerous"
print(verdict)
"58558604+RuRey0310@users.noreply.github.com"
] | 58558604+RuRey0310@users.noreply.github.com |
0e08023cfcde1510efd0f8e72737baf061c7dfb2 | 8b47042bb524b08f6923ca14b5a816cb04c4118c | /pyxhook.py | 9702d576a4b8df4e8fe339124ab12780e18fdff7 | [
"Apache-2.0"
] | permissive | adrijshikhar/keylogger | cc4bbe4d49b16f85e981cddfea75488e4c398826 | 3585faa41394093ea1fc241f222a4f3bd3ccbda5 | refs/heads/master | 2021-07-10T23:04:31.801941 | 2020-08-17T09:40:10 | 2020-08-17T09:40:10 | 184,454,728 | 1 | 0 | Apache-2.0 | 2020-08-17T09:40:11 | 2019-05-01T17:32:35 | Python | UTF-8 | Python | false | false | 18,470 | py | #!/usr/bin/python
#
# pyxhook -- an extension to emulate some of the PyHook library on linux.
#
# Copyright (C) 2008 Tim Alexander <dragonfyre13@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Thanks to Alex Badea <vamposdecampos@gmail.com> for writing the Record
# demo for the xlib libraries. It helped me immensely working with these
# in this library.
#
# Thanks to the python-xlib team. This wouldn't have been possible without
# your code.
#
# This requires:
# at least python-xlib 1.4
# xwindows must have the "record" extension present, and active.
#
# This file has now been somewhat extensively modified by
# Daniel Folkinshteyn <nanotube@users.sf.net>
# So if there are any bugs, they are probably my fault. :)
from __future__ import print_function
import sys
import re
import time
import threading
from Xlib import X, XK, display
from Xlib.ext import record
from Xlib.protocol import rq
#######################################################################
# #######################START CLASS DEF###############################
#######################################################################
class HookManager(threading.Thread):
    """ This is the main class. Instantiate it, and you can hand it KeyDown
    and KeyUp (functions in your own code) which execute to parse the
    pyxhookkeyevent class that is returned.
    This simply takes these two values for now:
    KeyDown : The function to execute when a key is pressed, if it
              returns anything. It hands the function an argument that
              is the pyxhookkeyevent class.
    KeyUp : The function to execute when a key is released, if it
            returns anything. It hands the function an argument that is
            the pyxhookkeyevent class.
    """

    def __init__(self, parameters=False):
        threading.Thread.__init__(self)
        self.finished = threading.Event()
        # Give these some initial values
        self.mouse_position_x = 0
        self.mouse_position_y = 0
        # "shift" is a press counter (each held Shift key, plus an engaged
        # Caps Lock, contributes one); "caps" tracks the Caps Lock toggle.
        self.ison = {"shift": 0, "caps": False}
        # Compile our regex statements.
        self.isshift = re.compile('^Shift')
        self.iscaps = re.compile('^Caps_Lock')
        self.shiftablechar = re.compile('|'.join((
            '^[a-z0-9]$',
            '^minus$',
            '^equal$',
            '^bracketleft$',
            '^bracketright$',
            '^semicolon$',
            '^backslash$',
            '^apostrophe$',
            '^comma$',
            '^period$',
            '^slash$',
            '^grave$'
        )))
        self.logrelease = re.compile('.*')
        self.isspace = re.compile('^space$')
        # Choose which type of function use: with parameters=True the user
        # callbacks receive (event, parameters_dict) instead of just (event).
        self.parameters = parameters
        if parameters:
            self.lambda_function = lambda x, y: True
        else:
            self.lambda_function = lambda x: True
        # Assign default function actions (do nothing).
        self.KeyDown = self.lambda_function
        self.KeyUp = self.lambda_function
        self.MouseAllButtonsDown = self.lambda_function
        self.MouseAllButtonsUp = self.lambda_function
        self.MouseMovement = self.lambda_function
        self.KeyDownParameters = {}
        self.KeyUpParameters = {}
        self.MouseAllButtonsDownParameters = {}
        self.MouseAllButtonsUpParameters = {}
        self.MouseMovementParameters = {}
        self.contextEventMask = [X.KeyPress, X.MotionNotify]
        # Hook to our display. Two connections: one for lookups from our own
        # thread, one dedicated to the RECORD extension stream.
        self.local_dpy = display.Display()
        self.record_dpy = display.Display()

    def run(self):
        # Check if the extension is present
        if not self.record_dpy.has_extension("RECORD"):
            print("RECORD extension not found", file=sys.stderr)
            sys.exit(1)
        # r = self.record_dpy.record_get_version(0, 0)
        # print("RECORD extension version {major}.{minor}".format(
        #     major=r.major_version,
        #     minor=r.minor_version
        # ))
        # Create a recording context; we only want key and mouse events
        self.ctx = self.record_dpy.record_create_context(
            0,
            [record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                # (X.KeyPress, X.ButtonPress),
                'device_events': tuple(self.contextEventMask),
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }])
        # Enable the context; this only returns after a call to
        # record_disable_context, while calling the callback function in the
        # meantime
        self.record_dpy.record_enable_context(self.ctx, self.processevents)
        # Finally free the context
        self.record_dpy.record_free_context(self.ctx)

    def cancel(self):
        """Stop the recording thread and unblock record_enable_context."""
        self.finished.set()
        self.local_dpy.record_disable_context(self.ctx)
        self.local_dpy.flush()

    def printevent(self, event):
        """Default debugging callback: dump the hook event to stdout."""
        print(event)

    def HookKeyboard(self):
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.KeyPress
        # self.contextEventMask[0] = X.KeyPress
        pass

    def HookMouse(self):
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.MotionNotify
        # need mouse motion to track pointer position, since ButtonPress
        # events don't carry that info.
        # self.contextEventMask[1] = X.MotionNotify
        pass

    def processhookevents(self, action_type, action_parameters, events):
        # In order to avoid duplicate code, this dispatcher invokes the user
        # callback either with just the event, or with (event, parameters),
        # depending on how the manager was initialized.
        if self.parameters:
            action_type(events, action_parameters)
        else:
            action_type(events)

    def processevents(self, reply):
        """RECORD-extension callback: decode raw reply data into hook events."""
        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            print("* received swapped protocol data, cowardly ignored")
            return
        try:
            # Get int value, python2.
            intval = ord(reply.data[0])
        except TypeError:
            # Already bytes/ints, python3.
            intval = reply.data[0]
        if (not reply.data) or (intval < 2):
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(
                data,
                self.record_dpy.display,
                None,
                None
            )
            if event.type == X.KeyPress:
                hookevent = self.keypressevent(event)
                self.processhookevents(
                    self.KeyDown, self.KeyDownParameters, hookevent)
            elif event.type == X.KeyRelease:
                hookevent = self.keyreleaseevent(event)
                self.processhookevents(
                    self.KeyUp, self.KeyUpParameters, hookevent)
            elif event.type == X.ButtonPress:
                hookevent = self.buttonpressevent(event)
                self.processhookevents(
                    self.MouseAllButtonsDown, self.MouseAllButtonsDownParameters, hookevent)
            elif event.type == X.ButtonRelease:
                hookevent = self.buttonreleaseevent(event)
                self.processhookevents(
                    self.MouseAllButtonsUp, self.MouseAllButtonsUpParameters, hookevent)
            elif event.type == X.MotionNotify:
                # use mouse moves to record mouse position, since press and
                # release events do not give mouse position info
                # (event.root_x and event.root_y have bogus info).
                hookevent = self.mousemoveevent(event)
                self.processhookevents(
                    self.MouseMovement, self.MouseMovementParameters, hookevent)
        # print("processing events...", event.type)

    def keypressevent(self, event):
        matchto = self.lookup_keysym(
            self.local_dpy.keycode_to_keysym(event.detail, 0)
        )
        if self.shiftablechar.match(
                self.lookup_keysym(
                    self.local_dpy.keycode_to_keysym(event.detail, 0))):
            # This is a character that can be typed.
            if not self.ison["shift"]:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
                return self.makekeyhookevent(keysym, event)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
                return self.makekeyhookevent(keysym, event)
        else:
            # Not a typable character.
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            if self.isshift.match(matchto):
                self.ison["shift"] = self.ison["shift"] + 1
            elif self.iscaps.match(matchto):
                # BUGFIX: these branches must be mutually exclusive. With two
                # sequential ``if`` statements (as the original had), turning
                # Caps Lock on was immediately undone by the second branch,
                # so the toggle could never engage.
                if not self.ison["caps"]:
                    self.ison["shift"] = self.ison["shift"] + 1
                    self.ison["caps"] = True
                else:
                    self.ison["shift"] = self.ison["shift"] - 1
                    self.ison["caps"] = False
            return self.makekeyhookevent(keysym, event)

    def keyreleaseevent(self, event):
        if self.shiftablechar.match(
                self.lookup_keysym(
                    self.local_dpy.keycode_to_keysym(event.detail, 0))):
            if not self.ison["shift"]:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
        else:
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
        matchto = self.lookup_keysym(keysym)
        if self.isshift.match(matchto):
            self.ison["shift"] = self.ison["shift"] - 1
        return self.makekeyhookevent(keysym, event)

    def buttonpressevent(self, event):
        # self.clickx = self.rootx
        # self.clicky = self.rooty
        return self.makemousehookevent(event)

    def buttonreleaseevent(self, event):
        # if (self.clickx == self.rootx) and (self.clicky == self.rooty):
        #     # print("ButtonClock {detail} x={s.rootx y={s.rooty}}".format(
        #     #     detail=event.detail,
        #     #     s=self,
        #     # ))
        #     if event.detail in (1, 2, 3):
        #         self.captureclick()
        #     else:
        #         pass
        # print("ButtonDown {detail} x={s.clickx} y={s.clicky}".format(
        #     detail=event.detail,
        #     s=self
        # ))
        # print("ButtonUp {detail} x={s.rootx} y={s.rooty}".format(
        #     detail=event.detail,
        #     s=self
        # ))
        return self.makemousehookevent(event)

    def mousemoveevent(self, event):
        self.mouse_position_x = event.root_x
        self.mouse_position_y = event.root_y
        return self.makemousehookevent(event)

    # need the following because XK.keysym_to_string() only does printable
    # chars rather than being the correct inverse of XK.string_to_keysym()
    def lookup_keysym(self, keysym):
        for name in dir(XK):
            if name.startswith("XK_") and getattr(XK, name) == keysym:
                # BUGFIX: ``name.lstrip("XK_")`` strips any leading run of
                # the characters 'X', 'K', '_' and mangled names such as
                # "XK_KP_Add" into "P_Add". Slice off the fixed three-char
                # prefix instead.
                return name[3:]
        return "[{}]".format(keysym)

    def asciivalue(self, keysym):
        asciinum = XK.string_to_keysym(self.lookup_keysym(keysym))
        return asciinum % 256

    def makekeyhookevent(self, keysym, event):
        """Build a pyxhookkeyevent for a key press/release X event."""
        storewm = self.xwindowinfo()
        if event.type == X.KeyPress:
            MessageName = "key down"
        elif event.type == X.KeyRelease:
            MessageName = "key up"
        return pyxhookkeyevent(
            storewm["handle"],
            storewm["name"],
            storewm["class"],
            self.lookup_keysym(keysym),
            self.asciivalue(keysym),
            False,
            event.detail,
            MessageName
        )

    def makemousehookevent(self, event):
        """Build a pyxhookmouseevent for a button/motion X event."""
        storewm = self.xwindowinfo()
        if event.detail == 1:
            MessageName = "mouse left "
        elif event.detail == 3:
            MessageName = "mouse right "
        elif event.detail == 2:
            MessageName = "mouse middle "
        elif event.detail == 5:
            MessageName = "mouse wheel down "
        elif event.detail == 4:
            MessageName = "mouse wheel up "
        else:
            MessageName = "mouse {} ".format(event.detail)
        if event.type == X.ButtonPress:
            MessageName = "{} down".format(MessageName)
        elif event.type == X.ButtonRelease:
            MessageName = "{} up".format(MessageName)
        else:
            MessageName = "mouse moved"
        return pyxhookmouseevent(
            storewm["handle"],
            storewm["name"],
            storewm["class"],
            (self.mouse_position_x, self.mouse_position_y),
            MessageName
        )

    def xwindowinfo(self):
        """Return {'name', 'class', 'handle'} for the currently focused window.

        Best-effort: any X error yields a dict of Nones so logging never
        crashes the hook thread.
        """
        try:
            windowvar = self.local_dpy.get_input_focus().focus
            wmname = windowvar.get_wm_name()
            wmclass = windowvar.get_wm_class()
            wmhandle = str(windowvar)[20:30]
        except Exception:
            # This is to keep things running smoothly.
            # It almost never happens, but still...
            return {"name": None, "class": None, "handle": None}
        if (wmname is None) and (wmclass is None):
            try:
                windowvar = windowvar.query_tree().parent
                wmname = windowvar.get_wm_name()
                wmclass = windowvar.get_wm_class()
                wmhandle = str(windowvar)[20:30]
            except Exception:
                # This is to keep things running smoothly.
                # It almost never happens, but still...
                return {"name": None, "class": None, "handle": None}
        if wmclass is None:
            return {"name": wmname, "class": wmclass, "handle": wmhandle}
        else:
            return {"name": wmname, "class": wmclass[0], "handle": wmhandle}
class pyxhookkeyevent:
    """Container for a single keyboard hook event.

    Attributes (mirroring the Windows pyHook interface):
    Window : The handle of the focused window.
    WindowName : The title of the focused window.
    WindowProcName : The backend process for the window.
    Key : The key pressed, shifted to the correct caps value.
    Ascii : An ascii representation of the key. It returns 0 if
            the ascii value is not between 31 and 256.
    KeyID : Always False here. Under Windows it is the Virtual Key
            Code, but that's a windows-only thing.
    ScanCode : Raw keycode; differs between keyboards, so avoid
               relying on it. X11 abstracts this information anyway.
    MessageName : Either "key down" or "key up".
    """

    def __init__(
            self, Window, WindowName, WindowProcName, Key, Ascii, KeyID,
            ScanCode, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Key = Key
        self.Ascii = Ascii
        self.KeyID = KeyID
        self.ScanCode = ScanCode
        self.MessageName = MessageName

    def __str__(self):
        # One attribute per line, no trailing newline.
        template = (
            'Window Handle: {s.Window}\n'
            'Window Name: {s.WindowName}\n'
            'Window\'s Process Name: {s.WindowProcName}\n'
            'Key Pressed: {s.Key}\n'
            'Ascii Value: {s.Ascii}\n'
            'KeyID: {s.KeyID}\n'
            'ScanCode: {s.ScanCode}\n'
            'MessageName: {s.MessageName}'
        )
        return template.format(s=self)
class pyxhookmouseevent:
    """Container for a single mouse hook event.

    Attributes:
    Window : The handle of the focused window.
    WindowName : The title of the focused window.
    WindowProcName : The backend process for the window.
    Position : 2-tuple (x, y) coordinates of the mouse click.
    MessageName : e.g. "mouse left|right|middle down",
                  "mouse left|right|middle up", or "mouse moved".
    """

    def __init__(
            self, Window, WindowName, WindowProcName, Position, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Position = Position
        self.MessageName = MessageName

    def __str__(self):
        # One attribute per line, no trailing newline.  (WindowName is
        # intentionally not included, matching historical output.)
        template = (
            'Window Handle: {s.Window}\n'
            'Window\'s Process Name: {s.WindowProcName}\n'
            'Position: {s.Position}\n'
            'MessageName: {s.MessageName}'
        )
        return template.format(s=self)
#######################################################################
# ########################END CLASS DEF################################
#######################################################################
if __name__ == '__main__':
    # Manual smoke test: echo every keyboard/mouse event for ten seconds.
    hm = HookManager()
    hm.HookKeyboard()
    hm.HookMouse()
    for callback_name in (
            "KeyDown", "KeyUp",
            "MouseAllButtonsDown", "MouseAllButtonsUp", "MouseMovement"):
        setattr(hm, callback_name, hm.printevent)
    hm.start()
    time.sleep(10)
    hm.cancel()
| [
"adrijshikhar85@gmail.com"
] | adrijshikhar85@gmail.com |
f19e1692deea8813c50381d88104bc5c4c96bc22 | d964f0a98f5bfa5f71d23de2df2d32a07219cc1f | /students/Eliseev/TMS/HomeWork-6/Task-1.py | b08cb31240bfbdd09169731db0f71945664f22c4 | [] | no_license | AlexandrSech/Z49-TMS | cc8e3308e54c8e9f7ed03c6d7891da074e5c7c45 | d83200ff04c06772ef643b31569b37006420cd6b | refs/heads/main | 2023-07-28T02:40:17.355675 | 2021-08-15T16:54:27 | 2021-08-15T16:54:27 | 366,099,594 | 3 | 4 | null | 2021-09-08T16:52:11 | 2021-05-10T16:00:24 | Python | UTF-8 | Python | false | false | 443 | py | import random
size_x = int(input("Размер матрицы по x: "))
size_y = int(input("Размер матрицы по y: "))
a = int(input("Стартовое число: "))
b = int(input("Конечное число: "))
# Draw one row of size_x random values, then redraw it size_y more times
# (same number and order of randint() calls as the original loops).
list_1 = [random.randint(a, b) for _ in range(size_x)]
for _ in range(size_y):
    for idx in range(len(list_1)):
        list_1[idx] = random.randint(a, b)
print(list_1)
"alexseeffwork@gmail.com"
] | alexseeffwork@gmail.com |
023b579f0ad04b706b7b8fc678ff2abefea06615 | d0dcc43298a26c4799b7fb8dbf8a3d2843d59713 | /rpi_deep_pantilt/detect/util/label.py | 604dee11a0a771001116af273df325e2d0e2409b | [
"MIT"
] | permissive | timayy/rpi-deep-pantilt | 24b3904bc1b868cbd3599806d0b1a59e8c08e924 | 5173887dd88c31d08f3e2e802acd365dbf0daba9 | refs/heads/master | 2023-04-19T23:51:04.417680 | 2021-05-10T14:17:54 | 2021-05-10T14:17:54 | 365,947,036 | 0 | 0 | MIT | 2021-05-10T10:29:25 | 2021-05-10T06:56:40 | Python | UTF-8 | Python | false | false | 6,570 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Python
import logging
# Lib
import tensorflow as tf
from google.protobuf import text_format
# app
from rpi_deep_pantilt.detect.util import string_int_label_map_pb2
def convert_label_map_to_categories(label_map, max_num_classes, use_display_name=True):
    """Turn a label map proto into an eval-compatible categories list.

    Each returned dict has:
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'.

    Items are kept only when 0 < id <= max_num_classes; for duplicate ids
    only the first occurrence is kept.

    Args:
        label_map: a StringIntLabelMapProto or None. If None, a default
            categories list is created with max_num_classes categories.
        max_num_classes: maximum number of (consecutive) label indices to
            include.
        use_display_name: (boolean) choose whether to load 'display_name'
            field as category name. If False or if the display_name field
            does not exist, uses 'name' field as category names instead.

    Returns:
        categories: a list of dictionaries representing all possible
            categories.
    """
    if not label_map:
        # No label map supplied: synthesize generic names category_1..N.
        label_id_offset = 1
        return [
            {
                "id": class_id + label_id_offset,
                "name": "category_{}".format(class_id + label_id_offset),
            }
            for class_id in range(max_num_classes)
        ]

    categories = []
    seen_ids = set()
    for item in label_map.item:
        if not 0 < item.id <= max_num_classes:
            logging.info(
                "Ignore item %d since it falls outside of requested label range.",
                item.id,
            )
            continue
        if use_display_name and item.HasField("display_name"):
            name = item.display_name
        else:
            name = item.name
        if item.id in seen_ids:
            continue
        seen_ids.add(item.id)
        categories.append({"id": item.id, "name": name})
    return categories
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 0:
raise ValueError("Label map ids should be >= 0.")
if (
item.id == 0
and item.name != "background"
and item.display_name != "background"
):
raise ValueError("Label map id 0 is reserved for the background label")
def load_labelmap(path):
    """Read and validate a StringIntLabelMap proto from *path*.

    The file is first parsed as text-format protobuf; if that fails, it is
    parsed as a serialized binary proto instead.

    Args:
        path: path to StringIntLabelMap proto text file.

    Returns:
        a StringIntLabelMapProto
    """
    with tf.compat.v1.gfile.GFile(path, "r") as fid:
        raw = fid.read()
    label_map = string_int_label_map_pb2.StringIntLabelMap()
    try:
        text_format.Merge(raw, label_map)
    except text_format.ParseError:
        # Not text format; fall back to binary wire format.
        label_map.ParseFromString(raw)
    _validate_label_map(label_map)
    return label_map
def create_categories_from_labelmap(label_map_path, use_display_name=True):
    """Load a label map file and convert it into a categories list.

    Args:
        label_map_path: Path to `StringIntLabelMap` proto text file.
        use_display_name: (boolean) choose whether to load 'display_name'
            field as category name. If False or if the display_name field
            does not exist, uses 'name' field as category names instead.

    Returns:
        categories: a list of dicts with 'id' and 'name' keys, covering
            every id up to the largest id present in the label map.
    """
    label_map = load_labelmap(label_map_path)
    largest_id = max(entry.id for entry in label_map.item)
    return convert_label_map_to_categories(label_map, largest_id, use_display_name)
def create_category_index(categories):
    """Key a list of COCO-compatible category dicts by their 'id' field.

    Args:
        categories: a list of dicts, each of which has the following keys:
            'id': (required) an integer id uniquely identifying this
                category.
            'name': (required) string representing category name
                e.g., 'cat', 'dog', 'pizza'.

    Returns:
        category_index: a dict containing the same entries as categories,
            but keyed by the 'id' field of each category.
    """
    return {category["id"]: category for category in categories}
def create_category_index_from_labelmap(label_map_path, use_display_name=True):
    """Read a label map file and return a category index keyed by id.

    Args:
        label_map_path: Path to `StringIntLabelMap` proto text file.
        use_display_name: (boolean) choose whether to load 'display_name'
            field as category name. If False or if the display_name field
            does not exist, uses 'name' field as category names instead.

    Returns:
        A dict mapping integer ids to category dicts, e.g.
        {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
    """
    categories = create_categories_from_labelmap(label_map_path, use_display_name)
    return create_category_index(categories)
| [
"noreply@github.com"
] | timayy.noreply@github.com |
70ca9355a32a11f9c8c43216e644aff6d4ba1726 | 950ea6cf3d5b82141af484005ebff10da8d63d70 | /Clase3/desafio1/escape2.py | 00dd7754a3e7bcc04535b9d7ca0c12aced854fdc | [] | no_license | jrodriguezcorte/desafiopyton | 2879c9c631a7b1b9b2d7c64310af504851ce5d72 | 4ecc464eaa8247e84f2fb07f55e6741d28d6913f | refs/heads/master | 2020-07-15T14:36:37.748449 | 2019-10-03T00:22:35 | 2019-10-03T00:22:35 | 205,584,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import sys
import math

# Command line: gravity of the planet [m/s^2] and its radius [km].
gravity_str = sys.argv[1]
radius_str = sys.argv[2]
print(gravity_str)
print(radius_str)
# Convert radius from km to m, and both inputs to float.
radius_m = float(radius_str) * 1000
gravity = float(gravity_str)
# Escape velocity: v = sqrt(2 * g * r)
velocidad = math.sqrt(2 * gravity * radius_m)
print(velocidad)
# Final, formatted result.
print("La velocidad de escape es de {} m/s".format(velocidad))
| [
"jrodriguezcorte@gmail.com"
] | jrodriguezcorte@gmail.com |
67a841ac1879780cf4fb03786cd5b1d6924639eb | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/sympy/physics/mechanics/system.py | b82288bed1b78b4314a676d5a08789aa26293f22 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 18,663 | py | from sympy.core.backend import eye, Matrix, zeros
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics.functions import find_dynamicsymbols
__all__ = ['SymbolicSystem']
class SymbolicSystem(object):
"""SymbolicSystem is a class that contains all the information about a
system in a symbolic format such as the equations of motions and the bodies
and loads in the system.
There are three ways that the equations of motion can be described for
Symbolic System:
[1] Explicit form where the kinematics and dynamics are combined
x' = F_1(x, t, r, p)
[2] Implicit form where the kinematics and dynamics are combined
M_2(x, p) x' = F_2(x, t, r, p)
[3] Implicit form where the kinematics and dynamics are separate
M_3(q, p) u' = F_3(q, u, t, r, p)
q' = G(q, u, t, r, p)
where
x : states, e.g. [q, u]
t : time
r : specified (exogenous) inputs
p : constants
q : generalized coordinates
u : generalized speeds
F_1 : right hand side of the combined equations in explicit form
F_2 : right hand side of the combined equations in implicit form
F_3 : right hand side of the dynamical equations in implicit form
M_2 : mass matrix of the combined equations in implicit form
M_3 : mass matrix of the dynamical equations in implicit form
G : right hand side of the kinematical differential equations
Parameters
==========
coord_states : ordered iterable of functions of time
This input will either be a collection of the coordinates or states
of the system depending on whether or not the speeds are also
given. If speeds are specified this input will be assumed to
be the coordinates otherwise this input will be assumed to
be the states.
right_hand_side : Matrix
This variable is the right hand side of the equations of motion in
any of the forms. The specific form will be assumed depending on
whether a mass matrix or coordinate derivatives are given.
speeds : ordered iterable of functions of time, optional
This is a collection of the generalized speeds of the system. If
given it will be assumed that the first argument (coord_states)
will represent the generalized coordinates of the system.
mass_matrix : Matrix, optional
The matrix of the implicit forms of the equations of motion (forms
[2] and [3]). The distinction between the forms is determined by
whether or not the coordinate derivatives are passed in. If
they are given form [3] will be assumed otherwise form [2] is
assumed.
coordinate_derivatives : Matrix, optional
The right hand side of the kinematical equations in explicit form.
If given it will be assumed that the equations of motion are being
entered in form [3].
alg_con : Iterable, optional
The indexes of the rows in the equations of motion that contain
algebraic constraints instead of differential equations. If the
equations are input in form [3], it will be assumed the indexes are
referencing the mass_matrix/right_hand_side combination and not the
coordinate_derivatives.
output_eqns : Dictionary, optional
Any output equations that are desired to be tracked are stored in a
dictionary where the key corresponds to the name given for the
specific equation and the value is the equation itself in symbolic
form
coord_idxs : Iterable, optional
If coord_states corresponds to the states rather than the
coordinates this variable will tell SymbolicSystem which indexes of
the states correspond to generalized coordinates.
speed_idxs : Iterable, optional
If coord_states corresponds to the states rather than the
coordinates this variable will tell SymbolicSystem which indexes of
the states correspond to generalized speeds.
bodies : iterable of Body/Rigidbody objects, optional
Iterable containing the bodies of the system
loads : iterable of load instances (described below), optional
Iterable containing the loads of the system where forces are given
by (point of application, force vector) and torques are given by
(reference frame acting upon, torque vector). Ex [(point, force),
(ref_frame, torque)]
Attributes
==========
coordinates : Matrix, shape(n, 1)
This is a matrix containing the generalized coordinates of the system
speeds : Matrix, shape(m, 1)
This is a matrix containing the generalized speeds of the system
states : Matrix, shape(o, 1)
This is a matrix containing the state variables of the system
alg_con : List
This list contains the indices of the algebraic constraints in the
combined equations of motion. The presence of these constraints
requires that a DAE solver be used instead of an ODE solver.
If the system is given in form [3] the alg_con variable will be
adjusted such that it is a representation of the combined kinematics
and dynamics thus make sure it always matches the mass matrix
entered.
dyn_implicit_mat : Matrix, shape(m, m)
This is the M matrix in form [3] of the equations of motion (the mass
matrix or generalized inertia matrix of the dynamical equations of
motion in implicit form).
dyn_implicit_rhs : Matrix, shape(m, 1)
This is the F vector in form [3] of the equations of motion (the right
hand side of the dynamical equations of motion in implicit form).
comb_implicit_mat : Matrix, shape(o, o)
This is the M matrix in form [2] of the equations of motion.
This matrix contains a block diagonal structure where the top
left block (the first rows) represent the matrix in the
implicit form of the kinematical equations and the bottom right
block (the last rows) represent the matrix in the implicit form
of the dynamical equations.
comb_implicit_rhs : Matrix, shape(o, 1)
This is the F vector in form [2] of the equations of motion. The top
part of the vector represents the right hand side of the implicit form
of the kinemaical equations and the bottom of the vector represents the
right hand side of the implicit form of the dynamical equations of
motion.
comb_explicit_rhs : Matrix, shape(o, 1)
This vector represents the right hand side of the combined equations of
motion in explicit form (form [1] from above).
kin_explicit_rhs : Matrix, shape(m, 1)
This is the right hand side of the explicit form of the kinematical
equations of motion as can be seen in form [3] (the G matrix).
output_eqns : Dictionary
If output equations were given they are stored in a dictionary where
the key corresponds to the name given for the specific equation and
the value is the equation itself in symbolic form
bodies : Tuple
If the bodies in the system were given they are stored in a tuple for
future access
loads : Tuple
If the loads in the system were given they are stored in a tuple for
future access. This includes forces and torques where forces are given
by (point of application, force vector) and torques are given by
(reference frame acted upon, torque vector).
Example
=======
As a simple example, the dynamics of a simple pendulum will be input into a
SymbolicSystem object manually. First some imports will be needed and then
symbols will be set up for the length of the pendulum (l), mass at the end
of the pendulum (m), and a constant for gravity (g). ::
>>> from sympy import Matrix, sin, symbols
>>> from sympy.physics.mechanics import dynamicsymbols, SymbolicSystem
>>> l, m, g = symbols('l m g')
The system will be defined by an angle of theta from the vertical and a
generalized speed of omega will be used where omega = theta_dot. ::
>>> theta, omega = dynamicsymbols('theta omega')
Now the equations of motion are ready to be formed and passed to the
SymbolicSystem object. ::
>>> kin_explicit_rhs = Matrix([omega])
>>> dyn_implicit_mat = Matrix([l**2 * m])
>>> dyn_implicit_rhs = Matrix([-g * l * m * sin(theta)])
>>> symsystem = SymbolicSystem([theta], dyn_implicit_rhs, [omega],
... dyn_implicit_mat)
Notes
=====
m : number of generalized speeds
n : number of generalized coordinates
o : number of states
"""
def __init__(self, coord_states, right_hand_side, speeds=None,
mass_matrix=None, coordinate_derivatives=None, alg_con=None,
output_eqns={}, coord_idxs=None, speed_idxs=None, bodies=None,
loads=None):
"""Initializes a SymbolicSystem object"""
# Extract information on speeds, coordinates and states
if speeds is None:
self._states = Matrix(coord_states)
if coord_idxs is None:
self._coordinates = None
else:
coords = [coord_states[i] for i in coord_idxs]
self._coordinates = Matrix(coords)
if speed_idxs is None:
self._speeds = None
else:
speeds_inter = [coord_states[i] for i in speed_idxs]
self._speeds = Matrix(speeds_inter)
else:
self._coordinates = Matrix(coord_states)
self._speeds = Matrix(speeds)
self._states = self._coordinates.col_join(self._speeds)
# Extract equations of motion form
if coordinate_derivatives is not None:
self._kin_explicit_rhs = coordinate_derivatives
self._dyn_implicit_rhs = right_hand_side
self._dyn_implicit_mat = mass_matrix
self._comb_implicit_rhs = None
self._comb_implicit_mat = None
self._comb_explicit_rhs = None
elif mass_matrix is not None:
self._kin_explicit_rhs = None
self._dyn_implicit_rhs = None
self._dyn_implicit_mat = None
self._comb_implicit_rhs = right_hand_side
self._comb_implicit_mat = mass_matrix
self._comb_explicit_rhs = None
else:
self._kin_explicit_rhs = None
self._dyn_implicit_rhs = None
self._dyn_implicit_mat = None
self._comb_implicit_rhs = None
self._comb_implicit_mat = None
self._comb_explicit_rhs = right_hand_side
# Set the remainder of the inputs as instance attributes
if alg_con is not None and coordinate_derivatives is not None:
alg_con = [i + len(coordinate_derivatives) for i in alg_con]
self._alg_con = alg_con
self.output_eqns = output_eqns
# Change the body and loads iterables to tuples if they are not tuples
# already
if type(bodies) != tuple and bodies is not None:
bodies = tuple(bodies)
if type(loads) != tuple and loads is not None:
loads = tuple(loads)
self._bodies = bodies
self._loads = loads
@property
def coordinates(self):
"""Returns the column matrix of the generalized coordinates"""
if self._coordinates is None:
raise AttributeError("The coordinates were not specified.")
else:
return self._coordinates
@property
def speeds(self):
"""Returns the column matrix of generalized speeds"""
if self._speeds is None:
raise AttributeError("The speeds were not specified.")
else:
return self._speeds
@property
def states(self):
"""Returns the column matrix of the state variables"""
return self._states
@property
def alg_con(self):
"""Returns a list with the indices of the rows containing algebraic
constraints in the combined form of the equations of motion"""
return self._alg_con
@property
def dyn_implicit_mat(self):
"""Returns the matrix, M, corresponding to the dynamic equations in
implicit form, M x' = F, where the kinematical equations are not
included"""
if self._dyn_implicit_mat is None:
raise AttributeError("dyn_implicit_mat is not specified for "
"equations of motion form [1] or [2].")
else:
return self._dyn_implicit_mat
@property
def dyn_implicit_rhs(self):
"""Returns the column matrix, F, corresponding to the dynamic equations
in implicit form, M x' = F, where the kinematical equations are not
included"""
if self._dyn_implicit_rhs is None:
raise AttributeError("dyn_implicit_rhs is not specified for "
"equations of motion form [1] or [2].")
else:
return self._dyn_implicit_rhs
    @property
    def comb_implicit_mat(self):
        """Returns the matrix, M, corresponding to the equations of motion in
        implicit form (form [2]), M x' = F, where the kinematical equations are
        included

        When the system was given in form [3], the combined matrix is built
        lazily from the pieces as the block matrix::

            [ I  0 ]
            [ 0  M ]

        where I has the size of the kinematical equations and M is the
        dynamic implicit matrix; the result is cached for later accesses.
        Raises AttributeError for form [1], which has no implicit matrix.
        """
        if self._comb_implicit_mat is None:
            if self._dyn_implicit_mat is not None:
                num_kin_eqns = len(self._kin_explicit_rhs)
                num_dyn_eqns = len(self._dyn_implicit_rhs)
                zeros1 = zeros(num_kin_eqns, num_dyn_eqns)
                zeros2 = zeros(num_dyn_eqns, num_kin_eqns)
                # Assemble the two block rows and stack them.
                inter1 = eye(num_kin_eqns).row_join(zeros1)
                inter2 = zeros2.row_join(self._dyn_implicit_mat)
                self._comb_implicit_mat = inter1.col_join(inter2)
                return self._comb_implicit_mat
            else:
                raise AttributeError("comb_implicit_mat is not specified for "
                                     "equations of motion form [1].")
        else:
            return self._comb_implicit_mat
@property
def comb_implicit_rhs(self):
"""Returns the column matrix, F, corresponding to the equations of
motion in implicit form (form [2]), M x' = F, where the kinematical
equations are included"""
if self._comb_implicit_rhs is None:
if self._dyn_implicit_rhs is not None:
kin_inter = self._kin_explicit_rhs
dyn_inter = self._dyn_implicit_rhs
self._comb_implicit_rhs = kin_inter.col_join(dyn_inter)
return self._comb_implicit_rhs
else:
raise AttributeError("comb_implicit_mat is not specified for "
"equations of motion in form [1].")
else:
return self._comb_implicit_rhs
    def compute_explicit_form(self):
        """If the explicit right hand side of the combined equations of motion
        was not provided upon initialization, this method will calculate it.
        This calculation can potentially take awhile to compute.

        Raises AttributeError if the explicit form already exists. The result
        is stored in ``self._comb_explicit_rhs`` and exposed through the
        ``comb_explicit_rhs`` property.
        """
        if self._comb_explicit_rhs is not None:
            raise AttributeError("comb_explicit_rhs is already formed.")

        # getattr() is used because the ``kin_explicit_rhs`` property raises
        # AttributeError when the kinematical equations were not stored
        # separately; in that case fall back to solving the combined
        # implicit system instead.
        inter1 = getattr(self, 'kin_explicit_rhs', None)
        if inter1 is not None:
            # Form [3]: solve only the dynamic part and stack the explicit
            # kinematical right hand side on top of it.
            inter2 = self._dyn_implicit_mat.LUsolve(self._dyn_implicit_rhs)
            out = inter1.col_join(inter2)
        else:
            # Form [2]: solve the full combined implicit system.
            out = self._comb_implicit_mat.LUsolve(self._comb_implicit_rhs)

        self._comb_explicit_rhs = out
@property
def comb_explicit_rhs(self):
"""Returns the right hand side of the equations of motion in explicit
form, x' = F, where the kinematical equations are included"""
if self._comb_explicit_rhs is None:
raise AttributeError("Please run .combute_explicit_form before "
"attempting to access comb_explicit_rhs.")
else:
return self._comb_explicit_rhs
@property
def kin_explicit_rhs(self):
"""Returns the right hand side of the kinematical equations in explicit
form, q' = G"""
if self._kin_explicit_rhs is None:
raise AttributeError("kin_explicit_rhs is not specified for "
"equations of motion form [1] or [2].")
else:
return self._kin_explicit_rhs
    def dynamic_symbols(self):
        """Returns a tuple containing all of the symbols in the system that
        depend on time (the dynamicsymbols found in the equations of motion
        plus the state variables themselves)."""
        # Create a list of all of the expressions in the equations of motion;
        # use the combined implicit pieces unless the explicit form exists.
        if self._comb_explicit_rhs is None:
            eom_expressions = (self.comb_implicit_mat[:] +
                               self.comb_implicit_rhs[:])
        else:
            eom_expressions = (self._comb_explicit_rhs[:])

        # Collect the time-dependent functions appearing in every expression.
        functions_of_time = set()
        for expr in eom_expressions:
            functions_of_time = functions_of_time.union(
                find_dynamicsymbols(expr))
        # The states are time-dependent by definition.
        functions_of_time = functions_of_time.union(self._states)

        return tuple(functions_of_time)
    def constant_symbols(self):
        """Returns a tuple containing all of the symbols in the system that
        do not depend on time."""
        # Create a list of all of the expressions in the equations of motion;
        # use the combined implicit pieces unless the explicit form exists.
        if self._comb_explicit_rhs is None:
            eom_expressions = (self.comb_implicit_mat[:] +
                               self.comb_implicit_rhs[:])
        else:
            eom_expressions = (self._comb_explicit_rhs[:])

        # Gather every free symbol, then drop the time variable itself,
        # which is always present via the dynamicsymbols.
        constants = set()
        for expr in eom_expressions:
            constants = constants.union(expr.free_symbols)
        constants.remove(dynamicsymbols._t)

        return tuple(constants)
@property
def bodies(self):
"""Returns the bodies in the system"""
if self._bodies is None:
raise AttributeError("bodies were not specified for the system.")
else:
return self._bodies
@property
def loads(self):
"""Returns the loads in the system"""
if self._loads is None:
raise AttributeError("loads were not specified for the system.")
else:
return self._loads
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
46b39b7702a9141219f94da8c8bd28f61995ddac | 60c4255fb0cf7ed817ff09d8113bf404cde8e12b | /env/lib/python2.7/site-packages/django/db/models/fields/related.py | 097b33363a2c3931424d17747bbaba9e3fa56da9 | [] | no_license | adamjberg/finna-be-octo-ninja | 83aba13f619d4fbfb5308e48336917f0ada0459d | cf16bfcb3d7bb4e878ba0b99ad701b5cda8be34c | refs/heads/master | 2021-01-10T20:19:20.849476 | 2014-01-11T05:42:23 | 2014-01-11T05:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,415 | py | from operator import attrgetter
from django.db import connection, connections, router
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _, string_concat
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
# Placeholder string used to declare a relation back onto the model that
# defines the field (e.g. ForeignKey("self")).
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'

# Maps (app_label, model_name) -> list of (cls, field, operation) triples for
# string-based relations whose target model class has not been loaded yet.
# Entries are drained by do_pending_lookups() when class_prepared fires.
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
    """
    Register ``operation`` to be run once the model referenced by ``relation``
    is available, for a related field declared on ``cls``::

        class MyModel(Model):
            fk = ForeignKey("AnotherModel")

    ``relation`` may be:

    * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self"), meaning a recursive
      relation back onto ``cls``;
    * a bare model name ("AnotherModel"), meaning a model in the same app;
    * an "app_label.ModelName" string, meaning a model in another app;
    * an actual model class.

    If the target model class is already loaded, ``operation`` runs
    immediately; otherwise it is queued in ``pending_lookups`` and executed
    from the class_prepared signal handler once the model is prepared.
    """
    # Work out the (app_label, model_name) pair the relation points at.
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        app_label = cls._meta.app_label
        model_name = cls.__name__
    elif isinstance(relation, six.string_types):
        pieces = relation.split(".")
        if len(pieces) == 2:
            # "app.Model" style reference.
            app_label, model_name = pieces
        else:
            # Anything else is treated as a model in the current app.
            app_label = cls._meta.app_label
            model_name = relation
    else:
        # An actual model class was passed in.
        app_label = relation._meta.app_label
        model_name = relation._meta.object_name

    # If the related model is already loaded, resolve the relation right away.
    # get_model() returning None means it is not loaded yet, so the relation
    # is parked until class_prepared fires for it.
    model = get_model(app_label, model_name,
                      seed_cache=False, only_installed=False)
    if model:
        operation(field, model, cls)
        return
    pending_lookups.setdefault((app_label, model_name), []).append(
        (cls, field, operation))
def do_pending_lookups(sender, **kwargs):
    """
    Resolve every relation that was queued for the just-prepared model
    ``sender``. Connected to the class_prepared signal.
    """
    queued = pending_lookups.pop((sender._meta.app_label, sender.__name__), [])
    for cls, field, operation in queued:
        operation(field, sender, cls)
# Resolve queued lazy (string-based) relations as soon as their target model
# class is fully prepared.
signals.class_prepared.connect(do_pending_lookups)

# HACK: RelatedField below is written as a mix-in layered on top of concrete
# Field subclasses (note the hasattr() check on super() in
# contribute_to_class) rather than subclassing Field directly.
class RelatedField(object):
    """Mix-in providing the behavior shared by fields that relate to another
    model: lazy resolution of string model references, attribute defaulting
    from the target model, and preparation of lookup values."""

    def contribute_to_class(self, cls, name):
        """Hook called when the field is added to ``cls``; resolves the
        target model (possibly lazily) and interpolates related_name."""
        sup = super(RelatedField, self)

        # Store the opts for related_query_name()
        self.opts = cls._meta

        # Cooperative mix-in: only delegate if another base in the MRO
        # actually implements contribute_to_class.
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name)

        if not cls._meta.abstract and self.rel.related_name:
            # Allow '%(class)s' / '%(app_label)s' placeholders in
            # related_name (used by abstract base classes).
            self.rel.related_name = self.rel.related_name % {
                'class': cls.__name__.lower(),
                'app_label': cls._meta.app_label.lower(),
            }

        other = self.rel.to
        if isinstance(other, six.string_types) or other._meta.pk is None:
            # Target is a string reference or not fully prepared yet:
            # defer the wiring until the model class is available.
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)

    def set_attributes_from_rel(self):
        """Fill in name/verbose_name/field_name defaults from the target
        model once it is known."""
        self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name

    def do_related_class(self, other, cls):
        """Finish wiring this field to the resolved target model ``other``."""
        self.set_attributes_from_rel()
        self.related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.related)

    def get_prep_lookup(self, lookup_type, value):
        """Prepare a Python lookup value, drilling through related objects
        down to their primary-key values via _pk_trace()."""
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()
        # FIXME: lt and gt are explicitly allowed to make
        # get_(next/prev)_by_date work; other lookups are not allowed since that
        # gets messy pretty quick. This is a good candidate for some refactoring
        # in the future.
        if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
            return self._pk_trace(value, 'get_prep_lookup', lookup_type)
        if lookup_type in ('range', 'in'):
            return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError("Related Field has invalid lookup: %s" % lookup_type)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """Prepare a lookup value for the database; query-like values are
        wrapped as SQL, everything else goes through _pk_trace()."""
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)

        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)

        # FIXME: lt and gt are explicitly allowed to make
        # get_(next/prev)_by_date work; other lookups are not allowed since that
        # gets messy pretty quick. This is a good candidate for some refactoring
        # in the future.
        if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
            return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
                            connection=connection, prepared=prepared)]
        if lookup_type in ('range', 'in'):
            return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
                            connection=connection, prepared=prepared)
                    for v in value]
        elif lookup_type == 'isnull':
            return []
        raise TypeError("Related Field has invalid lookup: %s" % lookup_type)

    def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
        """Reduce ``value`` (a model instance or raw key) to a comparable
        key value, then run it through the ultimate target field's
        ``prep_func``."""
        # Value may be a primary key, or an object held in a relation.
        # If it is an object, then we need to get the primary key value for
        # that object. In certain conditions (especially one-to-one relations),
        # the primary key may itself be an object - so we need to keep drilling
        # down until we hit a value that can be used for a comparison.
        v = value

        # In the case of an FK to 'self', this check allows to_field to be used
        # for both forwards and reverse lookups across the FK. (For normal FKs,
        # it's only relevant for forward lookups).
        if isinstance(v, self.rel.to):
            field_name = getattr(self.rel, "field_name", None)
        else:
            field_name = None
        try:
            while True:
                if field_name is None:
                    field_name = v._meta.pk.name
                v = getattr(v, field_name)
                field_name = None
        except AttributeError:
            # v is no longer a model instance; it is the plain key value.
            pass
        except exceptions.ObjectDoesNotExist:
            v = None

        # Follow the chain of relations to find the concrete field whose
        # prep_func should be used for the final value.
        field = self
        while field.rel:
            if hasattr(field.rel, 'field_name'):
                field = field.rel.to._meta.get_field(field.rel.field_name)
            else:
                field = field.rel.to._meta.pk

        # prep_func for range/in expects a sequence; wrap, prepare, unwrap.
        if lookup_type in ('range', 'in'):
            v = [v]
        v = getattr(field, prep_func)(lookup_type, v, **kwargs)
        if isinstance(v, list):
            v = v[0]
        return v

    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        # Name of the attribute used to cache the fetched related object
        # on the instance.
        self.cache_name = related.get_cache_name()

    def is_cached(self, instance):
        """Return True if the related object is already cached on ``instance``."""
        return hasattr(instance, self.cache_name)

    def get_query_set(self, **db_hints):
        """Base queryset for the related model, routed to the right database."""
        db = router.db_for_read(self.related.model, **db_hints)
        return self.related.model._base_manager.using(db)

    def get_prefetch_query_set(self, instances):
        """Return the queryset and bookkeeping callables used by
        prefetch_related() to fetch the related objects for ``instances``
        in one query."""
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        params = {'%s__pk__in' % self.related.field.name: list(instances_dict)}
        qs = self.get_query_set(instance=instances[0]).filter(**params)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in qs:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """Fetch (and cache) the single related object; raise DoesNotExist
        when there is none. Class access returns the descriptor itself."""
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                # Unsaved instance cannot have a related row.
                rel_obj = None
            else:
                params = {'%s__pk' % self.related.field.name: related_pk}
                try:
                    rel_obj = self.get_query_set(instance=instance).get(**params)
                except self.related.model.DoesNotExist:
                    rel_obj = None
                else:
                    # Seed the reverse cache on the fetched object too.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            # Cache the result (even None) so the query runs only once.
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.related.model.DoesNotExist
        else:
            return rel_obj

    def __set__(self, instance, value):
        """Assign the related object from the reverse side, validating type,
        nullability and database routing, and seed both caches."""
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)

        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.related.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            # Propagate database state between the two instances, or verify
            # that a cross-database relation is allowed by the router.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
                                     (value, instance._state.db, value._state.db))

        related_pk = getattr(instance, self.related.field.rel.get_related_field().attname)
        if related_pk is None:
            raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                             (value, instance._meta.object_name))

        # Set the value of the related field to the value of the related object's related field
        setattr(value, self.related.field.attname, related_pk)

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        # Name of the attribute used to cache the fetched related object
        # on the instance.
        self.cache_name = self.field.get_cache_name()

    def is_cached(self, instance):
        """Return True if the related object is already cached on ``instance``."""
        return hasattr(instance, self.cache_name)

    def get_query_set(self, **db_hints):
        """Base queryset for the target model, routed to the right database."""
        db = router.db_for_read(self.field.rel.to, **db_hints)
        rel_mgr = self.field.rel.to._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if getattr(rel_mgr, 'use_for_related_fields', False):
            return rel_mgr.using(db)
        else:
            return QuerySet(self.field.rel.to).using(db)

    def get_prefetch_query_set(self, instances):
        """Return the queryset and bookkeeping callables used by
        prefetch_related() to fetch the related objects for ``instances``
        in one query."""
        other_field = self.field.rel.get_related_field()
        rel_obj_attr = attrgetter(other_field.attname)
        instance_attr = attrgetter(self.field.attname)
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        # If the target field is itself a relation, compare against its pk.
        if other_field.rel:
            params = {'%s__pk__in' % self.field.rel.field_name: list(instances_dict)}
        else:
            params = {'%s__in' % self.field.rel.field_name: list(instances_dict)}
        qs = self.get_query_set(instance=instances[0]).filter(**params)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.rel.multiple:
            # One-to-one: seed the reverse cache on each fetched object.
            rel_obj_cache_name = self.field.related.get_cache_name()
            for rel_obj in qs:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """Fetch (and cache) the related object pointed to by the FK value;
        raise DoesNotExist for a non-nullable field whose value is None.
        Class access returns the descriptor itself."""
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = getattr(instance, self.field.attname)
            if val is None:
                rel_obj = None
            else:
                other_field = self.field.rel.get_related_field()
                if other_field.rel:
                    params = {'%s__%s' % (self.field.rel.field_name, other_field.rel.field_name): val}
                else:
                    params = {'%s__exact' % self.field.rel.field_name: val}
                qs = self.get_query_set(instance=instance)
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get(**params)
                if not self.field.rel.multiple:
                    # One-to-one: seed the reverse cache too.
                    setattr(rel_obj, self.field.related.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.field.rel.to.DoesNotExist
        else:
            return rel_obj

    def __set__(self, instance, value):
        """Assign the related object from the forward side, validating type,
        nullability and database routing, updating the FK attribute and
        seeding both caches."""
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.field.name)

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null == False:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            # Propagate database state between the two instances, or verify
            # that a cross-database relation is allowed by the router.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
                                     (value, instance._state.db, value._state.db))

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.related.get_cache_name(), None)

        # Set the value of the related field
        try:
            val = getattr(value, self.field.rel.get_related_field().attname)
        except AttributeError:
            val = None
        setattr(instance, self.field.attname, val)

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.related.get_cache_name(), instance)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance

    def __get__(self, instance, instance_type=None):
        """Return a fresh related manager bound to ``instance``; class
        access returns the descriptor itself."""
        if instance is None:
            return self

        return self.related_manager_cls(instance)

    def __set__(self, instance, value):
        """Replace the related set with ``value`` (an iterable of related
        objects)."""
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)

    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related model's default
        # manager. Built once per descriptor (cached_property); rel_field,
        # rel_model and attname are captured by the closure below.
        superclass = self.related.model._default_manager.__class__
        rel_field = self.related.field
        rel_model = self.related.model
        attname = rel_field.rel.get_related_field().attname

        class RelatedManager(superclass):
            def __init__(self, instance):
                super(RelatedManager, self).__init__()
                self.instance = instance
                # Filter restricting the queryset to rows pointing at
                # ``instance`` via the foreign key.
                self.core_filters = {
                    '%s__%s' % (rel_field.name, attname): getattr(instance, attname)
                }
                self.model = rel_model

            def get_query_set(self):
                """Related queryset, served from the prefetch cache when
                one exists."""
                try:
                    return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
                except (AttributeError, KeyError):
                    db = self._db or router.db_for_read(self.model, instance=self.instance)
                    qs = super(RelatedManager, self).get_query_set().using(db).filter(**self.core_filters)
                    val = getattr(self.instance, attname)
                    # NOTE: 'and' binds tighter than 'or' here, so this reads
                    # as: val is None, OR (val == '' AND the backend treats
                    # empty strings as NULL) -- either way no row can match.
                    if val is None or val == '' and connections[db].features.interprets_empty_strings_as_nulls:
                        # We don't want to use qs.none() here, see #19652
                        return qs.filter(pk__in=[])
                    qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
                    return qs

            def get_prefetch_query_set(self, instances):
                """Queryset and bookkeeping callables used by
                prefetch_related() to fetch related objects for all
                ``instances`` in one query."""
                rel_obj_attr = attrgetter(rel_field.attname)
                instance_attr = attrgetter(attname)
                instances_dict = dict((instance_attr(inst), inst) for inst in instances)
                db = self._db or router.db_for_read(self.model, instance=instances[0])
                query = {'%s__%s__in' % (rel_field.name, attname): list(instances_dict)}
                qs = super(RelatedManager, self).get_query_set().using(db).filter(**query)
                # Since we just bypassed this class' get_query_set(), we must manage
                # the reverse relation manually.
                for rel_obj in qs:
                    instance = instances_dict[rel_obj_attr(rel_obj)]
                    setattr(rel_obj, rel_field.name, instance)
                cache_name = rel_field.related_query_name()
                return qs, rel_obj_attr, instance_attr, False, cache_name

            def add(self, *objs):
                """Point each given object's foreign key at ``instance`` and
                save it."""
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    setattr(obj, rel_field.name, self.instance)
                    obj.save()
            add.alters_data = True

            def create(self, **kwargs):
                """Create a related object with its FK preset to ``instance``."""
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).create(**kwargs)
            create.alters_data = True

            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            get_or_create.alters_data = True

            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    """Detach the given objects by nulling their FK; each must
                    currently belong to this related set."""
                    val = getattr(self.instance, attname)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if getattr(obj, rel_field.attname) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                remove.alters_data = True

                def clear(self):
                    """Detach every related object in one UPDATE."""
                    self.update(**{rel_field.name: None})
                clear.alters_data = True

        return RelatedManager
def create_many_related_manager(superclass, rel):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
class ManyRelatedManager(superclass):
def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
source_field_name=None, target_field_name=None, reverse=False,
through=None, prefetch_cache_name=None):
super(ManyRelatedManager, self).__init__()
self.model = model
self.query_field_name = query_field_name
self.core_filters = {'%s__pk' % query_field_name: instance._get_pk_val()}
self.instance = instance
self.symmetrical = symmetrical
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.reverse = reverse
self.through = through
self.prefetch_cache_name = prefetch_cache_name
self._fk_val = self._get_fk_val(instance, source_field_name)
if self._fk_val is None:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, source_field_name))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def _get_fk_val(self, obj, field_name):
"""
Returns the correct value for this relationship's foreign key. This
might be something else than pk value when to_field is used.
"""
if not self.through:
# Make custom m2m fields with no through model defined usable.
return obj.pk
fk = self.through._meta.get_field(field_name)
if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname:
attname = fk.rel.get_related_field().get_attname()
return fk.get_prep_lookup('exact', getattr(obj, attname))
else:
return obj.pk
def get_query_set(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
return super(ManyRelatedManager, self).get_query_set().using(db)._next_is_sticky().filter(**self.core_filters)
def get_prefetch_query_set(self, instances):
instance = instances[0]
from django.db import connections
db = self._db or router.db_for_read(instance.__class__, instance=instance)
query = {'%s__pk__in' % self.query_field_name:
set(obj._get_pk_val() for obj in instances)}
qs = super(ManyRelatedManager, self).get_query_set().using(db)._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
source_col = fk.column
join_table = self.through._meta.db_table
connection = connections[db]
qn = connection.ops.quote_name
qs = qs.extra(select={'_prefetch_related_val':
'%s.%s' % (qn(join_table), qn(source_col))})
select_attname = fk.rel.get_related_field().get_attname()
return (qs,
attrgetter('_prefetch_related_val'),
attrgetter(select_attname),
False,
self.prefetch_cache_name)
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if rel.through._meta.auto_created:
def add(self, *objs):
    """Link ``objs`` (instances or primary keys) to the source instance."""
    self._add_items(self.source_field_name, self.target_field_name, *objs)
    # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
    if self.symmetrical:
        self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
    """Unlink ``objs`` (instances or primary keys) from the source instance."""
    self._remove_items(self.source_field_name, self.target_field_name, *objs)
    # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
    if self.symmetrical:
        self._remove_items(self.target_field_name, self.source_field_name, *objs)
remove.alters_data = True
def clear(self):
    """Remove every relation for this instance by deleting its join rows."""
    self._clear_items(self.source_field_name)
    # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
    if self.symmetrical:
        self._clear_items(self.target_field_name)
clear.alters_data = True
def create(self, **kwargs):
    """Create an object on the related model and link it to the instance.

    Raises AttributeError when the relation uses a manually defined
    intermediary model, since Django cannot know how to populate the
    extra columns of that model.
    """
    # This check needs to be done here, since we can't later remove this
    # from the method lookup table, as we do with add and remove.
    if not self.through._meta.auto_created:
        opts = self.through._meta
        raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
    db = router.db_for_write(self.instance.__class__, instance=self.instance)
    new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
    self.add(new_obj)
    return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
    """get_or_create() on the related model, linking the object if created.

    Returns the usual ``(obj, created)`` pair.
    """
    db = router.db_for_write(self.instance.__class__, instance=self.instance)
    obj, created = \
        super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
    # We only need to add() if created because if we got an object back
    # from get() then the relationship already exists.
    if created:
        self.add(obj)
    return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
    """Insert join-table rows linking ``objs`` to the source instance.

    Validates each object, skips pairs that already exist, and emits the
    m2m_changed pre_add/post_add signals around the bulk insert.
    """
    # source_field_name: the PK fieldname in join table for the source object
    # target_field_name: the PK fieldname in join table for the target object
    # *objs - objects to add. Either object instances, or primary keys of object instances.
    # If there aren't any objects, there is nothing to do.
    from django.db.models import Model
    if objs:
        new_ids = set()
        for obj in objs:
            if isinstance(obj, self.model):
                if not router.allow_relation(obj, self.instance):
                    raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                     (obj, self.instance._state.db, obj._state.db))
                fk_val = self._get_fk_val(obj, target_field_name)
                if fk_val is None:
                    # Unsaved objects have no usable key for the join row.
                    raise ValueError('Cannot add "%r": the value for field "%s" is None' %
                                     (obj, target_field_name))
                new_ids.add(self._get_fk_val(obj, target_field_name))
            elif isinstance(obj, Model):
                # An instance of some unrelated model was passed.
                raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
            else:
                # Assume a raw primary-key value.
                new_ids.add(obj)
        db = router.db_for_write(self.through, instance=self.instance)
        # Find pairs that are already present so they aren't inserted twice.
        vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
        vals = vals.filter(**{
            source_field_name: self._fk_val,
            '%s__in' % target_field_name: new_ids,
        })
        new_ids = new_ids - set(vals)
        if self.reverse or source_field_name == self.source_field_name:
            # Don't send the signal when we are inserting the
            # duplicate data row for symmetrical reverse entries.
            signals.m2m_changed.send(sender=self.through, action='pre_add',
                instance=self.instance, reverse=self.reverse,
                model=self.model, pk_set=new_ids, using=db)
        # Add the ones that aren't there already
        self.through._default_manager.using(db).bulk_create([
            self.through(**{
                '%s_id' % source_field_name: self._fk_val,
                '%s_id' % target_field_name: obj_id,
            })
            for obj_id in new_ids
        ])
        if self.reverse or source_field_name == self.source_field_name:
            # Don't send the signal when we are inserting the
            # duplicate data row for symmetrical reverse entries.
            signals.m2m_changed.send(sender=self.through, action='post_add',
                instance=self.instance, reverse=self.reverse,
                model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
    """Delete the join-table rows linking ``objs`` to the source instance,
    emitting m2m_changed pre_remove/post_remove signals around the delete."""
    # source_field_name: the PK colname in join table for the source object
    # target_field_name: the PK colname in join table for the target object
    # *objs - objects to remove
    # If there aren't any objects, there is nothing to do.
    if objs:
        # Check that all the objects are of the right type
        old_ids = set()
        for obj in objs:
            if isinstance(obj, self.model):
                old_ids.add(self._get_fk_val(obj, target_field_name))
            else:
                # Assume a raw primary-key value.
                old_ids.add(obj)
        # Work out what DB we're operating on
        db = router.db_for_write(self.through, instance=self.instance)
        # Send a signal to the other end if need be.
        if self.reverse or source_field_name == self.source_field_name:
            # Don't send the signal when we are deleting the
            # duplicate data row for symmetrical reverse entries.
            signals.m2m_changed.send(sender=self.through, action="pre_remove",
                instance=self.instance, reverse=self.reverse,
                model=self.model, pk_set=old_ids, using=db)
        # Remove the specified objects from the join table
        self.through._default_manager.using(db).filter(**{
            source_field_name: self._fk_val,
            '%s__in' % target_field_name: old_ids
        }).delete()
        if self.reverse or source_field_name == self.source_field_name:
            # Don't send the signal when we are deleting the
            # duplicate data row for symmetrical reverse entries.
            signals.m2m_changed.send(sender=self.through, action="post_remove",
                instance=self.instance, reverse=self.reverse,
                model=self.model, pk_set=old_ids, using=db)
def _clear_items(self, source_field_name):
    """Delete every join-table row for the source instance, emitting the
    m2m_changed pre_clear/post_clear signals around the delete."""
    db = router.db_for_write(self.through, instance=self.instance)
    # source_field_name: the PK colname in join table for the source object
    if self.reverse or source_field_name == self.source_field_name:
        # Don't send the signal when we are clearing the
        # duplicate data rows for symmetrical reverse entries.
        signals.m2m_changed.send(sender=self.through, action="pre_clear",
            instance=self.instance, reverse=self.reverse,
            model=self.model, pk_set=None, using=db)
    self.through._default_manager.using(db).filter(**{
        source_field_name: self._fk_val
    }).delete()
    if self.reverse or source_field_name == self.source_field_name:
        # Don't send the signal when we are clearing the
        # duplicate data rows for symmetrical reverse entries.
        signals.m2m_changed.send(sender=self.through, action="post_clear",
            instance=self.instance, reverse=self.reverse,
            model=self.model, pk_set=None, using=db)
return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField pointed at them by
    # some other model (rather than having a ManyToManyField themselves).
    # In the example "publication.article_set", the article_set attribute is a
    # ManyRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related   # RelatedObject instance

    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related
        # model's default manager. Cached so the class is built only once.
        return create_many_related_manager(
            self.related.model._default_manager.__class__,
            self.related.field.rel
        )

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself; instance access
        # returns a manager bound to that instance.
        if instance is None:
            return self

        rel_model = self.related.model

        manager = self.related_manager_cls(
            model=rel_model,
            query_field_name=self.related.field.name,
            prefetch_cache_name=self.related.field.related_query_name(),
            instance=instance,
            symmetrical=False,
            source_field_name=self.related.field.m2m_reverse_field_name(),
            target_field_name=self.related.field.m2m_field_name(),
            reverse=True,
            through=self.related.field.rel.through,
        )

        return manager

    def __set__(self, instance, value):
        # Assignment replaces the full set of related objects
        # (clear + add), but only for auto-created through models.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        if not self.related.field.rel.through._meta.auto_created:
            opts = self.related.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))

        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ManyToManyField defined in their
    # model (rather than having another model pointed *at* them).
    # In the example "article.publications", the publications attribute is a
    # ReverseManyRelatedObjectsDescriptor instance.
    def __init__(self, m2m_field):
        self.field = m2m_field   # the ManyToManyField this descriptor serves

    @property
    def through(self):
        # through is provided so that you have easy access to the through
        # model (Book.authors.through) for inlines, etc. This is done as
        # a property to ensure that the fully resolved value is returned.
        return self.field.rel.through

    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related model's
        # default manager. Cached so the class is built only once.
        return create_many_related_manager(
            self.field.rel.to._default_manager.__class__,
            self.field.rel
        )

    def __get__(self, instance, instance_type=None):
        # Class-level access returns the descriptor itself; instance access
        # returns a manager bound to that instance.
        if instance is None:
            return self

        manager = self.related_manager_cls(
            model=self.field.rel.to,
            query_field_name=self.field.related_query_name(),
            prefetch_cache_name=self.field.name,
            instance=instance,
            symmetrical=self.field.rel.symmetrical,
            source_field_name=self.field.m2m_field_name(),
            target_field_name=self.field.m2m_reverse_field_name(),
            reverse=False,
            through=self.field.rel.through,
        )

        return manager

    def __set__(self, instance, value):
        # Assignment replaces the full set of related objects
        # (clear + add), but only for auto-created through models.
        if instance is None:
            raise AttributeError("Manager must be accessed via instance")

        if not self.field.rel.through._meta.auto_created:
            opts = self.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))

        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ManyToOneRel(object):
    """Metadata describing the "many" side of a ForeignKey relation."""

    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None):
        try:
            to._meta
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.to = to
        self.field_name = field_name
        self.related_name = related_name
        # Default to an unrestricted choice set.
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete

    def is_hidden(self):
        "Should the related object be hidden?"
        # A trailing '+' in the related name marks the reverse accessor
        # as hidden.
        return self.related_name and self.related_name[-1] == '+'

    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        field, model, direct, m2m = self.to._meta.get_field_by_name(self.field_name)
        if not direct:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return field
class OneToOneRel(ManyToOneRel):
    """Metadata for a OneToOneField relation.

    Identical to ManyToOneRel except that ``multiple`` is False: there can
    only ever be a single related object.
    """

    def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None):
        super(OneToOneRel, self).__init__(
            to, field_name, related_name=related_name,
            limit_choices_to=limit_choices_to, parent_link=parent_link,
            on_delete=on_delete)
        # A one-to-one relation never yields more than one object.
        self.multiple = False
class ManyToManyRel(object):
    """Metadata describing one side of a ManyToManyField relation."""

    def __init__(self, to, related_name=None, limit_choices_to=None,
                 symmetrical=True, through=None):
        self.to = to
        self.related_name = related_name
        # Default to an unrestricted choice set.
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through

    def is_hidden(self):
        "Should the related object be hidden?"
        # A trailing '+' in the related name marks the reverse accessor
        # as hidden.
        return self.related_name and self.related_name[-1] == '+'

    def get_related_field(self):
        """
        Returns the field in the to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignKey(RelatedField, Field):
    """Model field representing a many-to-one relation to another model.

    The database column type is borrowed from the field the relation
    points at (normally the target model's primary key); the attribute is
    stored on the instance as ``<name>_id``.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")

    def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
        try:
            # Attribute probe: raises AttributeError for string (lazy)
            # references such as "app.Model" or 'self'.
            to_name = to._meta.object_name.lower()
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)

        if 'db_index' not in kwargs:
            # Foreign keys are indexed by default.
            kwargs['db_index'] = True

        kwargs['rel'] = rel_class(to, to_field,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        Field.__init__(self, **kwargs)

    def validate(self, value, model_instance):
        """Check that ``value`` identifies an existing related object."""
        if self.rel.parent_link:
            # Parent links of multi-table inheritance are managed by Django.
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return

        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
                **{self.rel.field_name: value}
             )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(self.error_messages['invalid'] % {
                'model': self.rel.to._meta.verbose_name, 'pk': value})

    def get_attname(self):
        # The raw key value lives on the instance as "<name>_id".
        return '%s_id' % self.name

    def get_validator_unique_lookup_type(self):
        return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)

    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.rel.get_related_field().attname)
        return field_default

    def get_db_prep_save(self, value, connection):
        # Empty string and None are both stored as NULL.
        # (Was `value == None`; `is None` is the correct identity test,
        # per PEP 8.)
        if value == '' or value is None:
            return None
        else:
            return self.rel.get_related_field().get_db_prep_save(value,
                connection=connection)

    def value_to_string(self, obj):
        """Serialize this field's value on ``obj`` (used by serializers)."""
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return Field.value_to_string(self, obj)

    def contribute_to_class(self, cls, name):
        """Install the forward descriptor and record the relation target."""
        super(ForeignKey, self).contribute_to_class(cls, name)
        setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
        if isinstance(self.rel.to, six.string_types):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "o2m")

    def contribute_to_related_class(self, cls, related):
        """Install the reverse accessor on the related model."""
        # Internal FK's - i.e., those with a related name ending with '+' -
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
            if self.rel.limit_choices_to:
                cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
        if self.rel.field_name is None:
            # Now that the related class is known, default to its PK.
            self.rel.field_name = cls._meta.pk.name

    def formfield(self, **kwargs):
        """Return a ModelChoiceField limited to the related queryset."""
        db = kwargs.pop('using', None)
        if isinstance(self.rel.to, six.string_types):
            # The lazy reference was never resolved; cannot build a field.
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.rel.to))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)

    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.rel.get_related_field()
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                isinstance(rel_field, (PositiveIntegerField,
                                       PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
class OneToOneField(ForeignKey):
    """
    A OneToOneField is essentially the same as a ForeignKey, with the exception
    that always carries a "unique" constraint with it and the reverse relation
    always returns the object pointed to (since there will only ever be one),
    rather than returning a list.
    """
    description = _("One-to-one relationship")

    def __init__(self, to, to_field=None, **kwargs):
        # Force the unique constraint that defines a one-to-one relation.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)

    def contribute_to_related_class(self, cls, related):
        # The reverse accessor yields a single object, not a manager.
        setattr(cls, related.get_accessor_name(),
                SingleRelatedObjectDescriptor(related))

    def formfield(self, **kwargs):
        # Parent links (multi-table inheritance) never appear in forms.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Model instances go through the descriptor (self.name); raw key
        # values are stored directly on the attname ("<name>_id").
        attr = self.name if isinstance(data, self.rel.to) else self.attname
        setattr(instance, attr, data)
def create_many_to_many_intermediary_model(field, klass):
    """Build the hidden "through" model for a ManyToManyField that has no
    explicit intermediary, with two ForeignKeys back to each side.

    ``field`` is the ManyToManyField, ``klass`` the model that declares it.
    Returns the dynamically created model class.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Lazy string reference to another model: resolve 'managed' later,
        # once the target model class is available.
        to_model = field.rel.to
        to = to_model.split('.')[-1]

        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed

        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, six.string_types):
        # Recursive relation ('self'): target is the declaring model.
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        # Direct model-class reference.
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Both FKs point at the same model, so disambiguate the columns.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.object_name.lower()
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace),
        to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace)
    })
class ManyToManyField(RelatedField, Field):
    """Model field representing a many-to-many relation, stored in a join
    table (auto-created unless an explicit ``through`` model is given)."""
    description = _("Many-to-many relationship")

    def __init__(self, to, **kwargs):
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            # Python 2.6 and earlier require dictionary keys to be of str type,
            # not unicode and class names must be ASCII (in Python 2.x), so we
            # forcibly coerce it here (breaks early if there's a problem).
            to = str(to)

        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            # Relations to self are symmetrical by default.
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None))

        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."

        Field.__init__(self, **kwargs)

        msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
        self.help_text = string_concat(self.help_text, ' ', msg)

    def get_choices_default(self):
        # M2M fields never have a blank choice.
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Derive "<model_table>_<field_name>", truncated to the
            # backend's identifier limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        # Find the FK on the through model that points back at the source.
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def value_to_string(self, obj):
        """Serialize the related primary keys (used by serializers)."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)

    def contribute_to_class(self, cls, name):
        """Install the descriptor and (if needed) the auto-created through
        model on the declaring class."""
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name

        super(ManyToManyField, self).contribute_to_class(cls, name)

        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)

        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))

        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)

        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, six.string_types):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)

        if isinstance(self.rel.to, six.string_types):
            target = self.rel.to
        else:
            target = self.rel.to._meta.db_table
        cls._meta.duplicate_targets[self.column] = (target, "m2m")

    def contribute_to_related_class(self, cls, related):
        """Install the reverse accessor and the curried m2m column/field
        name helpers once the related class is known."""
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))

        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')

        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')

        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # Unlike ForeignKey, an m2m field takes nothing from the target.
        pass

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        """Return a ModelMultipleChoiceField limited to the related queryset."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)

    def db_type(self, connection):
        # A ManyToManyField is not represented by a single column,
        # so return None.
        return None
| [
"ilikecattle@gmail.com"
] | ilikecattle@gmail.com |
1b3ba494b9274d8bfbf69baa03de441e184361f0 | a5d6b01c1966d443d9c459fa7d8a16ec9faed81b | /FundamentalAnalysis/ratios.py | 70d4c67ade62f34f123577f67500ce22f78f276e | [
"MIT"
] | permissive | Rizwan-Ahmed-Surhio/FundamentalAnalysis | 8f22053b371782ce63ac4bf90cf750317e61316f | cbb7eddb38ddabaaba40dbfe8d5f7a77a5277bcd | refs/heads/master | 2022-09-17T09:45:57.434250 | 2020-05-09T13:44:05 | 2020-05-09T13:44:05 | 265,997,489 | 0 | 0 | MIT | 2020-05-22T02:14:46 | 2020-05-22T02:14:46 | null | UTF-8 | Python | false | false | 3,115 | py | from urllib.request import urlopen
import json
import pandas as pd
def key_metrics(ticker, period="annual"):
    """
    Description
    ----
    Gives information about key metrics of a company overtime which includes
    i.a. PE ratio, Debt to Equity, Dividend Yield and Average Inventory.

    Input
    ----
    ticker (string)
        The company ticker (for example: "NFLX")
    period (string)
        Data period, this can be "annual" or "quarter".

    Output
    ----
    data (dataframe)
        Data with variables in rows and the period in columns.
    """
    # Use a context manager so the HTTP response is always closed
    # (the previous implementation leaked the connection).
    with urlopen("https://financialmodelingprep.com/api/v3/company-key-metrics/" +
                 ticker + "?period=" + period) as response:
        data = response.read().decode("utf-8")
    data_json = json.loads(data)['metrics']

    data_formatted = {}
    for entry in data_json:
        # Quarterly data is keyed by "YYYY-MM", annual data by "YYYY".
        if period == "quarter":
            date = entry['date'][:7]
        else:
            date = entry['date'][:4]
        del entry['date']
        data_formatted[date] = entry

    return pd.DataFrame(data_formatted)
def financial_ratios(ticker):
    """
    Description
    ----
    Gives information about the financial ratios of a company overtime
    which includes i.a. investment, liquidity, profitability and debt ratios.

    Input
    ----
    ticker (string)
        The company ticker (for example: "LYFT")

    Output
    ----
    data (dataframe)
        Data with variables in rows and the period in columns.
    """
    # NOTE: the old docstring documented a ``period`` parameter, but this
    # endpoint (and this function) only takes a ticker.
    # Use a context manager so the HTTP response is always closed
    # (the previous implementation leaked the connection).
    with urlopen("https://financialmodelingprep.com/api/v3/financial-ratios/" +
                 ticker) as response:
        data = response.read().decode("utf-8")
    data_json = json.loads(data)['ratios']

    data_formatted = {}
    for entry in data_json:
        date = entry['date'][:4]
        del entry['date']

        # Each entry nests the ratios by category; flatten them into one
        # row per year.
        ratio_data = {}
        for key in entry.keys():
            ratio_data.update(entry[key])
        data_formatted[date] = ratio_data

    return pd.DataFrame(data_formatted)
def financial_statement_growth(ticker, period="annual"):
    """
    Description
    ----
    Gives information about the financial statement growth of a company overtime
    which includes i.a. EBIT growth (%) and shareholder equity growth (% per 3, 5
    and 10 years)

    Input
    ----
    ticker (string)
        The company ticker (for example: "WMT")
    period (string)
        Data period, this can be "annual" or "quarter".

    Output
    ----
    data (dataframe)
        Data with variables in rows and the period in columns.
    """
    # Use a context manager so the HTTP response is always closed
    # (the previous implementation leaked the connection).
    with urlopen("https://financialmodelingprep.com/api/v3/financial-statement-growth/" +
                 ticker + "?period=" + period) as response:
        data = response.read().decode("utf-8")
    data_json = json.loads(data)['growth']

    data_formatted = {}
    for entry in data_json:
        # Quarterly data is keyed by "YYYY-MM", annual data by "YYYY".
        if period == "quarter":
            date = entry['date'][:7]
        else:
            date = entry['date'][:4]
        del entry['date']
        data_formatted[date] = entry

    return pd.DataFrame(data_formatted)
"jer.bouma@gmail.com"
] | jer.bouma@gmail.com |
e3bd7407ac001b6c763f279a4f44f968e841749e | d4c18144c06765af02a9a7021f1d03861249dc27 | /IntegrationTests/_ProductData.py | c848b268cb9815da2adcc5f0b90ea27328745052 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | joerg-krause/ohPipeline | 4af73a4a61f708df001b6805439bfec434ca07bb | c788afd8c6ce76fdf0e228ccf984d31ee29f849d | refs/heads/master | 2020-12-03T06:35:45.734791 | 2017-06-28T11:15:02 | 2017-06-28T11:15:02 | 93,066,116 | 0 | 0 | null | 2017-06-01T14:29:34 | 2017-06-01T14:29:34 | null | UTF-8 | Python | false | false | 304 | py | """_ProductData - product specific data
"""
# Available sources, specified by product type: maps each source type code
# to its display name.
ds = {
    'NetAux':   'Net Aux',
    'Playlist': 'Playlist',
    'Radio':    'Radio',
    'Receiver': 'Songcast',
    'Spotify':  'Spotify',
    'UpnpAv':   'UPnP AV',
}

# Source table per product type.
kSrcByType = {'SoftPlayer': ds}
| [
"graham.douglas@linn.co.uk"
] | graham.douglas@linn.co.uk |
ae77048f197d299f0d4b63a385ef7e92c0a987d0 | 404735b549ade1756f4e6f4a0eb30a4c19da8692 | /SampleScripts/sample_3D_marked.py | 8df508c4f621905ac3d75c293a90dec4b08b5860 | [] | no_license | Jooyeop-Han/pyStereo3D | 3e7eddf79c05c55a6e49f972ebbe25d1a6237f35 | 33ac28e6d47fd867f73594f34c9e12853ac9be49 | refs/heads/master | 2022-10-06T09:26:38.976813 | 2020-06-05T13:35:41 | 2020-06-05T13:35:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | from Stereo3D import Stereo3D, StereoCalibration
from Stereo3D.StereoCapture import *
camera_name = "deimos"
marked_image_filepath = "SampleData/deimos_left_marked.png"
stcap = StereoCapture("Image",["SampleData/deimos_left.png","SampleData/deimos_right.png"])
# define inout folder
folder = "SampleData/"
outputfolder = "SampleData/output/"
CAL_MODE_FROM_IMAGES = 0
CAL_MODE_FROM_YAML = 1
CAL_MODE_FROM_XML = 2
stcal = None
cal_mode = CAL_MODE_FROM_YAML
if (cal_mode == CAL_MODE_FROM_IMAGES):
# define calibration directories
left_images_folder = folder + "deimos_cal/"
right_images_folder = folder + "deimos_cal/"
output_folder = folder + "deimos_cal/"
left_wildcard = "*_l.png"
right_wildcard = "*_r.png"
grid_size = 39.0
grid_rows = 6
grid_cols = 8
# generate calibration from images
stcal = StereoCalibration()
stcal.calibrate(
left_images_folder,right_images_folder,
output_folder,left_wildcard,right_wildcard,
grid_size, grid_rows, grid_cols
)
elif (cal_mode == CAL_MODE_FROM_YAML):
# define calibration files for left and right image
left_cal_file = folder + camera_name +"_left.yaml"
right_cal_file = folder + camera_name +"_right.yaml"
# get calibration from yaml files
stcal = StereoCalibration()
stcal.get_cal_from_yaml(left_cal_file,right_cal_file)
elif (cal_mode == CAL_MODE_FROM_XML):
# define calibration files for left and right image
left_cal_file = folder + camera_name +"_left_calibration.xml"
right_cal_file = folder + camera_name +"_right_calibration.xml"
stereo_cal_file = folder + camera_name +"_stereo_calibration.xml"
left_rect_file = folder + camera_name +"_left_rectification.xml"
right_rect_file = folder + camera_name +"_right_rectification.xml"
# get calibration from yaml files
stcal = StereoCalibration()
stcal.get_cal_from_xml(left_cal_file,right_cal_file,stereo_cal_file,left_rect_file,right_rect_file)
matcher = cv2.StereoBM_create()
default_min_disp = 1000
default_num_disparities = 3
default_block_size = 7
default_uniqueness_ratio = 12
default_texture_threshold = 5
default_speckle_size = 30
default_speckle_range = 16
calc_block = (2 * default_block_size + 5)
matcher.setBlockSize(calc_block)
matcher.setMinDisparity(int(default_min_disp - 1000))
matcher.setNumDisparities(16*(default_num_disparities+1))
matcher.setUniquenessRatio(default_uniqueness_ratio)
matcher.setTextureThreshold(default_texture_threshold)
matcher.setSpeckleWindowSize(default_speckle_size)
matcher.setSpeckleRange(default_speckle_range)
# setup Stereo3D
s3D = Stereo3D(stcap,stcal,matcher)
connected = s3D.connect()
marked_image = cv2.imread(marked_image_filepath)
# run Stereo3D GUI for generating 3D
res, _ = s3D.grab3D(False)
if res:
rect_marked_image, _ = s3D.stereo_calibration.rectify_pair(marked_image,s3D.image_right)
points_file_string = "points.ply"
s3D.save_point_cloud(s3D.disparity,rect_marked_image,outputfolder,points_file_string,False)
else:
print("Failed to grab 3D") | [
"benknight135@gmail.com"
] | benknight135@gmail.com |
06b7cb7a483e81e6402461f00e0f0f1d5bfff1f3 | 04eafeab701674f444c4044b3ba3e858267851d1 | /venv/Scripts/pip3-script.py | 3762478de869a5b68f3532c02b0d142f1acf426e | [] | no_license | srimathila/Codedaddies_list1 | 3e213fda5e1fc9d253035063d437039f081b0f21 | 1ffbd4d7001aa9f23eea18bff9030a2572177b2f | refs/heads/master | 2022-10-11T20:33:21.731309 | 2020-06-18T13:15:29 | 2020-06-18T13:15:29 | 273,242,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | #!C:\Users\Srimathi\PycharmProjects\Codedaddies_list1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# NOTE: this file is auto-generated by setuptools when creating the
# virtualenv; it simply dispatches to pip's console-script entry point.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] looks like "pip3".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"srimathila@gmail.com"
] | srimathila@gmail.com |
c200fdab044077d5fee3c3a281b5d80d3f20d77c | 6503f1836a9402fe03f43d1a6d1fa8de29cc6f3d | /modulos/documento/migrations/0003_banner_tipo.py | ad5bb8fb9396202e9dc603d438a8b5d109d3021e | [] | no_license | eajahuanca/intra-port | 3b8c06f4449e9d34deefd3b780da7f15fc47a0be | 9bcea30f2307869ec18ff5c8a2ecd067e5b9ad39 | refs/heads/master | 2020-07-16T18:51:50.031003 | 2019-09-02T11:56:23 | 2019-09-02T11:56:23 | 205,846,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 2.2.4 on 2019-08-25 00:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the 'tipo' choice field
    (BANNER/MODAL, default BANNER) to the Banner model."""

    dependencies = [
        ('documento', '0002_banner'),
    ]

    operations = [
        migrations.AddField(
            model_name='banner',
            name='tipo',
            field=models.CharField(choices=[('BANNER', 'BANNER'), ('MODAL', 'MODAL')], default='BANNER', max_length=10, verbose_name='Tipo'),
        ),
    ]
| [
"edwinajahuancacallisaya@gmail.com"
] | edwinajahuancacallisaya@gmail.com |
b45974f3432a85b175e9e353766f728ea785d360 | d466e0e51197a1e4e40067f076c9be86400ab99a | /FaceRecognitionVideo.py | c08cf28ccb9b333d331ba5cf4da7cefcfe42a1c6 | [] | no_license | Codebugged-Research/Ikshana-Prem | f4c735e595a73fd631d622218cef62b6a0cbc432 | 65c2905fbd7201a82db1129f2dc3ddd2e992026a | refs/heads/main | 2023-07-31T01:18:26.107597 | 2021-09-15T09:14:41 | 2021-09-15T09:14:41 | 388,703,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | import cv2
import numpy as np
import os
import face_recognition
from PIL import ImageTk, Image
class Face_Recognition_Video():
    """Recognises known faces in a video stream and paints annotated
    frames onto a Tkinter label.

    Known faces are loaded from image files in ./KnownFaces; each file
    name (without its extension) is used as the person's display name.
    """

    def __init__(self, cap, lmain):
        # lmain: Tkinter Label the frames are rendered into.
        # cap: cv2.VideoCapture-compatible frame source.
        self.lmain = lmain
        self.path = './KnownFaces'
        self.known_face_encodings, self.known_face_names = self.get_known_face_encodings()
        self.cap = cap

    def get_known_face_encodings(self):
        """Encode every jpeg/jpg/png in self.path.

        Returns (encodings, names) as two parallel lists. Assumes each
        image contains at least one face (the first encoding is used).
        """
        known_face_encodings = []
        known_face_names = []
        for filename in os.listdir(self.path):
            # Bug fix: os.path.splitext handles filenames containing extra
            # dots; the old "name, ext = filename.split('.')" raised
            # ValueError for such names.
            name, extension = os.path.splitext(filename)
            if extension.lstrip('.') in ('jpeg', 'jpg', 'png'):
                image = face_recognition.load_image_file(self.path + '/' + filename)
                face = face_recognition.face_encodings(image)[0]
                known_face_encodings.append(face)
                known_face_names.append(name)
        return known_face_encodings, known_face_names

    def detect_faces(self, frame):
        """Return *frame* with green boxes and names drawn around every
        recognised face; on any failure the frame is returned unmodified
        so the video loop keeps running."""
        try:
            # Detect on a quarter-size RGB copy for speed; coordinates are
            # scaled back up (x4) before drawing.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            rgb_small_frame = small_frame[:, :, ::-1]
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            for encodeFace, faceLoc in zip(face_encodings, face_locations):
                matches = face_recognition.compare_faces(self.known_face_encodings, encodeFace)
                faceDis = face_recognition.face_distance(self.known_face_encodings, encodeFace)
                matchIndex = np.argmin(faceDis)
                if matches[matchIndex]:
                    name = self.known_face_names[matchIndex].upper()
                    y1, x2, y2, x1 = faceLoc
                    y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    cv2.putText(frame, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            return frame
        except Exception:
            # Bug fix: narrowed from a bare "except:" so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            return frame

    def Process(self):
        """Grab one frame, annotate it, display it on the label, and
        reschedule itself via Tkinter's after() (10 ms)."""
        _, frame = self.cap.read()
        if frame is not None:
            frame = self.detect_faces(frame.copy())
            frame = cv2.resize(frame, (750, 590))
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
            img = Image.fromarray(cv2image)
            imgtk = ImageTk.PhotoImage(image=img)
            # Keep a reference on the label so Tk does not garbage-collect
            # the image while it is displayed.
            self.lmain.imgtk = imgtk
            self.lmain.configure(image=imgtk)
        self.lmain.after(10, lambda: self.Process())
"noreply@github.com"
] | Codebugged-Research.noreply@github.com |
30a9170ae768d8d884e539bb99808aeacba49d97 | e0b0dc4e5e694c2e104fb0cc4ff45a88f99ea5d1 | /sampling/pong_animation.py | 0b9044fe0416c877e69937bbeddafc56c1c5a495 | [] | no_license | mzenk/lif_pong | c4ff7cbb44d1a4ea71692db05fc9aab3d309ee01 | 1ea9ccf25bbb54f12b00b5acd9b4e471acdc6bf1 | refs/heads/master | 2020-12-24T00:09:55.249315 | 2018-05-16T11:28:03 | 2018-05-16T11:28:03 | 237,319,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,784 | py | # animation for pong
from __future__ import division
import numpy as np
import cPickle
import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import animation
import time
from util import get_windowed_image_index
def get_frames(rbm, image, winsize):
    """Run windowed RBM inference over *image* and build animation frames.

    Returns (fig, frames) where frames is a list of (imshow, plot) artist
    tuples suitable for matplotlib.animation.ArtistAnimation. Uses the
    module-level ``img_shape`` for the image geometry.
    """
    global img_shape
    # uncover the image pixel-by-pixel => t = n_pxls_x;
    # the paddle speed is limited to n_pxls_y / n_pxls_x / 2
    # fractions = np.arange(0, 1., 1./img_shape[1])
    # if an index range is given
    fractions = np.arange(img_shape[1] + 1)
    frames = []
    paddle_pos = img_shape[0] / 2
    paddle_length = img_shape[0] / rbm.n_labels
    max_speed = img_shape[1] / img_shape[0] / 2
    # Random binary initial state for the Gibbs chain; carried over
    # between window positions so the chain stays warm.
    v_init = np.random.randint(2, size=rbm.n_visible).astype(float)
    burnIn = 20
    n_samples = 100
    fig = plt.figure()
    for frac in fractions:
        # first get the prediction
        clamped = get_windowed_image_index(img_shape, frac,
                                           window_size=winsize)
        clamped_input = image[clamped]

        # RBM
        unclampedinp = np.setdiff1d(np.arange(rbm.n_inputs), clamped)
        samples = \
            rbm.sample_with_clamped_units(n_samples + burnIn, clamped,
                                          clamped_input, v_init=v_init,
                                          binary=True)[burnIn:]
        prediction = np.average(samples, axis=0)

        # carry previous state over
        unclampedvis = np.setdiff1d(np.arange(rbm.n_visible), clamped)
        v_init[unclampedvis] = samples[-1]

        trajectory = prediction[:-rbm.n_labels]
        labels = prediction[-rbm.n_labels:]

        inferred_img = np.zeros(np.prod(img_shape))
        # use rgb to make clamped part distinguishable from unclamped part
        inferred_img[clamped] = clamped_input
        inferred_img = np.tile(inferred_img, (3, 1)).T
        inferred_img[unclampedinp, 0] = trajectory
        # ---
        # # DBM
        # clamped = [None] * (1 + rbm.n_layers)
        # clamped[0] = clamped
        # clamped_val = [None] * (1 + rbm.n_layers)
        # clamped_val[0] = clamped_input
        # samples = rbm.draw_samples(n_samples + burnIn, clamped=clamped,
        #                            clamped_val=clamped_val, layer_ind='all')

        # inferred_img = np.average(samples[burnIn:, :rbm.n_visible], axis=0)
        # labels = np.average(samples[burnIn:, -rbm.n_labels:], axis=0)
        # inferred_img = np.tile(inferred_img, (3, 1)).T
        # if not clamped.size == np.prod(img_shape):
        #     inferred_img[np.setdiff1d(np.arange(rbm.n_visible),
        #                  clamped), 1:] = 0
        # # ---
        inferred_img = inferred_img.reshape((img_shape[0], img_shape[1], 3))

        # timestep=1; paddle center should be aligned with label index => +.5
        target = np.average(np.arange(rbm.n_labels), weights=labels)
        paddle_pos += max_speed * \
            min(2*(target - paddle_pos / paddle_length + .5), 1)

        # update paddle
        paddle_pxls = np.zeros((img_shape[0], 3))
        if paddle_pos <= paddle_length/2:
            paddle_pxls[:paddle_length, 1] = 1
            paddle_pos = 0
        elif paddle_pos + paddle_length/2 >= img_shape[0] - 1:
            paddle_pxls[-paddle_length:, 1] = 1
            paddle_pos = img_shape[0] - 1
        else:
            paddle_pxls[int(paddle_pos) - paddle_length//2:
                        int(paddle_pos + np.round(paddle_length/2)), 1] = 1
        pixels = np.hstack((inferred_img, np.expand_dims(paddle_pxls, 1)))

        # visualize labels as well
        labelsrep = np.repeat(labels, img_shape[0] // rbm.n_labels)
        labelsrgb = np.tile(np.expand_dims(labelsrep, 1), (1, 3))
        labelsrgb[:, 1:] = 0
        pixels = np.hstack((pixels, np.expand_dims(labelsrgb, 1)))

        # plotting
        width = .7
        ax1 = fig.add_axes([.05, .2, width, width*3/4])
        ax1.xaxis.set_visible(False)
        ax1.yaxis.set_visible(False)
        ax2 = fig.add_axes([width - .02, .2, .2, width*3/4])
        ax2.set_ylim([-.5, pixels.shape[0] - .5])
        ax2.set_xlim([0, 1])
        ax2.xaxis.set_ticks([0., 0.5, 1.])
        ax2.tick_params(left='off', right='off',
                        labelleft='off', labelright='off')
        ax2.set_xlabel('P(last column)')
        # barh doesnt work because apparently BarContainer has no 'set_visible'
        f1 = ax1.imshow(pixels, interpolation='Nearest', cmap='gray',
                        origin='lower')
        f2 = ax2.plot(inferred_img[:, -1, 0], np.arange(img_shape[0]) - .5,
                      'ro')[0]
        frames.append((f1, f2))
    # print('Max. prediction time: {}s'.format(max(prediction_time)))
    return fig, frames
img_shape = (36, 48)
# Load Pong data
# data_name = 'pong_var_start{}x{}'.format(*img_shape)
data_name = 'pong_noisy'
with np.load('../datasets/' + data_name + '.npz') as d:
    train_set, _, test_set = d[d.keys()[0]]

# Load rbm
# rbm_name = data_name + '_crbm.pkl'
rbm_name = 'pong_var_start36x48_crbm.pkl'
with open('saved_rbms/' + rbm_name, 'rb') as f:
    rbm = cPickle.load(f)

# pick random examples and infer trajectories
# Fixed seed so the same three test examples are animated on every run.
np.random.seed(125575)
example_id = np.random.choice(test_set[0].shape[0], size=3, replace=False)
for i, example in enumerate(test_set[0][example_id]):
    fig, frames = get_frames(rbm, example, winsize=6)
    # Set up formatting for the movie files --- whatever this is
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, bitrate=1800)
    traj_anim = animation.ArtistAnimation(fig, frames, interval=200,
                                          repeat_delay=3000, blit=True)
    traj_anim.save('figures/animation_{}_no{}.mp4'.format(data_name, i))
"maximilian.zenk@kip.uni-heidelberg.de"
] | maximilian.zenk@kip.uni-heidelberg.de |
ec945f1e5445ab9c62b7489307d152b9f35f5e91 | 91e5a14ad1cc2cb6e0787d79673bfa1d1299d707 | /flasksOfFantasy.py | 64f6c5244e5f0cc41e6182d6c053df846a475605 | [] | no_license | cSquaerd/flasksOfFantasy | 09f054b71f27c91ef3378ca9115ec1ae03e22c86 | c748cdf38753ade9f9dfbbba8bfbb0e919fe5503 | refs/heads/master | 2023-04-20T02:24:28.573510 | 2021-05-18T23:56:39 | 2021-05-18T23:56:39 | 344,361,794 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,769 | py | # IMPORTS
import os
import json
import time
import shutil
import flask as fl
import fofDB as db
import fofKEY as key
import fofSTR as strings
import defaultSheet as ds
# CONSTANTS
# Run the Flask development server in debug mode.
DEBUG = True

# Shown whenever config.json is missing or unreadable.
NO_CONFIG_MESSAGE = (
    "Using default SSL and Host configurations "
    "(no https and localhost)"
)

# Generic user-facing message for failed write queries.
SQL_WRITE_ERROR = (
    "Failed to write to database.\n"
    "Contact the administrator for further details."
)

# Placeholder sheet listing used during development.
testSheets = [
    {"name": "Foo", "link": "/sheet/foo"},
    {"name": "Jym", "link": "/sheet/jym"},
]

# Returned by POST handlers when the request carried no JSON payload.
noJSONError = {"error": "No JSON Submitted."}
# Header Control Functions (use with fl.after_this_request in rule functions)
def noCaching(response):
    """Disable client caching on *response* and return it unchanged
    otherwise (intended for use with fl.after_this_request)."""
    response.headers["Cache-Control"] = "no-cache"
    return response
# Configuration initialization: read optional ./config.json for SSL/host
# settings; fall back to localhost without TLS on any failure.
SSL_PATH_TO_CERT = ""
SSL_PATH_TO_PRIVKEY = ""
COOKIE_SECURE = True
HOST = "localhost"
if os.path.exists("./config.json"):
    try:
        # Bug fix: the original opened the file without a context manager,
        # leaking the handle whenever json.load raised.
        with open("./config.json", "r") as configFile:
            configDict = json.load(configFile)
        SSL_PATH_TO_CERT = configDict["sslPathToCert"]
        SSL_PATH_TO_PRIVKEY = configDict["sslPathToPrivKey"]
        COOKIE_SECURE = configDict["cookieSecure"]
        HOST = configDict["host"]
    except Exception as e:
        print(e)
        print(NO_CONFIG_MESSAGE)
        # Reset anything partially assigned before the exception.
        SSL_PATH_TO_CERT = ""
        SSL_PATH_TO_PRIVKEY = ""
        COOKIE_SECURE = True
        HOST = "localhost"
else:
    print("Could not find 'config.json' in root directory,")
    print(NO_CONFIG_MESSAGE)
# App Creation
base = fl.Flask(__name__)
# Generate a signing key on first run, then load it for session cookies.
if not os.path.exists("./" + key.KEY_FILE):
    key.newKey()
base.secret_key = key.getKey()
# Harden session cookies: same-site only, HTTPS-only (per config), and
# inaccessible to JavaScript.
base.config.update(
    SESSION_COOKIE_SAMESITE = "Strict",
    SESSION_COOKIE_SECURE = COOKIE_SECURE,
    SESSION_COOKIE_HTTPONLY = True
)
# App Rule Functions
def login():
    """GET: render the login form (or go straight to the user page when a
    session already exists). POST: validate JSON credentials against the
    USERS table and establish the session on success.

    POST responses are JSON: {"error": ...} on failure, or
    {"url": <userpage>, "error": "None"} on success.
    """
    if fl.request.method == "GET":
        if "user" in fl.session:
            return fl.redirect(fl.url_for("userpage"))
        else:
            return fl.render_template("login.html")
    elif fl.request.method == "POST":
        if fl.request.is_json:
            credentials = fl.request.get_json()
            userData = db.queryRead(
                "SELECT * FROM USERS WHERE username = :username",
                credentials
            )
            if len(userData) == 0:
                return fl.jsonify({"error": "Bad username."})
            else:
                userData = userData[0]
                # Verify the salted hash of the submitted password.
                if db.checkPassword(
                    credentials["password"],
                    userData["salt"], userData["hash"]
                ):
                    fl.session["user"] = credentials["username"]
                    return fl.jsonify({"url": fl.url_for("userpage"), "error": "None"})
                else:
                    return fl.jsonify({"error": "Bad password."})
        else:
            return fl.jsonify(noJSONError)
def logout():
    """Drop the logged-in user from the session and return to the login page."""
    fl.session.pop("user")
    return fl.redirect(fl.url_for("login"))
def staticCahcingCheck(script):
    """Serve a static asset, disabling caching for frequently-edited files.

    Files in noCacheList are served directly with a Cache-Control header;
    anything else is redirected to Flask's normal /static/ route.
    """
    noCacheList = ["common.py", "sheetDialog.py", "sheetDefault.css"]
    print(script)
    if script in noCacheList:
        fl.after_this_request(noCaching)
        return fl.send_from_directory("./static/", script)
    else:
        # Bug fix: the redirect response was computed but never returned,
        # so this view previously returned None for non-listed files.
        return fl.redirect("/static/" + script)
def getSheets(user):
    """Return [{"name": ..., "link": ...}, ...] for every sheet *user* owns.

    The link is the stored file path with the leading '.' and the trailing
    '.json' stripped, matching the app's sheet URL scheme.
    """
    rows = db.queryRead(
        "SELECT * FROM SHEETS WHERE username = :user",
        {"user": user}
    )
    sheets = []
    for row in rows:
        link = row["path"].split(".json")[0][1:]
        sheets.append({"name": row["sheetname"], "link": link})
    return sheets
# Shared query for locating a single sheet row by owner and name.
sheetQuery = "SELECT * FROM SHEETS WHERE username = :user AND sheetname = :sheet"
# True when the named sheet exists for *user*. NOTE: raises IndexError when
# the query returns no rows -- callers (loadSheet/saveSheet) deliberately
# catch that IndexError as their "sheet not found" signal.
checkDBForSheet = lambda user, sheet : sheet == db.queryRead(
    sheetQuery, {"user": user, "sheet": sheet}
)[0]["sheetname"]
# Filesystem path of the stored sheet JSON; also raises IndexError when the
# sheet row is absent.
getSheetPath = lambda user, sheet : db.queryRead(
    sheetQuery, {"user": user, "sheet": sheet}
)[0]["path"]
def loadSheet(user, sheet):
    """Render the character-sheet editor page for *sheet* owned by *user*.

    Redirects to the login page when not authenticated, and to the user
    page when the sheet belongs to someone else or does not exist.
    """
    if "user" in fl.session:
        if user == fl.session["user"]:
            try:
                if checkDBForSheet(user, sheet):
                    return fl.render_template(
                        "sheet.html",
                        sheetName = sheet,
                        username = fl.session["user"],
                        abilities = (
                            "strength", "dexterity", "constitution",
                            "intelligence", "wisdom", "charisma"
                        ),
                        coins = ("gold", "silver", "copper")
                    )
                else:
                    return fl.redirect(fl.url_for("userpage"))
            # checkDBForSheet raises IndexError when the sheet row is missing.
            except IndexError:
                return fl.redirect(fl.url_for("userpage"))
        else:
            return fl.redirect(fl.url_for("userpage"))
    else:
        return fl.redirect(fl.url_for("login"))
def saveSheet(user, sheet):
    """Persist the posted sheet JSON to disk for the owning, logged-in user.

    Always returns JSON; the "error" key is "None." on success or a
    human-readable message otherwise.
    """
    # Guard clauses: must be logged in, must own the sheet, must post JSON.
    if "user" not in fl.session:
        return fl.jsonify({"error": "Can I see your passport?"})
    if user != fl.session["user"]:
        return fl.jsonify({"error": "Improper Access."})
    if not fl.request.is_json:
        return fl.jsonify({"error": "Bad POST Request."})
    sheetData = fl.request.get_json()
    try:
        # Bug fix: getSheetPath raises IndexError when the sheet row is
        # missing; the original called it *outside* this try block and
        # crashed with an unhandled 500 instead of returning JSON.
        sheetPath = getSheetPath(user, sheet)
        print(sheetPath)
        if checkDBForSheet(user, sheet):
            try:
                # ``with`` guarantees the file is closed even if dump fails.
                with open(sheetPath, 'w') as sheetFile:
                    json.dump(
                        sheetData, sheetFile,
                        indent = 4, sort_keys = True
                    )
                return fl.jsonify({
                    "error": "None."
                })
            except OSError as e:
                # Bug fix: "OS Error: " + e raised TypeError (str +
                # exception); stringify the exception first.
                return fl.jsonify({"error": "OS Error: " + str(e)})
        else:
            return fl.jsonify({
                "error": "Sheet not found." \
                + "You shouldn't be seeing this error..."
            })
    except IndexError:
        return fl.jsonify({
            "error": "Sheet doesn't exist. " \
            + "You shouldn't be seeing this error..."
        })
def sendSheet(user, sheet):
    """Send the raw sheet JSON file to the authenticated owner.

    The database lookup doubles as an existence probe: indexing [0] on an
    empty result raises IndexError, which redirects to the user page.
    Responses are marked non-cacheable so the editor always sees fresh data.
    """
    fl.after_this_request(noCaching)
    if "user" in fl.session:
        if user == fl.session["user"]:
            try:
                # sheetPath itself is unused below; the query only verifies
                # that the sheet row exists before serving the file.
                sheetPath = db.queryRead(
                    "SELECT * FROM SHEETS " \
                    + "WHERE username = :user " \
                    + "AND sheetname = :sheet",
                    {"user": user, "sheet": sheet}
                )[0]["path"]
                #print(sheetPath)
                print("\t[" + user + "]: Sending sheet " + sheet)
                return fl.send_from_directory("./sheets/" + user + '/', sheet + ".json")
            except IndexError:
                print("\t[" + user + "]: Sheet \"" + sheet + "\" does not exist")
                return fl.redirect(fl.url_for("userpage"))
        else:
            print("Incorrect user access for " + sheet)
            return fl.redirect(fl.url_for("userpage"))
    else:
        return fl.redirect(fl.url_for("login"))
    #print("End of sendSheet reached, oh no!")
def userpage():
    """GET: render the user's dashboard listing their sheets.

    POST: dispatch a JSON sheet-management request by its "method" key:
      - "newSheet":  create a sheet (validates name, writes default JSON)
      - "delete":    remove the DB row and move the file to ./recycleBin/
      - "duplicate": copy an existing sheet under a new name
    All POST responses are JSON with an "error" key ("None." on success).
    """
    if fl.request.method == "GET":
        if "user" in fl.session:
            return fl.render_template(
                "userpage.html",
                user = fl.session["user"],
                sheets = getSheets(fl.session["user"])
            )
        else:
            return fl.redirect(fl.url_for("login"))
    elif fl.request.method == "POST":
        if not "user" in fl.session:
            return fl.redirect(fl.url_for("login"))
        elif fl.request.is_json:
            userRequest = fl.request.get_json()
            userRequest["user"] = fl.session["user"]
            # --- Create a new sheet -------------------------------------
            if userRequest["method"] == "newSheet":
                if not strings.isAllowedChars(userRequest["newSheetName"]):
                    return fl.jsonify({
                        "error": "Outlawed characters detected in \"" \
                        + userRequest["newSheetName"] \
                        + "\". Please do not use quote marks or the backslash."
                    })
                elif len(
                    db.queryRead(
                        "SELECT * FROM SHEETS " \
                        + "WHERE username = :user " \
                        + "AND sheetname = :newSheetName",
                        userRequest
                    )
                ) != 0:
                    return fl.jsonify({
                        "error": "Sheet \"" + userRequest["newSheetName"] \
                        + "\" already exists. Please retry with a different name."
                    })
                else:
                    userRequest["path"] = "./sheets/" + userRequest["user"] + '/' \
                    + userRequest["newSheetName"] + ".json"
                    # queryWrite returns an empty result on success.
                    if len(
                        db.queryWrite(
                            "INSERT INTO SHEETS VALUES " \
                            + "(:user, :newSheetName, :path)",
                            userRequest
                        )
                    ) == 0:
                        newFile = open(userRequest["path"], 'w')
                        json.dump(ds.defaultSheet, newFile, indent = 4, sort_keys = True)
                        newFile.close()
                        return fl.jsonify({
                            "error": "None.",
                            "url": userRequest["path"],
                            "newSheetName": userRequest["newSheetName"]
                        })
                    else:
                        return fl.jsonify({
                            "error": SQL_WRITE_ERROR,
                        })
            # --- Delete a sheet (soft delete into ./recycleBin/) --------
            elif userRequest["method"] == "delete":
                if len(
                    db.queryWrite(
                        "DELETE FROM SHEETS " \
                        + "WHERE sheetname = :sheetName",
                        userRequest
                    )
                ) == 0:
                    if not os.path.exists("./recycleBin/"):
                        os.mkdir("./recycleBin/")
                    if not os.path.exists(
                        "./recycleBin/" + userRequest["user"] + '/'
                    ):
                        os.mkdir("./recycleBin/" + userRequest["user"] + '/')
                    # Timestamp suffix keeps repeated deletions distinct.
                    os.rename(
                        "./sheets/" + userRequest["user"] + '/' \
                        + userRequest["sheetName"] + ".json",
                        "./recycleBin/" + userRequest["user"] + '/' \
                        + userRequest["sheetName"] + '.' \
                        + str(time.time_ns()) + ".json"
                    )
                    return fl.jsonify({
                        "error": "None.",
                        "sheetName": userRequest["sheetName"]
                    })
                else:
                    return fl.jsonify({"error": SQL_WRITE_ERROR})
            # --- Duplicate a sheet under a new name ---------------------
            elif userRequest["method"] == "duplicate":
                if len(
                    db.queryRead(
                        "SELECT * FROM SHEETS " \
                        + "WHERE sheetname = :duplicateName",
                        userRequest
                    )
                ) != 0:
                    return fl.jsonify(
                        {
                            "error": "Duplicate name \"" \
                            + userRequest["duplicateName"] \
                            + "\" already in use. Please try again."
                        }
                    )
                else:
                    userRequest["originalPath"] = "./sheets/" + userRequest["user"] \
                    + '/' + userRequest["sheetName"] + ".json"
                    userRequest["duplicatePath"] = "./sheets/" + userRequest["user"] \
                    + '/' + userRequest["duplicateName"] + ".json"
                    if len(
                        db.queryWrite(
                            "INSERT INTO SHEETS VALUES " \
                            + "(:user, :duplicateName, :duplicatePath)",
                            userRequest
                        )
                    ) != 0:
                        return fl.jsonify({"error": SQL_WRITE_ERROR})
                    else:
                        shutil.copyfile(
                            userRequest["originalPath"],
                            userRequest["duplicatePath"]
                        )
                        return fl.jsonify({
                            "error": "None.",
                            "sheetName": userRequest["sheetName"],
                            "duplicateName": userRequest["duplicateName"]
                        })
            else:
                return fl.jsonify({"error": "Bad POST Request."})
        else:
            return fl.jsonify(noJSONError)
def index():
    """Render the public landing page."""
    return fl.render_template("index.html")
# App Rule Instanctiation
# App rule instantiation: routes are registered here rather than with
# @base.route decorators on the view functions above.
base.add_url_rule('/', "index", index)
base.add_url_rule(
    "/favicon.ico", "favicon",
    lambda : fl.send_from_directory("./static/", "favicon.ico")
)
base.add_url_rule("/login/", "login", login, methods = ("GET", "POST"))
base.add_url_rule("/logout/", "logout", logout)
base.add_url_rule("/user/", "userpage", userpage, methods = ("GET", "POST"))
base.add_url_rule("/user/<script>", "userScripts", staticCahcingCheck)
base.add_url_rule("/sheets/<user>/<sheet>/", "loadsheet", loadSheet)
base.add_url_rule("/sheets/<user>/<sheet>/get/", "getsheet", sendSheet)
base.add_url_rule(
    "/sheets/<user>/<sheet>/save/",
    "saveSheet", saveSheet, methods = ["POST"]
)
# Scripts referenced from a sheet page resolve through the same
# caching-aware static handler; user/sheet are accepted but unused.
base.add_url_rule(
    "/sheets/<user>/<sheet>/<script>", "sheetScripts",
    lambda user, sheet, script : staticCahcingCheck(script)
)
# App Execution
if __name__ == "__main__":
    # Serve over HTTPS only when both certificate paths were configured;
    # otherwise fall back to plain HTTP on the configured host.
    if len(SSL_PATH_TO_CERT) > 0 and len(SSL_PATH_TO_PRIVKEY) > 0:
        base.run(
            debug = DEBUG,
            ssl_context = (SSL_PATH_TO_CERT, SSL_PATH_TO_PRIVKEY),
            host = HOST
        )
    else:
        base.run(debug = DEBUG, host = HOST)
| [
"cook.char.p@gmail.com"
] | cook.char.p@gmail.com |
e51ce24cc9abf704617483f76ca2cd74285aeb65 | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/VanderPlas17Python/E_Chapter4/N_ThreeDimensionalPlotting/A_ThreeDimensionalPoints/index.py | 0b8df9722eaab651824247c3bb62acae5f54c7eb | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Raw page content for this section, kept as commented-out source lines.
# NOTE: the list literal contains only comments, so ``blocks`` evaluates
# to an empty list until content blocks are uncommented/added.
blocks = [
# Figure 4-92. An empty three-dimensional axes
#
# With this 3D axes enabled, we can now plot a variety of three-dimensional plot types.
# Three-dimensional plotting is one of the functionalities that benefits immensely from
# viewing figures interactively rather than statically in the notebook; recall that to use
# interactive figures, you can use %matplotlib notebook rather than %matplotlib
# inline when running this code.
#
# Three-Dimensional Points and Lines
# The most basic three-dimensional plot is a line or scatter plot created from sets of (x,
# y, z) triples. In analogy with the more common two-dimensional plots discussed ear‐
# lier, we can create these using the ax.plot3D and ax.scatter3D functions. The call
# signature for these is nearly identical to that of their two-dimensional counterparts,
# so you can refer to “Simple Line Plots” on page 224 and “Simple Scatter Plots” on
# page 233 for more information on controlling the output. Here we’ll plot a trigono‐
# metric spiral, along with some points drawn randomly near the line (Figure 4-93):
#     In[4]: ax = plt.axes(projection='3d')
#
#            # Data for a three-dimensional line
#            zline = np.linspace(0, 15, 1000)
#            xline = np.sin(zline)
#            yline = np.cos(zline)
#            ax.plot3D(xline, yline, zline, 'gray')
#
#            # Data for three-dimensional scattered points
#            zdata = 15 * np.random.random(100)
#            xdata = np.sin(zdata) + 0.1 * np.random.randn(100)
#            ydata = np.cos(zdata) + 0.1 * np.random.randn(100)
#            ax.scatter3D(xdata, ydata, zdata, c=zdata, cmap='Greens');
#
#
#
#
# Three-Dimensional Plotting in Matplotlib   |   291
#
# Figure 4-93. Points and lines in three dimensions
#
# Notice that by default, the scatter points have their transparency adjusted to give a
# sense of depth on the page. While the three-dimensional effect is sometimes difficult
# to see within a static image, an interactive view can lead to some nice intuition about
# the layout of the points.
#
# Three-Dimensional Contour Plots
# Analogous to the contour plots we explored in “Density and Contour Plots” on page
# 241, mplot3d contains tools to create three-dimensional relief plots using the same
# inputs. Like two-dimensional ax.contour plots, ax.contour3D requires all the input
# data to be in the form of two-dimensional regular grids, with the Z data evaluated at
# each point. Here we’ll show a three-dimensional contour diagram of a three-
# dimensional sinusoidal function (Figure 4-94):
#     In[5]: def f(x, y):
#                return np.sin(np.sqrt(x ** 2 + y ** 2))
#
#            x = np.linspace(-6, 6, 30)
#            y = np.linspace(-6, 6, 30)
#
#            X, Y = np.meshgrid(x, y)
#            Z = f(X, Y)
#     In[6]: fig = plt.figure()
#            ax = plt.axes(projection='3d')
#            ax.contour3D(X, Y, Z, 50, cmap='binary')
#            ax.set_xlabel('x')
#            ax.set_ylabel('y')
#            ax.set_zlabel('z');
#
#
#
#
# 292   |   Chapter 4: Visualization with Matplotlib
#
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    """Leaf node carrying this section's content blocks."""

    def __init__(self):
        super().__init__(
            "Three-Dimensional Points and Lines",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        # Idiom fix: a plain loop instead of a list comprehension that was
        # built only for its side effect and immediately discarded.
        for block in blocks:
            self.add(block)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ThreeDimensionalPoints(HierNode):
    """Hierarchy node for the 'Three-Dimensional Points and Lines' section."""

    def __init__(self):
        title = "Three-Dimensional Points and Lines"
        super().__init__(title)
        self.add(Content())
# eof
| [
"lawrence.mcafee@gmail.com"
] | lawrence.mcafee@gmail.com |
3a069bad015d28e46e4300891ad78a9a9a693361 | 18fe81edf8831f9ec3f7726e6fd43b89098b421b | /pure_tensorflow_implementation/eval_auc.py | fc3c5fb4b5661411886208a027bfa5a20fb06995 | [
"BSD-2-Clause"
] | permissive | IJDykeman/storm_damage_prediction | 04e62b9fa8533e4ff1b282ad93bab4e1ccd72f9a | 141a116f4f51148124c14135f4f03bf99f7f4f99 | refs/heads/master | 2021-04-22T12:53:22.567050 | 2018-03-23T05:28:55 | 2018-03-23T05:28:55 | 48,092,770 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | # this is to make the environment compatible with the Atom Hydrogen
# interactive editor package. You can ignore this code if that doesn't mean
# anything to you.
PYSOLR_PATH = '/home/isaac/Desktop/storm_damage_prediction/pure_tensorflow_implementation'
import sys

# Make the project's local modules importable (Atom Hydrogen runs the file
# outside its own directory).
if PYSOLR_PATH not in sys.path:
    sys.path.append(PYSOLR_PATH)
#########
import model
reload(model)
import data_loading
import tensorflow as tf
from matplotlib import pyplot as plt
import numpy as np
import sklearn
import sklearn.manifold
print sklearn.metrics.roc_auc_score
# load the model and restore the weights from the latest checkpoint
def evaluate_auc(model):
    """Compute and print the ROC AUC of *model* over 50 shuffled batches
    of the held-out test set (Python 2 code)."""
    data_handler = data_loading.DataHandler()
    test_indices = data_handler.test_indices[:]
    import random
    # Fixed seed so the evaluation batches are the same on every run.
    random.seed(0)
    random.shuffle(test_indices)
    batch_size = 64
    predictions = []
    ground_truth = []
    for i in range(50):
        batch_indices = test_indices[i * batch_size:(i+1) * batch_size]
        metamat, wind_speed, wind_dir, hcad, class_y = data_handler.get_data_batch_from_indices(batch_indices)
        # Column 1 of the one-hot labels is the positive class.
        ground_truth.extend(class_y[:, 1])
        pred_probabilities = model.sess.run(model.pred_probabilities,
                                 feed_dict={model.heightmap_ph:metamat,
                                            # model.extra_features_ph:extra_features,
                                            model.wind_speed_placeholder: wind_speed,
                                            model.wind_direction_placeholder: wind_dir,
                                            model.hcad_placeholder: hcad,
                                            model.labels_ph:class_y,
                                            model.keep_prob_ph: 1})
        predictions.extend(pred_probabilities[:, 1])
    print "auc:", sklearn.metrics.roc_auc_score(ground_truth, np.array(predictions), average='macro', sample_weight=None)
if __name__ == '__main__':
    # Build the model and restore its latest checkpoint, then evaluate.
    # NOTE(review): this rebinds the name ``model`` from the imported
    # module to an instance -- intentional here, but fragile.
    model = model.Model()
    model.restore()
    evaluate_auc(model)
"ijdykeman@gmail.com"
] | ijdykeman@gmail.com |
5315800005b6ab44c0b386e08171342f172705f5 | e21a9d931272309f872251b13296b88edbe36a10 | /tests/test_proto/columns/stringcolumn.py | bdc77eb14c8fb4080a8e5dfb394420ece87687d7 | [
"Apache-2.0"
] | permissive | ikrivosheev/asynch | 826332ac01f45facc4523d2a7011594be1aefb6a | 04503164653d3632f512b01aa12ad9b8926e751e | refs/heads/master | 2023-04-03T12:19:31.373445 | 2021-04-07T14:29:47 | 2021-04-07T14:29:47 | 349,327,087 | 0 | 0 | Apache-2.0 | 2021-03-19T06:45:25 | 2021-03-19T06:45:24 | null | UTF-8 | Python | false | false | 827 | py | import pytest
from asynch.proto.columns import get_column_by_spec
from asynch.proto.columns.stringcolumn import FixedString, String
@pytest.mark.parametrize(
    "spec, expected_column", [("String", String), ("FixedString(10)", FixedString)]
)
def test_create_string_column(spec, column_options, expected_column):
    """The column factory maps each spec string to its column class."""
    column = get_column_by_spec(spec, column_options)
    assert isinstance(column, expected_column)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "spec, items, expected_buffer",
    [("String", ["1"], bytearray(b"\x011")), ("FixedString(2)", ["12", "34"], bytearray(b"1234"))],
)
async def test_write_data_items(spec, items, expected_buffer, column_options):
    """Writing items serializes them into the expected wire-format bytes
    (String is length-prefixed; FixedString is raw fixed-width)."""
    column = get_column_by_spec(spec, column_options)
    await column.write_items(items)
    assert column.writer.buffer == expected_buffer
| [
"py.krivosheev@gmail.com"
] | py.krivosheev@gmail.com |
b86f83022aa6082e1105ad77b24b5ed44507e75e | baaf97917267619411686facf305333192035aa2 | /finalProject.py | d67b188e9645c06e822cd4ca0f3d480e81ac09ae | [] | no_license | RafaelDavisH/Restaurant-Menu-Application | a7516fb0dcc3a22dfd6009a435e16e5a4cd923f4 | f57c75619dd0b35068e12d697acc86890ce9a3bb | refs/heads/master | 2021-05-04T20:21:29.570771 | 2018-02-06T07:05:57 | 2018-02-06T07:05:57 | 119,789,446 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,382 | py | from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
CLIENT_ID = json.loads(
open('client_secret.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu App"
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind=create_engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
# Create a state token to prevent requst forgery
# Store it in the session for later validation
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """OAuth2 callback: exchange a Google one-time code for a session login.

    Validates the anti-forgery state token, upgrades the authorization code
    into credentials, verifies the access token, and stores the user's
    identity in the Flask session.  Returns a small HTML welcome snippet on
    success, or a JSON error response otherwise.
    """
    # Reject requests whose state token does not match the one we issued.
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object.
        oauth_flow = flow_from_clientsecrets('client_secret.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    # BUG FIX: this branch used to build the 401 response but fall through
    # without returning it, silently accepting tokens issued to other clients.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID doesn't match app's."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit if this user is already logged in.
    # BUG FIX: the session key written below is 'access_token'; the old code
    # read the never-written 'credentials' key, so this check never fired.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(
            json.dumps('Current user is already connected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Fetch the user's profile info from Google.
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data["name"]
    login_session['picture'] = data["picture"]
    login_session['email'] = data["email"]
    # Build the small HTML snippet the login page injects on success.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px; border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;">'
    flash("you are now logged in as %s" % login_session['username'])
    return output
# JSON API endpoint (GET): list every restaurant.
@app.route('/restaurants/JSON')
def restaurantsJSON():
    """Return all restaurants serialized as a JSON payload."""
    serialized = []
    for restaurant in session.query(Restaurant).all():
        serialized.append(restaurant.serialize)
    return jsonify(Restaurant=serialized)
@app.route('/restaurant/<int:restaurant_id>/menu/JSON/')
def restaurantMenuJSON(restaurant_id):
    """JSON API: all menu items belonging to one restaurant."""
    # The result is unused, but Query.one() raises when the id does not
    # exist, so this line effectively validates the URL parameter.
    restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
    items = session.query(MenuItem).filter_by(restaurant_id = restaurant_id).all()
    return jsonify(MenuItems=[i.serialize for i in items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON/')
def restaurantMenuItemJson(restaurant_id, menu_id):
    """JSON API: a single menu item looked up by its id."""
    menu_item = session.query(MenuItem).filter_by(id=menu_id).one()
    return jsonify(MenuItem=menu_item.serialize)
# Landing page: list every restaurant.
@app.route('/')
@app.route('/restaurants/')
def showRestaurants():
    """Render the restaurant overview page."""
    return render_template('restaurants.html',
                           restaurants=session.query(Restaurant).all())
@app.route('/restaurant/new/', methods=['GET', 'POST'])
def newRestaurant():
    """Show the creation form (GET) or create a restaurant (POST)."""
    if request.method != 'POST':
        return render_template('newRestaurant.html')
    restaurant = Restaurant(name=request.form['name'])
    session.add(restaurant)
    session.commit()
    flash("New Restaurant Created")
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/edit/', methods=['GET','POST'])
def editRestaurant(restaurant_id):
    """Edit a restaurant's name: GET shows the form, POST applies it."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != 'POST':
        return render_template('editRestaurant.html', restaurant=restaurant)
    new_name = request.form['name']
    if new_name:
        restaurant.name = new_name
        session.add(restaurant)
        session.commit()
        flash("Restaurant Sucessfully Edited")
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/delete/', methods=['GET', 'POST'])
def deleteRestaurant(restaurant_id):
    """Confirm (GET) then perform (POST) deletion of a restaurant."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    if request.method != "POST":
        return render_template('deleteRestaurant.html', restaurant=restaurant)
    session.delete(restaurant)
    session.commit()
    flash("Restaurant Successfully Deleted!")
    return redirect(url_for('showRestaurants'))
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
    """Render one restaurant's menu page."""
    restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()
    # Query left lazy on purpose; the template iterates it.
    menu_items = session.query(MenuItem).filter_by(restaurant_id=restaurant.id)
    return render_template('menu.html', restaurant=restaurant, items=menu_items)
@app.route('/restaurant/<int:restaurant_id>/menu/new/', methods=['GET', 'POST'])
def newMenuItem(restaurant_id):
    """Create a menu item for a restaurant (POST) or show the form (GET)."""
    if request.method != 'POST':
        return render_template('newMenuItem.html', restaurant_id=restaurant_id)
    form = request.form
    item = MenuItem(
        name=form['name'],
        description=form['description'],
        price=form['price'],
        course=form['course'],
        restaurant_id=restaurant_id,
    )
    session.add(item)
    session.commit()
    flash("New Menu Item Created")
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit/', methods=['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
    """Edit a menu item's fields; empty form fields leave values unchanged."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    if request.method != 'POST':
        return render_template('editMenuItem.html', restaurant_id=restaurant_id, menu_id=menu_id, item=item)
    # Copy over only the fields the user actually filled in.
    for attr in ('name', 'description', 'price', 'course'):
        value = request.form[attr]
        if value:
            setattr(item, attr, value)
    session.add(item)
    session.commit()
    flash("Menu Item Successfully Edited")
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete/', methods=['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
    """Confirm (GET) then perform (POST) deletion of a menu item."""
    item = session.query(MenuItem).filter_by(id=menu_id).one()
    if request.method != 'POST':
        return render_template('deleteMenuItem.html', restaurant_id=restaurant_id, item=item)
    session.delete(item)
    session.commit()
    flash("Menu Item Successfully Deleted")
    return redirect(url_for('showMenu', restaurant_id=restaurant_id))
if __name__ == '__main__':
    # Development-only settings: a hard-coded secret key and debug mode must
    # not be used in a production deployment.
    app.secret_key = 'super_secret_key'
    app.debug = True
    # Listen on all interfaces so the app is reachable from other machines.
    app.run(host = '0.0.0.0', port = 5000)
| [
"thinkprintgrafx@gmail.com"
] | thinkprintgrafx@gmail.com |
2d0bc0fdced19617b4451c2751afe1792f346b1a | 5c5ef7572b9f7a74a1db2ba196425b5b8a2e1047 | /Day2/Day2.py | f3ace7485b82bb13fa59564ac324717444cab7e9 | [] | no_license | sorrowed/Advent-2017 | 7c1400099db43e567c3d5c5eac9c90793e716c5d | 7cffd98a37b26ce608ae0b9a843d3b8cc22d1044 | refs/heads/master | 2021-05-06T22:23:36.669810 | 2017-12-13T20:50:46 | 2017-12-13T20:50:46 | 112,791,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from itertools import product
# --- Part 1: spreadsheet checksum = sum over rows of (max - min) ------------
# (Renamed the accumulator/handle to stop shadowing the builtins `sum` and
# `file`, and switched to a `with` block so the handle is closed on error.)
part1_total = 0
with open("input.txt", "r") as spreadsheet:
    for line in spreadsheet:
        values = [int(v) for v in line.split()]
        if values:  # tolerate blank / whitespace-only lines
            part1_total += max(values) - min(values)
print(part1_total)
# Part-2 accumulator (note: this rebinds, and shadows, the builtin `sum`).
sum = 0
def checksum(values):
    """Return the quotient of the row's evenly-divisible pair (AoC 2017 #2b).

    Scans every ordered pair of distinct values; when `dividend` is an exact
    multiple of `divisor`, returns the integer quotient immediately.
    Returns 0 when the row has no such pair (the original fell through with
    chk == 0 in that case too).
    """
    for dividend in values:
        for divisor in values:
            # Skip equal values (as the original did) and guard against a
            # ZeroDivisionError from a 0 entry before applying "%".
            if dividend != divisor and divisor != 0 and dividend % divisor == 0:
                # Floor division keeps the result an int on Python 3 as well;
                # the old `p / s` produced a float there.
                return dividend // divisor
    return 0
# --- Part 2: sum of each row's evenly-divisible-pair quotient ---------------
# (Self-contained accumulator and `with` block; avoids shadowing builtins.)
part2_total = 0
with open("input.txt", "r") as spreadsheet:
    for line in spreadsheet:
        part2_total += checksum([int(v) for v in line.split()])
print(part2_total)
| [
"nospam@sorrowed.nl"
] | nospam@sorrowed.nl |
96b042d9766626f5006a855200a45007c0831b07 | f995dc624ff61bc7b93bdaa524326d61501e0e7a | /venv/Lib/site-packages/urllib3/util/timeout.py | 49740e66f387562578e30171c59dfa1981679050 | [] | no_license | nickpatten04/debt_exercise | 1f35705eedf5e9a25810aac44e9a0de3a01b14cf | 0501e4f77839739aaa268770a50079f6b21249e4 | refs/heads/master | 2023-03-06T08:45:34.420435 | 2021-02-22T00:52:31 | 2021-02-22T00:52:31 | 341,036,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,925 | py | from __future__ import absolute_import
import time
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
# (compared by identity with `is`, so a fresh object() is unambiguous)
_Default = object()
# Use time.monotonic if available.
# (a monotonic clock cannot jump backwards with wall-clock adjustments)
current_time = getattr(time, "monotonic", time.time)
# urllib3's Timeout configuration object (vendored library code; the
# docstrings below are upstream's and the logic is intentionally untouched).
class Timeout(object):
    """Timeout configuration.
    Timeouts can be defined as a default for a pool:
    .. code-block:: python
       timeout = Timeout(connect=2.0, read=7.0)
       http = PoolManager(timeout=timeout)
       response = http.request('GET', 'http://example.com/')
    Or per-request (which overrides the default for the pool):
    .. code-block:: python
       response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
    Timeouts can be disabled by setting all the parameters to ``None``:
    .. code-block:: python
       no_timeout = Timeout(connect=None, read=None)
       response = http.request('GET', 'http://example.com/, timeout=no_timeout)
    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.
        Defaults to None.
    :type total: int, float, or None
    :param connect:
        The maximum amount of time (in seconds) to wait for a connection
        attempt to a server to succeed. Omitting the parameter will default the
        connect timeout to the system default, probably `the global default
        timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: int, float, or None
    :param read:
        The maximum amount of time (in seconds) to wait between consecutive
        read operations for a response from the server. Omitting the parameter
        will default the read timeout to the system default, probably `the
        global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: int, float, or None
    .. note::
        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.
        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.
        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.
        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
    def __init__(self, total=None, connect=_Default, read=_Default):
        self._connect = self._validate_timeout(connect, "connect")
        self._read = self._validate_timeout(read, "read")
        self.total = self._validate_timeout(total, "total")
        self._start_connect = None
    def __repr__(self):
        return "%s(connect=%r, read=%r, total=%r)" % (
            type(self).__name__,
            self._connect,
            self._read,
            self.total,
        )
    # __str__ provided for backwards compatibility
    __str__ = __repr__
    @classmethod
    def _validate_timeout(cls, value, name):
        """Check that a timeout attribute is valid.
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If it is a numeric value less than or equal to
            zero, or the type is not an integer, float, or None.
        """
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value
        # bool is a subclass of int, so reject it explicitly before the
        # numeric checks below would silently accept True/False as 1/0.
        if isinstance(value, bool):
            raise ValueError(
                "Timeout cannot be a boolean value. It must " "be an int, float or None."
            )
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError(
                "Timeout value %s was %s, but it must be an " "int, float or None." % (name, value)
            )
        # On Python 3, comparing a non-number with 0 raises TypeError,
        # which is translated into the same ValueError below.
        try:
            if value <= 0:
                raise ValueError(
                    "Attempted to set %s timeout to %s, but the "
                    "timeout cannot be set to a value less "
                    "than or equal to 0." % (name, value)
                )
        except TypeError:
            # Python 3
            raise ValueError(
                "Timeout value %s was %s, but it must be an " "int, float or None." % (name, value)
            )
        return value
    @classmethod
    def from_float(cls, timeout):
        """Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.
        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self):
        """Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read, total=self.total)
    def start_connect(self):
        """Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect
    def get_connect_duration(self):
        """Gets the time elapsed since the call to :meth:`start_connect`.
        :return: Elapsed time in seconds.
        :rtype: float
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer that has not started.")
        return current_time() - self._start_connect
    @property
    def connect_timeout(self):
        """Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total
        return min(self._connect, self.total)
    @property
    def read_timeout(self):
        """Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (
            self.total is not None
            and self.total is not self.DEFAULT_TIMEOUT
            and self._read is not None
            and self._read is not self.DEFAULT_TIMEOUT
        ):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            return max(0, min(self.total - self.get_connect_duration(), self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| [
"np164s@corp.wayfair.com"
] | np164s@corp.wayfair.com |
d1d94bfd142bcbf682859745100d6a1ea04dedc8 | 7b7cd5b2bd95c369e9edde4437a616be16399984 | /LCD3.py | 26c74fdcd413535d76ed55000c5260576a9e0133 | [] | no_license | estevaofv/Doorbell-6--final- | 6256d5594a8f92eea7803c60ceae0dbd32e1be21 | 27ec6a071c3a141b30113a17252521a01902d669 | refs/heads/master | 2021-01-14T18:41:47.450561 | 2016-02-25T18:18:14 | 2016-02-25T18:18:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | #!/usr/bin/python
import urllib2
from time import sleep
from Adafruit_CharLCDPlate import Adafruit_CharLCDPlate
import RPi.GPIO as GPIO
import time
from time import gmtime, strftime
import os
import subprocess
# Destination address for the sendxmpp/Hangouts doorbell notification.
EMAIL='in06khattab@gmail.com'
# Broadcom (BCM) pin numbering; pin 7 is the doorbell button input with an
# internal pull-down enabled.
# NOTE(review): the main loop treats a False/LOW read as "pressed", which is
# unusual with a pull-down -- confirm against the actual wiring.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Initialize the LCD plate.
lcd = Adafruit_CharLCDPlate()
# Clear display and show greeting, pause 1 sec
lcd.clear()
lcd.backlight(lcd.ON)
lcd.message("Welcome to your\ndoorbell")
sleep(1)
def internet_on():
    """Return True when http://www.google.com answers within 10 seconds.

    Used once at startup as a connectivity probe for the status banner.
    """
    try:
        # The response object itself is not needed; success is enough.
        urllib2.urlopen('http://www.google.com', timeout=10)
        return True
    except urllib2.URLError:
        # No route / DNS failure / timeout: report "offline", don't crash.
        return False
lcd.clear()
# Report connectivity on the LCD; when offline, advise using the doorbell's
# own wifi and run call.sh (external helper script -- presumably the
# announcement/notification hook; confirm what it does).
if internet_on()==True:
    lcd.message("Internet is set\nup :)")
else:
    lcd.message("No internet use\nDoorbell wifi")
    os.system("./call.sh")
# Main event loop: poll the doorbell button (GPIO 7) and the LCD plate's
# buttons forever.
while True:
    if GPIO.input(7) == False: #button pressed
        print("button pressed")
        # Timestamps in a few formats for the different notifiers below.
        time1=strftime("%l:%M %p on %d-%m-%Y")
        message="Ding Dong at "+strftime("%l:%M %p on %d-%m-%Y")
        time2=strftime(" %l:%M %p")
        lcd.clear()
        lcd.message("Ding Dong at\n")
        lcd.message(strftime("%d-%m-%Y %H:%M:%S"))
        # Fan out notifications via external helper scripts (photo, email,
        # Zapier webhook, XMPP/Hangouts message, tweet).
        os.system("./call.sh")
        os.system("sudo python camera.py")
        #os.system("sudo python send_email_fast.py") #put a space within the quote after .py to insert an argument
        os.system("sudo python send_email_attachment.py")
        os.system("sudo python zapier_webhook.py"+ time2) #put a space within the quote after .py to insert an argument
        os.system('sudo echo '+message+' | sendxmpp -v -t ' + EMAIL) #send hangouts message
        os.system("sudo python tweet.py ")
        time.sleep(0.2)
    #Local video
    # LEFT button: start streaming the Pi camera to the local RTMP endpoint.
    if lcd.buttonPressed(lcd.LEFT):
        proc = subprocess.Popen([ "raspivid -t 0 -b 2000000 -n -o - | gst-launch-1.0 -e -vvvv fdsrc ! h264parse ! flvmux ! rtmpsink location=rtmp://localhost/rtmp/live"], shell=True)
        print("aa")
        (out, err) = proc.communicate()
        print "program output:", out
    # RIGHT button: stop any running stream by killing the pipeline processes.
    if lcd.buttonPressed(lcd.RIGHT):
        print("aa")
        proc = subprocess.Popen(["pkill gst-launch-1.0; pkill raspivid"], shell=True)
        (out, err) = proc.communicate()
        print "program output:", out
        #os.system("raspivid -o - -t 0 -w 1270 -h 720 -fps 25 -b 600000 -g 50 | ./ffmpeg -re -ar 44100 -ac 2 -acodec pcm_s16le -f s16le -ac 2 -i /dev/zero -f h264 -i - -vcodec copy -acodec aac -ab 128k -g 50 -strict experimental -f flv rtmp://a.rtmp.youtube.com/live2/DoorBellDing.rpt9-wuhk-ctju-dgxw")
    # DOWN button: capture the Pi's wlan0 IP address for display on the LCD.
    if lcd.buttonPressed(lcd.DOWN):
        lcd.clear()
        proc = subprocess.Popen([ "/sbin/ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"], stdout=subprocess.PIPE, shell=True)
        (out, err) = proc.communicate()
        #print "program output:", out
lcd.message( out) | [
"in06khattab@yahoo.com"
] | in06khattab@yahoo.com |
b4d1ae788f20363105ce0a7aee177ef372443f97 | b653fb369f4c89cd0799319a4ea81bc40382da73 | /pscloud_training/models/lesson.py | f2d427e58acecfbaeeeae37221a21ad70bb69455 | [] | no_license | Ud0209/LYX | de94055c58bcddcc89ace991a403785c048538fe | adb12a41c304fe812532c6522b7f6958cb1ca46b | refs/heads/master | 2020-04-10T03:41:50.673833 | 2018-12-13T14:48:16 | 2018-12-13T14:48:16 | 160,777,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,344 | py | # -*- coding: utf-8 -*-
from odoo import api, fields, models
from odoo.exceptions import UserError, ValidationError
import datetime
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
class TrainingLesson(models.Model):
    """Training lesson: a scheduled course taught by a teacher to students."""
    _name = 'pscloud.training.lesson'
    _description = "课程信息"

    @api.multi
    @api.depends('start_date', 'end_date')
    def _compute_days(self):
        """Store the span in whole days between start_date and end_date."""
        for lesson in self:
            if lesson.start_date and lesson.end_date:
                # Date fields may come back as strings or date objects
                # depending on the Odoo version; normalise with strptime.
                # (Replaces the `type('') == type(x)` anti-pattern with
                # isinstance, which is equivalent here and idiomatic.)
                start_date = datetime.datetime.strptime(lesson.start_date, DATE_FORMAT) if isinstance(lesson.start_date, str) else lesson.start_date
                end_date = datetime.datetime.strptime(lesson.end_date, DATE_FORMAT) if isinstance(lesson.end_date, str) else lesson.end_date
                lesson.continue_days = (end_date - start_date).days

    name = fields.Char(string='Name')
    teacher_id = fields.Many2one('res.partner', string='老师', domain=[('is_teacher', '=', True)])
    student_ids = fields.Many2many('res.partner', string='学生', domain=[('is_student', '=', True)], readonly=True)
    start_date = fields.Date(string='开始时间')
    end_date = fields.Date(string='结束时间')
    continue_days = fields.Integer(string='持续天数', compute='_compute_days', store=True)
    state = fields.Selection([
        ('draft', '草稿'),
        ('confirm', '确认'),
    ], string='状态', readonly=True, copy=False, index=True, default='draft')
    seat_qty = fields.Integer(string='座位数')
    subject_id = fields.Many2one('pscloud.training.subject', string='科目')
    person_id = fields.Many2one('res.partner', related='subject_id.person_id', readonly=True)
    desc = fields.Text(string='描述')

    # Lesson names must be unique across the table.
    _sql_constraints = [
        ('name_unique', 'UNIQUE(name)', '课程名称必须唯一.')
    ]

    @api.constrains('start_date', 'end_date')
    def _check_closing_date(self):
        """Reject records whose end date precedes their start date."""
        for lesson in self:
            if lesson.end_date < lesson.start_date:
                raise ValidationError('开始时间不能大于结束时间')

    @api.multi
    def name_get(self):
        """Display records as "<lesson name>:<teacher name>"."""
        return [(lesson.id, '%s:%s' % (lesson.name, lesson.teacher_id.name)) for lesson in self]

    @api.multi
    def action_confirm(self):
        """Workflow button: move the lesson from draft to confirmed."""
        return self.write({'state': 'confirm'})
# vim:et:si:sta:ts=4:sts=4:sw=4:tw=79: | [
"ud02009@163.com"
] | ud02009@163.com |
dd633f12b25d4c82a24b587931b3fc93e8651fa8 | 8566b1d2ac2032146861cd51640d61532b8461fb | /Studentor/users/views.py | 693deec2053579e96ac6450ce959d87bd16a151b | [] | no_license | marara02/Studentor | f0a93a3254ac2294afd48dca2eb885275983608a | e876c7f78bada5df12c00bf5bbff16751af89f2d | refs/heads/main | 2023-07-17T04:39:04.734374 | 2021-08-26T04:25:22 | 2021-08-26T04:25:22 | 369,729,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from django.views import View
from .forms import CreateUserForm
from django.contrib.auth import authenticate, login as dj_login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserUpdateForm, ProfileUpdateForm
from django.core.files.storage import FileSystemStorage
from .decorators import allowed_users, unauthenticated_user
from .models import Profile
def index(request):
    """Render the public landing page."""
    return render(request, 'main/index.html')
def register(request):
    """Sign-up view: create a new account, then send the user to login."""
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for ' + username)
            return redirect('signin')
    else:
        form = CreateUserForm()
    # GET, or POST with validation errors: show the (possibly bound) form.
    return render(request, 'account/register.html', {'form': form})
@unauthenticated_user
def login(request):
    """Log a user in; on bad credentials, re-show the form with a notice."""
    if request.method == 'POST':
        user = authenticate(
            request,
            username=request.POST.get('username'),
            password=request.POST.get('password'),
        )
        if user is None:
            messages.info(request, 'Username or password incorrect')
        else:
            dj_login(request, user)
            return redirect('main')
    return render(request, 'account/login.html', {})
def logOut(request):
    """Terminate the current session and return to the sign-in page."""
    logout(request)
    return redirect('signin')
# NOTE: a stray no-op statement `login_required(login_url='login')` used to
# sit here (it built a decorator and discarded it); the decorator below is
# what actually protects the view.
@login_required
def profile(request):
    """Show and update the logged-in user's own profile."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account successfully updated')
            return redirect('profile')
    else:
        # BUG FIX: the two instances were swapped here (the User form got the
        # Profile instance and vice versa); pair them as profile_update() does.
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'main/userprofile.html', context)
@login_required
def profiles(request, pk, *args, **kwargs):
    """Show a user's profile page by Profile pk (with inline self-editing)."""
    prof = Profile.objects.get(pk=pk)
    user = prof.user
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account successfully updated')
            return redirect('profile')
    else:
        # BUG FIX: the form instances were swapped (the User form got the
        # Profile instance and vice versa); pair them as profile_update() does.
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    # (Removed: a dead `global is_follower` declaration and a commented-out
    # follower-counting block that lived in a no-op string literal.)
    context = {
        'u_form': u_form,
        'p_form': p_form,
        'profile': prof,
        'user': user,
    }
    return render(request, 'main/userprofile.html', context)
@login_required
def profile_update(request):
    """Dedicated edit form for the current user's account and profile."""
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, f'Your account successfully updated')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    # GET, or POST with validation errors: render the (possibly bound) forms.
    return render(request, 'main/profileUpdate.html',
                  {'u_form': u_form, 'p_form': p_form})
class ListFollowers(View):
    """Read-only page listing the followers of one profile."""

    def get(self, request, pk, *args, **kwargs):
        target = Profile.objects.get(pk=pk)
        return render(request, 'main/followers_list.html', {
            'profile': target,
            'followers': target.followers.all(),
        })
class AddFollower(LoginRequiredMixin, View):
    """POST handler adding the current user to a profile's followers."""

    def post(self, request, pk, *args, **kwargs):
        target = Profile.objects.get(pk=pk)
        target.followers.add(request.user)
        return redirect('profilePlace', pk=target.pk)
class RemoveFollower(LoginRequiredMixin, View):
    """POST handler removing the current user from a profile's followers."""

    def post(self, request, pk, *args, **kwargs):
        target = Profile.objects.get(pk=pk)
        target.followers.remove(request.user)
        return redirect('profile', pk=target.pk)
def deleted(request):
    """Render the 'delete' page (presumably a post-deletion confirmation --
    confirm against the URL configuration)."""
    return render(request, 'main/delete.html')
def showthis(request):
    """Tiny admin view: render the total number of registered profiles."""
    # .count() issues the same COUNT(*) query without building an
    # intermediate queryset; the leftover debug print() was removed.
    count = Profile.objects.count()
    context = {
        'count_users': count
    }
    return render(request, 'main/admin.html', context)
| [
"57908460+marara02@users.noreply.github.com"
] | 57908460+marara02@users.noreply.github.com |
22c4f877c0e1c24e6f69a838483da63e95fe699b | 1767269b049080a4150e54e8bd8e02f282168162 | /300.py | 3d7e839e424c63108dd25b82ba7569810cc74f6f | [] | no_license | alfredo-svh/DailyCodingProblem | e4be74f83fad268e270e73b4bb07f62696537e2c | 75c84a8c09869bfa0ad059beaab71fd8a91be3ee | refs/heads/master | 2022-11-16T14:32:12.654717 | 2022-10-27T13:05:34 | 2022-10-27T13:05:34 | 220,365,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,593 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 16:06:48 2020
@author: Alfredo
"""
# Daily Coding Problem #300
# Problem:
# On election day, a voting machine writes data in the form
# (voter_id, candidate_id) to a text file. Write a program that
# reads this file as a stream and returns the top 3 candidates at any
# given time. If you find a voter voting more than once, report this as fraud.
import heapq
def topThreeCandidates(fname):
    """Return the top-3 candidate ids from a "(voter, candidate)" vote log.

    Each line of *fname* looks like "(voter_id, candidate_id)".  If any
    voter appears more than once, returns the string "FRAUD!" instead of
    a ranking; otherwise returns up to three candidate ids, most-voted
    first.

    BUG FIX: the old parser sliced `line[1:-1]`, which dropped the
    trailing newline on every line except the last one -- so candidates
    kept a stray ")" suffix (visible in the sample output below).  We now
    strip the line before removing the parentheses, which also makes the
    last line (no newline) parse identically.
    """
    tallies = {}
    seen_voters = set()
    with open(fname, "r") as f:
        for line in f:
            record = line.strip()
            if not record:
                continue  # tolerate blank lines
            # Drop the surrounding parentheses, then split "voter, candidate".
            voter, candidate = record[1:-1].split(', ')
            if voter in seen_voters:
                return "FRAUD!"
            seen_voters.add(voter)
            tallies[candidate] = tallies.get(candidate, 0) + 1
    # Iterating the dict yields candidate ids; rank them by vote count.
    return heapq.nlargest(3, tallies, key=tallies.get)
# Testing:
# NOTE(review): this call runs at import time and expects a local file named
# 'test.txt'; importing the module without that file raises an
# IOError/FileNotFoundError.
topThreeCandidates('test.txt')
# In:
#(voter1, candidate1)
#(voter2, candidate1)
#(voter3, candidate2)
#(voter4, candidate2)
#(voter5, candidate3)
#(voter6, candidate3)
#(voter12, candidate4)
#(voter7, candidate4)
#(voter8, candidate4)
#(voter9, candidate4)
#(voter10, candidate2)
#(voter11, candidate1)
# Out:
#['candidate4)', 'candidate2)', 'candidate1)']
# In:
#(voter1, candidate1)
#(voter2, candidate1)
#(voter3, candidate2)
#(voter2, candidate2)
#(voter5, candidate3)
# Out:
#'FRAUD!' | [
"noreply@github.com"
] | alfredo-svh.noreply@github.com |
10debb179a95e397fb72c6cffd3734fcaa6902a3 | a544830d355907a54610a69abab47344134a216f | /data/datasets/dukemtmcreid.py | 2fa7595809f484e23e1697a403e05a388850a890 | [] | no_license | youzunzhi/DFDGAN | 0529efcbea365eaa7ed3c0eb14c7417964f4a0df | e5855af9eb20b92c81fd89db5851fd595ec1ea54 | refs/heads/master | 2020-07-10T04:47:10.086186 | 2019-08-25T13:59:23 | 2019-08-25T13:59:23 | 204,170,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,605 | py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: liaoxingyu2@jd.com
"""
import glob
import re
import urllib
import zipfile
import os.path as osp
from utils.iotools import mkdir_if_missing
from .bases import BaseImageDataset
class DukeMTMCreID(BaseImageDataset):
    """
    DukeMTMC-reID
    Reference:
    1. Ristani et al. Performance Measures and a Data Set for Multi-Target, Multi-Camera Tracking. ECCVW 2016.
    2. Zheng et al. Unlabeled Samples Generated by GAN Improve the Person Re-identification Baseline in vitro. ICCV 2017.
    URL: https://github.com/layumi/DukeMTMC-reID_evaluation
    Dataset statistics:
    # identities: 1404 (train + query)
    # images:16522 (train) + 2228 (query) + 17661 (gallery)
    # cameras: 8
    """
    dataset_dir = 'dukemtmc-reid'

    def __init__(self, root='/home/haoluo/data', verbose=True, setid=0,
                 base_train_pid=0, base_train_camid=0,
                 base_query_pid=0, base_query_camid=0,
                 base_gallery_pid=0, base_gallery_camid=0, **kwargs):
        super(DukeMTMCreID, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-reID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-reID/bounding_box_test')
        self._download_data()
        self._check_before_run()
        # The base_* offsets shift person/camera ids (presumably so several
        # datasets can be merged without id collisions); setid tags every
        # sample with its dataset of origin.
        train = self._process_dir(self.train_dir, relabel=True, base_pid=base_train_pid, base_camid=base_train_camid, setid=setid)
        query = self._process_dir(self.query_dir, relabel=False, base_pid=base_query_pid, base_camid=base_query_camid, setid=setid)
        gallery = self._process_dir(self.gallery_dir, relabel=False, base_pid=base_gallery_pid, base_camid=base_gallery_camid, setid=setid)
        if verbose:
            print("=> DukeMTMC-reID loaded")
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
        self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
        self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)

    def _download_data(self):
        """Download and unzip the dataset unless it is already on disk."""
        # BUG FIX: a bare `import urllib` at module level does not guarantee
        # that the `urllib.request` submodule is loaded; import it explicitly
        # before calling urllib.request.urlretrieve below.
        import urllib.request
        if osp.exists(self.dataset_dir):
            print("This dataset has been downloaded.")
            return
        print("Creating directory {}".format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))
        print("Downloading DukeMTMC-reID dataset")
        urllib.request.urlretrieve(self.dataset_url, fpath)
        print("Extracting files")
        # Context manager closes the archive even if extraction raises.
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)

    def _check_before_run(self):
        """Check if all files are available before going deeper"""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if not osp.exists(self.train_dir):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if not osp.exists(self.query_dir):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if not osp.exists(self.gallery_dir):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, relabel=False, base_pid=0, base_camid=0, setid=0):
        """Parse "<pid>_c<camid>*.jpg" names into (path, pid, camid, setid).

        With relabel=True, person ids are remapped to a dense 0..N-1 range
        (as needed when training a classifier over the train split).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile(r'([-\d]+)_c(\d)')
        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            pid += base_pid
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            assert 1 <= camid <= 8
            camid -= 1  # index starts from 0
            pid += base_pid
            camid += base_camid
            if relabel:
                pid = pid2label[pid]
                # NOTE(review): base_pid is applied again after relabeling
                # (it was already folded into pid2label's keys) -- confirm
                # this double offset is intended for multi-dataset merging.
                pid += base_pid
            dataset.append((img_path, pid, camid, setid))
        return dataset
| [
"youzunzhi@163.com"
] | youzunzhi@163.com |
cf4bc697e7860156204aee79bd65444202aabd62 | f84be0daae92f31d1658ce6384f29000e98f318a | /ShutdownClock/Project/Main.py | 82dfb4318e552c1d46be64398ceddaf48d436031 | [] | no_license | raulianos09/Personal-Projects | c6bba2296a2bc0ac38c425ffca32563ab1dfd747 | 1f46864176696e98e4a6e0857eed17ddd8bb6188 | refs/heads/master | 2023-08-13T20:33:53.909166 | 2021-10-05T08:56:53 | 2021-10-05T08:56:53 | 413,735,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,684 | py | import os
import subprocess
from tkinter import *
from tkinter import font as tkFont
HOUR_OPTIONS = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
MINUTE_OPTIONS = ['05', '10', '15', '20', '25', '30', '35', '40', '45', '50', '55']
class MainWindow:
def __init__(self):
self.__initializeWindow()
self.__createMainFrame()
self.__createOptionsDropdown()
self.__createTimer()
self.startBtn = self.__createStartBtn(self.root)
self.defaultsBtn = self.__createDefaultsBtn(self.root)
self.__loadDefaults()
def __createMainFrame(self):
self._mainFrame = Frame(self.root)
self._mainFrame.pack()
def __initializeWindow(self):
self.root = Tk()
self.root.title("Shutdown clock")
self.root.geometry('300x400')
self.root.iconbitmap("Basic_Clock.ico")
self.root.config(bg='#000000')
def __createOptionsDropdown(self):
self.appOptionsVar = StringVar(self._mainFrame)
self.appOptionsVar.set("OPTIONS")
self.dropdown = OptionMenu(self._mainFrame, self.appOptionsVar, "Shutdown", "Sleep")
self._mainFrame.nametowidget(self.dropdown.menuname).config(font=tkFont.Font(family='Helvetica', size=20))
self.dropdown.config(width=150)
self.dropdown.config(font=tkFont.Font(family='Helvetica', size=25))
self.dropdown.pack()
def __createTimer(self):
self.__timerFrame = Frame(self.root)
self.hourDropdownVar = StringVar(self.__timerFrame)
self.hourDropdownVar.set("HH")
self.hourDropdown = OptionMenu(self.__timerFrame, self.hourDropdownVar, *HOUR_OPTIONS)
self.hourDropdown.config(font=tkFont.Font(family='Helvetica', size=25))
self.hourDropdown.grid(row=0, column=0)
self.minuteDropdownVar = StringVar(self.__timerFrame)
self.minuteDropdownVar.set("MM")
self.minuteDropdown = OptionMenu(self.__timerFrame, self.minuteDropdownVar, *MINUTE_OPTIONS)
self.minuteDropdown.config(font=tkFont.Font(family='Helvetica', size=25))
self.minuteDropdown.grid(row=0, column=1)
self.__timerFrame.nametowidget(self.hourDropdown.menuname).config(font=tkFont.Font(family='Helvetica', size=20))
self.__timerFrame.nametowidget(self.minuteDropdown.menuname).config(
font=tkFont.Font(family='Helvetica', size=20))
self.__timerFrame.pack(padx=20, pady=50)
def run(self):
self.root.mainloop()
def __createStartBtn(self, window):
button = Button(window, text="START", fg="black", command=self.__startCounter)
button.pack(side=BOTTOM)
button.config(width=150)
button.config(font=tkFont.Font(family='Helvetica', size=25))
return button
def __createDefaultsBtn(self, window):
button = Button(window, text="SET DEFAULT", fg="black", command=self.__saveDefaults)
button.pack(side=BOTTOM)
button.config(width=150)
button.config(font=tkFont.Font(family='Helvetica', size=25))
return button
def __loadDefaults(self):
file = open('defaults.txt', 'r')
lines = file.readlines()
self.appOptionsVar.set(lines[0].strip())
self.hourDropdownVar.set(lines[1].strip())
self.minuteDropdownVar.set(lines[2].strip())
file.close()
def __saveDefaults(self):
open('defaults.txt', 'w').close()
file = open('defaults.txt', 'w')
file.write(self.appOptionsVar.get())
file.write("\n")
file.write(self.hourDropdownVar.get())
file.write("\n")
file.write(self.minuteDropdownVar.get())
file.write("\n")
file.close()
def __startCounter(self):
option = self.appOptionsVar.get()
hour = self.hourDropdownVar.get()
minute = self.minuteDropdownVar.get()
self.root.withdraw()
self.__counter = Counter(option, hour, minute)
class Counter:
    """Countdown window that, once the delay elapses, hibernates ("Sleep")
    or shuts down ("Shutdown") the machine via Windows' shutdown.exe.

    Parameters: ``option`` is "Shutdown" or "Sleep"; ``hour``/``minute`` are
    the delay as strings taken from the dropdowns (e.g. "1", "05").
    NOTE(review): if the user never picked values they are still "HH"/"MM"
    and ``int()`` in startCountdown will raise — confirm upstream validation.
    """
    def __init__(self, option, hour, minute):
        self.__initializeWindow()
        self.__createMainFrame()
        self.__option = option
        self.__hour = hour
        self.__minute = minute
        self.__run()
    def __createMainFrame(self):
        """Create the frame hosting the big time label."""
        self._mainFrame = Frame(self.root)
        self._mainFrame.pack()
    def __initializeWindow(self):
        """Create and style the countdown's own Tk root window."""
        self.root = Tk()
        self.root.title("Timer")
        self.root.geometry('400x150')
        self.root.iconbitmap("Basic_Clock.ico")
        self.root.config(bg='#000000')
    def __run(self):
        """Build the time label, start ticking and enter the event loop."""
        self.timeLabel = Label(self._mainFrame, text=self.__hour + " : " + self.__minute + " : " + "0",
                               font=("helvetica", 60), width=12, bg="black",
                               fg="white", height=2)
        self.timeLabel.pack()
        self.startCountdown()
        self.root.mainloop()
    def countdown(self, count):
        """Render ``count`` seconds as H : M : S, re-schedule itself in 1 s,
        and perform the configured power action once the count reaches 0."""
        mins, secs = divmod(count, 60)
        hours = 0
        # Bug fix: was ``mins > 60``, so exactly one hour displayed as 60 min.
        if mins >= 60:
            hours, mins = divmod(mins, 60)
        labelText = "{0:2d}".format(hours) + " : " + "{0:2d}".format(mins) + " : " + "{0:2d}".format(secs)
        self.timeLabel.config(text=labelText)
        self._mainFrame.update()
        if count > 0:
            self.root.after(1000, self.countdown, count - 1)
        else:
            # Bug fix: the power action used to live in startCountdown(),
            # right after the *non-blocking* countdown() call (root.after
            # only schedules the next tick), so the machine was shut down
            # immediately instead of when the timer expired. Fire it here,
            # when the count has actually reached zero.
            self.__performAction()
    def __performAction(self):
        """Hibernate or shut down the machine, per the configured option."""
        if self.__option == "Sleep":
            os.system("shutdown /h /t 0")
        elif self.__option == "Shutdown":
            os.system("shutdown /s /t 0")
    def startCountdown(self):
        """Convert the configured delay to seconds and start the ticking."""
        userinput = int(self.__hour) * 3600 + int(self.__minute) * 60
        self.countdown(userinput)
if __name__ == "__main__":
app = MainWindow()
app.run()
| [
"72858436+raulianos09@users.noreply.github.com"
] | 72858436+raulianos09@users.noreply.github.com |
e566a19661fd0f6eb08e8dc063f46dd7afcf2c8c | 8c79cb30e898e7d17f76644702f3971e591aaf36 | /python/groups.py | c2c777d674ec7adb7955688786ebf3f0316cd46e | [] | no_license | kmaroufi/DataBase-Project | 5268f704873acc1ed511cbc2e1f1d3df728ea9b1 | 965a09488c93e6c9fe26953e44f0515921914ce9 | refs/heads/master | 2020-09-24T02:08:08.771737 | 2019-12-03T14:26:50 | 2019-12-03T14:26:50 | 225,636,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,024 | py | from util import *
conn = None
cur = None
user_tel_number = None
def handle_groups(cmd, db_conn):
    """Dispatch a group-related command string to its handler.

    ``cmd`` looks like "<prefix> <task> ...": the second whitespace-separated
    token selects the sub-command. Binds the module-level ``conn``/``cur``
    to ``db_conn`` first, so every handler shares this connection. Returns
    the handler's result (True on completion) or None for an unknown task.

    NOTE(review): "addUploadedPic" dispatches to ``add_uploaded_pic``, which
    is not defined in this module — presumably provided by ``from util
    import *``; verify, otherwise that branch raises NameError.
    """
    global conn, cur, user_tel_number
    conn = db_conn
    cur = conn.cursor()
    tmp = cmd.split()
    task = tmp[1]
    if task == "addGroup":
        return add_group(cmd)  # /
    if task == "alterGroup":
        return alter_group(cmd)
    if task == "rmGroup":
        return rm_group(cmd)
    if task == "addBlockedUser":
        return add_blocked_user(cmd)
    if task == "rmBlockedUser":
        return rm_blocked_user(cmd)
    if task == "addAdmin":
        return add_admin(cmd)
    if task == "rmAdmin":
        return rm_admin(cmd)
    if task == "addPic":
        return add_pic(cmd)
    if task == "addUploadedPic":
        return add_uploaded_pic(cmd)
    if task == "rmPic":
        return rm_pic(cmd)
    if task == "addPmGroup":
        return add_pm_group(cmd)
    if task == "addForwardedPmGroup":
        return add_forwarded_pm_group(cmd)
    if task == "rmPmGroup":
        return rm_pm_group(cmd)
    if task == "addSeen":
        return add_seen(cmd)
    if task == "getProfile":
        return get_profile(cmd)
    if task == "getUsers":
        return get_users(cmd)
    if task == "getBlockedUsers":
        return get_blocked_users(cmd)
    if task == "getAdminUsers":
        return get_admin_users(cmd)
    if task == "getPmGroups":
        return get_pm_groups(cmd)
    if task == "addPin":
        return add_pin(cmd)
    if task == "rmPin":
        return rm_pin(cmd)
    if task == "getPin":
        return get_pin(cmd)
    if task == "getCreator":
        return get_creator(cmd)
    print("unknown command!")
    return
def add_group(cmd):
    """Create a new group row.

    Payload: (name, creator_tel_number). Prints 1 and commits on success;
    `execute` itself reports failures. Always returns True.
    """
    name, creator = splitter(cmd)[:2]
    inserted = execute(
        "INSERT INTO group_table (name, creator_tel_number) VALUES (%s, %s)",
        (name, creator),
    )
    if inserted:
        print(1)
        conn.commit()
    return True
def alter_group(cmd):
    """Rename a group (not implemented).

    Parses (tel_number, group_id, name) from the command but performs no
    database change yet; the UPDATE is still a TODO. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    group_id = params[1]
    name = params[2]
    # TODO
    # if execute("UPDATE group_table SET name=%s WHERE group_id=%s", (name, group_id)):
    return True
def rm_group(cmd):
    """Delete a group; only its creator may do so.

    Payload: (tel_number, group_id). Prints 1 and commits on success,
    otherwise prints "you are not creator". Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    group_id = params[1]
    if execute("SELECT creator_tel_number FROM group_creator_view WHERE creator_tel_number= %s AND group_id = %s",
               (tel_number, group_id)):
        if len(cur.fetchall()) == 1:
            if execute("DELETE FROM group_table WHERE group_id = %s", (group_id,)):
                print(1)
                conn.commit()
        else:
            print("you are not creator")
    return True
def add_blocked_user(cmd):
    """Block ``tel_number2`` in a group, on behalf of admin ``tel_number``.

    Payload: (admin_tel, target_tel, group_id). Only a group admin may
    block, an admin cannot block themselves, and another admin cannot be
    blocked. Prints 1 and commits on success, otherwise prints 0 plus a
    reason. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    tel_number2 = params[1]
    group_id = params[2]
    if tel_number == tel_number2:
        print(0)
        print("what are you doing?")
    else:
        # Requester must hold an admin membership row in this group.
        if execute(
                "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
                (tel_number, group_id, True)):
            if len(cur.fetchall()) == 1:
                # Target may be blocked only if they are not an admin here.
                if execute(
                        "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
                        (tel_number2, group_id, True)):
                    if len(cur.fetchall()) == 0:
                        if execute("INSERT INTO group_blocked_users_table (tel_number, group_id) VALUES (%s, %s)",
                                   (tel_number2, group_id)):
                            print(1)
                            conn.commit()
                    else:
                        print(0)
                        print("you can't block another admin!")
            else:
                print(0)
                print("you are not admin!")
    return True
def rm_blocked_user(cmd):
    """Unblock ``tel_number2`` in a group, on behalf of admin ``tel_number``.

    Payload: (admin_tel, blocked_tel, group_id). Only a group admin may
    unblock, and self-targeting is rejected. Prints 1 and commits on
    success, otherwise prints 0 plus a reason. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    tel_number2 = params[1]
    group_id = params[2]
    if tel_number == tel_number2:
        print(0)
        print("what are you doing?")
    else:
        # Requester must hold an admin membership row in this group.
        if execute(
                "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
                (tel_number, group_id, True)):
            if len(cur.fetchall()) == 1:
                # Bug fix: the old statement used INSERT-style syntax
                # ("DELETE FROM t (cols) WHERE (%s, %s)"), which is not
                # valid SQL and always failed. Use a proper WHERE clause.
                if execute(
                        "DELETE FROM group_blocked_users_table WHERE tel_number=%s AND group_id=%s",
                        (tel_number2, group_id)):
                    # NOTE(review): DELETE succeeds even when no row matched,
                    # so the "not in blocked list" branch below only fires on
                    # a database error — consider checking cur.rowcount.
                    print(1)
                    conn.commit()
                else:
                    print(0)
                    print("this person is not in blocked list!")
            else:
                print(0)
                print("you are not admin!")
    return True
def add_admin(cmd):
    """Promote a group member to admin; only the group creator may do so.

    Payload: (creator_tel, member_tel, group_id). Self-promotion is
    rejected and the target must already be a member. Prints 1 and commits
    on success, otherwise prints 0 plus a reason. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    tel_number2 = params[1]
    group_id = params[2]
    if tel_number == tel_number2:
        print(0)
        print("what are you doing?")
    else:
        # Requester must be the group's creator.
        if execute(
                "SELECT creator_tel_number FROM group_creator_view WHERE creator_tel_number=%s AND group_id = %s",
                (tel_number, group_id)):
            if len(cur.fetchall()) == 1:
                # Target must already be a member of the group.
                if execute(
                        "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
                        (tel_number2, group_id)):
                    if len(cur.fetchall()) == 1:
                        if execute(
                                "UPDATE membership_table SET is_admin=TRUE WHERE group_id=%s AND tel_number=%s",
                                (group_id, tel_number2)):
                            print(1)
                            conn.commit()
                    else:
                        print(0)
                        print("this person is not in the group!")
            else:
                print(0)
                print("you are not group's creator!")
    return True
def rm_admin(cmd):
    """Demote a group admin to a regular member; creator only.

    Payload: (creator_tel, admin_tel, group_id). Self-demotion is rejected
    and the target must currently be an admin. Prints 1 and commits on
    success, otherwise prints 0 plus a reason. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    tel_number2 = params[1]
    group_id = params[2]
    if tel_number == tel_number2:
        print(0)
        print("what are you doing?")
    else:
        # Requester must be the group's creator.
        if execute(
                "SELECT creator_tel_number FROM group_creator_view WHERE creator_tel_number=%s AND group_id = %s",
                (tel_number, group_id)):
            if len(cur.fetchall()) == 1:
                # Target must currently hold an admin membership row.
                if execute(
                        "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
                        (tel_number2, group_id, True)):
                    if len(cur.fetchall()) == 1:
                        if execute(
                                "UPDATE membership_table SET is_admin=FALSE WHERE group_id=%s AND tel_number=%s",
                                (group_id, tel_number2)):
                            print(1)
                            conn.commit()
                    else:
                        print(0)
                        print("this person is not an admin!")
            else:
                print(0)
                print("you are not group's creator!")
    return True
def add_pic(cmd):
    """Add a picture to a group's profile gallery (admin only).

    Payload: (admin_tel, group_id, file_address). Registers the file in
    media_table/image_table when unseen, then links it to the group in
    group_profile_table. Prints 1 and commits on success; otherwise prints
    0 with a reason. Returns True (None on an insert failure).
    """
    params = splitter(cmd)
    tel_number = params[0]
    group_id = params[1]
    file_address = params[2]
    if execute("SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
               (tel_number, group_id, True)):
        if len(cur.fetchall()) == 1:
            if execute("SELECT * FROM media_table WHERE file_address=%s", (file_address,)):
                if len(cur.fetchall()) == 0:
                    res = execute("INSERT INTO media_table(file_address, type) VALUES(%s, %s)", (file_address, 'image'))
                    if not res:
                        return
                    res = execute("INSERT INTO image_table(file_address) VALUES(%s)", (file_address,))
                    if not res:
                        return
                # NOTE(review): the profile link below is attempted even when
                # the media row already existed — confirm re-linking a known
                # image to the group is intended.
                if execute("INSERT INTO group_profile_table(group_id, image_address) VALUES(%s, %s)",
                           (group_id, file_address)):
                    print(1)
                    conn.commit()
        else:
            print(0)
            print("you are not group's admin!")
    return True
def rm_pic(cmd):
    """Remove a picture from a group's profile gallery (admin only).

    Payload: (admin_tel, group_id, file_address, date). ``date`` is parsed
    for interface compatibility but currently unused. Prints 1 and commits
    on success, otherwise prints 0 plus a reason. Always returns True.
    """
    params = splitter(cmd)
    tel_number = params[0]
    group_id = params[1]
    file_address = params[2]
    date = params[3]  # kept for protocol compatibility; not used yet
    if execute("SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s AND membership_table.is_admin=%s",
               (tel_number, group_id, True)):
        if len(cur.fetchall()) == 1:
            if execute("SELECT * FROM media_table WHERE file_address=%s", (file_address,)):
                if len(cur.fetchall()) == 0:
                    print(0)
                    print("this pic is not in data base!")
                    # Bug fix: previously fell through and attempted the
                    # DELETE anyway after reporting the picture as missing
                    # (printing both a failure and a success message).
                    return True
            if execute("DELETE FROM group_profile_table WHERE group_id=%s AND image_address=%s",
                       (group_id, file_address)):
                print(1)
                conn.commit()
        else:
            print(0)
            print("you are not group's admin!")
    return True
def add_pm_group(cmd):
    """Post a new message to a group.

    Payload: (sender_tel, group_id, text, file_address, media_type).
    The sender must be a member. A per-sender message_id is allocated as
    MAX(existing) + 1. A non-empty ``file_address`` is registered in
    media_table first; an empty one is stored as NULL. Prints 1 and
    commits on success. Returns True (None on abort/failure paths).
    """
    params = splitter(cmd)
    cs_tel_number = params[0]
    group_id = params[1]
    text = params[2]
    file_address = params[3]
    # NOTE(review): the media type is parsed but the INSERT below hard-codes
    # 'image' — confirm whether it should honour this value instead.
    type = params[4]
    message_id = -1
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (cs_tel_number, group_id)):
        if len(cur.fetchall()) == 0:
            print(0)
            print("you are not in the group!")
            return
    # first, find out message_id (per-sender sequence: MAX + 1).
    # Bug fix: the parameter was a bare string — ``(cs_tel_number)`` is not
    # a tuple — which the DB driver rejects as a parameter sequence.
    execute("SELECT MAX(message_id) FROM message_table WHERE creator_tel_number=%s", (cs_tel_number,))
    rows = cur.fetchall()
    if len(rows) > 1:
        print(0)
        print("add_pm_chat: strange error")
        return
    elif rows[0][0] is not None:
        message_id = int(rows[0][0]) + 1
    else:
        message_id = 1
    if file_address != "":
        # Register the attachment if it is not known yet.
        if execute("SELECT * FROM media_table WHERE file_address=%s", (file_address,)):
            if len(cur.fetchall()) == 0:
                if not execute("INSERT INTO media_table(file_address, type) VALUES (%s, %s)", (file_address, 'image')):
                    return
        else:
            return
    else:
        file_address = None
    if execute(
            "INSERT INTO message_table(creator_tel_number, message_id, message_text, media_address) VALUES (%s, %s, %s, %s)",
            (cs_tel_number, message_id, text, file_address)):
        if execute(
                "INSERT INTO group_sends_table(tel_number, group_id, creator_tel_number, message_id) VALUES (%s, %s, %s, %s)",
                (cs_tel_number, group_id, cs_tel_number, message_id)):
            print(1)
            conn.commit()
    return True
def add_forwarded_pm_group(cmd):
    """Forward an existing message into a group.

    Payload: (sender_tel, group_id, original_creator_tel, message_id).
    The sender must be a member and the referenced message must exist; a
    new group_sends_table row is added without copying the message body.
    Prints 1 and commits on success. Returns True (None on abort paths).
    """
    params = splitter(cmd)
    s_tel_number = params[0]
    group_id = params[1]
    c_tel_number = params[2]
    message_id = params[3]
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (s_tel_number, group_id)):
        if len(cur.fetchall()) == 0:
            print(0)
            print("you are not in the group!")
            return
    if execute("SELECT * FROM message_table WHERE creator_tel_number=%s AND message_id=%s", (c_tel_number, message_id)):
        if len(cur.fetchall()) == 0:
            print(0)
            print("this message doesn't exist!")
            return
    else:
        return
    if execute(
            "INSERT INTO group_sends_table(tel_number, group_id, creator_tel_number, message_id) VALUES (%s, %s, %s, %s)",
            (s_tel_number, group_id, c_tel_number, message_id)):
        print(1)
        conn.commit()
    return True
def rm_pm_group(cmd):
    """Remove a group message (not implemented).

    Parses the identifying tuple (sender_tel, group_id, creator_tel,
    message_id, send_time) but performs no database change yet.
    Always returns True.
    """
    params = splitter(cmd)
    s_tel_number = params[0]
    group_id = params[1]
    c_tel_number = params[2]
    message_id = params[3]
    send_time = params[4]
    return True
def add_seen(cmd):
    """Record that ``viewer_tel_number`` has seen a specific group message.

    Payload: (sender_tel, group_id, creator_tel, message_id, send_time,
    viewer_tel). The viewer must be a group member. Prints 1 and commits
    on success. Returns True (None on the membership abort).
    """
    params = splitter(cmd)
    s_tel_number = params[0]
    group_id = params[1]
    c_tel_number = params[2]
    message_id = params[3]
    send_time = params[4]
    v_tel_number = params[5]
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (v_tel_number, group_id)):
        if len(cur.fetchall()) == 0:
            print(0)
            print("you are not in the group!")
            return
    # Bug fix: the parameters were previously passed as a *set* literal
    # ({...}), which has no defined order (and collapses duplicates), so
    # values could be bound to the wrong columns. Use an ordered tuple.
    if execute("INSERT INTO group_seen_table (tel_number, group_id, creator_tel_number, message_id, send_time, viewer_tel_number) VALUES (%s, %s, %s, %s, %s, %s)",
               (s_tel_number, group_id, c_tel_number, message_id, send_time, v_tel_number)):
        print(1)
        conn.commit()
    return True
def get_profile(cmd):
    """Print a group's profile rows (group_profile_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_profile_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def get_users(cmd):
    """Print a group's member rows (group_members_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_members_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def get_blocked_users(cmd):
    """Print a group's blocked-user rows (group_blocked_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_blocked_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def get_admin_users(cmd):
    """Print a group's admin rows (group_members_view, is_admin only).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_members_view WHERE group_id = %s AND is_admin = TRUE", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def get_pm_groups(cmd):
    """Print a group's message rows (group_messages_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_messages_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def add_pin(cmd):
    """Pin a group message (not implemented).

    Parses (group_id, creator_tel, message_id, send_time, sender_tel) but
    performs no database change yet. Always returns True.
    """
    params = splitter(cmd)
    group_id = params[0]
    c_tel_number = params[1]
    message_id = params[2]
    send_time = params[3]
    s_tel_number = params[4]
    return True
def rm_pin(cmd):
    """Unpin a group message (not implemented).

    Parses (group_id, creator_tel, message_id, send_time, sender_tel) but
    performs no database change yet. Always returns True.
    """
    params = splitter(cmd)
    group_id = params[0]
    c_tel_number = params[1]
    message_id = params[2]
    send_time = params[3]
    s_tel_number = params[4]
    return True
def get_pin(cmd):
    """Print a group's pinned-message rows (group_pin_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_pin_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def get_creator(cmd):
    """Print a group's creator row (group_creator_view).

    Payload: (tel_number, group_id); the caller must be a member. Output
    protocol: "1", the row count, then one "field;field;...;" line per row.
    Returns True, or None when the membership gate aborts.
    """
    requester, group_id = splitter(cmd)[:2]
    # Abort only when the membership query ran and found no row.
    if execute(
            "SELECT * FROM membership_table WHERE tel_number=%s AND group_id=%s",
            (requester, group_id)) and not cur.fetchall():
        print(0)
        print("you are not in the group!")
        return
    if execute("SELECT * FROM group_creator_view WHERE group_id = %s", (group_id,)):
        print(1)
        rows = cur.fetchall()
        print(len(rows))
        for row in rows:
            print("".join("{};".format(field) for field in row))
    return True
def execute(sql_cmd, args):
    """Run ``sql_cmd`` with ``args`` on the module-level cursor.

    Returns True on success. On any error, prints "0" followed by the
    exception text (this module's wire-protocol failure marker) and
    returns False instead of raising.
    """
    try:
        cur.execute(sql_cmd, args)
        return True
    except Exception as e:
        # Removed an unused local (``x = 0``); report and swallow the
        # failure so callers can branch on the boolean result.
        print("0")
        print(e)
        return False
| [
"kmaroufi76@gmail.com"
] | kmaroufi76@gmail.com |
8a501952490fa9d33985f24cf23aa7cb69298554 | 452be58b4c62e6522724740cac332ed0fe446bb8 | /src/starboard/android/shared/gyp_configuration.gypi | 12dd79875f4d6246ee3cd44f16732f163bbd4628 | [
"Apache-2.0"
] | permissive | blockspacer/cobalt-clone-cab7770533804d582eaa66c713a1582f361182d3 | b6e802f4182adbf6a7451a5d48dc4e158b395107 | 0b72f93b07285f3af3c8452ae2ceaf5860ca7c72 | refs/heads/master | 2020-08-18T11:32:21.458963 | 2019-10-17T13:09:35 | 2019-10-17T13:09:35 | 215,783,613 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,374 | gypi | # Copyright 2016 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Platform specific configuration for Android on Starboard. Automatically
# included by gyp_cobalt in all .gyp files by Cobalt together with base.gypi.
#
{
'variables': {
'target_os': 'android',
'final_executable_type': 'shared_library',
'gtest_target_type': 'shared_library',
'sb_widevine_platform' : 'android',
'gl_type': 'system_gles2',
'enable_remote_debugging': 0,
'linker_flags': [
# The NDK default "ld" is actually the gold linker for all architectures
# except arm64 (aarch64) where it's the bfd linker. Don't use either of
# those, rather use lld everywhere. See release notes for NDK 19:
# https://developer.android.com/ndk/downloads/revision_history
'-fuse-ld=lld',
],
# Define platform specific compiler and linker flags.
# Refer to base.gypi for a list of all available variables.
'compiler_flags_host': [
'-O2',
],
'compiler_flags_debug': [
'-frtti',
'-O0',
],
'compiler_flags_devel': [
'-frtti',
'-O2',
],
'compiler_flags_qa': [
'-fno-rtti',
'-gline-tables-only',
],
'compiler_flags_qa_size': [
'-Os',
],
'compiler_flags_qa_speed': [
'-O2',
],
'compiler_flags_gold': [
'-fno-rtti',
'-gline-tables-only',
],
'compiler_flags_gold_size': [
'-Os',
],
'compiler_flags_gold_speed': [
'-O2',
],
'platform_libraries': [
'-lEGL',
'-lGLESv2',
'-lOpenSLES',
'-landroid',
'-llog',
'-lmediandk',
],
'conditions': [
['cobalt_fastbuild==0', {
'compiler_flags_debug': [
'-g',
],
'compiler_flags_devel': [
'-g',
],
'compiler_flags_qa': [
'-gline-tables-only',
],
'compiler_flags_gold': [
'-gline-tables-only',
],
}],
],
},
'target_defaults': {
'target_conditions': [
['sb_pedantic_warnings==1', {
'cflags': [
'-Wall',
'-Wextra',
'-Wunreachable-code',
# Don't get pedantic about warnings from base macros. These must be
# disabled after the -Wall above, so this has to be done here rather
# than in the platform's target toolchain.
# TODO: Rebase base and use static_assert instead of COMPILE_ASSERT
'-Wno-unused-local-typedef', # COMPILE_ASSERT
'-Wno-missing-field-initializers', # LAZY_INSTANCE_INITIALIZER
# It's OK not to use some input parameters. Note that the order
# matters: Wall implies Wunused-parameter and Wno-unused-parameter
# has no effect if specified before Wall.
'-Wno-unused-parameter',
],
}],
['_type=="executable"', {
# Android Lollipop+ requires relocatable executables.
'cflags': [
'-fPIE',
],
'ldflags': [
'-pie',
],
},{
# Android requires relocatable shared libraries.
'cflags': [
'-fPIC',
],
}],
['use_asan==1', {
'cflags': [
'-fsanitize=address',
'-fno-omit-frame-pointer',
],
'ldflags': [
'-fsanitize=address',
# Force linking of the helpers in sanitizer_options.cc
'-Wl,-u_sanitizer_options_link_helper',
],
'defines': [
'ADDRESS_SANITIZER',
],
}],
['use_tsan==1', {
'cflags': [
'-fsanitize=thread',
'-fno-omit-frame-pointer',
],
'ldflags': [
'-fsanitize=thread',
],
'defines': [
'THREAD_SANITIZER',
],
}],
],
}, # end of target_defaults
}
| [
"trofimov_d_a@magnit.ru"
] | trofimov_d_a@magnit.ru |
721f08dccc478beceef57cb2da3bc704a671c7d2 | 6d651544b744cd7a9ca6ddc7d86aa08e5f95d35e | /my_flask_app/venv/bin/easy_install | 8d88c36dcd8fa61214355d2f351debf5fcedf74f | [] | no_license | Matiwosb/lecture2 | 738b09b55184d7704d986e08fcf7f80215db4adb | 95b1b9124131bf08b75a9d78b4a02f37ddcd58fd | refs/heads/master | 2022-04-24T11:31:29.858594 | 2020-04-28T17:53:14 | 2020-04-28T17:53:14 | 257,146,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/mnt/c/Users/worku/my_flask_app/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"workubirbo@gmail.com"
] | workubirbo@gmail.com | |
6914467b4e480fb1fed13898dda10452a6241fef | 51b6d2fc53d5c632fcf01319842baebf13901e84 | /atcoder.jp/arc032/arc032_1/Main.py | 66bb6d66546bb0ff6cd9f30580c3f42ba9e3c722 | [] | no_license | mono-0812/procon | 35db3b2c21eff74fbd7b52db07f249380f6834ef | 68a4b53880a228a0164052b23d1326363efcbc20 | refs/heads/master | 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | n = int(input())
val = 0
for i in range(1,n+1):
val += i
for i in range(2,val//2):
if val%i == 0:
print("BOWWOW")
exit()
if val == 1:
print("BOWWOW")
exit()
print("WANWAN") | [
"frisk02.jar@gmail.com"
] | frisk02.jar@gmail.com |
fcc9505e88d02834c0fa710a05ca99a8fc847271 | 19b6d5b77e3c786865c99e8a73ab65189c86a7fb | /Day_03 | 80413438ed49c0c9867e4d8a4288026d49034b2f | [] | no_license | SatymSingh01/Myhackerankcodes | 5cc2e84d0d2cd2cd85a5e390590b0de00231def4 | c9850f2c3c08adee06f82793cc032226db3762eb | refs/heads/master | 2022-11-17T05:35:47.552320 | 2020-07-06T16:59:34 | 2020-07-06T16:59:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the solve function below.
meal_cost = float(input())
tip_percent = int(input())
tax_percent = int(input())
mealcost=meal_cost
tip=meal_cost*(tip_percent/100)
tax=meal_cost*(tax_percent/100)
total_cost=mealcost + tip +tax
print(round(total_cost))
| [
"noreply@github.com"
] | SatymSingh01.noreply@github.com | |
3d95e5fb632eba6064ce77c398754fd32ea70c1d | 57f19247ea0fda41ca552f6f4b6b508a23a91aaf | /MyApp/storm/django/backend/base.py | 2a99ce91a0c2376ea60eb362f09f1c399d8f3acc | [] | no_license | marcosstevens2012/GPP | 4d68ad7b3123af425311657d4e0c2c7d0c327cb4 | 80e28bd51665846276123e46a963dabf353ffaf9 | refs/heads/master | 2020-07-01T03:47:36.733712 | 2016-11-25T21:56:50 | 2016-11-25T21:56:50 | 74,096,896 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | __metaclass__ = type
__all__ = [
'DatabaseWrapper', 'DatabaseError', 'IntegrityError',
]
from django.conf import settings
import transaction
from storm.django.stores import get_store, get_store_uri
from storm.exceptions import DatabaseError, IntegrityError
class StormDatabaseWrapperMixin(object):
    """Mixin for a Django DatabaseWrapper that borrows its raw DB-API
    connection from a Storm store and routes commit/rollback through the
    Zope ``transaction`` manager, so Django and Storm share a transaction."""
    _store = None  # lazily bound Storm store for settings.DATABASE_NAME
    def _get_connection(self):
        """Return the raw DB-API connection behind the Storm store."""
        if self._store is None:
            self._store = get_store(settings.DATABASE_NAME)
        # Make sure that the store is registered with the transaction
        # manager: we don't know what the connection will be used for.
        self._store._event.emit("register-transaction")
        self._store._connection._ensure_connected()
        return self._store._connection._raw_connection
    def _set_connection(self, connection):
        # Ignore attempts to set the connection.
        pass
    connection = property(_get_connection, _set_connection)
    def _valid_connection(self):
        # Storm handles the connection liveness checks.
        return True
    def _cursor(self, *args):
        """Wrap the backend cursor so disconnection errors are detected."""
        cursor = super(StormDatabaseWrapperMixin, self)._cursor(*args)
        return StormCursorWrapper(self._store, cursor)
    def _commit(self):
        # Commit through the transaction manager, not the raw connection.
        transaction.commit()
    def _rollback(self):
        # Abort through the transaction manager, not the raw connection.
        transaction.abort()
    def close(self):
        # As we are borrowing Storm's connection, we shouldn't close
        # it behind Storm's back.
        self._store = None
class StormCursorWrapper(object):
    """A cursor wrapper that checks for disconnection errors."""
    def __init__(self, store, cursor):
        self._connection = store._connection
        self._cursor = cursor
    def _check_disconnect(self, *args, **kwargs):
        """Run a cursor operation through Storm's disconnection detector,
        also treating Django's DatabaseError as a disconnection marker."""
        # Imported lazily to avoid a hard import-time dependency on Django.
        from django.db import DatabaseError as DjangoDatabaseError
        kwargs['extra_disconnection_errors'] = DjangoDatabaseError
        return self._connection._check_disconnect(*args, **kwargs)
    def execute(self, statement, *args):
        """Execute an SQL statement."""
        return self._check_disconnect(self._cursor.execute, statement, *args)
    def fetchone(self):
        """Fetch one row from the result."""
        return self._check_disconnect(self._cursor.fetchone)
    def fetchall(self):
        """Fetch all rows from the result."""
        return self._check_disconnect(self._cursor.fetchall)
    def fetchmany(self, *args):
        """Fetch multiple rows from the result."""
        return self._check_disconnect(self._cursor.fetchmany, *args)
    @property
    def description(self):
        """Fetch the description of the result."""
        return self._check_disconnect(getattr, self._cursor, "description")
    @property
    def rowcount(self):
        """Fetch the number of rows in the result."""
        return self._check_disconnect(getattr, self._cursor, "rowcount")
    @property
    def query(self):
        """Fetch the last executed query."""
        return self._check_disconnect(getattr, self._cursor, "query")
# Module-level caches so each mixed-in wrapper class is built only once.
PostgresStormDatabaseWrapper = None
MySQLStormDatabaseWrapper = None
def DatabaseWrapper(*args, **kwargs):
    """Factory posing as Django's DatabaseWrapper class: inspects the Storm
    store URI, mixes Storm support into the matching Django backend, and
    returns an instance. Only postgres and mysql URIs are supported."""
    store_uri = get_store_uri(settings.DATABASE_NAME)
    # Create a DatabaseWrapper class that uses an underlying Storm
    # connection.  We don't support sqlite here because Django expects
    # a bunch of special setup on the connection that Storm doesn't
    # do.
    if store_uri.startswith('postgres:'):
        global PostgresStormDatabaseWrapper
        if PostgresStormDatabaseWrapper is None:
            # Imported lazily so the psycopg2 backend is only required
            # when a postgres URI is actually configured.
            from django.db.backends.postgresql_psycopg2.base import (
                DatabaseWrapper as PostgresDatabaseWrapper)
            class PostgresStormDatabaseWrapper(StormDatabaseWrapperMixin,
                                               PostgresDatabaseWrapper):
                pass
        DatabaseWrapper = PostgresStormDatabaseWrapper
    elif store_uri.startswith('mysql:'):
        global MySQLStormDatabaseWrapper
        if MySQLStormDatabaseWrapper is None:
            from django.db.backends.mysql.base import (
                DatabaseWrapper as MySQLDatabaseWrapper)
            class MySQLStormDatabaseWrapper(StormDatabaseWrapperMixin,
                                           MySQLDatabaseWrapper):
                pass
        DatabaseWrapper = MySQLStormDatabaseWrapper
    else:
        assert False, (
            "Unsupported database backend: %s" % store_uri)
    return DatabaseWrapper(*args, **kwargs)
| [
"marcosstevens2012@gmail.com"
] | marcosstevens2012@gmail.com |
80f401ed7057e8821313fd47a93c30a01355e742 | f75e70f81d25c9b561561c28bedd03eb4a28c6f4 | /catch-of-the-day/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/build/config.gypi | 930ba9e48099692769955f361e2d053be554517a | [
"MIT"
] | permissive | david82213/react-practice | 8298e8f8b1354b6ceb9cef58533fbac8275502fd | 6dffe9e82db343afe92245367958d9210521eb4a | refs/heads/master | 2021-01-11T17:53:17.137101 | 2017-03-01T23:32:20 | 2017-03-01T23:32:20 | 79,859,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 51,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "51.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "7.0",
"nodedir": "/Users/Daniel/.node-gyp/7.4.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/Daniel/Desktop/react-practice/catch-of-the-day/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v51-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/Users/Daniel/Desktop/react-practice/catch-of-the-day/node_modules/react-scripts/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v51-darwin-x64",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/zsh",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/Daniel/.npmrc",
"init_module": "/Users/Daniel/.npm-init.js",
"user": "",
"node_version": "7.4.0",
"save": "",
"editor": "vi",
"tag": "latest",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/Daniel/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/4.0.5 node/v7.4.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/4c/bs9426v13yvfjwxkd6qmyp0h0000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"david82213@hotmail.com"
] | david82213@hotmail.com |
d7a3fb5dd3e1c3722ef0445c7e9436013ba6190b | eb3303ac7984373c7cae844f66faaa3b9a2b3c5c | /Minimize_Easom_Function.py | 6e37a92bc71a6978054e21a9e1486b28ae6574a7 | [] | no_license | jefflai333/Cooperative-and-Adaptive-Algorithms | f4ed0568bd763f9fe1f8503fdbcafff9bb80544b | f4a43ab2c7410f356ee8b603c7c6edd1fb8e3adb | refs/heads/main | 2023-02-10T17:46:15.519568 | 2021-01-13T02:20:14 | 2021-01-13T02:20:14 | 329,169,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,233 | py | import math
from math import cos, exp, pi, sin
from random import uniform, random
def distance(point1):
    """Euclidean distance from point1 = (x, y) to the Easom optimum (pi, pi).

    Uses math.hypot, which is equivalent to the original
    sqrt(dx**2 + dy**2) but shorter and numerically more robust.
    """
    return math.hypot(point1[0] - pi, point1[1] - pi)
def easom(x):
    """Objective value at the 2-D point x = [x0, x1].

    NOTE(review): the cosine factors are applied to math.radians(x[i]),
    i.e. the coordinates are treated as degrees there, while the Gaussian
    factor is centred on (pi, pi) in raw units -- kept exactly as the
    original implementation.
    """
    cosine_part = cos(math.radians(x[0])) * cos(math.radians(x[1]))
    gaussian_part = exp(-pow(x[0] - pi, 2) - pow(x[1] - pi, 2))
    return -cosine_part * gaussian_part
def temperature_reduction(annealing_schedule, alpha, temperature):
    """Return the next temperature under the chosen cooling schedule.

    annealing_schedule -- "lin" (linear), "exp" (power), or "slow";
                          anything else falls back to linear cooling.
    alpha              -- cooling parameter (meaning depends on schedule)
    temperature        -- current temperature

    The original code had identical bodies for the "lin" branch and the
    default branch; they are consolidated here.
    """
    if annealing_schedule == "exp":
        return pow(temperature, alpha)
    if annealing_schedule == "slow":
        return temperature / (1 + alpha * temperature)
    # "lin" and any unrecognized schedule both cool linearly.
    return temperature - alpha
def cost_function(cost, curr_temp):
    """Metropolis acceptance probability for an uphill move.

    cost      -- non-negative objective increase of the candidate move
    curr_temp -- current temperature (scaled by 0.001, as in the original)
    """
    scaled_temperature = curr_temp * 0.001
    return exp(-cost / scaled_temperature)
def neighbouring_function(curr_solution, curr_temp):
    """Propose a random neighbour of curr_solution.

    The step length is 0.1 * exp(0.05 * curr_temp), so hotter states take
    larger steps.  A random direction is drawn (one uniform sample per
    attempt, same as the original) and resampled until the candidate lies
    strictly inside the (-100, 100) x (-100, 100) box.
    """
    magnify_curr_temp = 0.1
    exponent_curr_temp = 0.05
    step = magnify_curr_temp * exp(curr_temp * exponent_curr_temp)
    while True:
        angle = uniform(0, 2 * pi)
        candidate_x = curr_solution[0] + step * sin(angle)
        candidate_y = curr_solution[1] + step * cos(angle)
        if -100 < candidate_x < 100 and -100 < candidate_y < 100:
            return [candidate_x, candidate_y]
def simulated_annealing(init_temp, init_solution, alpha, iterations, annealing_schedule):
    """Minimise the easom objective with simulated annealing.

    init_temp          -- starting temperature
    init_solution      -- starting [x, y] point
    alpha              -- cooling parameter (interpreted by temperature_reduction)
    iterations         -- candidate moves evaluated per temperature step
    annealing_schedule -- "lin", "exp" or "slow"

    Returns the last accepted [x, y] solution, either when the temperature
    drops below the schedule's floor or after 10000 consecutive rejections.
    """
    curr_solution = init_solution
    curr_temp = init_temp
    count = 0  # consecutive rejected proposals
    max_attempts = 10000
    # Each schedule gets its own stopping temperature.  NOTE(review):
    # exponential cooling (t**alpha, alpha < 1) can only approach 1 from
    # above, hence its floor of 1.1 -- confirm intent.
    if annealing_schedule == "lin":
        min_temp = 0
    elif annealing_schedule == "exp":
        min_temp = 1.1
    elif annealing_schedule == "slow":
        min_temp = 0.01
        # set iterations to 1 for slow annealing schedule
        iterations = 1
    else:
        min_temp = 1.1
    while curr_temp > min_temp:
        for j in range(iterations):
            neighbouring_solution = neighbouring_function(curr_solution, curr_temp)
            cost_neighbouring_solution = easom(neighbouring_solution)
            cost_curr_solution = easom(curr_solution)
            cost = cost_neighbouring_solution - cost_curr_solution
            if cost < 0:
                # Downhill move: always accept.
                curr_solution = neighbouring_solution
                count = 0
            else:
                # Uphill move: accept with Metropolis probability.
                x = random()
                if x < cost_function(cost, curr_temp):
                    curr_solution = neighbouring_solution
                    count = 0
                else:
                    count += 1
                    # if no progress has been made after x attempts, then return the solution
                    if count > max_attempts:
                        return curr_solution
        curr_temp = temperature_reduction(annealing_schedule, alpha, curr_temp)
    return curr_solution
if __name__ == '__main__':
    random_init = []  # 10 random starting points
    random_temp = []  # 10 random initial temperatures in [80, 100)
    # (alpha, schedule) pairs: three settings each for linear, exponential
    # and slow cooling.
    annealing_schedules = [[1, "lin"], [2, "lin"], [4, "lin"], [0.95, "exp"], [0.85, "exp"], [0.75, "exp"], [0.05, "slow"], [0.15, "slow"], [0.25, "slow"]]
    answers = []
    for i in range(10):
        # add a random point in between -100 and 100 for x and y and do it 10 times
        random_init.append([uniform(-100, 100), uniform(-100, 100)])
        random_temp.append(uniform(80, 100))
    num_iterations = 1
    # Full grid: every temperature x every starting point x every schedule
    # (10 * 10 * 9 = 900 runs).
    for i in range(10):
        for j in range(10):
            for k in range(len(annealing_schedules)):
                print("Simulated Annealing Round " + str(num_iterations))
                curr_sol = simulated_annealing(random_temp[i], random_init[j], annealing_schedules[k][0], 1000, annealing_schedules[k][1])
                # calculate distance from optimal solution
                min_distance = distance(curr_sol)
                answers.append([random_temp[i], random_init[j], annealing_schedules[k], curr_sol, min_distance])
                num_iterations += 1
    # Sort descending by distance so the best run prints last.
    answers = sorted(answers, key=lambda x: x[4], reverse=True)
    # print all answers, the answer at the end of the array will be the best solution
    for i in range(len(answers)):
        print(answers[i])
    print(random_temp)
    print(random_init)
| [
"jefflai333@gmail.com"
] | jefflai333@gmail.com |
0f39d66ebd4ab22ab567a96706377d1a230fc119 | 0248d6ac543102f166e5cd1ec669ed782cd1c4ea | /parsival_movement_creator/movement_code_creator.py | ec36d703f9f4c675ccb9f2ad214212f23137c09f | [] | no_license | thar/parsival | 7139a4d269789bfc45cf9d8c3f8f209f4e6f3664 | 5a7a02902e1883250513c7f84a1b079ee77bfbbc | refs/heads/master | 2021-08-22T04:39:24.318965 | 2017-11-29T09:07:16 | 2017-11-29T09:07:16 | 111,677,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | from mako.template import Template
from mako.lookup import TemplateLookup
import os
class MovementList(object):
    """Collects robot movement definitions and renders them to C++ header
    files via the Mako templates MovementList.mako and Movement.mako.
    (Python 2 source: uses print statements.)"""

    def __init__(self):
        # Movements registered so far; each is a dict with "name" and "poses".
        self.movements = []
        script_path = os.path.dirname(os.path.realpath(__file__))
        mylookup = TemplateLookup(directories=[script_path])
        self.movement_list_template = mylookup.get_template('MovementList.mako')
        self.movement_template = mylookup.get_template('Movement.mako')
        script_path = os.path.dirname(os.path.realpath(__file__))
        # Generated headers are written into the sibling parsival_robot tree.
        self.parsival_code_path = os.path.join(script_path, '../parsival_robot')

    def add(self, movement):
        # A duplicate movement would regenerate the same header, so abort.
        if movement in self.movements:
            print 'The movement %s already exists. Correct it' % movement
            exit(1)
        self.movements.append(movement)

    def get_movements_names(self):
        # Names only, in registration order (consumed by MovementList.mako).
        return [movement['name'] for movement in self.movements]

    def save(self):
        # Render the aggregate MovementList.h plus one header per movement,
        # echoing each output path as it is written.
        with open(os.path.join(self.parsival_code_path, 'MovementList.h'), 'w') as movement_list_file:
            print os.path.join(self.parsival_code_path, 'MovementList.h')
            movement_list_file.write(self.movement_list_template.render(movements=self.get_movements_names()))
        for m in self.movements:
            with open(os.path.join(self.parsival_code_path, 'movements/%s.h') % m['name'], 'w') as movement_file:
                print os.path.join(self.parsival_code_path, 'movements/%s.h') % m['name']
                movement_file.write(self.movement_template.render(movement=m))
# Example movement definition: a sequence of poses.  Each pose carries a
# type (TimedPose / DirectPose / StaticTimePose), a time value, and the
# servo positions as a C array literal string consumed by Movement.mako.
m1 = {
    "name": "MovementTest1",
    "poses": [
        {
            "type": "TimedPose",
            "time": 100,
            "positions": "{85, 71, 152, 91, 112, 60, 100, 40, 80, 0, 0, 0, 00, 40, 80, 0, 0, 0, 112, 76, 145, 93, 92, 60}"
        },
        {
            "type": "TimedPose",
            "time": 100,
            "positions": "{90, 50, 157, 115, 112, 60, 90, 40, 70, 0, 0, 0, 105, 40, 70, 0, 0, 0, 113, 75, 145, 97, 93, 60}"
        },
        {
            "type": "TimedPose",
            "time": 100,
            "positions": "{95, 43, 169, 110, 110, 60, 80, 40, 70, 0, 0, 0, 105, 40, 70, 0, 0, 0, 113, 75, 145, 97, 93, 60}"
        },
        {
            "type": "DirectPose",
            "time": 0,
            "positions": "{100, 76, 145, 93, 100, 60, 100, 30, 80, 0, 0, 0, 100, 30, 80, 0, 0, 0, 100, 76, 145, 93, 100, 60}"
        },
        {
            "type": "StaticTimePose",
            "time": 100,
            "positions": ""
        }
    ]
}

# Register the example movement and generate the header files on import/run.
movements_list = MovementList()
movements_list.add(m1)
movements_list.save()
| [
"miguel.a.j82@gmail.com"
] | miguel.a.j82@gmail.com |
2ff30e5f5ab39f63fc4ea4cb41695c90136d8d02 | db2e5b9e21ca0e65293044829287746de37f6361 | /runwebxmpp.py | 13555e368248a3c95552aca9d4330212f64a6219 | [] | no_license | pvicente/KatooServer | c0965fdc84475bb0fe61568c2c51d515dd2ed781 | 9076cfe5796fd203467f840524fa38a3eda52f12 | refs/heads/master | 2022-08-12T11:20:54.297183 | 2019-06-04T12:09:03 | 2019-06-04T12:09:03 | 190,035,062 | 0 | 0 | null | 2022-07-06T20:09:59 | 2019-06-03T15:43:32 | Python | UTF-8 | Python | false | false | 2,126 | py | '''
Created on Aug 7, 2013
@author: pvicente
'''
from katoo import KatooApp, conf
from katoo.apns.api import KatooAPNSService
from katoo.rqtwisted import worker
from katoo.supervisor import HerokuUnidlingSupervisor, MetricsSupervisor, \
XMPPKeepAliveSupervisor
from katoo.utils.applog import getLoggerAdapter, getLogger
from katoo.utils.multiprocess import MultiProcess
from katoo.utils.time import sleep
from katoo.web import app
from socket import AF_INET
from twisted.internet import reactor
import os
# Twisted application object that all services below attach to.
application = KatooApp().app

if conf.ADOPTED_STREAM is None:
    # Parent process: open the listening socket ourselves and publish its
    # file descriptor so forked worker processes can adopt it.
    stream = reactor.listenTCP(port=conf.PORT, factory=app, backlog=conf.BACKLOG, interface=conf.LISTEN)
    os.environ['ADOPTED_STREAM']=str(stream.fileno())
    heroku_unidling_supervisor = HerokuUnidlingSupervisor()
    heroku_unidling_supervisor.setServiceParent(application)
    if conf.MULTIPROCESS>0:
        # Spawn child processes that share the listening socket fd.
        m=MultiProcess(__file__, number=conf.MULTIPROCESS, fds=[stream.fileno()])
        m.setServiceParent(application)
else:
    # Child process: reuse the already-open listening socket.
    reactor.adoptStreamPort(int(conf.ADOPTED_STREAM), AF_INET, app)

metrics_supervisor = MetricsSupervisor()
metrics_supervisor.setServiceParent(application)

xmpp_keepalive_supervisor = XMPPKeepAliveSupervisor()
xmpp_keepalive_supervisor.setServiceParent(application)

KatooAPNSService().service.setServiceParent(application)

if conf.REDIS_WORKERS > 0:
    # Configure the rqtwisted worker module, then attach a worker consuming
    # this machine's queue plus the shared login/push/relogin queues.
    worker.LOGGING_OK_JOBS = conf.LOGGING_OK_JOBS
    worker.SLEEP_CALL=sleep
    worker.MAX_RETRIES=conf.BACKEND_MAX_RETRIES
    worker.MAX_DELAY_TIME=conf.BACKEND_MAX_DELAY
    w=worker.Worker([conf.MACHINEID, conf.DIST_QUEUE_LOGIN, conf.DIST_QUEUE_PUSH, conf.DIST_QUEUE_RELOGIN], name=conf.MACHINEID,
                    loops=conf.REDIS_WORKERS, default_result_ttl=conf.DIST_DEFAULT_TTL, default_warmup=conf.WORKER_WARMUP,
                    default_enqueue_failed_jobs=conf.DIST_ENQUEUE_FAILED_JOBS,
                    default_perform_job_in_thread=conf.DIST_PERFORM_JOB_IN_THREAD, default_thread_pool_size=conf.DIST_THREAD_POOL)
    w.log = getLoggerAdapter(getLogger('WORKER', level='INFO'), id='WORKER')
    w.setServiceParent(application)
"pedrovfer@gmail.com"
] | pedrovfer@gmail.com |
8e0ec5c953585aa962691f0bce2d260c8e78caa8 | 11c036911cf893325199d9e9a91a11cd1dca7c90 | /all-paths-from-source-to-target/solution.py | 1fd9a15570b8173bfb5bd501c9d9b6d36d73959b | [] | no_license | arpiagar/HackerEarth | 34f817f69e94d88657c1d8991a55aca302cdc890 | 4a94f1b11a353ab6b2837a1ac77bfbd7c91f91d2 | refs/heads/master | 2021-07-18T14:23:05.124943 | 2021-02-09T21:58:12 | 2021-02-09T21:58:12 | 19,204,412 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | #https://leetcode.com/problems/all-paths-from-source-to-target/submissions/
class Solution:
    def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
        """Return every path from node 0 to node len(graph)-1 in a DAG.

        graph[i] lists the nodes reachable from node i.  Paths are emitted
        in depth-first (preorder) order, matching the adjacency order.
        """
        adj_map = {i: neighbours for i, neighbours in enumerate(graph)}
        out = []
        self.findpath(0, len(graph) - 1, [], adj_map, out)
        return out

    def findpath(self, current, end, temp, adj_map, out):
        """DFS helper: extend path `temp` with `current`, record a copy in
        `out` whenever the target is reached, then backtrack.

        Uses append/pop backtracking instead of copying the whole path at
        every recursive call (the original copied `temp` once per edge,
        an O(V) cost per step).
        """
        temp.append(current)
        if current == end:
            out.append(list(temp))
        for neighbour in adj_map[current]:
            self.findpath(neighbour, end, temp, adj_map, out)
        temp.pop()
| [
"arpit.agarwal@booking.com"
] | arpit.agarwal@booking.com |
f5851fc68cb29cd1cb3ca9369270cfd34a758271 | cf8f584ef11bfedea59b41476638520c9ae66c70 | /initial_project/selector_dt.py | 1e32ba75cccd5f1e0af331dd2bcaf77909e0d709 | [] | no_license | GustavoAC/machine_learning | 11f19d751dac0c05c761baf78b166916f6123b29 | 2afce182540363d05174b6c7b85cebd8c538160c | refs/heads/master | 2020-04-02T03:27:56.010486 | 2018-12-05T23:04:45 | 2018-12-05T23:04:45 | 153,967,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier
# Optical digits dataset: 64 pixel features per row, class label in col 64.
train_set = pd.read_csv('./dados/optdigits.tra', header = None)
test_set = pd.read_csv('./dados/optdigits.tes', header = None)

# Correlation of every column with column 64 (the class label).
base_corr = train_set.corr()[64]
# Feature-selection mask: 1 keeps a column whose |correlation| with the
# label exceeds 0.1.  NOTE(review): this also yields an entry for the
# label column itself; filter_set only indexes the first 64 flags, so
# the extra entry appears unused -- confirm.
filter = [1 if np.abs(corr) > 0.1 else 0 for corr in base_corr]
print "Filter:", filter
def filter_set(data, filter):
    """Project each row of `data` onto the columns whose mask flag is 1.

    data   -- iterable of rows (indexable sequences)
    filter -- 0/1 flags, one per column (must cover every row index)

    Returns a new list of filtered rows.  The manual append loop is
    replaced by an equivalent nested comprehension.  The parameter name
    `filter` shadows the builtin but is kept for interface compatibility.
    """
    return [[row[i] for i in range(len(row)) if filter[i] == 1]
            for row in data]
# Apply the correlation mask to train and test inputs; column 64 holds
# the class labels.
raw_data = filter_set(train_set.values[:, :-1], filter)
classes = train_set.values[:, 64]

test_data = filter_set(test_set.values[:, :-1], filter)
excepted_classes = test_set.values[:, 64]

# Train a decision tree on the selected features, then score it on the
# held-out test split.
decisionTree = DecisionTreeClassifier()
decisionTree = decisionTree.fit(raw_data, classes)
predict_results = decisionTree.predict(test_data)

# Count exact-match predictions.
correct = 0
wrong = 0
for row in range(len(predict_results)):
    if predict_results[row] == excepted_classes[row]:
        correct = correct + 1
    else:
        wrong = wrong + 1

print "Decision Tree results:"
print "Correct:", correct, ", incorrect:", wrong
| [
"guga.a.carvalho@gmail.com"
] | guga.a.carvalho@gmail.com |
2f529daab804ec0cba2e66c17c9fb00762f77f1f | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/MSSM_HiggsToMuMu/fragment_mhmodp_MA110_tb14_ggA.py | c41e893213325a4ae34cac52e2ac1bfb82a44725 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 16,290 | py | COM_ENERGY = 13000.0 # GeV
CROSS_SECTION = 1 # pb
PROCESS = 'HiggsBSM:gg2A3 = on'
SLHA_TABLE = """BLOCK SPINFO
1 FeynHiggs
2 2.12.0
2 built on ott 13, 2016
BLOCK MODSEL
1 0 # Model
2 1 # GridPts
3 0 # Content
4 0 # RPV
5 0 # CPV
6 0 # FV
BLOCK SMINPUTS
1 1.28952828E+02 # invAlfaMZ
2 1.16637000E-05 # GF
3 1.19000000E-01 # AlfasMZ
4 9.11876000E+01 # MZ
5 4.16000000E+00 # Mb
6 1.73200000E+02 # Mt
7 1.77703000E+00 # Mtau
11 5.10998902E-04 # Me
13 1.05658357E-01 # Mmu
21 6.00000000E-03 # Md
22 3.00000000E-03 # Mu
23 9.50000000E-02 # Ms
24 1.28600000E+00 # Mc
BLOCK MINPAR
3 1.40000000E+01 # TB
BLOCK EXTPAR
0 0.00000000E+00 # Q
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
11 1.51428571E+03 # At
12 1.51428571E+03 # Ab
13 1.51428571E+03 # Atau
23 2.00000000E+02 # MUE
25 1.40000000E+01 # TB
26 1.10000000E+02 # MA0
27 1.36249178E+02 # MHp
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK MASS
1000012 4.95867543E+02 # MSf(1,1,1)
1000011 5.02277652E+02 # MSf(1,2,1)
2000011 5.01829194E+02 # MSf(2,2,1)
1000002 1.49903513E+03 # MSf(1,3,1)
2000002 1.49959271E+03 # MSf(2,3,1)
1000001 1.50116775E+03 # MSf(1,4,1)
2000001 1.50020357E+03 # MSf(2,4,1)
1000014 4.95867543E+02 # MSf(1,1,2)
1000013 5.02423599E+02 # MSf(1,2,2)
2000013 5.01683097E+02 # MSf(2,2,2)
1000004 1.49903561E+03 # MSf(1,3,2)
2000004 1.49959333E+03 # MSf(2,3,2)
1000003 1.50117558E+03 # MSf(1,4,2)
2000003 1.50019575E+03 # MSf(2,4,2)
1000016 9.97940189E+02 # MSf(1,1,3)
1000015 9.99882507E+02 # MSf(1,2,3)
2000015 1.00217597E+03 # MSf(2,2,3)
1000006 8.76435511E+02 # MSf(1,3,3)
2000006 1.13478716E+03 # MSf(2,3,3)
1000005 9.98375125E+02 # MSf(1,4,3)
2000005 1.00369029E+03 # MSf(2,4,3)
25 1.07205976E+02 # Mh0
35 1.27765372E+02 # MHH
36 1.10000000E+02 # MA0
37 1.36717470E+02 # MHp
1000022 8.71700213E+01 # MNeu(1)
1000023 1.50504867E+02 # MNeu(2)
1000025 -2.09664642E+02 # MNeu(3)
1000035 2.67461405E+02 # MNeu(4)
1000024 1.46059389E+02 # MCha(1)
1000037 2.67571171E+02 # MCha(2)
1000021 1.50000000E+03 # MGl
BLOCK DMASS
0 1.73200000E+02 # Q
25 1.81082768E-01 # Delta Mh0
35 5.31343857E-01 # Delta MHH
36 0.00000000E+00 # Delta MA0
37 1.10863524E-01 # Delta MHp
BLOCK NMIX
1 1 9.26359748E-01 # ZNeu(1,1)
1 2 -1.27356386E-01 # ZNeu(1,2)
1 3 3.18049606E-01 # ZNeu(1,3)
1 4 -1.56468580E-01 # ZNeu(1,4)
2 1 -3.34170358E-01 # ZNeu(2,1)
2 2 -6.94276511E-01 # ZNeu(2,2)
2 3 5.02142193E-01 # ZNeu(2,3)
2 4 -3.92636619E-01 # ZNeu(2,4)
3 1 9.39740580E-02 # ZNeu(3,1)
3 2 -1.30790295E-01 # ZNeu(3,2)
3 3 -6.78628020E-01 # ZNeu(3,3)
3 4 -7.16607833E-01 # ZNeu(3,4)
4 1 -1.46139200E-01 # ZNeu(4,1)
4 2 6.96171225E-01 # ZNeu(4,2)
4 3 4.31464572E-01 # ZNeu(4,3)
4 4 -5.54821848E-01 # ZNeu(4,4)
BLOCK UMIX
1 1 -6.10491734E-01 # UCha(1,1)
1 2 7.92022628E-01 # UCha(1,2)
2 1 7.92022628E-01 # UCha(2,1)
2 2 6.10491734E-01 # UCha(2,2)
BLOCK VMIX
1 1 -7.92022628E-01 # VCha(1,1)
1 2 6.10491734E-01 # VCha(1,2)
2 1 6.10491734E-01 # VCha(2,1)
2 2 7.92022628E-01 # VCha(2,2)
BLOCK STAUMIX
1 1 6.71540180E-01 # USf(1,1)
1 2 7.40968142E-01 # USf(1,2)
2 1 7.40968142E-01 # USf(2,1)
2 2 -6.71540180E-01 # USf(2,2)
BLOCK STOPMIX
1 1 7.08243538E-01 # USf(1,1)
1 2 -7.05968194E-01 # USf(1,2)
2 1 7.05968194E-01 # USf(2,1)
2 2 7.08243538E-01 # USf(2,2)
BLOCK SBOTMIX
1 1 6.03353498E-01 # USf(1,1)
1 2 7.97473860E-01 # USf(1,2)
2 1 7.97473860E-01 # USf(2,1)
2 2 -6.03353498E-01 # USf(2,2)
BLOCK ALPHA
-1.20310802E+00 # Alpha
BLOCK DALPHA
4.00520479E-02 # Delta Alpha
BLOCK HMIX Q= -0.99900000E+03
1 2.00000000E+02 # MUE
2 1.40000000E+01 # TB
BLOCK MSOFT Q= 0.00000000E+00
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK AE Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51428571E+03 # Af(3,3)
BLOCK AU Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51428571E+03 # Af(3,3)
BLOCK AD Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.51428571E+03 # Af(3,3)
BLOCK YE Q= 0.00000000E+00
1 1 4.11949279E-05 # Yf(1,1)
2 2 8.51780382E-03 # Yf(2,2)
3 3 1.43257887E-01 # Yf(3,3)
BLOCK YU Q= 0.00000000E+00
1 1 1.72749580E-05 # Yf(1,1)
2 2 7.40519865E-03 # Yf(2,2)
3 3 9.97340906E-01 # Yf(3,3)
BLOCK YD Q= 0.00000000E+00
1 1 4.76870467E-04 # Yf(1,1)
2 2 7.55022224E-03 # Yf(2,2)
3 3 3.21042816E-01 # Yf(3,3)
BLOCK VCKMIN
1 2.25300000E-01 # lambda
2 8.08000000E-01 # A
3 1.32000000E-01 # rhobar
4 3.41000000E-01 # etabar
BLOCK MSL2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSL2(1,1)
2 2 2.50000000E+05 # MSL2(2,2)
3 3 1.00000000E+06 # MSL2(3,3)
BLOCK MSE2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSE2(1,1)
2 2 2.50000000E+05 # MSE2(2,2)
3 3 1.00000000E+06 # MSE2(3,3)
BLOCK MSQ2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSQ2(1,1)
2 2 2.25000000E+06 # MSQ2(2,2)
3 3 1.00000000E+06 # MSQ2(3,3)
BLOCK MSU2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSU2(1,1)
2 2 2.25000000E+06 # MSU2(2,2)
3 3 1.00000000E+06 # MSU2(3,3)
BLOCK MSD2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSD2(1,1)
2 2 2.25000000E+06 # MSD2(2,2)
3 3 1.00000000E+06 # MSD2(3,3)
BLOCK TE Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 2.16933371E+02 # Tf(3,3)
BLOCK TU Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 1.51025909E+03 # Tf(3,3)
BLOCK TD Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 4.86150550E+02 # Tf(3,3)
BLOCK SELMIX
1 1 9.99994952E-01 # UASf(1,1)
1 4 -3.17744688E-03 # UASf(1,4)
2 2 8.95991689E-01 # UASf(2,2)
2 5 -4.44070819E-01 # UASf(2,5)
3 3 6.71540180E-01 # UASf(3,3)
3 6 7.40968142E-01 # UASf(3,6)
4 1 3.17744688E-03 # UASf(4,1)
4 4 9.99994952E-01 # UASf(4,4)
5 2 4.44070819E-01 # UASf(5,2)
5 5 8.95991689E-01 # UASf(5,5)
6 3 7.40968142E-01 # UASf(6,3)
6 6 -6.71540180E-01 # UASf(6,6)
BLOCK USQMIX
1 1 1.00000000E+00 # UASf(1,1)
1 4 2.56324573E-05 # UASf(1,4)
2 2 9.99939654E-01 # UASf(2,2)
2 5 1.09857908E-02 # UASf(2,5)
3 3 7.08243538E-01 # UASf(3,3)
3 6 -7.05968194E-01 # UASf(3,6)
4 1 -2.56324573E-05 # UASf(4,1)
4 4 1.00000000E+00 # UASf(4,4)
5 2 -1.09857908E-02 # UASf(5,2)
5 5 9.99939654E-01 # UASf(5,5)
6 3 7.05968194E-01 # UASf(6,3)
6 6 7.08243538E-01 # UASf(6,6)
BLOCK DSQMIX
1 1 9.99983621E-01 # UASf(1,1)
1 4 -5.72350775E-03 # UASf(1,4)
2 2 9.95984067E-01 # UASf(2,2)
2 5 -8.95306504E-02 # UASf(2,5)
3 3 6.03353498E-01 # UASf(3,3)
3 6 7.97473860E-01 # UASf(3,6)
4 1 5.72350775E-03 # UASf(4,1)
4 4 9.99983621E-01 # UASf(4,4)
5 2 8.95306504E-02 # UASf(5,2)
5 5 9.95984067E-01 # UASf(5,5)
6 3 7.97473860E-01 # UASf(6,3)
6 6 -6.03353498E-01 # UASf(6,6)
BLOCK CVHMIX
1 1 6.42067901E-01 # UH(1,1)
1 2 7.66647775E-01 # UH(1,2)
1 3 0.00000000E+00 # UH(1,3)
2 1 7.66647775E-01 # UH(2,1)
2 2 -6.42067901E-01 # UH(2,2)
2 3 0.00000000E+00 # UH(2,3)
3 1 0.00000000E+00 # UH(3,1)
3 2 0.00000000E+00 # UH(3,2)
3 3 1.00000000E+00 # UH(3,3)
DECAY 25 3.41875866E-01 # Gamma(h0)
5.79386054E-06 2 22 22 # BR(h0 -> photon photon)
1.25675978E-08 2 22 23 # BR(h0 -> photon Z)
3.99769989E-06 2 23 23 # BR(h0 -> Z Z)
4.26627439E-05 2 -24 24 # BR(h0 -> W W)
8.72148397E-04 2 21 21 # BR(h0 -> gluon gluon)
8.70593592E-09 2 -11 11 # BR(h0 -> Electron electron)
3.87237488E-04 2 -13 13 # BR(h0 -> Muon muon)
1.10093995E-01 2 -15 15 # BR(h0 -> Tau tau)
2.62716636E-10 2 -2 2 # BR(h0 -> Up up)
3.63853694E-05 2 -4 4 # BR(h0 -> Charm charm)
1.40154792E-06 2 -1 1 # BR(h0 -> Down down)
3.51960360E-04 2 -3 3 # BR(h0 -> Strange strange)
8.88204396E-01 2 -5 5 # BR(h0 -> Bottom bottom)
DECAY 35 5.63170498E-02 # Gamma(HH)
1.16717839E-04 2 22 22 # BR(HH -> photon photon)
1.47840898E-04 2 22 23 # BR(HH -> photon Z)
2.18717644E-03 2 23 23 # BR(HH -> Z Z)
1.71409582E-02 2 -24 24 # BR(HH -> W W)
8.81648520E-03 2 21 21 # BR(HH -> gluon gluon)
8.87804614E-09 2 -11 11 # BR(HH -> Electron electron)
3.94913988E-04 2 -13 13 # BR(HH -> Muon muon)
1.12026296E-01 2 -15 15 # BR(HH -> Tau tau)
1.27899209E-08 2 -2 2 # BR(HH -> Up up)
1.77155175E-03 2 -4 4 # BR(HH -> Charm charm)
1.36915081E-06 2 -1 1 # BR(HH -> Down down)
3.43823171E-04 2 -3 3 # BR(HH -> Strange strange)
8.57047720E-01 2 -5 5 # BR(HH -> Bottom bottom)
5.12556106E-06 2 23 36 # BR(HH -> Z A0)
DECAY 36 3.93746610E-01 # Gamma(A0)
-2.86350604E-07 2 22 22 # BR(A0 -> photon photon)
-1.30583748E-08 2 22 23 # BR(A0 -> photon Z)
-1.14138999E-03 2 21 21 # BR(A0 -> gluon gluon)
-8.74774011E-09 2 -11 11 # BR(A0 -> Electron electron)
3.89101425E-04 2 -13 13 # BR(A0 -> Muon muon)
-1.10693557E-01 2 -15 15 # BR(A0 -> Tau tau)
-9.42242712E-12 2 -2 2 # BR(A0 -> Up up)
-1.37852009E-06 2 -4 4 # BR(A0 -> Charm charm)
-1.39918445E-06 2 -1 1 # BR(A0 -> Down down)
-3.51366970E-04 2 -3 3 # BR(A0 -> Strange strange)
-8.87421499E-01 2 -5 5 # BR(A0 -> Bottom bottom)
-3.74978114E-10 2 23 25 # BR(A0 -> Z h0)
DECAY 37 5.45817921E-02 # Gamma(Hp)
8.12535787E-08 2 -11 12 # BR(Hp -> Electron nu_e)
3.47383952E-03 2 -13 14 # BR(Hp -> Muon nu_mu)
9.82302680E-01 2 -15 16 # BR(Hp -> Tau nu_tau)
1.12502083E-05 2 -1 2 # BR(Hp -> Down up)
1.25444246E-04 2 -3 2 # BR(Hp -> Strange up)
7.52663746E-05 2 -5 2 # BR(Hp -> Bottom up)
9.55241727E-07 2 -1 4 # BR(Hp -> Down charm)
2.82513027E-03 2 -3 4 # BR(Hp -> Strange charm)
1.05381213E-02 2 -5 4 # BR(Hp -> Bottom charm)
3.20594076E-05 2 -5 6 # BR(Hp -> Bottom top)
3.49913322E-04 2 24 25 # BR(Hp -> W h0)
2.35989426E-07 2 24 35 # BR(Hp -> W HH)
2.65023366E-04 2 24 36 # BR(Hp -> W A0)
DECAY 6 1.38339571E+00 # Gamma(top)
9.91238679E-01 2 5 24 # BR(top -> bottom W)
8.76132109E-03 2 5 37 # BR(top -> bottom Hp)
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 generator filter configured with the MSSM spectrum above.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    # Pass the FeynHiggs-generated SLHA table (masses, mixings, BRs)
    # directly to Pythia8.
    SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
    comEnergy = cms.double(COM_ENERGY),
    crossSection = cms.untracked.double(CROSS_SECTION),
    maxEventsToPrint = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'Higgs:useBSM = on',
            PROCESS,
            'SLHA:allowUserOverride = off',
            'SLHA:minMassSM = 100.',
            'PhaseSpace:mHatMin = 56.0'
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters'
        )
    )
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"pietro.vischia@gmail.com"
] | pietro.vischia@gmail.com |
6416ddb447a7ceb30781bc3de024bd452393023a | 12c15c7ae150acaf8032f444db24440da2234b1a | /ArtificialIntelligence/DOCUMENTATION/Project2_Jimut/valueIterationAgents.py | 84b978c8e5c524d6638932a24ece1a152f975751 | [] | no_license | Jimut123/rkmveri-labs | 315ecd4607af72dd0851489e427a3ab09a8009ff | be19a453ea32460c454e3443798e3d8954fb084b | refs/heads/master | 2023-02-02T17:11:23.641187 | 2020-12-13T18:35:20 | 2020-12-13T18:35:20 | 201,784,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,614 | py | # valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import mdp, util
from learningAgents import ValueEstimationAgent
import collections
class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this.*

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
        Store the MDP and parameters, then run batch value iteration.

        Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # V(s); missing states default to 0
        self.runValueIteration()

    def runValueIteration(self):
        """Run `self.iterations` rounds of *batch* value iteration:
        every V_{k+1}(s) is computed from the frozen V_k table."""
        for _ in range(self.iterations):
            # Hard copy so all updates in this round read V_k, not V_{k+1}.
            updatedValues = self.values.copy()
            for state in self.mdp.getStates():
                if self.mdp.isTerminal(state):
                    continue
                actions = self.mdp.getPossibleActions(state)
                if not actions:
                    # No legal actions: leave the state's value unchanged
                    # (the original max([]) would have raised ValueError).
                    continue
                updatedValues[state] = max(
                    self.getQValue(state, action) for action in actions)
            self.values = updatedValues

    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
        Compute the Q-value of action in state from the
        value function stored in self.values:
        Q(s,a) = sum_s' T(s,a,s') * [R(s,a,s') + discount * V(s')]
        """
        qval = 0
        for s_prime, T in self.mdp.getTransitionStatesAndProbs(state, action):
            qval += T * (self.mdp.getReward(state, action, s_prime)
                         + self.discount * self.getValue(s_prime))
        return qval

    def computeActionFromValues(self, state):
        """
        The policy is the best action in the given state
        according to the values currently stored in self.values.

        Returns None when there are no legal actions, as at a terminal
        state (the original built a util.Counter and called argMax, which
        misbehaves on an empty Counter).  Ties are broken by action order.
        """
        actions = self.mdp.getPossibleActions(state)
        if not actions:
            return None
        return max(actions, key=lambda action: self.getQValue(state, action))

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
class AsynchronousValueIterationAgent(ValueIterationAgent):
    """
    * Please read learningAgents.py before reading this.*

    An AsynchronousValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs cyclic value iteration
    for a given number of iterations using the supplied discount factor.
    """

    def __init__(self, mdp, discount = 0.9, iterations = 1000):
        """
        Your cyclic value iteration agent should take an mdp on
        construction, run the indicated number of iterations,
        and then act according to the resulting policy. Each iteration
        updates the value of only one state, which cycles through
        the states list. If the chosen state is terminal, nothing
        happens in that iteration.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state)
            mdp.isTerminal(state)
        """
        ValueIterationAgent.__init__(self, mdp, discount, iterations)

    def runValueIteration(self):
        """Cyclic (in-place) value iteration: one state updated per iteration."""
        states = self.mdp.getStates()
        num_states = len(states)
        for step in range(self.iterations):
            # Walk the state list round-robin; values are updated in place.
            current = states[step % num_states]
            if self.mdp.isTerminal(current):
                continue  # terminal states are never updated
            q_values = [self.getQValue(current, action)
                        for action in self.mdp.getPossibleActions(current)]
            self.values[current] = max(q_values)
class PrioritizedSweepingValueIterationAgent(AsynchronousValueIterationAgent):
    """
    * Please read learningAgents.py before reading this.*

    A PrioritizedSweepingValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs prioritized sweeping value iteration
    for a given number of iterations using the supplied parameters.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100, theta = 1e-5):
        """
        Your prioritized sweeping value iteration agent should take an mdp on
        construction, run the indicated number of iterations,
        and then act according to the resulting policy.

        theta: a predecessor is only re-queued when its Bellman error
        exceeds this threshold.
        """
        self.theta = theta
        ValueIterationAgent.__init__(self, mdp, discount, iterations)
    def runValueIteration(self):
        """Prioritized sweeping: update states in decreasing order of Bellman error."""
        q = util.PriorityQueue()
        totalState = self.mdp.getStates()
        # Build the predecessor map: pred[s'] = set of states that can reach s'
        # via some action with non-zero transition probability.
        pred = {}
        for st in totalState:
            if self.mdp.isTerminal(st):
                continue
            for ac in self.mdp.getPossibleActions(st):
                for stt, _ in self.mdp.getTransitionStatesAndProbs(st, ac):
                    if stt in pred:
                        pred[stt].add(st)
                    else:
                        pred[stt] = {st}
        # Seed the queue with every non-terminal state, prioritised by its
        # current Bellman error. The priority is negated because
        # util.PriorityQueue pops the *minimum* priority first.
        for st in self.mdp.getStates():
            if self.mdp.isTerminal(st):
                continue
            diff = abs(self.values[st] - max([ self.computeQValueFromValues(st, action) for action in self.mdp.getPossibleActions(st) ]) )
            q.update(st, -diff)
        for i in range(self.iterations):
            if q.isEmpty():
                break  # converged: no state has a pending update
            st = q.pop()
            if not self.mdp.isTerminal(st):
                self.values[st] = max([self.computeQValueFromValues(st, action) for action in self.mdp.getPossibleActions(st)])
            # Re-examine every predecessor whose error now exceeds theta;
            # q.update only raises a state's priority if already queued.
            for p in pred[st]:
                if self.mdp.isTerminal(p):
                    continue
                difff = abs(self.values[p] - max([self.computeQValueFromValues(p, action) for action in self.mdp.getPossibleActions(p)]))
                if difff > self.theta:
                    q.update(p, -difff)
| [
"jimutbahanpal@yahoo.com"
] | jimutbahanpal@yahoo.com |
43caf1de2da7fac86bcfdb234a60cee558ff0e0a | 7d23056a789ded9ff2b9e14f9c57e59295cdfd6d | /samples/src/com/zoho/crm/api/initializer/init.py | e6722a40d14a4971a467de5fc2f5fd8877382104 | [] | no_license | L1nuxFNC/zohocrm-python-sdk | 2e825fe4d7c6fb1374a5747cbd1e39b0dd4b706d | bba7328de07b137d2cb6e2aac31b8f57e0803026 | refs/heads/master | 2023-06-05T09:17:35.549980 | 2021-05-13T12:45:59 | 2021-05-13T12:45:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | from zcrmsdk.src.com.zoho.crm.api.user_signature import UserSignature
from zcrmsdk.src.com.zoho.crm.api.dc import INDataCenter, USDataCenter, EUDataCenter, CNDataCenter, AUDataCenter
from zcrmsdk.src.com.zoho.api.authenticator.store import DBStore, FileStore
from zcrmsdk.src.com.zoho.api.logger import Logger
from zcrmsdk.src.com.zoho.crm.api.initializer import Initializer
from zcrmsdk.src.com.zoho.api.authenticator.oauth_token import OAuthToken, TokenType
class SDKInitializer(object):
    """Sample showing how to bootstrap the Zoho CRM SDK once, before any API call.

    NOTE(sample): every value below (emails, client credentials, file paths)
    is a placeholder that must be replaced with real configuration.
    """

    @staticmethod
    def initialize():
        """
        Create an instance of Logger Class that takes two parameters
        1 -> Level of the log messages to be logged. Can be configured by typing Logger.Levels "." and choose any level from the list displayed.
        2 -> Absolute file path, where messages need to be logged.
        """
        logger = Logger.get_instance(level=Logger.Levels.INFO, file_path="/Users/user_name/Documents/python_sdk_log.log")

        # Create an UserSignature instance that takes user Email as parameter
        user = UserSignature(email="abc@zoho.com")

        """
        Configure the environment
        which is of the pattern Domain.Environment
        Available Domains: USDataCenter, EUDataCenter, INDataCenter, CNDataCenter, AUDataCenter
        Available Environments: PRODUCTION(), DEVELOPER(), SANDBOX()
        """
        environment = USDataCenter.PRODUCTION()

        """
        Create a Token instance that takes the following parameters
        1 -> OAuth client id.
        2 -> OAuth client secret.
        3 -> OAuth redirect URL.
        4 -> REFRESH/GRANT token.
        5 -> token type.
        """
        # NOTE(sample): "TokenType.REFRESH / TokenType.GRANT" is documentation
        # shorthand meaning "choose exactly one of the two"; it is not valid
        # runtime code as written.
        token = OAuthToken(client_id="clientId", client_secret="clientSecret", redirect_url="redirectURL", token="REFRESH/ GRANT Token", token_type=TokenType.REFRESH / TokenType.GRANT)

        """
        Create an instance of TokenStore
        1 -> Absolute file path of the file to persist tokens
        """
        store = FileStore(file_path='/Users/username/Documents/python_sdk_tokens.txt')

        """
        Create an instance of TokenStore
        1 -> DataBase host name. Default value "localhost"
        2 -> DataBase name. Default value "zohooauth"
        3 -> DataBase user name. Default value "root"
        4 -> DataBase password. Default value ""
        5 -> DataBase port number. Default value "3306"
        """
        # NOTE(sample): the three `store = ...` assignments show alternative
        # persistence back-ends; only the last assignment takes effect.
        store = DBStore()

        store = DBStore(host='host_name', database_name='database_name', user_name='user_name', password='password',
                        port_number='port_number')

        """
        A Boolean value for the key (auto_refresh_fields) to allow or prevent auto-refreshing of the modules' fields in the background.
        if True - all the modules' fields will be auto-refreshed in the background whenever there is any change.
        if False - the fields will not be auto-refreshed in the background. The user can manually delete the file(s) or the specific module's fields using methods from ModuleFieldsHandler
        """
        auto_refresh_fields = True

        """
        The path containing the absolute directory path (in the key resource_path) to store user-specific files containing information about fields in modules.
        """
        resource_path = '/Users/user_name/Documents/python-app'

        """
        Call the static initialize method of Initializer class that takes the following arguments
        1 -> UserSignature instance
        2 -> Environment instance
        3 -> Token instance
        4 -> TokenStore instance
        5 -> Logger instance
        6 -> auto_refresh_fields
        7 -> resource_path
        """
        Initializer.initialize(user=user, environment=environment, token=token, store=store, logger=logger, auto_refresh_fields=auto_refresh_fields, resource_path=resource_path)

# Run the one-time SDK initialization at import time (sample behaviour).
SDKInitializer.initialize()
| [
"aswinkumar.m@zohocorp.com"
] | aswinkumar.m@zohocorp.com |
51c541e3289874f889b9d1f896171b0f5fc4aec5 | f67dd0bb606116e8200f8d9fa8239066b0f654d1 | /reports/urls.py | 4d2ff1e74f51b5b7b365f069b03474c5f9fde532 | [] | no_license | shireenrao/dj-visualizations | 4e2b470ae91da34a231e017c83a1328bd62ca0f4 | dea897c9eb9001e1db25876af8be757727b250fb | refs/heads/main | 2023-07-10T23:21:32.607136 | 2021-08-13T15:50:07 | 2021-08-13T15:50:07 | 394,478,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | from django.urls import path
from .views import ReportListView # noqa
from .views import UploadTemplateView # isort:skip
from .views import ( # isort:skip
ReportDetailView,
create_report_view,
csv_upload_view,
render_pdf_view,
)
# URL namespace used for reversing, e.g. reverse("reports:main").
app_name = "reports"

urlpatterns = [
    path("", ReportListView.as_view(), name="main"),                     # list all reports
    path("save/", create_report_view, name="create-report"),            # persist a new report
    path("upload/", csv_upload_view, name="upload"),                     # CSV ingestion endpoint
    path("from_file/", UploadTemplateView.as_view(), name="from-file"),  # file-upload page
    path("<pk>/", ReportDetailView.as_view(), name="detail"),            # single report detail
    path("<pk>/pdf/", render_pdf_view, name="pdf"),                      # report rendered as PDF
]
| [
"shireenrao@gmail.com"
] | shireenrao@gmail.com |
804de891ad05b152cc545105549807dc0a719ebe | 9821fd0d137205ca3433c8d7563533a6adaa616d | /scrap kraken/box_making.py | e2a9e7c3d2e471e68da70183ba30c1b125aef545 | [] | no_license | Kraji/cryptoscience | 885c021a137971037412e216268970568f059e6a | 840c3335732c238f5961f536e98cf14a03a5cf0c | refs/heads/master | 2020-03-17T13:33:09.215443 | 2018-05-16T14:40:24 | 2018-05-16T14:40:24 | 133,636,175 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | import numpy as np
import pandas as pd
# data_input: price, volume, time
def box_making(data_input, delta_t, initial_time, final_time):
    """Aggregate raw trades into fixed-width time boxes.

    Parameters
    ----------
    data_input : ndarray, shape (n, 3)
        Columns are price, volume, timestamp; rows are assumed to be in
        non-decreasing time order and within [initial_time, final_time].
    delta_t : float
        Width of each time box, in the same units as the timestamps.
    initial_time, final_time : float
        Bounds of the boxing interval; pass -1 to use the first/last
        timestamp of the data.

    Returns
    -------
    ndarray, shape (nb_bins, 5)
        Columns: volume-weighted average price (vwap), total volume,
        box start timestamp, volume-weighted price variance, and the row
        index of the first transaction that opened the box.  Boxes with
        (almost) no volume inherit the previous box's vwap and variance.
    """
    price = data_input[:, 0]
    volume = data_input[:, 1]
    time = data_input[:, 2]
    nb_data = data_input.shape[0]

    if initial_time == -1:
        initial_time = time[0]
    if final_time == -1:
        final_time = time[-1]

    nb_bins = int((final_time - initial_time) // delta_t + 1)

    vwap = np.zeros(nb_bins)
    weights = np.zeros(nb_bins)
    variance = np.zeros(nb_bins)
    nb_transaction = np.zeros(nb_bins)
    time_stamp = initial_time + delta_t * np.arange(nb_bins)

    # First pass: accumulate volume-weighted prices per box, remembering the
    # row index that opens each box reached consecutively from the previous.
    # BUG FIX: the original referenced `ind2` before assignment (NameError)
    # whenever the first in-range trade landed past box 0; track the previous
    # box index explicitly instead.
    prev_ind = None
    for j in range(nb_data):
        ind = int((time[j] - initial_time) // delta_t)
        if ind > -1:
            if prev_ind is None:
                if ind == 0:
                    nb_transaction[0] = j
            elif ind == prev_ind + 1:
                nb_transaction[ind] = j
            vwap[ind] += volume[j] * price[j]
            weights[ind] += volume[j]
            prev_ind = ind

    # Normalise; near-empty boxes inherit the previous box's vwap.
    for k in range(nb_bins):
        if weights[k] > 0.001:
            vwap[k] = vwap[k] / weights[k]
        else:
            vwap[k] = vwap[k - 1]

    # Second pass: volume-weighted variance of prices around each box's vwap.
    for j in range(nb_data):
        ind = int((time[j] - initial_time) // delta_t)
        if ind > -1:
            variance[ind] += volume[j] * (price[j] - vwap[ind]) ** 2
    for k in range(nb_bins):
        if weights[k] > 0.001:
            variance[k] = variance[k] / weights[k]
        else:
            variance[k] = variance[k - 1]

    data_compressed = np.zeros((nb_bins, 5))
    data_compressed[:, 0] = vwap
    data_compressed[:, 1] = weights
    data_compressed[:, 2] = time_stamp
    data_compressed[:, 3] = variance
    data_compressed[:, 4] = nb_transaction
    return data_compressed
def box_making_dataframe(data_input, delta_t, initial_time, final_time):
    """Box the raw (price, volume, time) trades and label the result as a DataFrame."""
    boxed = box_making(data_input, delta_t, initial_time, final_time)
    column_names = ['price', 'volume', 'time_stamp', 'variance', 'index_transaction']
    return pd.DataFrame(data=boxed[0:, 0:], columns=column_names)
"krajenbrink.alexandre@hotmail.fr"
] | krajenbrink.alexandre@hotmail.fr |
8d2aae07189384c340c69ca3809b9dc1deba887b | b1e56ddec89316ff9dadd76ac80c8edc03338cfe | /pyearthquake/orchestrator/explorer.py | 0ae531a31147780e78c796a790af6831fe5ac70d | [
"Apache-2.0"
] | permissive | saeki-masaki/earthquake | ac118bdd8f4fd87216c069ca6fc7eb19ce2c12ec | 783e5ddea9edafa5cda3413690d9e1e2d25f9004 | refs/heads/master | 2021-01-21T07:19:41.835103 | 2015-06-16T08:42:58 | 2015-06-16T08:42:58 | 37,563,761 | 0 | 0 | null | 2015-06-17T00:41:16 | 2015-06-17T00:41:15 | null | UTF-8 | Python | false | false | 13,215 | py | ## FIXME: remove unused imports
from abc import ABCMeta, abstractmethod
import colorama
import copy
import eventlet
from eventlet.greenthread import sleep
from eventlet.semaphore import Semaphore
from eventlet.timeout import Timeout
from eventlet.queue import *
from greenlet import GreenletExit
import networkx as nx
import six
import time
import random
import uuid
import json
from .. import LOG as _LOG
from ..entity import *
from ..util import *
from .state import *
from .watcher import *
from .digestible import *
LOG = _LOG.getChild('orchestrator.explorer')
class Graph(object):
    """
    Directed graph of explored execution states.

    Nodes are StateBase instances annotated with a visit count; edges carry
    the digestible that caused the transition.

    MOVE ME TO LIBEARTHQUAKE.SO
    """
    def __init__(self, initial_state):
        self._g = nx.DiGraph()
        self.visit_node(initial_state)

    def draw(self):
        """Render the graph on screen (debugging helper).

        BUG FIX: the original called matplotlib.peclot.show() without ever
        importing matplotlib, which raised NameError; import pyplot locally so
        matplotlib stays an optional, debug-only dependency.
        """
        import matplotlib.pyplot as plt
        nx.draw(self._g)
        plt.show()

    def get_leaf_nodes(self):
        """Return nodes with no outgoing edges (the unexpanded frontier)."""
        return [n for n, d in self._g.out_degree().items() if d == 0]

    def _print_nodes(self):
        # Debug dump of all nodes and the current frontier.
        leaf_nodes = self.get_leaf_nodes()
        LOG.debug('* Nodes (%d): %s', len(self._g.nodes()), [str(x) for x in self._g.nodes()])
        LOG.debug('* Leaf Nodes (%d): %s', len(leaf_nodes), [str(x) for x in leaf_nodes])

    def visit_node(self, state):
        """Record a visit to `state`, incrementing its per-node visit counter."""
        assert isinstance(state, StateBase)
        count = self._g.node[state]['count'] if self._g.has_node(state) else 0
        # LOG.debug('Visit state %s, count=%d->%d', state.to_short_str(), count, count+1)
        self._g.add_node(state, count=count + 1)

    def visit_edge(self, state, next_state, digestible):
        """Record the transition state --digestible--> next_state."""
        assert isinstance(state, StateBase)
        assert isinstance(next_state, StateBase)
        assert isinstance(digestible, DigestibleBase)
        self.visit_node(state)
        self.visit_node(next_state)
        self._g.add_edge(state, next_state, digestible=digestible)
        # self._print_nodes()
@six.add_metaclass(ABCMeta)
class ExplorerBase(object):
    """Abstract exploration strategy.

    Receives inspected events via send_event(), converts them into
    "digestibles" (event/action pairs) through the orchestrator's watchers,
    and repeatedly picks one digestible to fire (choose_digestible, abstract),
    driving the system through its state-transition graph.
    """
    def __init__(self):
        self.graph = None            # transition Graph, built in init_with_orchestrator()
        self._event_q = Queue()      # inbox of events pushed by send_event()
        self.oc = None               # orchestrator, set in init_with_orchestrator()
        self.state = None            # current execution state
        self.initial_state = None    # pristine state, copied on every reset
        self.visited_terminal_states = {} #key: state, value: count (TODO: MOVE TO LIBEARTHQUAKE.SO)
        self.time_slice = 0          # recv_events() timeout in msecs

    def init_with_orchestrator(self, oc, initial_state):
        """Bind to the orchestrator and seed the transition graph with the initial state."""
        self.oc = oc
        self.initial_state = initial_state
        self.state = self.initial_state.make_copy()
        LOG.debug(colorama.Back.BLUE +
                  'set initial state=%s' +
                  colorama.Style.RESET_ALL, self.state.to_short_str())
        self.graph = Graph(self.state)

    def send_event(self, event):
        """Enqueue an inspected event for the worker loop."""
        assert isinstance(event, EventBase)
        self._event_q.put(event)

    def recv_events(self, timeout_msecs):
        """Drain the event queue for up to `timeout_msecs`; return the events read."""
        events = []
        timeout = Timeout(timeout_msecs / 1000.0)
        try:
            while True:
                event = self._event_q.get()
                events.append(event)
        except Timeout:
            # expected: the eventlet Timeout is how this loop terminates
            pass
        except Exception as e:
            raise e
        finally:
            timeout.cancel()
        return events

    def _worker__print_events_and_digestibles(self, digestibles, new_events, new_digestibles):
        # Debug helper: dump carried-over digestibles, fresh events, and the
        # digestibles those events produced.
        if digestibles:
            LOG.debug('Before state %s, the following OLD %d digestibles had been yielded', self.state.to_short_str(), len(digestibles))
            for digestible in digestibles: LOG.debug('* %s', digestible)
        LOG.debug('In state %s, the following %d events happend', self.state.to_short_str(), len(new_events))
        for e in new_events:
            try: LOG.debug('* %f: %s', e.recv_timestamp, e.abstract_msg)
            except Exception: LOG.debug('* %s', e)
        LOG.debug('In state %s, the following NEW %d digestibles were yielded for the above %d events', self.state.to_short_str(), len(new_digestibles), len(new_events))
        for new_digestible in new_digestibles: LOG.debug('* %s', new_digestible)

    def worker(self):
        """Main loop: collect events, map them to digestibles via watchers, fire one per round."""
        digestibles = []
        while True:
            # Reset to the initial state whenever a terminal state is reached.
            if self.oc.termination_detector.is_terminal_state(self.state): self.state = self.on_terminal_state()
            new_events = self.recv_events(timeout_msecs=self.time_slice)
            if not new_events and not digestibles: continue
            new_digestibles = []
            for e in new_events:
                e_handled = False
                # First watcher that handles the event wins; unhandled events
                # fall through to the default watcher.
                for w in self.oc.watchers:
                    if w.handles(e): new_digestibles.extend(w.on_event(self.state, e)); e_handled = True
                if not e_handled: new_digestibles.extend(self.oc.default_watcher.on_event(self.state, e))
            self._worker__print_events_and_digestibles(digestibles, new_events, new_digestibles)
            digestibles.extend(new_digestibles)
            if not digestibles: LOG.warn('No DIGESTIBLE, THIS MIGHT CAUSE FALSE DEADLOCK, state=%s', self.state.to_short_str())
            next_state, digestibles = self.do_it(digestibles)
            if not digestibles: LOG.warn('No DIGESTIBLE, THIS MIGHT CAUSE FALSE DEADLOCK, next_state=%s', next_state.to_short_str())
            LOG.debug('transit from %s to %s', self.state.to_short_str(), next_state.to_short_str())
            self.state = next_state

    def do_it(self, digestibles):
        """
        select a digestible from digestibles and do it in the state.
        returns: (next_state, other_digestibles)
        FIXME: rename me!
        """
        if not digestibles: return self.state, []
        chosen_digestible = self.choose_digestible(digestibles)
        LOG.debug('Chosen digestible: %s', chosen_digestible)
        assert(any(digestible.event.uuid == chosen_digestible.event.uuid for digestible in digestibles))
        digestibles_len_before_remove = len(digestibles)
        digestibles.remove(chosen_digestible)
        assert len(digestibles) == digestibles_len_before_remove - 1, 'hash race?'
        other_digestibles = digestibles
        if chosen_digestible:
            next_state = self.do_transition(chosen_digestible)
        else:
            LOG.warn('No DIGESTIBLE chosen, THIS MIGHT CAUSE FALSE DEADLOCK, state=%s', self.state.to_short_str())
            next_state = self.state
        ## NOTE: as other digestibles are also enabled in the NEXT state, we return other digestibles here.
        ## the worker will handle other digestibles in the next round.
        return next_state, other_digestibles

    @abstractmethod
    def choose_digestible(self, digestibles):
        """Pick the digestible to fire next; subclasses define the policy."""
        pass

    def do_transition(self, digestible):
        """Fire the digestible's action and return the resulting next state."""
        assert isinstance(digestible, DigestibleBase)
        LOG.debug(colorama.Back.BLUE +
                  "Invoking the action:\n" +
                  "  action=%s\n" +
                  "  event=%s\n" +
                  "  state=%s\n" +
                  "  digestible=%s\n" +
                  colorama.Style.RESET_ALL,
                  digestible.action, digestible.event,
                  self.state.to_short_str(),
                  digestible)
        self.oc.call_action(digestible.action)
        next_state = self.state.make_copy()
        next_state.append_digestible(digestible)
        LOG.debug(colorama.Back.BLUE +
                  'State Transition: %s->%s' +
                  colorama.Style.RESET_ALL, self.state.to_short_str(), next_state.to_short_str())
        self.graph.visit_edge(self.state, next_state, digestible)
        ## NOTE: worker sets self.state to next_state
        return next_state

    def stat_on_terminal_state(self, past_all_states, past_visit_count, past_visit_count_sum):
        """
        Log frontier/revisit statistics for a terminal state.
        TODO: move to LIBEARTHQUAKE.SO
        """
        if past_visit_count == 0:
            banner = 'TERMINAL STATE(FRONTIER)'
            new_all_states = past_all_states + 1
        else:
            banner = 'TERMINAL STATE(REVISITED)'
            new_all_states = past_all_states
        LOG.info(colorama.Back.RED + '%s state %s, count=%d->%d, count_sum=%d->%d, all_states=%d->%d' + colorama.Style.RESET_ALL,
                 banner,
                 self.state.to_short_str(),
                 past_visit_count, past_visit_count + 1,
                 past_visit_count_sum, past_visit_count_sum + 1,
                 past_all_states, new_all_states)

    def regist_state_to_libeq(self):
        """Persist the current state's JSON form in the native earthquake library."""
        json_dict = self.state.to_jsondict()
        json_str = json.dumps(json_dict)
        short_str = self.state.to_short_str()
        rc = self.oc.libearthquake.EQRegistExecutionHistory_UnstableAPI(short_str, json_str)
        assert rc == 0

    def on_terminal_state(self):
        """Handle a terminal state: record it, notify watchers, and reset.

        Returns a fresh copy of the initial state for the worker to continue from.
        """
        LOG.debug(colorama.Back.RED +
                  '*** REACH TERMINAL STATE (%s) ***' +
                  colorama.Style.RESET_ALL, self.state.to_short_str())
        self.regist_state_to_libeq()

        ## make stat (TODO: move to LIBEARTHQUAKE.SO)
        all_states = len(self.visited_terminal_states)
        visit_count_sum = sum(self.visited_terminal_states.values())
        if self.state in self.visited_terminal_states:
            visit_count = self.visited_terminal_states[self.state]
        else:
            visit_count = 0
            self.visited_terminal_states[self.state] = 0
        self.stat_on_terminal_state(all_states, visit_count, visit_count_sum)
        self.visited_terminal_states[self.state] += 1

        ## notify termination to watchers
        for w in self.oc.watchers: w.on_terminal_state(self.state)

        ## Reset
        next_state = self.initial_state.make_copy()
        LOG.debug('Reset to %s', next_state.to_short_str())

        ## notify reset to watchers
        for w in self.oc.watchers: w.on_reset()
        return next_state
class DumbExplorer(ExplorerBase):
    """FIFO explorer: always fire the first digestible that was yielded."""

    def choose_digestible(self, digestibles):
        assert digestibles
        first = next(iter(digestibles))
        return first
class RandomExplorer(ExplorerBase):
    """Explorer that fires a uniformly random digestible each round."""

    def __init__(self, time_slice):
        super(RandomExplorer, self).__init__()
        self.time_slice = time_slice  # msecs

    def choose_digestible(self, digestibles):
        assert digestibles
        # uniform over the candidates, same as indexing with randint(0, n-1)
        return random.choice(digestibles)
class TimeBoundedRandomExplorer(RandomExplorer):
    """Random explorer that fast-tracks digestibles older than `time_bound`.

    If any pending digestible has waited longer than time_bound msecs, the
    oldest-listed one is fired immediately (with a zero time slice); otherwise
    a uniformly random digestible is chosen, as in RandomExplorer.
    """

    def __init__(self, time_slice, time_bound):
        super(TimeBoundedRandomExplorer, self).__init__(time_slice)
        self.saved_time_slice = time_slice
        self.time_bound = time_bound  # msecs

    def choose_digestible(self, digestibles):
        assert digestibles
        now = time.time()
        # BUG FIX: the original used len() on the result of filter(), which is
        # a lazy iterator on Python 3 and raises TypeError. A list
        # comprehension behaves identically on both Python 2 and 3.
        hurried = [d for d in digestibles
                   if (now - d.event.recv_timestamp) * 1000.0 > self.time_bound]
        if len(hurried) > 0:
            LOG.debug('Hurried to send the following %d digestibles, now=%s', len(hurried), now)
            LOG.debug(hurried)
            self.time_slice = 0  # drain ASAP while we are behind
            chosen_digestible = hurried[0]
        else:
            self.time_slice = self.saved_time_slice
            r = random.randint(0, len(digestibles) - 1)
            chosen_digestible = digestibles[r]
        return chosen_digestible
from networkx.algorithms.traversal.depth_first_search import dfs_tree
class GreedyExplorer(ExplorerBase):
    """Explorer that greedily prefers digestibles leading to less-explored subtrees."""
    def __init__(self, time_slice):
        super(GreedyExplorer, self).__init__()
        self.time_slice = time_slice
    def get_subtrees(self, digestibles):
        """Map each digestible to the already-explored DFS subtree behind it.

        Digestibles never taken from the current state (frontier) map to None.
        """
        d = {}
        frontier_digestibles = list(digestibles) # this is a shallow copy
        g = self.graph._g ## FIXME: should not access others' private vars
        assert self.state in g.edge
        for next_state in g.edge[self.state]:
            ## NOTE: even if digestible==edge_digestible, event_uuid can differ. Do NOT return edge_digestible.
            edge_digestible = g.edge[self.state][next_state]['digestible']
            digestibles_matched = [digestible for digestible in digestibles if digestible == edge_digestible]
            if not digestibles_matched: continue
            digestible = digestibles_matched[0]
            frontier_digestibles.remove(digestible)
            subtree = dfs_tree(g, next_state)
            d[digestible] = subtree
        # whatever remains has never been taken from this state
        for digestible in frontier_digestibles:
            d[digestible] = None
    return d
    def evaluate_digestible_subtree(self, digestible, subtree):
        """Score a digestible: smaller explored subtree => higher metric.

        Frontier digestibles (subtree None) get the maximum base score 1.0;
        a +/-10% random jitter breaks ties.
        """
        assert(digestible) # subtree may be None
        if not subtree:
            metric = 1.0
        else:
            subtree_nodes = subtree.number_of_nodes()
            metric = 1.0 / subtree_nodes if subtree_nodes > 0 else 1.0
        rand_factor = random.randint(9, 11) / 10.0
        metric *= rand_factor
        return metric
    def choose_digestible(self, digestibles):
        """Pick the digestible with the highest (jittered) exploration metric."""
        assert (digestibles)
        digestible_metrics = {}
        for digestible, subtree in self.get_subtrees(digestibles).items():
            metric = self.evaluate_digestible_subtree(digestible, subtree)
            LOG.debug('Evaluated: metric=%f, digestible=%s', metric, digestible)
            digestible_metrics[digestible] = metric
        chosen_digestible = max(digestible_metrics, key=digestible_metrics.get)
        return chosen_digestible
| [
"suda.akihiro@lab.ntt.co.jp"
] | suda.akihiro@lab.ntt.co.jp |
5ed3bb65b560b99cfec6e558df1a9d101f8a2b02 | f5e4622951047792c630d22d5df3c511b3032f10 | /longest_consec_rep/main.py | 677583b9ef2279dba646532f722087c0e4fd9514 | [] | no_license | luke-iseger91/codewars | 26d439a1c18d9c2b09dbfb0ef262b29026aef970 | aa6b3bbc3cd87a83fbc773a967497be2fd88795e | refs/heads/master | 2022-12-27T09:38:01.683125 | 2020-10-13T15:36:05 | 2020-10-13T15:36:05 | 296,088,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # https://www.codewars.com/kata/586d6cefbcc21eed7a001155
# For a given string s find the character c (or C) with longest consecutive repetition and return:
# (c,l)
# where l (or L) is the length of the repetition. If there are two or more characters with the same l return the first in order of appearance.
# For empty string return:
# ('', 0)
def longest_repetition(chars):
    """Return (char, length) of the longest consecutive run in `chars`.

    Ties go to the run that appears first; the empty string yields ('', 0).

    BUG FIX: the original compared chars[x] with chars[x+1] inside a
    try/except IndexError whose `continue` skipped the best-run update for
    the final index, so a single-character input (e.g. "a") incorrectly
    returned ('', 0) instead of ('a', 1). This version scans runs directly
    and needs no exception handling.
    """
    if not chars:
        return ('', 0)

    best_char = chars[0]
    best_len = 1
    current_char = chars[0]
    current_len = 1
    for ch in chars[1:]:
        if ch == current_char:
            current_len += 1
        else:
            current_char = ch
            current_len = 1
        # strict '>' keeps the first run on ties
        if current_len > best_len:
            best_char = current_char
            best_len = current_len
    return (best_char, best_len)
# Test cases
# print(find_longest_c("aaa"))
assert longest_repetition("aaa") == ("a", 3)
# print(longest_repetition("vogelbekdier"))
assert longest_repetition("vogelbekdier") == ("v", 1)
assert longest_repetition("") == ("", 0)
# print(longest_repetition("aaabbaaaa"))
assert longest_repetition("aaabbaaaa") == ("a", 4) | [
"luke.iseger@gmail.com"
] | luke.iseger@gmail.com |
b3023f6b673630ce35cb11a46be80d68f7356735 | 366559554e952e966df8bd4e5b53aec71a0bef97 | /tools/configen/tests/test_modules/expected/IncompatibleDataclassArg.py | a2c579064ae77c4369491cae041bf984154092e1 | [
"MIT"
] | permissive | roopeshvs/hydra | 501ac5cdb4d8e1bc59593df45afd5469ad5277d7 | 40ca449f266faa355a061caa0804e4248cfb1b57 | refs/heads/master | 2022-12-09T15:43:09.085401 | 2020-09-05T20:30:02 | 2020-09-05T20:30:02 | 293,149,009 | 1 | 0 | MIT | 2020-09-05T20:25:34 | 2020-09-05T20:25:33 | null | UTF-8 | Python | false | false | 496 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/master/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from dataclasses import dataclass
from typing import *
from omegaconf import MISSING
@dataclass
class IncompatibleDataclassArgConf:
    """Configen-generated structured config for tests.test_modules.IncompatibleDataclassArg."""
    _target_: str = "tests.test_modules.IncompatibleDataclassArg"
    num: int = MISSING  # required: must be supplied at composition time
    # [passthrough] incompat: Incompatible
| [
"omry@fb.com"
] | omry@fb.com |
72ba22a91588bf1a22d08ecacb33ec336da6f0d5 | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_vpn_server_configurations_operations.py | c68e670e8370ac82125f9b7a40a73e128c19f475 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 28,466 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsOperations(object):
"""VpnServerConfigurationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the shared pipeline client, service config and (de)serializers."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name,  # type: str
        vpn_server_configuration_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VpnServerConfiguration"
        """Retrieves the details of a VpnServerConfiguration.

        :param resource_group_name: The resource group name of the VpnServerConfiguration.
        :type resource_group_name: str
        :param vpn_server_configuration_name: The name of the VpnServerConfiguration being retrieved.
        :type vpn_server_configuration_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VpnServerConfiguration, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnServerConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Any status other than 200 is mapped to a typed error, falling back
        # to an ARM-formatted HttpResponseError.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        vpn_server_configuration_name,  # type: str
        vpn_server_configuration_parameters,  # type: "_models.VpnServerConfiguration"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.VpnServerConfiguration"
        """Initial PUT request backing the begin_create_or_update long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnServerConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(vpn_server_configuration_parameters, 'VpnServerConfiguration')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    vpn_server_configuration_name,  # type: str
    vpn_server_configuration_parameters,  # type: "_models.VpnServerConfiguration"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VpnServerConfiguration"]
    """Creates a VpnServerConfiguration resource if it doesn't exist else updates the existing
    VpnServerConfiguration.

    :param resource_group_name: The resource group name of the VpnServerConfiguration.
    :type resource_group_name: str
    :param vpn_server_configuration_name: The name of the VpnServerConfiguration being created or
     updated.
    :type vpn_server_configuration_name: str
    :param vpn_server_configuration_parameters: Parameters supplied to create or update
     VpnServerConfiguration.
    :type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VpnServerConfiguration or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnServerConfiguration"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial PUT that starts the long-running operation.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            vpn_server_configuration_name=vpn_server_configuration_name,
            vpn_server_configuration_parameters=vpn_server_configuration_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the model type (or a custom cls).
        deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
    }

    # Choose the polling strategy: ARM polling by default, no-op polling, or a caller-supplied one.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
def update_tags(
    self,
    resource_group_name,  # type: str
    vpn_server_configuration_name,  # type: str
    vpn_server_configuration_parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VpnServerConfiguration"
    """Updates VpnServerConfiguration tags.

    :param resource_group_name: The resource group name of the VpnServerConfiguration.
    :type resource_group_name: str
    :param vpn_server_configuration_name: The name of the VpnServerConfiguration being updated.
    :type vpn_server_configuration_name: str
    :param vpn_server_configuration_parameters: Parameters supplied to update
     VpnServerConfiguration tags.
    :type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VpnServerConfiguration, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnServerConfiguration"]
    # Map common HTTP failure codes onto azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the TagsObject body and issue the PATCH request synchronously.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_server_configuration_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    vpn_server_configuration_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE request that starts the delete LRO.

    Returns nothing; 200/202/204 are all accepted as a successful start.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map common HTTP failure codes onto azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    vpn_server_configuration_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes a VpnServerConfiguration.

    :param resource_group_name: The resource group name of the VpnServerConfiguration.
    :type resource_group_name: str
    :param vpn_server_configuration_name: The name of the VpnServerConfiguration being deleted.
    :type vpn_server_configuration_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved poller state: issue the initial DELETE that starts the long-running operation.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            vpn_server_configuration_name=vpn_server_configuration_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no payload; only invoke the custom cls hook if provided.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
    }

    # Note: delete polls via the Location header ('final-state-via': 'location'),
    # unlike create/update which uses the Azure-AsyncOperation header.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ListVpnServerConfigurationsResult"]
    """Lists all the vpnServerConfigurations in a resource group.

    :param resource_group_name: The resource group name of the VpnServerConfiguration.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnServerConfigurationsResult"]
    # Map common HTTP failure codes onto azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the first page uses the operation URL plus
        # query parameters; subsequent pages use the server-supplied next_link as-is.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, iterator of items).
        deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page synchronously, raising on any non-200 response.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations'}  # type: ignore
def list(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.ListVpnServerConfigurationsResult"]
    """Lists all the VpnServerConfigurations in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnServerConfigurationsResult"]
    # Map common HTTP failure codes onto azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-12-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the first page uses the operation URL plus
        # query parameters; subsequent pages use the server-supplied next_link as-is.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page, iterator of items).
        deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page synchronously, raising on any non-200 response.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnServerConfigurations'}  # type: ignore
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
2c2642dbd3465024601d3957e2907b92c2db2440 | 102a652a37a5e2c302ddebee6daa10a5d543a9fd | /store/migrations/0002_auto_20210706_1519.py | 17c0a79cfe357e3d640a7c8a365bbeb64feac8e3 | [] | no_license | HDD10/Disquaire | bce93ba5fa21c87cbd396b1764b9fe185b955b01 | 5e1c19b078ae13bc01b3e7717dee78a188b9bc5a | refs/heads/main | 2023-06-14T20:44:37.650810 | 2021-07-12T23:44:30 | 2021-07-12T23:44:30 | 385,329,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | # Generated by Django 3.2.5 on 2021-07-06 15:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: link albums to artists and bookings to albums.

    Adds a many-to-many ``Album.artists`` relation and a required one-to-one
    ``Booking.album`` relation.
    """

    dependencies = [
        ('store', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='album',
            name='artists',
            field=models.ManyToManyField(blank=True, related_name='albums', to='store.Artist'),
        ),
        migrations.AddField(
            model_name='booking',
            name='album',
            # default=0 only back-fills existing rows during the migration;
            # preserve_default=False drops the default from the model afterwards.
            field=models.OneToOneField(default=0, on_delete=django.db.models.deletion.CASCADE, to='store.album'),
            preserve_default=False,
        ),
    ]
| [
"hind.dekkan69@gmail.com"
] | hind.dekkan69@gmail.com |
6f81affffa39caa8ed5014aac07748adbc99c9c7 | fbea9961a5695e94d2532de3f1b70213131fe64b | /chap3/23.py | 8c409ef0fc5abfd3e0a9d340f0509599c7474b08 | [] | no_license | lethe2211/nlp100 | 094a528cf028060ce6294033f026cb740f0c853c | 547fdc83181a8e6811f946b5f56088c2e45f2ead | refs/heads/master | 2021-01-17T07:25:31.176173 | 2015-06-27T04:57:34 | 2015-06-27T04:57:34 | 35,734,702 | 19 | 3 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import itertools
import math
from collections import Counter, defaultdict
from jawiki_country_parser import JawikiCountryParser
import re
class Main(object):
def __init__(self):
pass
def solve(self):
'''
insert your code
'''
jcp = JawikiCountryParser()
article = jcp.get_article(u'イギリス')
p = re.compile(r'(={2,})\s*(.+?)\s*\1')
for match in p.findall(article):
print match[1], len(match[0])
return None
if __name__ == '__main__':
    # Script entry point: build the solver and run it.
    m = Main()
    m.solve()
| [
"lethe2211@gmail.com"
] | lethe2211@gmail.com |
72e25a0371a3c9db8c9caba9cd141fe4cfcf5b8f | 2298f68bf08f31a57637f0bbb3014d2ac6c1b602 | /BasicPython/First_codes/While.py | a9b32e5156e6b3bedc295a748f8a3c7426d40a31 | [] | no_license | manelmengibar/EDX_Python_Codes | 5d6c793d2a229f49053f96367b40d1f9d44c0640 | 00d1f1a42dc216e82a238c98cef5dc716a380a36 | refs/heads/master | 2022-09-03T14:42:30.645837 | 2020-05-28T16:21:38 | 2020-05-28T16:21:38 | 264,144,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py |
# Ask the user for a number, count down from it to 1, then announce liftoff.
# (Converted the intermediate string variable to a single int(input(...)) call
# and dropped the redundant trailing exit(): the script ends here anyway.)
n = int(input("write a number\n"))

# Countdown: print each value from n down to 1.
while n > 0:
    print(n)
    n = n - 1

# Once the counter reaches 0, announce the launch.
print("Blastoff!")
| [
"manelmengibar@gmail.com"
] | manelmengibar@gmail.com |
dd6398e4756bc1d70633d09a2c01a4591bf45d5a | dc99d95671170444cd7bf02e37da6ecda4a5f19e | /apps/courses/forms.py | 7c3607216ee4eed7c75516ebceebca0b96f618d5 | [] | no_license | bbright3493/python_real_war | 734d49ed9f7e1800d24dc754424a07b69d7d8c1f | 6e43bb7d814920222f3310bd6fd9f04cb3d5bbf1 | refs/heads/master | 2020-03-30T06:08:40.249185 | 2018-10-22T07:33:41 | 2018-10-22T07:33:41 | 150,841,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-5-7 上午10:05
# @Author : Ztsong
from django import forms
from .models import ProgramUpload
#
# class ProgramUploadForm(forms.ModelForm):
# class Meta:
# model = ProgramUpload
# fields = ['image']
class ProgramUploadForm(forms.Form):
    """Plain (non-model) form exposing a single required image upload field."""
    # The uploaded program screenshot/image; validated by Django as an image file.
    image = forms.ImageField()
| [
"44704708@qq.com"
] | 44704708@qq.com |
1a1db6be65849e768485d90464d7ed25487e7256 | 30ecc6a26f736814151f54aa15d0170ea94c8ea1 | /Main.py | 88b87017b568084aa5d690b3394bbc7a75fad508 | [] | no_license | skyyeat/Intonation-Hero | 1112017c988f47b3ee5a5739901799528a63a526 | 6868fb25924a4a2d1f66fd19f18b49e279505b5f | refs/heads/master | 2020-03-26T15:58:36.577854 | 2018-12-03T02:01:04 | 2018-12-03T02:01:04 | 145,074,876 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | from Mic import*
from Scene import*
class game (threading.Thread):
    """Main game thread: owns the pygame window and the scene loop.

    Reads the latest detected pitch from the module-level microphone thread
    (``thread2``, created in the ``__main__`` block) and feeds it to the
    active scene on every frame.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        # Most recent pitch (Hz) copied from the mic thread each frame.
        self.freq = 0.0

    def run(self):
        # One-time pygame window setup.
        # NOTE(review): DISPLAY/FLAGS/DEPTH come from the star-imported Scene/Mic
        # modules — presumably screen size and surface flags; confirm there.
        pygame.init()
        screen = pygame.display.set_mode(DISPLAY, FLAGS, DEPTH)
        Treble = pygame.image.load('Treble.png')
        Treble.convert()
        pygame.display.set_caption("Intonation Hero")
        pygame.display.set_icon(Treble)
        pygame.mouse.set_visible(True)
        timer = pygame.time.Clock()
        running = True
        manager = SceneMananger()
        while running:
            # Cap the loop at 40 frames per second.
            timer.tick(40)
            if pygame.event.get(QUIT):
                # Window closed: stop this loop and signal the mic thread to stop too.
                running = False
                thread2.running = False
                return
            # Pull the latest pitch estimate from the mic thread (global from __main__).
            self.freq = thread2.freq
            manager.scene.handle_events(pygame.event.get())
            manager.scene.update(self.freq)
            manager.scene.render(screen)
            pygame.display.flip()
if __name__ == "__main__":
    # Create new threads
    # thread1 drives the game loop; thread2 (referenced globally by game.run)
    # captures microphone input and publishes the detected frequency.
    thread1 = game()
    thread2 = mic()
    # Start new Threads
    thread1.start()
    thread2.start()
| [
"noreply@github.com"
] | skyyeat.noreply@github.com |
ec626fcce05227e389111ecdb0c34538cbe6e418 | 0090756d7a6eb6ab8389ad23b20e89cd68dbd0e4 | /배열insert.py | b895832beb6bb14ce872d0f5f7be1610194d477c | [] | no_license | ssh6189/2019.12.16 | 5c3093e03ac793d5f0a93cf99e78c6483fcee6d8 | c1021bb72b3fdc05d7f5e8ae350bbd6eee65b0d3 | refs/heads/master | 2020-12-13T19:19:04.558270 | 2020-01-17T08:47:04 | 2020-01-17T08:47:04 | 234,507,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import numpy as np
a = np.arange(1, 10).reshape(3,3)
print(a)
#a 배열을 일차원 ㅐ열로 변환하고 1번 index에 99추가
np.insert(a, 1, 999)
#a배열의 axis 0방향 1번 인덱스에 추가
#인덱스가 1인 row에 999가 추가됨
np.insert(a, 1, 999, axis=0)
#a배열의 axis 1방향 1번 인덱스에 추가
#index가 1인 column에 999가 추가됨
np.insert(a, 1, 999, axis=1)
| [
"ssh6189@naver.com"
] | ssh6189@naver.com |
2dd51e433b8b72c118cd5ab02697d1acc729de11 | c7979f4f6435fe8d0d07fff7a430da55e3592aed | /AGC023/A2.py | 1cc626666ce7eb2d39610edc6e55643e3ba653a1 | [] | no_license | banboooo044/AtCoder | cee87d40bb98abafde19017f4f4e2f984544b9f8 | 7541d521cf0da848ecb5eb10ffea7d75a44cbbb6 | refs/heads/master | 2020-04-14T11:35:24.977457 | 2019-09-17T03:20:27 | 2019-09-17T03:20:27 | 163,818,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | N = int(input())
A = list(map(int,input().split(" ")))
for i in range(N):
| [
"touhoucrisis7@gmail.com"
] | touhoucrisis7@gmail.com |
739903e438fef6c28869523b51671b9c9be9a949 | d7242e07cc79aa7a5e32a376de7e5a9d5f294cf7 | /0x03-python-data_structures/5-no_c.py | c34d3e30b9b1202e8354dcff72dd82e10e9e070c | [] | no_license | Jfprado11/holbertonschool-higher_level_programming | 87d921290b43202cc8507d898444df9455138d0f | f277a5ecd4dd349d94d9a26542cf9eca23f92fad | refs/heads/main | 2023-07-28T20:30:24.632598 | 2021-09-22T21:32:32 | 2021-09-22T21:32:32 | 361,762,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #!/usr/bin/python3
def no_c(my_string):
    """Return a copy of ``my_string`` with every 'c' and 'C' removed.

    The original removed matches one at a time with ``list.remove`` inside a
    counting loop (quadratic in the number of occurrences); a single linear
    filtering pass is simpler and equivalent.
    """
    return "".join(ch for ch in my_string if ch not in "cC")
| [
"jfpc11@misena.edu.co"
] | jfpc11@misena.edu.co |
6575dc9dbea13d6c4f884545b37ecae27ae02705 | 55f4dd7e511299a11c781c38e705e511425806d9 | /blog/migrations/0013_post_cost.py | 45c2fd5551e35daf997e9c4e4ee22a1742e03c53 | [] | no_license | MemexVUB/studenteats | 4025ebf3078826865850cfd3bb49a0f61683c406 | 10048d169bbbba6219a59a328e404064ddacdd77 | refs/heads/master | 2022-08-31T10:59:33.772886 | 2020-05-22T12:10:21 | 2020-05-22T12:10:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # Generated by Django 3.0 on 2019-12-16 23:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an integer ``cost`` field (default 0) to Post."""

    dependencies = [
        ('blog', '0012_post_rating'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='cost',
            field=models.IntegerField(default=0),
        ),
    ]
| [
"57940767+killerz224@users.noreply.github.com"
] | 57940767+killerz224@users.noreply.github.com |
71ce738fc5b5a9df210c2e09bd532eeef0ec0612 | 7ea62e8309509b6c47fe9e28bdd5fe3eedd15ce1 | /sitioSGAE/manage.py | 7522e456dcc4f9b9800a6f6a26b59c8a4f094d5d | [] | no_license | dard/SGAE | 4b934bc7966c4308f1b70fd82a69366a56e35733 | e05c8ef44b09eefe0a5f905eb2f03752ea3c192c | refs/heads/master | 2023-07-16T04:14:44.989262 | 2021-08-27T19:20:54 | 2021-08-27T19:20:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sitioSGAE.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the command-line arguments (runserver, migrate, ...) to Django.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"eloydardo@gmail.com"
] | eloydardo@gmail.com |
a1dcbd819e23f9d97da27c733de1252c3186f37b | b562b6bd4ac7c55d0a3f7e8b0c494bd27c0ed1ec | /training/__init__.py | d7bc3add80997d7b8385767c51cc624613e9a7da | [
"Apache-2.0"
] | permissive | ik-park/ml-agents-hyperparams | 7b1821fd1fe806744a4ec8a7d6340dcad4af6112 | 9cde275f4bec01f004a98b83322133d16fabc23f | refs/heads/master | 2022-02-26T20:44:46.176847 | 2019-08-04T11:57:54 | 2019-08-04T11:57:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | from .trainer_controller import *
from .training_data import *
from .training_event import *
from .runner import * | [
"hello@baske.com"
] | hello@baske.com |
e600e79c9fc504e5f183372ad97999fdea844994 | 1bc3b85020260fbcd8b6f3978a9f5e7cca9ff01c | /project4/inference.py | 864aac51dce62255c7dbf7ec60c4e30a6fe5a216 | [] | no_license | lavenderxy/Pacman_AI_Python | f655ddc12afbec31788e92c31500630d332cd256 | 980961b23ecf8f7c3a7a10a952cc4fd8650ec8b7 | refs/heads/master | 2020-04-06T05:45:38.849526 | 2015-01-19T05:50:30 | 2015-01-19T05:50:30 | 29,455,076 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,525 | py | # inference.py
# ------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import util
import random
import busters
import game
class InferenceModule:
    """
    An inference module tracks a belief distribution over a ghost's location.
    This is an abstract class, which you should not modify.

    Concrete subclasses implement initializeUniformly / observe / elapseTime /
    getBeliefDistribution (declared as no-ops at the bottom of this class).
    """

    ############################################
    # Useful methods for all inference modules #
    ############################################

    def __init__(self, ghostAgent):
        "Sets the ghost agent for later access"
        self.ghostAgent = ghostAgent
        self.index = ghostAgent.index

    def getJailPosition(self):
        # Each ghost has its own jail cell in the bottom-left, offset by its index.
        return (2 * self.ghostAgent.index - 1, 1)

    def getPositionDistribution(self, gameState):
        """
        Returns a distribution over successor positions of the ghost from the given gameState.
        You must first place the ghost in the gameState, using setGhostPosition below.
        """
        ghostPosition = gameState.getGhostPosition(self.index) # The position you set
        actionDist = self.ghostAgent.getDistribution(gameState)
        dist = util.Counter()
        # Translate the ghost's action distribution into a successor-position distribution.
        for action, prob in actionDist.items():
            successorPosition = game.Actions.getSuccessor(ghostPosition, action)
            dist[successorPosition] = prob
        return dist

    def setGhostPosition(self, gameState, ghostPosition):
        """
        Sets the position of the ghost for this inference module to the specified
        position in the supplied gameState.

        Note: mutates and returns the supplied gameState.
        """
        conf = game.Configuration(ghostPosition, game.Directions.STOP)
        gameState.data.agentStates[self.index] = game.AgentState(conf, False)
        return gameState

    def observeState(self, gameState):
        "Collects the relevant noisy distance observation and pass it along."
        distances = gameState.getNoisyGhostDistances()
        if len(distances) >= self.index: # Check for missing observations
            obs = distances[self.index - 1]
            self.observe(obs, gameState)

    def initialize(self, gameState):
        "Initializes beliefs to a uniform distribution over all positions."
        # The legal positions do not include the ghost prison cells in the bottom left.
        self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
        self.initializeUniformly(gameState)

    ######################################
    # Methods that need to be overridden #
    ######################################

    def initializeUniformly(self, gameState):
        "Sets the belief state to a uniform prior belief over all positions."
        pass

    def observe(self, observation, gameState):
        "Updates beliefs based on the given distance observation and gameState."
        pass

    def elapseTime(self, gameState):
        "Updates beliefs for a time step elapsing from a gameState."
        pass

    def getBeliefDistribution(self):
        """
        Returns the agent's current belief state, a distribution over
        ghost locations conditioned on all evidence so far.
        """
        pass
class ExactInference(InferenceModule):
    """Exact dynamic inference via forward-algorithm updates.

    Maintains ``self.beliefs``, a util.Counter mapping each legal ghost
    position to P(ghost at that position | all evidence so far).
    """

    def initializeUniformly(self, gameState):
        """Begin with a uniform distribution over ghost positions."""
        self.beliefs = util.Counter()
        for p in self.legalPositions:
            self.beliefs[p] = 1.0
        self.beliefs.normalize()

    def observe(self, observation, gameState):
        """Update beliefs from one noisy distance reading.

        observation is the noisy manhattan distance to the tracked ghost,
        or None if the ghost has been captured; in that case all belief
        mass is moved to the jail cell.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        allPossible = util.Counter()
        if noisyDistance is None:
            # A None reading is returned iff the ghost was captured, so it
            # is certainly in its prison cell.
            allPossible[self.getJailPosition()] = 1.0
        else:
            # Bayes update: P(p | evidence) is proportional to
            # P(noisyDistance | trueDistance(p)) * prior(p).
            for p in self.legalPositions:
                trueDistance = util.manhattanDistance(p, pacmanPosition)
                if emissionModel[trueDistance] > 0:
                    allPossible[p] = self.beliefs[p] * emissionModel[trueDistance]
        self.beliefs = allPossible
        self.beliefs.normalize()

    def elapseTime(self, gameState):
        """Advance beliefs one time step through the transition model.

        For each previous position, distributes its belief mass over the
        positions the ghost could move to from there.
        """
        beliefsnew = util.Counter()
        for oldPos in self.legalPositions:
            # Distribution over successor positions given the ghost was at
            # oldPos (setGhostPosition places the ghost there for the query).
            newPosDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, oldPos))
            prior = self.beliefs[oldPos]
            for newPos, prob in newPosDist.items():
                beliefsnew[newPos] += prob * prior
        self.beliefs = beliefsnew

    def getBeliefDistribution(self):
        """Return the current exact belief distribution."""
        return self.beliefs
class ParticleFilter(InferenceModule):
    """Particle filter for approximately tracking a single ghost.

    ``self.particles`` is a list of length ``self.numParticles``; each entry
    is one sampled ghost position.  The belief distribution is the
    normalized histogram of the particles.
    """

    def __init__(self, ghostAgent, numParticles=300):
        InferenceModule.__init__(self, ghostAgent)
        self.setNumParticles(numParticles)

    def setNumParticles(self, numParticles):
        self.numParticles = numParticles

    def initializeUniformly(self, gameState):
        """Spread self.numParticles particles evenly over the legal positions."""
        self.particles = []
        while len(self.particles) < self.numParticles:
            for p in self.legalPositions:
                if len(self.particles) < self.numParticles:
                    self.particles.append(p)

    def observe(self, observation, gameState):
        """Reweight and resample the particles from one noisy distance.

        Special cases:
          * observation is None iff the ghost was captured: every particle
            collapses onto the jail position.
          * If every particle receives zero weight, the particles are
            recreated from the uniform prior.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()

        if noisyDistance is None:
            # Captured ghost: keep the particle-list invariant by filling the
            # list with the jail position (the old code assigned a Counter).
            self.particles = [self.getJailPosition()] * self.numParticles
            return

        # Weight each particle by the likelihood of the observation given
        # the true distance from that particle to Pacman.
        weights = util.Counter()
        for p in self.particles:
            trueDistance = util.manhattanDistance(p, pacmanPosition)
            weights[p] += emissionModel[trueDistance]

        # BUG FIX: the previous code tested only the LAST particle's weight
        # (weight = allpossible[p] after the loop); test the total instead.
        if weights.totalCount() == 0:
            # All particles are inconsistent with the evidence; restart from
            # the uniform prior.
            self.initializeUniformly(gameState)
        else:
            self.particles = [util.sample(weights)
                              for _ in range(self.numParticles)]

    def elapseTime(self, gameState):
        """Advance every particle one step through the transition model."""
        newParticles = []
        for oldPos in self.particles:
            newPosDist = self.getPositionDistribution(
                self.setGhostPosition(gameState, oldPos))
            newParticles.append(util.sample(newPosDist))
        self.particles = newParticles

    def getBeliefDistribution(self):
        """Return the normalized histogram of the particle positions."""
        dist = util.Counter()
        for p in self.particles:
            dist[p] += 1
        dist.normalize()
        return dist
class MarginalInference(InferenceModule):
    """Wrapper around the shared JointInference module that exposes the
    marginal belief for a single ghost."""

    def initializeUniformly(self, gameState):
        """Set the belief state to an initial, prior value.

        Only the first inference module (index 1) initializes the shared
        joint filter; every module registers its own ghost agent.
        """
        if self.index == 1:
            jointInference.initialize(gameState, self.legalPositions)
        jointInference.addGhostAgent(self.ghostAgent)

    def observeState(self, gameState):
        """Update beliefs based on the given distance observation and gameState."""
        if self.index == 1:
            jointInference.observeState(gameState)

    def elapseTime(self, gameState):
        """Update beliefs for a time step elapsing from a gameState."""
        if self.index == 1:
            jointInference.elapseTime(gameState)

    def getBeliefDistribution(self):
        """Return this ghost's marginal by summing out the other ghosts."""
        joint = jointInference.getBeliefDistribution()
        marginal = util.Counter()
        ghostSlot = self.index - 1
        for positions, prob in joint.items():
            marginal[positions[ghostSlot]] += prob
        return marginal
class JointParticleFilter:
    """JointParticleFilter tracks a joint distribution over tuples of all
    ghost positions; each particle is a tuple with one position per ghost.

    NOTE(review): this class uses Python-2-only idioms (list-returning
    range() that is sliced and mutated below) - it will not run under
    Python 3 as written.
    """
    def __init__(self, numParticles=600):
        self.setNumParticles(numParticles)
    def setNumParticles(self, numParticles):
        # Number of joint particles maintained by the filter.
        self.numParticles = numParticles
    def initialize(self, gameState, legalPositions):
        "Stores information about the game, then initializes particles."
        self.numGhosts = gameState.getNumAgents() - 1
        self.ghostAgents = []
        self.legalPositions = legalPositions
        self.gameState = gameState
        self.initializeParticles()
    def initializeParticles(self):
        """Initialize particles randomly: each particle is a tuple of ghost
        positions sampled uniformly from the open (non-wall) cells above the
        bottom rows of the maze.  Uses self.numParticles particles."""
        particles = list()
        aParticle = list()
        self.particles = list()
        self.world = util.Counter()
        self.weighted = 0
        # NOTE(review): x below is built but never read afterwards - looks
        # like leftover debugging code.
        x = util.Counter()
        x[(5,5)] = 0
        x[(6,6)] = 0
        x[(7,7)] = 1
        width = len(self.gameState.getWalls()[0:])
        height = len(self.gameState.getWalls()[0])
        gridSpace = width * height #including the outer walls
        moveSpace = (width - 1) * (height - 3) #no outer walls, but has inner walls, row 1 is jail
        walls = self.gameState.getWalls()
        # self.world becomes a uniform distribution over open cells; rows
        # 0-2 (outer wall / jail rows) are excluded by the [3:] slice.
        for i in range(width):
            # range(height)[3:] slices a list, i.e. Python 2 semantics.
            for j in range(height)[3:]:
                if not walls[i][j]:
                    self.world[(i,j)] = 1
        self.world.normalize()
        # Draw numGhosts independent positions per particle.
        for i in range(self.numParticles):
            for j in range(self.numGhosts):
                aParticle.append(util.sample(self.world, None))
            particles.append(tuple(aParticle))
            aParticle = list()
        self.particles = particles
    def addGhostAgent(self, agent):
        "Each ghost agent is registered separately and stored (in case they are different)."
        self.ghostAgents.append(agent)
    def elapseTime(self, gameState):
        """Sample each particle's next joint state from the per-ghost
        transition models; dead ghosts are pinned to their jail cells."""
        weighting = self.world
        world = util.Counter()
        self.world = util.Counter()
        aParticle = list()
        particles = list()
        # jail is index-assigned below, which relies on Python 2's range()
        # returning a mutable list.
        jail = range(self.numGhosts)
        alive = gameState.getLivingGhosts()
        for i in range(self.numGhosts):
            jail[i] = self.getJailPosition(i)
        newParticles = []
        for oldParticle in self.particles:
            newParticle = list(oldParticle)
            prevGhostPositions = newParticle
            for i in range(self.numGhosts):
                # Distribution over ghost i's next positions given ALL
                # ghosts' previous positions (placed into gameState first).
                newPosDist = getPositionDistributionForGhost(setGhostPositions(gameState, prevGhostPositions), i, self.ghostAgents[i])
                # alive[0] is Pacman, hence the i+1 offset.
                if alive[i+1]:
                    aParticle.append(util.sample(newPosDist, None))
                else:
                    aParticle.append(jail[i])
            particles.append(tuple(aParticle))
            aParticle = list()
        newParticles = particles
        self.particles=newParticles
    def getJailPosition(self, i):
        # Jail cell for ghost i; presumably matches the busters layout
        # convention - TODO confirm against the game module.
        return (2 * i + 1, 1);
    def observeState(self, gameState):
        """Resample the particles using the likelihood of the noisy
        observations.  Captured ghosts (noisy distance of None) are forced
        into their jail cells; if every particle gets zero weight the
        particles are recreated from the prior."""
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        self.weight =util.Counter()
        alive = gameState.getLivingGhosts()
        # NOTE(review): numLivingGhost is computed but never used below.
        numLivingGhost = 0
        for i in alive:
            if i:
                numLivingGhost +=1
        jail = range(self.numGhosts)
        ghostjail=[]
        particles=list()
        allpossible = util.Counter()
        weight=0
        # Ghosts with a None reading have been captured.
        for i in range(self.numGhosts):
            if noisyDistances[i] == None:
                ghostjail.append(i)
        for particle in self.particles:
            for i in ghostjail:
                # NOTE(review): getParticleWithGhostInJail is not defined in
                # this class as shown - confirm it exists elsewhere.
                particle = self.getParticleWithGhostInJail(particle, i)
            # Joint likelihood: product of the per-ghost emission
            # probabilities for the non-jailed ghosts.
            noisypro = 1
            for i in range(self.numGhosts):
                if i not in ghostjail:
                    ghostpo = particle[i]
                    truedis = util.manhattanDistance(ghostpo, pacmanPosition)
                    noisypro *= emissionModels[i][truedis]
            allpossible[particle] += noisypro
        self.beliefs = allpossible
        # NOTE(review): this tests only the LAST particle's weight; a total
        # weight check (allpossible.totalCount() == 0) is probably intended.
        weight = allpossible[particle]
        if weight == 0:
            self.initializeParticles()
        else:
            self.beliefs.normalize()
            for i in range(len(self.particles)):
                newdisr = util.sample(self.beliefs)
                self.particles[i] = newdisr
    def getBeliefDistribution(self):
        # Normalized histogram over the joint particles.
        dist = util.Counter()
        for part in self.particles:
            dist[part] += 1
        dist.normalize()
        return dist
# One JointInference module is shared globally across instances of
# MarginalInference so all ghosts are tracked by a single joint filter.
jointInference = JointParticleFilter()
def getPositionDistributionForGhost(gameState, ghostIndex, agent):
    """Return the distribution over next positions for one ghost.

    ghostIndex is zero-based from the caller's point of view; agent index 0
    in the gameState is Pacman, so the ghost lives at index ghostIndex + 1.
    """
    currentPos = gameState.getGhostPosition(ghostIndex + 1)
    dist = util.Counter()
    for action, prob in agent.getDistribution(gameState).items():
        dist[game.Actions.getSuccessor(currentPos, action)] = prob
    return dist
def setGhostPositions(gameState, ghostPositions):
    """Place every ghost at the corresponding supplied position.

    Returns the (mutated) gameState for convenient chaining.
    """
    for agentIndex, pos in enumerate(ghostPositions, start=1):
        conf = game.Configuration(pos, game.Directions.STOP)
        gameState.data.agentStates[agentIndex] = game.AgentState(conf, False)
    return gameState
| [
"xinyu.yan1008@gmail.com"
] | xinyu.yan1008@gmail.com |
c65bde98d3e0942873c39c992230006faaff86b4 | 29e5717d4c18819eba7a835e68cec047cdf0361f | /pnnl/EconomizerRCxAgent/economizer/diagnostics/economizer_dx.py | 42d2deb0307cd52546cc8dd93de48adc679bd230 | [] | no_license | heliazandi/volttron-applications | 679969883bbe2c841986b0194d2cdf13c6a23d5a | 98f68a834e967b514af8d78fa86a5e7894d27ab5 | refs/heads/master | 2021-01-23T10:30:09.882360 | 2018-02-12T16:18:29 | 2018-02-12T16:18:29 | 93,069,041 | 1 | 1 | null | 2017-06-01T14:57:15 | 2017-06-01T14:57:15 | null | UTF-8 | Python | false | false | 15,942 | py | '''
Copyright (c) 2016, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an
agency of the United States Government. Neither the United States
Government nor the United States Department of Energy, nor Battelle,
nor any of their employees, nor any jurisdiction or organization
that has cooperated in the development of these materials, makes
any warranty, express or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed,
or represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or
service by trade name, trademark, manufacturer, or otherwise does
not necessarily constitute or imply its endorsement, recommendation,
or favoring by the United States Government or any agency thereof,
or Battelle Memorial Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the
United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
'''
import logging
from datetime import timedelta as td
# Diagnostic names used as row-key prefixes in the analysis tables.
ECON2 = 'Not Economizing When Unit Should Dx'
ECON3 = 'Economizing When Unit Should Not Dx'
# Column-name suffixes appended to a diagnostic name when writing results.
DX = '/diagnostic message'
EI = '/energy impact'
DATA = '/data/'
# Canonical point names for the monitored AHU/RTU data.
RAT = 'ReturnAirTemperature'
MAT = 'MixedAirTemperature'
OAT = 'OutsideAirTemperature'
OAD = 'OutsideDamperSignal'
CC = 'CoolCall'
FS = 'SupplyFanSpeed'
EC = 'EconomizerCondition'
ST = 'State'
def create_table_key(table_name, timestamp):
    """Build the analysis-table row key: "<table_name>&<MM-DD-YY HH:MM>"."""
    stamp = timestamp.strftime('%m-%d-%y %H:%M')
    return table_name + '&' + stamp
class EconCorrectlyOn(object):
    '''Air-side HVAC economizer diagnostic for AHU/RTU systems.

    EconCorrectlyOn uses metered data from a BAS or controller to diagnose
    if an AHU/RTU is economizing when it should.
    '''
    def __init__(self, oaf_economizing_threshold, open_damper_threshold,
                 data_window, no_required_data, cfm, eer, analysis):
        # Rolling buffers of measurements collected while the unit is both
        # cooling and in a condition favorable for economizing.
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.oad_values = []
        self.timestamp = []
        # Timestamps seen while the diagnostic pre-requisites were NOT met.
        self.output_no_run = []
        self.open_damper_threshold = float(open_damper_threshold)
        self.oaf_economizing_threshold = float(oaf_economizing_threshold)
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        self.cfm = cfm
        self.eer = eer
        self.table_key = None
        self.analysis = analysis
        # Maximum analysis window (minutes); beyond this the data set is
        # considered stale and an inconclusive result (code 13.2) is logged.
        self.max_dx_time = 60

        # Application result messages (typos in the original strings fixed:
        # "the the outdoor-air" and "brining").
        self.alg_result_messages = [
            'Conditions are favorable for economizing but the '
            'outdoor-air damper is frequently below 100% open.',
            'No problems detected.',
            'Conditions are favorable for economizing and the '
            'damper is 100% open but the OAF indicates the unit '
            'is not bringing in near 100% OA.'
        ]

    def econ_alg2(self, dx_result, cooling_call, oatemp, ratemp,
                  matemp, damper_signal, econ_condition, cur_time,
                  fan_sp):
        '''Check application prerequisites and assemble the data set.

        Returns (dx_result, dx_status) where dx_status is:
        0 - collecting data, 1 - diagnostic ran, 2 - inconclusive (stale
        data), 3 - prerequisites not met for this interval.
        '''
        if not cooling_call:
            dx_result.log('{}: The unit is not cooling, data corresponding to '
                          '{} will not be used.'.format(ECON2, cur_time), logging.DEBUG)
            self.output_no_run.append(cur_time)
            if (self.output_no_run[-1] - self.output_no_run[0]) >= td(minutes=(self.data_window)):
                dx_result.log('{}: unit is not cooling or economizing, keep collecting data.'.format(ECON2), logging.DEBUG)
                self.output_no_run = []
            dx_status = 3
            return dx_result, dx_status

        if not econ_condition:
            dx_result.log('{}: Conditions are not favorable for economizing, '
                          'data corresponding to {} will not be used.'
                          .format(ECON2, cur_time), logging.DEBUG)
            self.output_no_run.append(cur_time)
            if (self.output_no_run[-1] - self.output_no_run[0]) >= td(minutes=(self.data_window)):
                dx_result.log('{name}: the unit is not cooling or economizing, keep collecting data.'.format(name=ECON2), logging.DEBUG)
                self.output_no_run = []
            dx_status = 3
            return dx_result, dx_status

        self.oat_values.append(oatemp)
        self.mat_values.append(matemp)
        self.rat_values.append(ratemp)
        self.oad_values.append(damper_signal)
        dx_result.log('{}: Debugger - aggregate data'.format(ECON2))

        # Fan speed arrives as a percentage; missing speed defaults to 100%.
        fan_sp = fan_sp/100.0 if fan_sp is not None else 1.0
        self.fan_speed_values.append(fan_sp)
        # BUG FIX: cur_time was previously appended to self.timestamp twice
        # per call, which inflated len(self.timestamp) relative to
        # no_required_data; append exactly once.
        self.timestamp.append(cur_time)

        elapsed_time = (self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
        elapsed_time = elapsed_time if elapsed_time > 0 else 1.0

        if (elapsed_time >= self.data_window and
                len(self.timestamp) >= self.no_required_data):
            self.table_key = create_table_key(self.analysis, self.timestamp[-1])
            if elapsed_time > self.max_dx_time:
                # Data set spans too long an interval to trust - report
                # the inconclusive code and start over.
                dx_result.insert_table_row(self.table_key, {ECON2 + DX: 13.2})
                dx_result = self.clear_data(dx_result)
                dx_status = 2
                return dx_result, dx_status
            dx_result.log('{}: Debugger - running algorithm'.format(ECON2))
            dx_result = self.not_economizing_when_needed(dx_result, cur_time)
            dx_status = 1
            return dx_result, dx_status

        dx_result.log('{}: Debugger - collecting data'.format(ECON2))
        dx_status = 0
        return dx_result, dx_status

    def not_economizing_when_needed(self, dx_result, cur_time):
        '''If the detected problem(s) are consistent then generate a fault
        message and energy-impact estimate.
        '''
        def energy_impact_calculation(energy_impact):
            # Cooling energy wasted by not bringing in free outdoor air:
            # 1.08 * cfm * dT converts flow and temperature difference to
            # Btu/h; dividing by (1000 * EER) yields kW.
            energy_calc = \
                [1.08 * spd * self.cfm * (ma - oa) / (1000.0 * self.eer)
                 for ma, oa, spd in zip(self.mat_values, self.oat_values,
                                        self.fan_speed_values)
                 if (ma - oa) > 0 and color_code == 'RED']
            if energy_calc:
                dx_time = (len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
                energy_impact = (sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
                energy_impact = round(energy_impact, 2)
            return energy_impact

        # Outdoor-air fraction for each sample.
        # NOTE(review): assumes OAT != RAT for every sample; a zero
        # denominator would raise ZeroDivisionError - confirm upstream
        # screening guarantees enough OAT/RAT separation.
        oaf = [(m - r) / (o - r) for o, r, m in zip(self.oat_values, self.rat_values, self.mat_values)]
        avg_step = (self.timestamp[-1] - self.timestamp[0]).total_seconds()/60 if len(self.timestamp) > 1 else 1
        avg_oaf = sum(oaf) / len(oaf) * 100.0
        avg_damper_signal = sum(self.oad_values)/len(self.oad_values)
        energy_impact = 0.0
        if avg_damper_signal < self.open_damper_threshold:
            # Damper not commanded open while economizing is favorable.
            msg = '{}: {}'.format(ECON2, self.alg_result_messages[0])
            color_code = 'RED'
            dx_msg = 11.1
            energy_impact = energy_impact_calculation(energy_impact)
        else:
            if (100.0 - avg_oaf) <= self.oaf_economizing_threshold:
                msg = '{}: {}'.format(ECON2, self.alg_result_messages[1])
                color_code = 'GREEN'
                dx_msg = 10.0
            else:
                # Damper is open but the measured OAF is well below 100%.
                msg = '{}: {}'.format(ECON2, self.alg_result_messages[2])
                color_code = 'RED'
                dx_msg = 12.1
                energy_impact = energy_impact_calculation(energy_impact)
        dx_table = {
            ECON2 + DX: dx_msg,
            ECON2 + EI: energy_impact
        }
        dx_result.insert_table_row(self.table_key, dx_table)
        dx_result.log(msg, logging.INFO)
        dx_result = self.clear_data(dx_result)
        return dx_result

    def clear_data(self, dx_result):
        '''Reinitialize the per-window data buffers.'''
        self.oad_values = []
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.timestamp = []
        return dx_result
class EconCorrectlyOff(object):
    '''Air-side HVAC economizer diagnostic for AHU/RTU systems.

    EconCorrectlyOff uses metered data from a BAS or controller to diagnose
    if an AHU/RTU is economizing when it should not.
    '''
    def __init__(self, data_window, no_required_data, min_damper_sp,
                 excess_damper_threshold, cooling_enabled_threshold,
                 desired_oaf, cfm, eer, analysis):
        # Rolling buffers of measurements collected while conditions are
        # NOT favorable for economizing.
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.oad_values = []
        self.cool_call_values = []
        self.cfm = cfm
        self.eer = eer
        self.fan_speed_values = []
        self.timestamp = []
        # Application result messages
        self.alg_result_messages = \
            ['The outdoor-air damper should be at the minimum position but is '
             'significantly above that value.',
             'No problems detected.',
             'The diagnostic led to inconclusive results, could not '
             'verify the status of the economizer. ']
        # Maximum analysis window (minutes) before results are considered
        # stale (inconclusive code 23.2).
        self.max_dx_time = 60
        self.data_window = float(data_window)
        self.no_required_data = no_required_data
        self.min_damper_sp = float(min_damper_sp)
        self.excess_damper_threshold = float(excess_damper_threshold)
        self.cooling_enabled_threshold = float(cooling_enabled_threshold)
        self.desired_oaf = float(desired_oaf)
        self.analysis = analysis
    def econ_alg3(self, dx_result, oatemp, ratemp, matemp,
                  damper_signal, econ_condition, cur_time,
                  fan_sp, cooling_call):
        '''Check application prerequisites and assemble the data set.

        Returns (dx_result, dx_status): 0 - collecting data, 1 - diagnostic
        ran, 2 - inconclusive (stale data), 3 - prerequisites not met.

        NOTE(review): cooling_call is accepted but never used here -
        confirm whether a cooling screen was intended (cf. econ_alg2).
        '''
        if econ_condition:
            dx_result.log('{}: Conditions are favorable for economizing, '
                          'data corresponding to {} will not be used.'
                          .format(ECON3, cur_time), logging.DEBUG)
            dx_status = 3
            return dx_result, dx_status
        self.oad_values.append(damper_signal)
        self.oat_values.append(oatemp)
        self.mat_values.append(matemp)
        self.rat_values.append(ratemp)
        self.timestamp.append(cur_time)
        dx_result.log('{}: Debugger - aggregating data'.format(ECON3))
        # Fan speed arrives as a percentage; missing speed defaults to 100%.
        fan_sp = fan_sp/100.0 if fan_sp is not None else 1.0
        self.fan_speed_values.append(fan_sp)
        elapsed_time = (self.timestamp[-1] - self.timestamp[0]).total_seconds()/60
        elapsed_time = elapsed_time if elapsed_time > 0 else 1.0
        if elapsed_time >= self.data_window and len(self.timestamp) >= self.no_required_data:
            self.table_key = create_table_key(self.analysis, self.timestamp[-1])
            if elapsed_time > self.max_dx_time:
                # Data set spans too long an interval to trust.
                dx_result.insert_table_row(self.table_key, {ECON3 + DX: 23.2})
                dx_result = self.clear_data(dx_result)
                dx_status = 2
                return dx_result, dx_status
            dx_result.log('{}: Debugger - running algorithm'.format(ECON3))
            dx_result = self.economizing_when_not_needed(dx_result, cur_time)
            dx_status = 1
            return dx_result, dx_status
        dx_result.log('{}: Debugger - collecting data'.format(ECON3))
        dx_status = 0
        return dx_result, dx_status
    def economizing_when_not_needed(self, dx_result, cur_time):
        '''If the detected problem(s) are consistent then generate a
        fault message and energy-impact estimate.
        '''
        def energy_impact_calculation(energy_impact):
            # Excess cooling energy from over-ventilating: compare the mixed
            # air to the mix the desired OAF would have produced.
            energy_calc = [
                (1.08 * spd * self.cfm * (ma - (oa * desired_oaf +
                                                (ra * (1.0 - desired_oaf))))) /
                (1000.0 * self.eer)
                for ma, oa, ra, spd in zip(self.mat_values,
                                           self.oat_values,
                                           self.rat_values,
                                           self.fan_speed_values)
                if (ma - (oa * desired_oaf + (ra * (1.0 - desired_oaf)))) > 0]
            if energy_calc:
                dx_time = (len(energy_calc) - 1) * avg_step if len(energy_calc) > 1 else 1.0
                energy_impact = (sum(energy_calc) * 60.0) / (len(energy_calc) * dx_time)
                energy_impact = round(energy_impact, 2)
            return energy_impact
        avg_step = (self.timestamp[-1] - self.timestamp[0]).total_seconds()/60 if len(self.timestamp) > 1 else 1
        desired_oaf = self.desired_oaf / 100.0
        energy_impact = 0.0
        avg_damper = sum(self.oad_values) / len(self.oad_values)
        if (avg_damper - self.min_damper_sp) > self.excess_damper_threshold:
            # NOTE(review): the "msg = msg =" double assignment below is
            # redundant but harmless; color_code is also never read here.
            msg = msg = '{}: {}'.format(ECON3, self.alg_result_messages[0])
            color_code = 'RED'
            dx_msg = 21.1
            energy_impact = energy_impact_calculation(energy_impact)
        else:
            msg = msg = '{}: {}'.format(ECON3, self.alg_result_messages[1])
            color_code = 'GREEN'
            dx_msg = 20.0
        dx_table = {
            ECON3 + DX: dx_msg,
            ECON3 + EI: energy_impact
        }
        dx_result.insert_table_row(self.table_key, dx_table)
        dx_result.log(msg, logging.INFO)
        dx_result = self.clear_data(dx_result)
        return dx_result
    def clear_data(self, dx_result):
        '''Reinitialize the per-window data buffers.'''
        self.oad_values = []
        self.oat_values = []
        self.rat_values = []
        self.mat_values = []
        self.fan_speed_values = []
        self.timestamp = []
        return dx_result
| [
"robert.lutes@pnnl.gov"
] | robert.lutes@pnnl.gov |
11efba1ae3661c89193e56bf23e8ad669d33ab17 | f468922ed9484d2a8036cf2adedcb85b49473ee2 | /classifier_utils.py | 4fd7cd24b3955d06459ab9ebc0ab971996618efd | [
"Apache-2.0"
] | permissive | mizterbas/chemprotnew | 9e7102dc28b6ba13cfc389a50cc100a3943e6f39 | 78c6f913a91594349039c57d82d972d4dadddd86 | refs/heads/main | 2022-12-31T03:24:35.140745 | 2020-10-16T15:54:39 | 2020-10-16T15:54:39 | 304,673,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,867 | py | # coding=utf-8
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for GLUE classification tasks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import collections
import csv
import os
from albert import fine_tuning_utils
from albert import modeling
from albert import optimization
from albert import tokenization
import tensorflow.compat.v1 as tf
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import metrics as contrib_metrics
from tensorflow.contrib import tpu as contrib_tpu
# Added for BioALbert
from albert import tf_metrics
class InputExample(object):
    """One labeled (or unlabeled) sequence-classification example."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Create an example.

        Args:
          guid: Unique identifier for the example.
          text_a: Untokenized text of the first sequence (always required).
          text_b: Untokenized text of the second sequence; only set for
            sequence-pair tasks.
          label: Gold label; set for train/dev examples, absent for test.
        """
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
class PaddingInputExample(object):
    """Fake example so the num input examples is a multiple of the batch size.

    When running eval/predict on the TPU, we need to pad the number of examples
    to be a multiple of the batch size, because the TPU requires a fixed batch
    size. The alternative is to drop the last batch, which is bad because it
    means the entire output data won't be generated.

    We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
    """
class InputFeatures(object):
    """Holds the tensorized feature set of a single example."""

    def __init__(self,
                 input_ids,
                 input_mask,
                 segment_ids,
                 label_id,
                 guid=None,
                 example_id=None,
                 is_real_example=True):
        """Store the feature tensors for one example.

        is_real_example is False only for padding rows created from
        PaddingInputExample.
        """
        self.guid = guid
        self.example_id = example_id
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id
        self.is_real_example = is_real_example
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def __init__(self, use_spm, do_lower_case):
        super(DataProcessor, self).__init__()
        self.do_lower_case = do_lower_case
        self.use_spm = use_spm

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of rows."""
        with tf.gfile.Open(input_file, "r") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))

    def process_text(self, text):
        """Normalize raw text according to the tokenization mode in use."""
        if self.use_spm:
            return tokenization.preprocess_text(text, lower=self.do_lower_case)
        return tokenization.convert_to_unicode(text)
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "test_matched.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
# Note(mingdachen): We will rely on this guid for GLUE submission.
guid = self.process_text(line[0])
text_a = self.process_text(line[8])
text_b = self.process_text(line[9])
if set_type == "test":
label = "contradiction"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MisMnliProcessor(MnliProcessor):
"""Processor for the Mismatched MultiNLI data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "dev_mismatched.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MNLI", "test_mismatched.tsv")),
"test")
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "MRPC", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = self.process_text(line[3])
text_b = self.process_text(line[4])
if set_type == "test":
guid = line[0]
label = "0"
else:
label = self.process_text(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ChemProtProcessor(DataProcessor):
def get_train_examples(self, data_dir):
l1 = self._read_tsv(os.path.join(data_dir, "train.tsv"))
return self._create_examples(l1, "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [i.lower() for i in ["CPR:3", "CPR:4", "CPR:5", "CPR:6", "CPR:9", "false"]]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
#skip header
if i==0 and set_type=='test':
continue
guid = line[0]
text_a = self.process_text(line[1])
if set_type == "test":
label = self.get_labels()[-1]
else:
try:
label = self.process_text(line[2])
except IndexError:
logging.exception(line)
exit(1)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "CoLA", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
#guid = line[0]
guid = "%s-%s" % (set_type, i)
#text_a = self.process_text(line[1])
text_a = self.process_text(line[0])
label = "0"
else:
#text_a = self.process_text(line[3])
#label = self.process_text(line[1])
guid = "%s-%s" % (set_type, i)
text_a = self.process_text(line[0])
label = self.process_text(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "SST-2", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
#return ["0", "1"]
return ["b", "i","o","x"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if set_type != "test":
guid = "%s-%s" % (set_type, i)
text_a = self.process_text(line[0])
label = self.process_text(line[1])
else:
#guid = self.process_text(line[0])
#guid = "%s-%s" % (set_type, line[0])
guid = "%s-%s" % (set_type, i)
#text_a = self.process_text(line[1])
text_a = self.process_text(line[0])
label = "o"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "STS-B", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[7])
text_b = self.process_text(line[8])
if set_type != "test":
label = float(line[-1])
else:
label = 0
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QQP", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = line[0]
# guid = "%s-%s" % (set_type, line[0])
if set_type != "test":
try:
text_a = self.process_text(line[3])
text_b = self.process_text(line[4])
label = self.process_text(line[5])
except IndexError:
continue
else:
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "dev.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "QNLI", "test.tsv")),
"test_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test_matched":
label = "entailment"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "RTE", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test":
label = "entailment"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "WNLI", "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = self.process_text(line[0])
# guid = "%s-%s" % (set_type, line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type != "test":
label = self.process_text(line[-1])
else:
label = "0"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class AXProcessor(DataProcessor):
"""Processor for the AX data set (GLUE version)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "diagnostic", "diagnostic.tsv")),
"test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
# Note(mingdachen): We will rely on this guid for GLUE submission.
guid = self.process_text(line[0])
text_a = self.process_text(line[1])
text_b = self.process_text(line[2])
if set_type == "test":
label = "contradiction"
else:
label = self.process_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer, task_name):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
if task_name != "sts-b":
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in ALBERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if task_name != "sts-b":
label_id = label_map[example.label]
else:
label_id = example.label
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file, task_name):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer, task_name)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_float_feature([feature.label_id])\
if task_name == "sts-b" else create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder, task_name, use_tpu, bsz,
multiple=1):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
labeltype = tf.float32 if task_name == "sts-b" else tf.int64
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length * multiple], tf.int64),
"label_ids": tf.FixedLenFeature([], labeltype),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
if use_tpu:
batch_size = params["batch_size"]
else:
batch_size = bsz
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
contrib_data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings, task_name,
hub_module):
"""Creates a classification model."""
(output_layer, _) = fine_tuning_utils.create_albert(
albert_config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
use_einsum=True,
hub_module=hub_module)
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
if task_name != "sts-b":
probabilities = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
else:
probabilities = logits
logits = tf.squeeze(logits, [-1])
predictions = logits
per_example_loss = tf.square(logits - labels)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, probabilities, logits, predictions)
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, task_name, hub_module=None,
optimizer="adamw"):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, probabilities, logits, predictions) = \
create_model(albert_config, is_training, input_ids, input_mask,
segment_ids, label_ids, num_labels, use_one_hot_embeddings,
task_name, hub_module)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
use_tpu, optimizer)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
if task_name not in ["sts-b", "cola", "sst-2"]:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
elif task_name == "sts-b":
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Pearson correlations for STS-B."""
# Display labels and predictions
concat1 = contrib_metrics.streaming_concat(logits)
concat2 = contrib_metrics.streaming_concat(label_ids)
# Compute Pearson correlation
pearson = contrib_metrics.streaming_pearson_correlation(
logits, label_ids, weights=is_real_example)
# Compute MSE
# mse = tf.metrics.mean(per_example_loss)
mse = tf.metrics.mean_squared_error(
label_ids, logits, weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
return {"pred": concat1, "label_ids": concat2, "pearson": pearson,
"MSE": mse, "eval_loss": loss,}
elif task_name in ["cola", "sst-2"]:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Compute Matthew's correlations for COLA."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
# https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tp, tp_op = tf.metrics.true_positives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
tn, tn_op = tf.metrics.true_negatives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
fp, fp_op = tf.metrics.false_positives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
fn, fn_op = tf.metrics.false_negatives(
labels=label_ids, predictions=predictions,
weights=is_real_example)
# computing precision, recall and f1 score
# Added for BioAlbert
precision = tf_metrics.precision(label_ids,predictions,num_labels,[1,2],average="micro")
recall = tf_metrics.recall(label_ids,predictions,num_labels,[1,2],average="micro")
f1 = tf_metrics.f1(label_ids,predictions,num_labels,[1,2],average="micro")
# Compute Matthew's correlation
mcc = tf.div_no_nan(
tp * tn - fp * fn,
tf.pow((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn), 0.5))
# Compute accuracy
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions,
weights=is_real_example)
loss = tf.metrics.mean(
values=per_example_loss,
weights=is_real_example)
return {"matthew_corr": (mcc, tf.group(tp_op, tn_op, fp_op, fn_op)),
"accuracy": accuracy, "eval_loss": loss,
"precision":precision,
"recall":recall,
"f1_score": f1,}
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={
"probabilities": probabilities,
"predictions": predictions
},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, task_name):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer, task_name)
features.append(feature)
return features
| [
"noreply@github.com"
] | mizterbas.noreply@github.com |
44685fe6f9efa4068a850e9767859e5f04694261 | 1564d12d61f669ce9f772f3ef7563167f7fe13bf | /codeforces/606/B.MakeThemOdd.py | 9c2d937efa9b206dced25914e93f323bacc2266a | [] | no_license | sakshamk6999/codingPractice | 73ec4873defb0f0d2e47173150a589ee12e5e0a1 | f727aac6d87448b19fc9d48660dc6978fe5edc14 | refs/heads/master | 2020-12-01T20:22:36.299535 | 2020-02-04T05:55:53 | 2020-02-04T05:55:53 | 230,757,937 | 0 | 0 | null | 2020-02-12T20:38:12 | 2019-12-29T14:00:22 | Python | UTF-8 | Python | false | false | 849 | py | from collections import defaultdict
import heapq
for _ in range(int(input())):
n = int(input())
rec = {}
rec = defaultdict(lambda : 0, rec)
a = sorted(list(map(int, input().split())))
e = []
l = 0
for i in a:
if i % 2 == 0 and rec[-1 * i] == 0:
e.append(-1 * i)
rec[-1 * i] = 1
l += 1
heapq.heapify(e)
ans = 0
while l > 0:
# print(e)
ans += 1
temp = heapq.heappop(e)
# print("temp", -1 * temp)
rec[temp] = 0
temp = (-1 * temp) // 2
if temp % 2 == 0:
if rec[-1 * temp] == 1:
# print("temp is in", - 1 * temp)
l -= 1
else:
rec[-1 * temp] = 1
heapq.heappush(e, -1 * temp)
else:
l -= 1
print(ans) | [
"sakshamkhatwani@gmail.com"
] | sakshamkhatwani@gmail.com |
2e3f4ff25ef42365bc0e0fb650aeacf29131316a | c21e238ba538b6d6084eebe8482c6a12355c3cac | /PycharmProjects/python-study/for/for-if-test.py | 61167fe50b470383bef0af5718a9327c7a2e92c0 | [] | no_license | jiyiyun/programing | 6ac0ad1d3050767e619d07ce6aea1f8f9305a503 | a85ae0ef1808c355df0d0c45472f4116b9db84ff | refs/heads/master | 2021-01-14T08:04:07.453615 | 2019-01-02T07:35:51 | 2019-01-02T07:35:51 | 81,915,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | #!/usr/bin/env python
a = int(raw_input("please the number of people\n"))
b=range(a+1)
while len(b) > 3:
b = b[::3]
if len(b)<3:
print b
break | [
"jiyiyun@gmail.com"
] | jiyiyun@gmail.com |
ce239e64d17fae9f37a851aa52a0294d745dd66e | 12b9f93bbb88cc7ba7be8d1a350d38e94339b5d4 | /Python-Chapter 7-quadratic75.py | 80d0b52fe2910b2a7b102a03910642c56c5a33d0 | [] | no_license | Rmahesh7/Python-Beginner-practice | 72280e37feb1a6e29baf4d787fba9b6c6d43f712 | d6082ec192c553aa8d4b2a76105a8632221eff8e | refs/heads/main | 2023-08-28T16:05:07.085048 | 2021-10-29T12:48:31 | 2021-10-29T12:48:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | # quadratic75.py
import math # Makes the math library available.
def quadratic75():
print ("This program finds the real solutions to a quadractic.\n")
try:
a = float(input("Enter coefficient a: "))
b = float(input("Enter coefficient b: "))
c = float(input("Enter coefficient c: "))
discRoot = math.sqrt(b ** 2 - 4 * a * c)
root1 = (-b + discRoot) / (2 * a)
root2 = (-b - discRoot) / (2 * a)
print("\nThe solutions are:", root1, root2)
except ValueError:
print("\nNo real roots")
quadratic75()
| [
"noreply@github.com"
] | Rmahesh7.noreply@github.com |
b216afaf441625a2413426f3a80c058f619f1924 | df1c29396006adca68841ecc5ac31082b9f9931a | /fet/tools/decorators.py | 76f632e8cb7dc9dc2c2b37c5dd83b17301f05af0 | [] | no_license | cangmean/fet | b7b6f960600f203958f567b925ae68221fe3032f | 10d5eb9f8d630b619c60e077061ef139643f73ec | refs/heads/master | 2021-07-07T00:36:27.210738 | 2020-07-19T08:19:41 | 2020-07-19T08:19:41 | 143,129,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,884 | py | # coding=utf-8
import hashlib
from functools import wraps
from flask import current_app
from flask import jsonify
from flask import request
InvalidSignature = 'InvalidSignature'
ServerError = 'ServerError'
InvalidIPAddress = 'InvalidIPAddress'
NotFoundAuthApp = 'NotFoundAuthApp'
def get_ip():
    """Best-effort client IP: proxy header first, then the socket address."""
    forwarded = request.headers.get("X-Forwarded-For")
    return forwarded or request.remote_addr
def ip_required(white_list=None):
    """Decorator factory: restrict a view to whitelisted client IPs.

    The allowed IPs are the union of *white_list* (a single value or a
    list) and the app config's ``WHITE_LIST``. Rejected requests receive
    ``{"status": -1, "message": InvalidIPAddress}``.
    """
    if white_list is None:
        white_list = []
    elif not isinstance(white_list, list):
        white_list = [white_list]
    def deco(func):
        @wraps(func)
        def _deco(*args, **kw):
            # Build the allowed list per request instead of mutating the
            # closed-over ``white_list`` (the original .extend() grew it on
            # every call and raised TypeError when WHITE_LIST was unset).
            default_white_list = current_app.config.get('WHITE_LIST') or []
            allowed = white_list + list(default_white_list)
            ip = get_ip()
            if ip not in allowed:
                return jsonify(status=-1, message=InvalidIPAddress)
            return func(*args, **kw)
        return _deco
    return deco
def make_signature(app, secret, timestamp):
    """Return the hex MD5 digest of "app|timestamp|secret".

    *timestamp* may be any value convertible with str().
    """
    data = [app, str(timestamp), secret]
    # Encode explicitly: hashlib requires bytes on Python 3
    # (on Python 2 the UTF-8 encode of a str is a no-op byte-wise).
    return hashlib.md5('|'.join(data).encode('utf-8')).hexdigest()
def sign_required(func):
    """Decorator: reject requests whose query-string signature is invalid.

    Expects query parameters ``_app`` (app id), ``_t`` (timestamp) and
    ``_sig`` (digest as produced by make_signature with the app's secret).
    """
    @wraps(func)
    def deco(*args, **kw):
        # Registered apps: mapping of app id -> shared secret.
        auth_apps = current_app.config.get('AUTH_APPS') or {}
        if not auth_apps:
            return jsonify(status=-1, message=NotFoundAuthApp)
        params = request.args.to_dict()
        try:
            app, t, sig = params['_app'], params['_t'], params['_sig']
        except KeyError:
            # Any missing signature parameter -> invalid.
            return jsonify(status=-1, message=InvalidSignature)
        if app not in auth_apps:
            return jsonify(status=-1, message=InvalidSignature)
        # Recompute the expected signature with the app's secret and compare.
        signature = make_signature(app, auth_apps[app], t)
        if sig != signature:
            return jsonify(status=-1, message=InvalidSignature)
        return func(*args, **kw)
    return deco
| [
"changmin.jin@17zuoye.comm"
] | changmin.jin@17zuoye.comm |
e1cf41c5f7676c0a10f1bf2efbd1e6552ba57b1d | ce30e80439f841fd7869399ca1ba726fe778fe6c | /qt.py | 6c9d20c5b1bc5720f5c6ec104881761ac5cc2a52 | [] | no_license | oleglite/giis | d04edba7a66ae6ee8c92c9723a9453eee1ff5a10 | f48bc0466c2d79a6419d3af89094159faa3927de | refs/heads/master | 2016-08-06T00:36:11.280065 | 2013-12-23T10:18:36 | 2013-12-23T10:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | try:
from PySide.QtCore import *
from PySide.QtGui import *
except ImportError:
from PyQt4.QtCore import *
from PyQt4.QtGui import * | [
"beloglazov.oleg@gmail.com"
] | beloglazov.oleg@gmail.com |
63c42c8252a987d933a0cdd969dd663b8fa82454 | eaab7d4250f7987394361231cf2ceebe264c5ebf | /lazy_client_core/tests/__init__.py | ca58ee6535220445e0707d63d8c892e6986943c1 | [] | no_license | stevezau/lazy_client | 118eebc78a75753e23a781e97941a2de52001999 | 31d85a0cc3beb3aa5297a49b9a0ef69fd48bd1d6 | refs/heads/master | 2021-01-17T15:09:54.771174 | 2017-05-14T11:57:27 | 2017-05-14T11:57:27 | 14,830,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | from lazy_client_core.tests.downloads import * | [
"stevezau@gmail.com"
] | stevezau@gmail.com |
b10e77388a81a9751481c8cd18a9bd1d71383946 | b869ab00a927fb8f99f07940903370291923009a | /Project/gatt-server/venv/bin/pip | 367da5666e67185ebdf6b8c480d378c7eda825a0 | [] | no_license | MBillemaz/Ynov-IoT-Training | 11e9ed7af25b15b0cbd4b051700fb3f94eb8fa00 | f0c1fd8a94e1f69cd8aa9b9e38b41ba199e14ad1 | refs/heads/master | 2020-04-27T16:56:48.970749 | 2019-05-14T20:56:10 | 2019-05-14T20:56:10 | 174,498,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/pi/bluetoothServer/bluez-5.43/test/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"maxime.bill@hotmail.fr"
] | maxime.bill@hotmail.fr | |
39e6f93c12b8f033a74d722711c03e7cdeda73f8 | e5df6f614800210137a18312a248cf55d6c14c63 | /hw2/eval.py | 8b2b25c4a48f2360a48a2a9e04801d55d96871a2 | [] | no_license | gdoggg2032/ADL2016 | ae402ea88f61a1c76a08164dc45ad3dac1281025 | fb40f83ae201ce12350c7ec965dc37c94a931628 | refs/heads/master | 2022-11-04T16:46:08.824116 | 2016-12-28T11:42:14 | 2016-12-28T11:42:14 | 70,919,131 | 0 | 1 | null | 2022-10-20T13:52:55 | 2016-10-14T14:44:53 | Python | UTF-8 | Python | false | false | 3,491 | py | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
import sys
# Parameters
# ==================================================
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("model", "", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
tf.flags.DEFINE_string("vocab", "", "vocab file")
tf.flags.DEFINE_string("test_data", "../dataset_cnn_eng/testing_data.txt", "path to test data")
tf.flags.DEFINE_string("predict", "./answer.txt", "path to predict")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
# Echo the effective configuration. NOTE(review): FLAGS.__flags is a
# private tf.flags attribute whose layout changed in later TF releases.
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
    print("{}={}".format(attr.upper(), value))
print("")
# CHANGE THIS: Load data. Load your own data here
if FLAGS.eval_train:
    x_raw, y_test = data_helpers.load_data_and_labels()
    y_test = np.argmax(y_test, axis=1)
else:
    # x_raw = ["a masterpiece four years in the making", "everything is off."]
    # y_test = [1, 0]
    # One raw line of text per example; no gold labels in this mode.
    x_raw = open(FLAGS.test_data).readlines()
    y_test = None
# Map data into vocabulary
vocab_path = FLAGS.vocab#os.path.join(FLAGS.checkpoint_dir, "..", "vocab")
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
print("\nEvaluating...\n")
# Evaluation
# ==================================================
model_file = FLAGS.model
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(model_file))
        saver.restore(sess, model_file)
        # Get the placeholders from the graph by name
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        # input_y = graph.get_operation_by_name("input_y").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name("output/predictions").outputs[0]
        # Generate batches for one epoch
        batches = data_helpers.batch_iter(list(x_test), FLAGS.batch_size, 1, shuffle=False)
        # Collect the predictions here
        all_predictions = []
        for x_test_batch in batches:
            # Dropout disabled (keep_prob=1.0) at evaluation time.
            batch_predictions = sess.run(predictions, {input_x: x_test_batch, dropout_keep_prob: 1.0})
            all_predictions = np.concatenate([all_predictions, batch_predictions])
# Print accuracy if y_test is defined
if y_test is not None:
    correct_predictions = float(sum(all_predictions == y_test))
    print("Total number of test examples: {}".format(len(y_test)))
    print("Accuracy: {:g}".format(correct_predictions/float(len(y_test))))
else:
    # print all_predictions
    # print len(all_predictions)
    # Write one integer prediction per line (Python 2 print syntax).
    f = open(FLAGS.predict, "w")
    for p in all_predictions:
        print >> f, int(p)
| [
"gdoggg2032@gmail.com"
] | gdoggg2032@gmail.com |
0d00be6ffa67dcb44dadf1e7fb59c96d3cefdc76 | dabc9c7ec7cce125a12c6243ff67fd91e620d636 | /tap/tests/test_pytest_plugin.py | c91e8b40631e9c79c21ada77df44a0db95c9ba65 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | Mark-E-Hamilton/tappy | 7634209c2862c9e837b58602d4b59636fd9a8e89 | 62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04 | refs/heads/master | 2021-01-15T09:04:09.813683 | 2016-03-21T04:51:45 | 2016-03-21T04:51:45 | 53,630,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,332 | py | # Copyright (c) 2016, Matt Layman
try:
from unittest import mock
except ImportError:
import mock
import tempfile
from tap.plugins import _pytest
from tap.tests import TestCase
from tap.tracker import Tracker
class TestPytestPlugin(TestCase):
    """Unit tests for the tappy pytest plugin (tap.plugins._pytest)."""
    def setUp(self):
        """The pytest plugin uses module scope so a fresh tracker
        must be installed each time."""
        # When running this suite with pytest, save and restore the tracker.
        self._tracker = _pytest.tracker
        _pytest.tracker = Tracker()
    def tearDown(self):
        _pytest.tracker = self._tracker
    def _make_config(self):
        """Return a mock pytest config with all tap options switched off."""
        config = mock.Mock()
        config.option.tap_stream = False
        config.option.tap_files = False
        config.option.tap_outdir = None
        config.option.tap_combined = False
        return config
    def test_includes_options(self):
        # The plugin registers four command line options on its group.
        group = mock.Mock()
        parser = mock.Mock()
        parser.getgroup.return_value = group
        _pytest.pytest_addoption(parser)
        self.assertEqual(group.addoption.call_count, 4)
    def test_tracker_stream_set(self):
        config = self._make_config()
        config.option.tap_stream = True
        _pytest.pytest_configure(config)
        self.assertTrue(_pytest.tracker.streaming)
    def test_tracker_outdir_set(self):
        outdir = tempfile.mkdtemp()
        config = self._make_config()
        config.option.tap_outdir = outdir
        _pytest.pytest_configure(config)
        self.assertEqual(_pytest.tracker.outdir, outdir)
    def test_tracker_combined_set(self):
        config = self._make_config()
        config.option.tap_combined = True
        _pytest.pytest_configure(config)
        self.assertTrue(_pytest.tracker.combined)
    def test_track_when_call_report(self):
        """Only the call reports are tracked."""
        _pytest.tracker = mock.Mock()
        report = mock.Mock(when='setup', outcome='passed')
        _pytest.pytest_runtest_logreport(report)
        self.assertFalse(_pytest.tracker.add_ok.called)
    def test_tracks_ok(self):
        _pytest.tracker = mock.Mock()
        location = ('test_file.py', 1, 'TestFake.test_me')
        report = mock.Mock(when='call', outcome='passed', location=location)
        _pytest.pytest_runtest_logreport(report)
        _pytest.tracker.add_ok.assert_called_once_with(
            'TestFake', 'TestFake.test_me')
    def test_tracks_not_ok(self):
        _pytest.tracker = mock.Mock()
        location = ('test_file.py', 1, 'TestFake.test_me')
        report = mock.Mock(when='call', outcome='failed', location=location)
        _pytest.pytest_runtest_logreport(report)
        _pytest.tracker.add_not_ok.assert_called_once_with(
            'TestFake', 'TestFake.test_me', diagnostics='')
    def test_tracks_skip(self):
        # The skip reason is the third element of longrepr, after the
        # "Skipped: " prefix.
        _pytest.tracker = mock.Mock()
        location = ('test_file.py', 1, 'TestFake.test_me')
        longrepr = ('', '', 'Skipped: a reason')
        report = mock.Mock(
            when='call', outcome='skipped', location=location,
            longrepr=longrepr)
        _pytest.pytest_runtest_logreport(report)
        _pytest.tracker.add_skip.assert_called_once_with(
            'TestFake', 'TestFake.test_me', 'a reason')
    def test_generates_reports_for_stream(self):
        config = self._make_config()
        config.option.tap_stream = True
        _pytest.tracker = mock.Mock()
        _pytest.pytest_unconfigure(config)
        _pytest.tracker.generate_tap_reports.assert_called_once_with()
    def test_generates_reports_for_files(self):
        config = self._make_config()
        config.option.tap_files = True
        _pytest.tracker = mock.Mock()
        _pytest.pytest_unconfigure(config)
        _pytest.tracker.generate_tap_reports.assert_called_once_with()
    def test_generates_reports_for_combined(self):
        config = self._make_config()
        config.option.tap_combined = True
        _pytest.tracker = mock.Mock()
        _pytest.pytest_unconfigure(config)
        _pytest.tracker.generate_tap_reports.assert_called_once_with()
    def test_skips_reporting_with_no_output_option(self):
        config = self._make_config()
        _pytest.tracker = mock.Mock()
        _pytest.pytest_unconfigure(config)
        self.assertFalse(_pytest.tracker.generate_tap_reports.called)
| [
"matthewlayman@gmail.com"
] | matthewlayman@gmail.com |
3e24d2a7f20ab61e3e2fb2c5f3ee18a374a5c133 | acafefde785283830e1bfdbf385350d6d9c0dc47 | /read_dicom.py | 7553ed9659367194ded84ce18142f815e48e2e3d | [] | no_license | trevallion/MedicalImaging | 784a9fbba2e5cdf4172c302b1009def8c08a22ba | 3f4c2a25edaf01fdceb8b38338c208ca69c47367 | refs/heads/master | 2022-11-23T04:13:03.080193 | 2020-07-23T19:05:49 | 2020-07-23T19:05:49 | 280,210,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | import matplotlib.pyplot as plt
import pydicom
from pydicom.data import get_testdata_files
def get_data():
    """Load pydicom's bundled CT_small.dcm sample, print its metadata,
    and display the pixel data with matplotlib."""
    # NOTE(review): `files` is computed but never used; the same path is
    # fetched again two lines below.
    files = get_testdata_files('CT_small.dcm')
    print(__doc__)
    filename = get_testdata_files('CT_small.dcm')[0]
    dataset = pydicom.dcmread(filename)
    # Normal mode:
    print()
    print("Filename.........:", filename)
    print("Storage type.....:", dataset.SOPClassUID)
    print()
    pat_name = dataset.PatientName
    display_name = pat_name.family_name + ", " + pat_name.given_name
    print("Patient's name...:", display_name)
    print("Patient id.......:", dataset.PatientID)
    print("Modality.........:", dataset.Modality)
    print("Study Date.......:", dataset.StudyDate)
    if 'PixelData' in dataset:
        rows = int(dataset.Rows)
        cols = int(dataset.Columns)
        print("Image size.......: {rows:d} x {cols:d}, {size:d} bytes".format(
            rows=rows, cols=cols, size=len(dataset.PixelData)))
        if 'PixelSpacing' in dataset:
            print("Pixel spacing....:", dataset.PixelSpacing)
    # use .get() if not sure the item exists, and want a default value if missing
    print("Slice location...:", dataset.get('SliceLocation', "(missing)"))
    # plot the image using matplotlib
    plt.imshow(dataset.pixel_array, cmap=plt.cm.bone)
    plt.show()
"896510+trevallion@users.noreply.github.com"
] | 896510+trevallion@users.noreply.github.com |
e36cba2db79f18ed6432af22f03c4f53dd4f61b1 | 2dfbb97b47fd467f29ffb26faf9a9f6f117abeee | /leetcode/242.py | 0b7a2589d14a456369352fe3820fb247d6675b0b | [] | no_license | liuweilin17/algorithm | 0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5 | d3e8669f932fc2e22711e8b7590d3365d020e189 | refs/heads/master | 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 | C++ | UTF-8 | Python | false | false | 1,014 | py | ###########################################
# Let's Have Some Fun
# File Name: 242.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Fri Oct 19 00:40:47 2018
###########################################
#coding=utf-8
#!/usr/bin/python
# valid anagram
class Solution(object):
    def isAnagram(self, s, t):
        """Return True when t is an anagram of s (same characters, same
        multiplicities).

        :type s: str
        :type t: str
        :rtype: bool
        """
        if len(s) != len(t):
            return False
        # Count each character of s, then consume the counts with t.
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in t:
            remaining = counts.get(ch, 0)
            if remaining <= 0:
                # ch absent from s, or already used up.
                return False
            counts[ch] = remaining - 1
        return True
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statements): expects True then False.
    so = Solution()
    s = "anagram"
    t = "nagaram"
    print so.isAnagram(s, t)
    s = "rat"
    t = "car"
    print so.isAnagram(s, t)
| [
"liuweilin17@qq.com"
] | liuweilin17@qq.com |
818b20ac454ef8f772d87fb729b7474c68a5f9a6 | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/1152.py | 8af0d3fc1c7b3184f1a6c89454aee7a18af2623a | [] | no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | print(len(list(map(str,input().split())))) | [
"wnsqlehlswk@gmail.com"
] | wnsqlehlswk@gmail.com |
7eacb9ca621e2a660599a473bfdbc1136d01a7a6 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/bbbeebun/codejam_01.py | 7a489d790a6fd40192e6c72e498da86daa2ff2b1 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,308 | py | def has_completed(mapping):
count = 0
for key in mapping:
count += mapping[key]
if count == 10:
return True
else:
return False
def update_mapping(current_n, mapping):
current_n_str = str(current_n)
for each in current_n_str:
if mapping[each] == 0:
mapping[each] = 1
def counting_sheep(n):
if n == 0:
return 'INSOMNIA'
mapping = {
'0':0, '1':0, '2':0,
'3':0, '4':0, '5':0,
'6':0, '7':0, '8':0,
'9':0
}
current_n = n
update_mapping(current_n, mapping)
while not has_completed(mapping):
current_n += n
update_mapping(current_n, mapping)
return current_n
i = 1
dataset = [0,1,2,11,1692,213858,999995,292164,265199,1000000,10,663708,25,674735,762196,519439,205639,686594,851051,506636,72961,571071,380018,721364,271918,124,362718,40,779467,125000,9,4,104652,20,999998,34,133688,911210,71670,403183,3,999999,777164,999991,999996,954404,999997,200,771909,535557,621518,246569,816478,12500,854110,434198,610249,562071,679849,999992,5,427795,889527,739756,866179,8,513404,125,211763,408914,1250,225473,541210,687079,839403,6,557598,816751,584871,857249,999993,999994,467549,364901,988598,659695,402255,657006,637531,224284,441246,192103,166,565718,300682,596698,584551,410726,7,90188]
for each in dataset:
print 'Case #'+str(i) +': ' + str(counting_sheep(each))
i += 1 | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
f4b6779132fd1af173eb1ef6f7c5386930b2ff3b | 5f16e048917fe818b31d1bc424d178f8207378f0 | /File_handling/Series.py | 613281f492bff0c3e1ffed8da40d6e91ae718615 | [] | no_license | gan3i/Python_first | 71c8a30d2c8f1d21c9e64a633da0a6ef9fcbc6fa | d184e234694eb6724bc511afcfcbb4929ee4513f | refs/heads/master | 2021-06-06T23:52:55.691179 | 2021-05-28T08:28:57 | 2021-05-28T08:28:57 | 151,419,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | """ read annd print an integer series ."""
import sys
from pprint import pprint
def read_series(filename):
    """Read one integer per line from *filename* and return them as a list.

    The file is opened as UTF-8 text and closed automatically; whitespace
    around each line is ignored. Raises ValueError for a non-integer line
    and OSError if the file cannot be opened.
    """
    # The commented-out try/finally version was superseded by `with`.
    with open(filename, mode="rt", encoding="UTF-8") as f:
        return [int(line.strip()) for line in f]
def main(filename):
    """Read the integer series from *filename* and pretty-print it."""
    pprint(read_series(filename))
if __name__ == "__main__":
    # Use a file named on the command line when given, otherwise fall back
    # to the bundled sample (the original hard-coded the sample only).
    main(sys.argv[1] if len(sys.argv) > 1 else "wasteland.txt")
| [
"gnageshappa@aptean.com"
] | gnageshappa@aptean.com |
78be71c2e11ed6ee6f456a9a4c355cca5f58a026 | dfcadafb9b7aee820a6eebba7b67dc31c0cabda5 | /codeforces/1430/B.py | 0d937ff0e42b1fc8691e34097975a3f909c41fcc | [] | no_license | sainad2222/my_cp_codes | 0b631872e96ff84897dd498caf4a6ed5ba4f9c15 | 4621a2b6d80ea5dc36401481bba58096192e0822 | refs/heads/master | 2023-02-15T20:42:03.524179 | 2021-01-12T09:14:00 | 2021-01-15T07:45:41 | 324,394,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | for _ in range(int(input())):
n, k = map(int, input().split())
lis = list(map(int,input().split()))
lis.sort()
ans = lis[-1]
if n == 1:
print(lis[0])
i = n - 2
while k > 0 and i >= 0:
ans += lis[i]
i -= 1
k -= 1
print(ans) | [
""
] | |
e80955484513ccba86db2848360993c199ec20c9 | 00d49fd00fbe561c4e1a1b91c49a2883735656c2 | /TwoStackQueue.py | a5b6f4f47110a178ee3fc1a460ff76f994a985c0 | [] | no_license | zhuyanlong/SwordOffer | 65754becac490aa55a694a957b58329adccf3178 | 807e9a0ad1676f53a6ce7d862ec6591428194d5e | refs/heads/master | 2022-12-07T16:05:17.309975 | 2020-08-26T05:53:25 | 2020-08-26T05:53:25 | 289,850,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | # https://www.nowcoder.com/practice/54275ddae22f475981afa2244dd448c6?tpId=13&&tqId=11158&rp=1&ru=/ta/coding-interviews&qru=/ta/coding-interviews/question-ranking
# -*- coding:utf-8 -*-
class Solution:
    """FIFO queue built from two LIFO stacks.

    ``stack1`` holds elements in arrival order; ``stack2`` holds them
    reversed. Each operation first drains the other stack so the active
    one is in the order it needs.
    """
    def __init__(self):
        self.stack1 = []
        self.stack2 = []
    def push(self, node):
        """Enqueue *node* at the back of the queue."""
        self._drain(self.stack2, self.stack1)
        self.stack1.append(node)
    def pop(self):
        """Dequeue and return the front element (IndexError when empty)."""
        self._drain(self.stack1, self.stack2)
        return self.stack2.pop()
    def _drain(self, src, dst):
        # Move every element of src onto dst, reversing their order.
        while src:
            dst.append(src.pop())
def main():
    """Demo of FIFO behavior: prints 1, 2, 3, 4, 5 in order."""
    s=Solution()
    s.push(1)
    s.push(2)
    s.push(3)
    s.push(4)
    print(s.pop())
    s.push(5)
    print(s.pop())
    print(s.pop())
    print(s.pop())
    print(s.pop())
main()
"noreply@github.com"
] | zhuyanlong.noreply@github.com |
175fc28b8579856f45012ee0fc0cb42a65e8bce4 | 1258a24298c0ade60ee450a3575d8a49f5e5fbb6 | /Write Better Python/buggylogs.py | 26e72106302b3a1033fc8b08e3382e301e5fde86 | [] | no_license | chrisbeldam/TreeHouse-Python | 3abc8d44e0316a385ba43c01eda29b1bd45da023 | 90c03a2df853206d37ce826279127f40002188d3 | refs/heads/master | 2020-12-30T16:28:10.704947 | 2017-06-02T08:55:55 | 2017-06-02T08:55:55 | 90,992,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import pdb
# logging.warning("The french have the grail")
# logging.info("info")
#Python Debugger
my_list = [5, 2, 1, True, "abcdefg", 3, False, 4]
# pdb.set_trace() removed: it suspended the script on every run, and the
# original comment already marked it "Need to remove when done".
del my_list[3]  # Deletes True
del my_list[3]  # Deletes the string (indices shifted after the first del)
del my_list[4]  # Deletes False
print(my_list)  # Expected: [5, 2, 1, 3, 4]
| [
"chrisgbeldam@gmail.com"
] | chrisgbeldam@gmail.com |
c74975ed871b1946d909bc0add8a2c87c9846246 | 5bd983aeb7fb90f4d9da36f4945a9a9fb034da57 | /node_modules/fsevents/build/config.gypi | 30e1fc44dcc86fc66149f2ccd570d86b4162e1da | [
"MIT"
] | permissive | nitishtripathi/React-Native-and-Redux-Stock-Manager-App | 8b6f5503ca77b265ed7ff7d326d1157b1a0a181e | dbf0ea35446fa2394e06a5e785d2adffca11200a | refs/heads/master | 2022-12-02T19:02:24.729175 | 2020-08-11T20:22:31 | 2020-08-11T20:22:31 | 286,832,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,685 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "6",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/nitishtripathi/Library/Caches/node-gyp/14.7.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/nitishtripathi/.npm-init.js",
"userconfig": "/Users/nitishtripathi/.npmrc",
"cidr": "",
"node_version": "14.7.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/nitishtripathi/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.7 node/v14.7.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/5g/dc3f94ts5cd68b0pcmgdkqz80000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"Nitish.trp.99@gmail.com"
] | Nitish.trp.99@gmail.com |
a6ac3722c39b122f6bdc51ff3f28a3da0a5a0c0d | 3dfc4543e5d353e13f8fccf87751f6152e5f2def | /aux/py_02_extract_user_data.py | 77a8b89381e1278e0c524df2a26a7aef17f06009 | [] | no_license | NetDem-USC/training | ba2d80101a953c3b8656958ebeacfeb80d9ff98b | 272ed3e4a9f59c6d5f3a5960bf92290a4242bd18 | refs/heads/master | 2021-01-11T23:12:36.230007 | 2017-05-02T16:32:28 | 2017-05-02T16:32:28 | 78,556,382 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | import os, json, sys, re
'''
Usage:
cd ~/Dropbox/research/Social\ Media\ and\ ICs
python code/py_02_extract_user_data.py data/user-data.csv data/icourts-hashtag-tweets.json
'''
outfile = sys.argv[1]
filename = sys.argv[2]
user_data = {}
user_list = {}
filehandle = open(filename, 'r')
for line in filehandle:
try:
tweet = json.loads(line)
text = tweet['text']
except:
continue
try:
user_id = tweet['user']['id_str']
except:
continue
user_list[user_id] = 1 + user_list.get(user_id,0)
if tweet['user']['location'] is None:
tweet['user']['location'] = ''
if tweet['user']['description'] is None:
tweet['user']['description'] = ''
user_data[user_id] = "{0},{1},{2},{3},{4},{5},{6},{7},{8},{9}".format(
tweet['created_at'][4:16] + ' ' + tweet['created_at'][26:30],
tweet['user']['screen_name'],
tweet['user']['id_str'],
tweet['user']['friends_count'],
tweet['user']['followers_count'],
tweet['user']['lang'],
tweet['user']['verified'],
tweet['user']['location'].replace(",","").encode('utf-8'),
tweet['user']['description'].replace(",","").encode('utf-8'),
user_list[user_id])
outhandle = open(outfile, 'w')
file_key = "DateLastTweet,ScreenName,UserId,FriendsCount,FollowersCount,Language,Verified,Location,Description,Tweets"
outhandle.write("{0}\n".format(file_key))
for user, user_string in user_data.items():
outhandle.write("{0}\n".format(user_string))
outhandle.close()
| [
"pablo.barbera@nyu.edu"
] | pablo.barbera@nyu.edu |
23c5ebd791f99f5b57e925ce96b37c3ab4fa5892 | b28e672b151a72a913f77bc7922268f10c9ab167 | /sarsa.py | 5dd2af190074112cd5da43f9bf8e2bc539ad9243 | [
"Unlicense"
] | permissive | Sanyam07/opengym | 048ac6295e5bc07a1afdfe07b34626d7f9bad866 | ce01211296efd87ebda96366c2dc46b904243eec | refs/heads/master | 2020-12-20T03:40:39.108344 | 2019-03-31T18:31:39 | 2019-03-31T18:31:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,284 | py | # SARSA
import json
from random import random
from random import choice
import math
import functools
def cloneJSON(item):
    """Deep-copy a JSON-serializable *item* by round-tripping through JSON."""
    serialized = json.dumps(item)
    return json.loads(serialized)
def setReward(weights, state, action, reward):
    """Store *reward* for (state, action); states are keyed by their JSON form."""
    key = json.dumps(state)
    actions = weights.setdefault(key, {})
    actions[action] = reward
def getRewards(weights, state, action_list, defaultReward):
    """Return {action: stored reward} for each listed action, using
    *defaultReward* for actions never seen in *state*."""
    known = weights.get(json.dumps(state), {})
    return {action: known.get(action, defaultReward) for action in action_list}
def sarsaEquation(state0, action0, reward1, state1, action1, alpha, gamma, weights, defaultReward, getRewards, setReward) :
    """One SARSA backup: Q(s0,a0) <- (1-alpha)*Q(s0,a0) + alpha*(r + gamma*Q(s1,a1)).

    Reads and writes Q-values through the injected *getRewards*/*setReward*
    callables; stores the updated value and returns it.
    """
    q_current = getRewards(weights, state0, [action0], defaultReward)[action0]
    q_next = getRewards(weights, state1, [action1], defaultReward)[action1]
    updated = (1 - alpha) * q_current + alpha * (reward1 + gamma * q_next)
    setReward(weights, state0, action0, updated)
    return updated
def randomPolicy(actions, epsilon):
    """Pick an action name uniformly at random; *epsilon* is ignored."""
    candidates = list(actions.keys())
    return choice(candidates)
def greedyPolicy(actions, epsilon):
    """Return the (first) action with the highest reward; *epsilon* is ignored.

    max() with a key returns the first maximal key in iteration order,
    matching the original reduce+filter pair on ties. Replaces the
    functools.reduce(lambda, ...) / next(filter(...)) construction with the
    idiomatic built-in.
    """
    return max(actions, key=actions.get)
def epsilonGreedyPolicy(actions, epsilon):
    """Explore with probability *epsilon*, otherwise exploit greedily."""
    explore = random() <= epsilon
    policy = randomPolicy if explore else greedyPolicy
    return policy(actions, epsilon)
# Registry of selection policies, keyed by the config's 'policy' value.
policies = {
    'greedy' : greedyPolicy,
    'epsilonGreedy' : epsilonGreedyPolicy,
    'random' : randomPolicy
}
# Default hyper-parameters used when an agent receives no explicit config.
defaults = {
    'alpha' : 0.2, # default to a low(-ish) learning rate
    'gamma' : 0.8, # default of a high(-ish) dependence on future expectation
    'defaultReward' : 0,
    'epsilon' : 0.001,
    'policy' : 'greedy'
}
class Sarsa:
    """Tabular SARSA agent: holds Q-weights and applies the update rule."""
    def __init__(self, config=None):
        """Use *config* when given, otherwise a deep copy of module defaults."""
        self.config = config or cloneJSON(defaults)
        self.weights = {}
    def getRewards(self, state, action_list):
        """Return a defensive copy of {action: Q(state, action)}."""
        rewards = getRewards(self.weights, state, action_list,
                             self.config['defaultReward'])
        return cloneJSON(rewards)
    def update(self, state0, action0, reward1, state1, action1):
        """Apply one SARSA backup for the observed transition; return new Q."""
        return sarsaEquation(
            state0, action0, reward1, state1, action1,
            self.config['alpha'], self.config['gamma'],
            self.weights, self.config['defaultReward'],
            getRewards, setReward)
    def chooseAction(self, state, action_list):
        """Select an action with the configured policy over current Q-values."""
        current = getRewards(self.weights, state, action_list,
                             self.config['defaultReward'])
        return policies[self.config['policy']](current, self.config['epsilon'])
class TransformState:
    """Wrap an agent so every state is mapped through *transFunc* first."""
    def __init__(self, transFunc, config=None, impl=None):
        # Delegate to the supplied implementation, or build a fresh Sarsa.
        self.impl = impl if impl != None else Sarsa(config)
        self.transFunc = transFunc
    def getRewards(self, state, action_list):
        return self.impl.getRewards(self.transFunc(state), action_list)
    def update(self, state0, action0, reward1, state1, action1):
        return self.impl.update(self.transFunc(state0), action0, reward1,
                                self.transFunc(state1), action1)
    def chooseAction(self, state, action_list):
        return self.impl.chooseAction(self.transFunc(state), action_list)
class Combine:
    """Ensemble agent: sums Q-values from several sub-agents to pick actions."""
    def __init__(self, implList, config=None):
        self.implList = implList
        self.config = config or cloneJSON(defaults)
    def update(self, state0, action0, reward1, state1, action1):
        """Propagate the SARSA update to every sub-agent.

        Bug fix: the original returned from inside the loop, so only the
        first sub-agent was ever updated. Returns the first sub-agent's
        updated value (None when there are no sub-agents) for
        compatibility with the original's return type.
        """
        results = [impl.update(state0, action0, reward1, state1, action1)
                   for impl in self.implList]
        return results[0] if results else None
    def chooseAction(self, state, action_list):
        """Pick an action via the configured policy over summed Q-values."""
        rewards = [impl.getRewards(state, action_list) for impl in self.implList]
        # Sum each action's reward across sub-agents (missing entries add 0).
        actions = {action: 0 for action in action_list}
        for reward in rewards:
            for action in action_list:  # bug fix: was undefined name `actionList`
                if action in reward:
                    actions[action] += reward[action]
        return policies[self.config['policy']](actions, self.config['epsilon'])
| [
"anton.panchishin@gmail.com"
] | anton.panchishin@gmail.com |
e83e880b8a2c1e1ce9f33320d28ba893539c7298 | 88cd3787b1c1b1fe119b4ed8b36c8909cb1210e9 | /testapp/migrations/0005_auto_20180603_1439.py | 0dc34e0a6f48cbabdee83c702fca2ce7604a2696 | [] | no_license | NullOnSpace/MysiteInitial | 039c63cae558af893d17afe206b53d7631168091 | 068f1e44547c6015b9ae183d463c9c0465323569 | refs/heads/master | 2020-03-26T20:18:34.103920 | 2018-09-03T15:14:31 | 2018-09-03T15:14:31 | 145,316,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # Generated by Django 2.0.4 on 2018-06-03 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('testapp', '0004_auto_20180531_1746'),
]
operations = [
migrations.RemoveField(
model_name='main',
name='icid',
),
migrations.AddField(
model_name='icon',
name='lastaccess',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='main',
name='isgirl',
field=models.IntegerField(blank=True, choices=[(0, 'Male'), (1, 'Female')], null=True, verbose_name='gender'),
),
migrations.AlterField(
model_name='noblelist',
name='nl',
field=models.CharField(blank=True, choices=[('0', '未知'), ('1', '骑士'), ('2', '子爵'), ('3', '伯爵'), ('4', '公爵'), ('5', '国王'), ('6', '皇帝'), ('7', '游侠'), ('-', '普通')], max_length=1, null=True),
),
]
| [
"portuguese_d029@163.com"
] | portuguese_d029@163.com |
f8745d2bb25d904e2387a7880537875bab8cf88b | 9ab43bb940e7ba41f4eaebe0b9d99fda25dbeea2 | /pythia/SimulationMods/python/IntegrationData.py | 9b4c276ea8076696e3c377a22d36fa89208507d1 | [
"Apache-2.0"
] | permissive | iamaris/CDFCodes | 6c3b09ac437bd46d7b15a3a09152a6763847abe4 | fdb7e392831b35cbcf6b685111a601aee3ecb395 | refs/heads/master | 2021-01-17T11:59:36.341561 | 2014-01-06T21:35:59 | 2014-01-06T21:35:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | #
# A simple class to represent the blob of quantities that is accumulated
# by the cdfIntegrate program.
#
from integ_utils import removeBrackets
# The following is needed for Python 1.5.2
import string
class IntegrationData:
"""The data structure representing all the information accumulated for
a single leg"""
# We use a float for nsteps so that the fromString function is simple--
# all conversion are to floats.
def __init__(self,
rL = 0.0,
iL = 0.0,
bdl = 0.0,
dx = 0.0,
nsteps = 0.0):
self.floatdata = (rL, iL, bdl, dx, nsteps)
def __str__(self):
return str(self.floatdata)
def radL(self):
return self.floatdata[0]
def intL(self):
return self.floatdata[1]
def BdL(self):
return self.floatdata[2]
def dx(self):
return self.floatdata[3]
def nSteps(self):
return self.floatdata[4]
def fromString(self, aString):
floatdataString = removeBrackets(aString)
t = string.split(floatdataString)
self.floatdata = map(float, t)
if __name__ == "__main__":
# The test string was taken from actual cdfIntegrate program output.
test_string = "[5.81274e-05\0112.0468e-05\0110\0111.76838\0114]"
id = IntegrationData()
id.fromString(test_string)
assert id.radL() == 5.81274e-05, "radL wrong"
| [
"aris@cmu.edu"
] | aris@cmu.edu |
02217b2ac522c12b51097a9f17a901690b23d965 | 0769a4f6bed83d30d8e7394397328381dd9a0cf6 | /TP1cliente_echo.py | 0558ad31dfa1ea23f3d943148aa8cdb1b81c16cf | [] | no_license | ryacyna/Lab_Redes2018 | c7533903fb4a738417b7179360617a178928e311 | 5381ca2f8331bc50f842ec2d1afc2df933262eba | refs/heads/master | 2020-04-04T12:11:55.120771 | 2018-11-13T19:55:14 | 2018-11-13T19:55:14 | 155,917,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py |
import socket
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", type=int, help="Puerto del servidor")
parser.add_argument("-s", "--server", help="Nombre del servidor")
args = parser.parse_args()
enqueport = 10000
servidor = "127.0.0.1"
if args.port:
print "Conectarse al puerto: ", args.port
enqueport = args.port
if args.server:
print "El nombre del servidor es: ", args.server
servidor = args.server
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((servidor, enqueport ))
nombrecliente = raw_input('Cual es su nombre Cliente ? ')
while 1:
data = raw_input("'Exit' para terminar - "+nombrecliente+' > ')
clientsocket.send( nombrecliente+' > '+ data)
if 'Exit' in data:
break
newdata = clientsocket.recv(1024)
print newdata
clientsocket.close()
| [
"noreply@github.com"
] | ryacyna.noreply@github.com |
d20851286ef8b0a6b8ec5ac048c1b5e6e8369f6a | 703242f8dc1e260c24635e9e72aba492720ae32b | /skulpt/test/run/t17.py | d638a8dee9bc97bf74ddb3083291e83a6f3ce01f | [
"MIT",
"Python-2.0"
] | permissive | justyn/sr-sim-js | 0db24c4394b80490ff6f901bbc533466eff2d5ea | 8548af8d37e857ec681dd76fbe09da07cbd2686d | refs/heads/master | 2016-09-06T08:26:35.157310 | 2013-08-20T21:51:02 | 2013-08-20T21:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | if "a" is "a":
print "OK"
| [
"commits@justyn.eu"
] | commits@justyn.eu |
08d60b0fdf4f6abfda5e2ac10591021283fc44bf | 8e1be167066e30eff91c26c0757211cf3cf8b016 | /django/orm/book_authors_proj/apps/books_authors_app/migrations/0001_initial.py | 5818682c02c3c18e31b135482e2c1adb636304db | [] | no_license | dojo-solutions/online-ft-python | 074d0ba968f5a77eaec1bca0904232f2aa29051a | b4f6941d0bba376d121a40a6429b815d5b03c32f | refs/heads/master | 2020-04-21T11:52:31.390772 | 2019-03-02T01:27:54 | 2019-03-02T01:27:54 | 169,542,448 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2019-02-21 18:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='author',
name='books',
field=models.ManyToManyField(related_name='authors', to='books_authors_app.Book'),
),
]
| [
"wes@tao.team"
] | wes@tao.team |
2ba20372fe4994021d5cf6e43c9b163c1c106b64 | 05a9e0bb7e33099f94dfc8af53b4837bc5c9d287 | /python/ext_examples/torch/bench/linear.py | 1a840cbd0db9eba60313d59db7025e1b6a7852df | [] | no_license | HiroIshida/snippets | 999c09efadae80397cb82a424328bb1dbda4915f | f64dcd793184be64682b55bdaee7392fd97a0916 | refs/heads/master | 2023-09-01T08:18:42.523625 | 2023-09-01T04:08:20 | 2023-09-01T04:08:20 | 207,662,767 | 7 | 2 | null | 2022-08-01T23:20:42 | 2019-09-10T21:04:01 | C++ | UTF-8 | Python | false | false | 782 | py | import torch.nn as nn
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
import torch
import threadpoolctl
def measure_perf(depth, with_grad: bool = False):
lst = []
for _ in range(depth):
lst.append(nn.Linear(40, 40))
lst.append(nn.ReLU())
lst.append(nn.Linear(40, 1))
lst.append(nn.Sigmoid())
net = nn.Sequential(*lst)
arr = np.random.randn(1, 40)
ten = torch.from_numpy(arr).float()
ten.requires_grad_(with_grad)
ts = time.time()
n_trial = 100
for _ in range(n_trial):
val1 = net(ten)
if with_grad:
val1.backward()
perf = (time.time() - ts) / n_trial
return perf
perfs = [measure_perf(n, True) for n in tqdm.tqdm(range(50))]
plt.plot(perfs)
plt.show()
| [
"spitfire.docomo@gmail.com"
] | spitfire.docomo@gmail.com |
b5ed3013b2eafda68318a223d46dce0287cafaff | 32fdc94d1b8d98085db5d1e8caae4161d3e70667 | /3rd_party/python3.7/lib/python3.7/site-packages/mining-0.2.2-py3.7-linux-x86_64.egg/mining/utils/listc.py | 298f9060e40d10832fcab747bdea37497e80d1e6 | [
"Python-2.0"
] | permissive | czfdlut/ticket_proxy | fa0f1924a86babfa7ce96cf97e929f7bf78643b7 | 0d7c19448741bc9030484a97c1b8f118098213ad | refs/heads/master | 2022-12-23T05:25:58.207123 | 2019-11-20T03:58:31 | 2019-11-20T03:58:31 | 174,579,562 | 1 | 3 | null | 2022-12-18T01:18:07 | 2019-03-08T17:22:48 | Python | UTF-8 | Python | false | false | 310 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, 'listc.cpython-37m-x86_64-linux-gnu.so')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"czfdlut@163.com"
] | czfdlut@163.com |
60d6d2780c6ee5ec375dd631da6785483699b839 | c9e368329bb602233d7cd6449bd7db385c946078 | /week_1/lists_n_loops/driver.py | 194b18680bf6120ee29cb1ac09212af8c4949e7f | [] | no_license | ansabgillani/arbisoft-training | 55d9d350442a66c91d61b50f2d6ce6eb8d7701a9 | 4c04f02e5cacc78565c6d5f3e759b5741202693c | refs/heads/main | 2023-06-04T10:16:36.743531 | 2021-06-29T09:33:17 | 2021-06-29T09:33:17 | 379,847,820 | 0 | 0 | null | 2021-06-29T09:33:18 | 2021-06-24T07:56:38 | Python | UTF-8 | Python | false | false | 2,458 | py | import time
import random as rnd
import matplotlib.pyplot as plt
from index_search import (
search_key_while,
search_key_enumerate,
search_key_listed_for,
search_key_ranged_for,
search_key_comprehension
)
if __name__ == "__main__":
def calculate_execution_time(function):
start = time.time()
function(nums, key)
end = time.time()
execution_time = end - start
return execution_time
while True:
# input number of elements
size_n = int(input("Enter the number of elements in the list: "))
nums = [rnd.randrange(1, 100) for i in range(size_n)]
# Asking for the key to be searched
key = int(input("Enter the number "
"you want to search : "))
# Calculating Execution of while loop
while_exec_time = calculate_execution_time(search_key_while)
print(f"Time taken to for the "
f"while loop : {while_exec_time}")
# Calculating Execution of enumerate loop
enumerate_exec_time = calculate_execution_time(search_key_enumerate)
print(f"Time taken to for the "
f"enumerate loop : {enumerate_exec_time}")
# Calculating Execution of listed for loop
for_exec_time = calculate_execution_time(search_key_listed_for)
print(f"Time taken to for the "
f"listed for loop : {for_exec_time}")
# Calculating Execution of ranged for loop
range_exec_time = calculate_execution_time(search_key_ranged_for)
print(f"Time taken to for the "
f"ranged for loop : {range_exec_time}")
# Calculating Execution of while loop
comprehension_exec_time = calculate_execution_time(search_key_comprehension)
print(f"Time taken to for the "
f"list comprehension : {comprehension_exec_time}")
x_axis = ['While',
'Enumerate',
'Listed For',
'Range',
'List Comprehension']
y_axis = [while_exec_time,
enumerate_exec_time,
for_exec_time,
range_exec_time,
comprehension_exec_time]
plt.title(f'Input Size: {size_n}')
plt.bar(x_axis, y_axis)
plt.show()
ex = int(input("Press 0 to exit or "
"any other key to continue: "))
if ex == 0:
break
| [
"syedansab@Ansabs-Mac.local"
] | syedansab@Ansabs-Mac.local |
32fb9a2a330ac6fa993cae29751e0c894fb2e922 | 1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a | /01.jump to python/chpter 2/62em.py | 4db6566d344540379fdc05693d0ca4cb074461b8 | [] | no_license | wql7654/bigdata_exam | f57c8b475690cbc5978009dbf8008bedff602e2a | c07ee711bb84407428ba31165185b9607b6825e8 | refs/heads/master | 2023-04-07T00:50:59.563714 | 2021-05-25T02:46:43 | 2021-05-25T02:46:43 | 180,915,985 | 0 | 0 | null | 2023-03-25T01:08:09 | 2019-04-12T02:36:08 | Jupyter Notebook | UTF-8 | Python | false | false | 191 | py |
a=['life','is','too','hard']
re=" ".join(a)
print(re)
re=re.split()
print(re)
re=','.join(a)
print(re)
re=re.split(',')
print(re)
re.sort()
print(re)
re=" ".join(re)
print(re)
| [
"studerande5@gmail.com"
] | studerande5@gmail.com |
3a3ea60ad718a461be8d18cc3e2bd03af4794584 | 6b237224d6f771bd74691dbb717438972a79329a | /benchmark_pyvips_v2_nocache.py | beeb6f1c05c255665cece885dccaded095173307 | [
"Apache-2.0"
] | permissive | Geeks-Sid/stress-test | 6dd61ce6177df17c72c2758c69f2a143e92d143e | 07145c3780c553c1e31b2ef5cbf149d66ffa0efd | refs/heads/main | 2023-03-29T00:26:36.726667 | 2021-04-01T16:20:08 | 2021-04-01T16:20:08 | 352,326,401 | 0 | 1 | Apache-2.0 | 2021-04-01T16:20:09 | 2021-03-28T12:42:36 | Python | UTF-8 | Python | false | false | 6,305 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 20:03:35 2019
@author: siddhesh
(Caleb helped and is super cool)
"""
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
import argparse
import os
import torch
from multiprocessing import Pool, cpu_count, Manager
from albumentations import (
RandomBrightnessContrast,
HueSaturationValue,
RandomGamma,
GaussNoise,
GaussianBlur,
HorizontalFlip,
VerticalFlip,
Compose,
Normalize,
)
import pyvips
from tqdm import tqdm
import concurrent.futures
import time
import threading
pyvips.cache_set_max(0)
class GenClassDataset(Dataset):
def __init__(self, csv_file, ref_file, params, valid=False):
self.csv_file = csv_file
self.ref_file = ref_file
self.df = pd.read_csv(csv_file)
self.patients = pd.read_csv(ref_file)
self.params = params
self.n_processes = params["threads"]
self.valid = valid
self.pyvips_objs = {}
self._lock = threading.Lock()
print("Initializing Dataset")
self.intialize_slideobjects()
print("Done Initializing")
def intialize_slideobjects(self):
print("Resetting")
with concurrent.futures.ThreadPoolExecutor(
max_workers=self.params["threads"]
) as executor:
for index in range(self.params["threads"]):
executor.submit(self.load_slides, index)
def load_slides(self, k):
print("In here Loading Slide thread no. : ", k)
sub_patients = pd.DataFrame(columns=self.patients.columns)
for i in range(len(self.patients)):
if i % self.n_processes == k:
sub_patients = sub_patients.append(
self.patients.iloc[i, :], ignore_index=True
)
slides_to_add = {}
for i in range(sub_patients.shape[0]):
# This can definitely be optimized
pid = sub_patients.iloc[i, 0]
path = sub_patients.iloc[i, 1]
# Add a value to the dictionary with the PID as the key and the path as the value
slides_to_add[pid] = path
# Go through this data partition, actually create the objects, then update main dictionary after objects are created.
self.add_pyvips_objects(slides_to_add)
# Opens slides, passes to update method
def add_pyvips_objects(self, pid_path_dict):
new_dict = {}
# Iterate over the pids and paths, and actually create the pyvips objects.
for pid, path in pid_path_dict.items():
# Create the object, insert into new dictionary
# Update this to be a parameter for which the level is to be fetched
new_dict[pid] = pyvips.Image.openslideload(path, level=0)
print("Updating lock...")
# After new dictionary is created, push to main dictionary
self.locked_update(new_dict)
# Need to install threadlock to prevent race conditioning
# This is what actually adds them to the shared dictionary.
def locked_update(self, new_dict):
with self._lock:
print("Locking and updating...")
# Create copy, update dictionary with new values (pids => pyvips objects)
local_copy = self.pyvips_objs
local_copy.update(new_dict)
# After update, restore and release lock
print("Main dictionary updated. Lock released.")
self.pyvips_objs = local_copy
def __len__(self):
return len(self.df)
def __getitem__(self, patient_id):
pid = self.df.iloc[patient_id, 0]
x = int(self.df.iloc[patient_id, 1])
y = int(self.df.iloc[patient_id, 2])
slide_ob = self.pyvips_objs[pid]
patch = np.array(slide_ob.fetch((x, y), 0, (1024, 1024)).convert("RGB"))
label = self.df.iloc[patient_id, 3]
if self.valid:
image = self.train_transforms(image=patch)
else:
image = self.validation_transforms(image=patch)
patch = image["image"]
patch = np.transpose(patch, (2, 0, 1))
patch = torch.FloatTensor(patch)
return patch, label
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--input_path",
dest="input_path",
help="input path for the csv",
required=True,
)
parser.add_argument(
"-r", "--ref_path", dest="ref_path", help="path for the reference csv"
)
parser.add_argument(
"-o",
"--output_path",
dest="output_path",
help="output path for the landmark files",
required=True,
)
parser.add_argument(
"-t", "--threads", dest="threads", help="number of threads, by default will use"
)
args = parser.parse_args()
input_path = os.path.abspath(args.input_path)
ref_path = os.path.abspath(args.ref_path)
output_path = os.path.abspath(args.output_path)
sub_dummy = []
j = int(args.threads)
print("#" * 80)
print("Num Workers : ", j)
tstart = time.time()
x = [i for i in range(64, 131073, 64)]
y = []
i = 0
params = {}
params["threads"] = j
dataset_train = GenClassDataset(input_path, ref_path, params, valid=False)
train_loader = DataLoader(
dataset_train, batch_size=32, shuffle=True, num_workers=j, pin_memory=False
)
start = time.time()
print(len(train_loader))
for batch_idx, (image_data, label) in enumerate(train_loader):
del image_data, label
if i >= len(x):
print("i went too far, It had to be done.")
break
if batch_idx * 32 == x[i]:
y.append(time.time() - start)
print("Number of Images : ", x[i], i)
print("Time Taken : ", (time.time() - start))
i += 1
if batch_idx == 65536:
dataset_train.reset_slide_objects()
tend = time.time()
print("Time taken for {} workers : {}".format(j, tend - start))
y = np.array(y)
x = np.array(x)
os.makedirs(output_path, exist_ok=True)
np.savez_compressed(
os.path.join(output_path, "Cluster_login_node_not_ram_multiloader", x=x, y=y)
)
| [
"noreply@github.com"
] | Geeks-Sid.noreply@github.com |
877d2b03715d504aae165d9ee43a4910341d7eec | 0b18de87567514d723fdd031506ab35604d7e472 | /pkg/synthesis.py | 9b04e493e4930cd77a40f9721c6e6dadd2a2ca1d | [] | no_license | alexsevas/SimpleTTS | 8de6a8d86930a96fcbc7190935b19a5516a841a3 | a3fc17c952e25cad8536ee68d610fa86e2bbfb0d | refs/heads/master | 2022-02-24T11:25:18.392454 | 2019-10-15T17:13:42 | 2019-10-15T17:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | import os
import torch
import numpy as np
import scipy.io.wavfile as wavfile
from pkg.train import load
from pkg.networks import Text2Mel, SuperRes
from pkg.utils import plot_spectrum, plot_attention, PrettyBar, spectrogram2wav
from pkg.hyper import Hyper
from pkg.data import process_text
def synthesis(text_list, plot=True):
device = "cuda:2"
# load graphs
graph0 = Text2Mel()
graph1 = SuperRes()
load(os.path.join(Hyper.logdir, "text2mel/pkg/trained.pkg"), graph0, device='cpu')
load(os.path.join(Hyper.logdir, "superres/pkg/trained.pkg"), graph1, device='cpu')
graph0.eval()
graph1.eval()
# make dir
syn_dir = os.path.join(Hyper.root_dir, "synthesis")
if not os.path.exists(syn_dir):
os.makedirs(syn_dir)
# phase1: text to mel
graph0.to(device)
texts = [process_text(text, padding=True) for text in text_list]
texts = torch.LongTensor(np.asarray(texts)).to(device)
mels = torch.FloatTensor(np.zeros((len(texts), Hyper.dim_f, Hyper.data_max_mel_length))).to(device)
prev_atten = None
bar = PrettyBar(Hyper.data_max_mel_length - 1)
bar.set_description("Text to Mel")
for t in bar:
_, new_mel = graph0(texts, mels, None if t == 0 else t - 1, prev_atten)
mels[:, :, t + 1].data.copy_(new_mel[:, :, t].data)
prev_atten = graph0.attention
for i in range(len(text_list)):
# mels[:, :, :-1].data.copy_(mels[:, :, 1:].data)
if plot:
plot_attention(graph0.attention[i].cpu().data, "atten", i, True, syn_dir)
plot_spectrum(mels[i].cpu().data, "mels", i, True, syn_dir)
del graph0
# phase2: super resolution
graph1.to(device)
_, mags = graph1(mels)
bar = PrettyBar(len(text_list))
bar.set_description("Super Resolution")
for i in bar:
wav = spectrogram2wav(mags[i].cpu().data.numpy())
wavfile.write(os.path.join(syn_dir, "syn_{}.wav".format(i)), Hyper.audio_samplerate, wav)
if plot:
plot_spectrum(mags[i].cpu().data, "mags", i, True, syn_dir)
del graph1
if __name__ == "__main__":
synthesis(
["in being comparatively modern.",
"The birch canoe slid on the smooth planks",
"I can't believe you any more.",
"This is bullshit.",
"Give me 10101, because it's .123 times better than h110..."])
| [
"a.s.potapov@tinkoff.ru"
] | a.s.potapov@tinkoff.ru |
802903e97b7dc6a387a1b5cfe85aaaf3f929915f | 09c073981b41775781774fff10e1eb7a0da9f1be | /python_practice.py | 7dde9714e9c4ed1a0bc6b2ea919aa2c3fede7e2d | [] | no_license | Subodh2044/Election_Analysis | 872b3fddf53b6cbabdd4c49c39f4a2501ffeee4d | 6bfe11f7f1e0efe15b812f953b17ac165d043091 | refs/heads/master | 2022-12-25T07:09:23.511791 | 2020-09-21T02:57:44 | 2020-09-21T02:57:44 | 295,298,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py |
counties_dict={}
counties_dict["Arapahoe"] = 422829
counties_dict
| [
"byansubodh@gmail.com"
] | byansubodh@gmail.com |
c021b16e9e70d0d06c9a794e86bb55a587cbfb39 | 8481846f85f664fdfb02567e7b07ac7bf6251467 | /fill_IDs.py | ee06f9b0415aa14ac18c36379d0a1eef8e0c8746 | [] | no_license | serinachang5/bechdel | 1247cc2930c5bb8b94154899537a29224a171062 | 105d3ab8f1c0b0cd3106e0c0c03d4dcd77c32eaa | refs/heads/master | 2020-03-08T18:12:42.169571 | 2018-05-23T00:26:15 | 2018-05-23T00:26:15 | 128,290,679 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | import csv
import imdb
import jellyfish as jelly
def getData(filename):
data = []
for row in filename:
data.append([row["Title"], row["Source"], row["File_path"], row["IMDb_id"]])
return data
def getMissingID(data, result_filename):
res = []
movie_db = imdb.IMDb()
for item in data:
if item[0] != '' and item[-1] == '':
movie_by_title = movie_db.search_movie(item[0])
first = movie_by_title[0:1]
#print(first[0].movieID)
res.append((item[0], item[1], item[2], first[0].movieID))
else:
res.append((item[0], item[1], item[2], item[3]))
with open(result_filename, "w") as fin:
writer = csv.writer(fin, delimiter=",")
writer.writerow(["Title", "Source", "File_path", "IMDb_id"])
writer.writerows(res)
if __name__ == "__main__":
#agarwal = csv.DictReader(open("cleaned_agarwal_alignments.csv"))
gorinski = csv.DictReader(open("gorinski_alignments.csv"))
all_ = getData(gorinski)
#ids = getMissingID(all_, "agarwal_alignments_with_IDs.csv")
ids = getMissingID(all_, "gorinski_alignments_with_IDs.csv")
| [
"navrajnarula@gmail.com"
] | navrajnarula@gmail.com |
90b679c74e2aa6adb56cdaf86a7a859d0a99bac9 | 334ea94a185fd48f5e8d4f6a321444c61b216a86 | /stream_configure.py | c05522f8adde8f23d701475693897efd199959d6 | [
"MIT"
] | permissive | brishtiteveja/Alive-CLI | 6e40d320846af5f4a9b1be0eaf1673ac33de09b0 | 305640534fd00b63900cb0ea7207f90034a97e1f | refs/heads/master | 2023-03-06T01:40:57.722688 | 2021-02-12T14:00:48 | 2021-02-12T14:00:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | import sys
if len(sys.argv) != 7:
print('Usage: python3 ' + sys.argv[0] + ' <network> <api_node> <link> <alivedb_pubkey> <username> <private_key>')
sys.exit(0)
api = sys.argv[2]
link = sys.argv[3]
pub = sys.argv[4]
sender = sys.argv[5]
key = sys.argv[6]
if sys.argv[1] == 'dtc':
# Avalon
import json
import time
import requests
import base58
import hashlib
import secp256k1
tx = {
'type': 21,
'data': {
'link': link,
'pub': pub
},
'sender': sender,
'ts': round(time.time() * 1000)
}
txString = json.dumps(tx,separators=(',',':'))
tx['hash'] = hashlib.sha256(txString.encode('UTF-8')).hexdigest()
pk = secp256k1.PrivateKey(base58.b58decode(key))
hexhash = bytes.fromhex(tx['hash'])
sign = pk.ecdsa_sign(hexhash,raw=True,digest=hashlib.sha256)
signature = base58.b58encode(pk.ecdsa_serialize_compact(sign)).decode('UTF-8')
tx['signature'] = signature
headers = {
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/json'
}
broadcast = requests.post(api + '/transact',data=json.dumps(tx,separators=(',',':')),headers=headers)
print(broadcast.text)
elif sys.argv[1] == 'hive':
# Hive
from beem import Hive
hive_client = Hive(node=api,keys=[key])
json_data = {
'op': 2,
'link': link,
'pub': pub
}
hive_client.custom_json('alive-test',json_data,required_posting_auths=[sender])
else:
raise ValueError('Invalid network') | [
"engtianxi@outlook.com"
] | engtianxi@outlook.com |
b9363339a258e9695ebce785436523f9a6e68cfc | 5d7c5fb6c9f067256de694f07f5db793d7c071b3 | /dianping.com/pachong/utils/getCodeImg_asSave.py | 657f64ca961766fc01cc8912f395aad130e23a60 | [] | no_license | asdfhalkehflkajdhf/utils-py | e2f0ed813f3f0ca3cb726e8da6d02858aac3a7fb | 6da43113c7ee1e7a77de097926f6e1719be3e403 | refs/heads/master | 2023-05-27T00:21:52.382590 | 2020-02-16T06:54:55 | 2020-02-16T06:54:55 | 144,437,420 | 2 | 0 | null | 2023-05-22T21:38:50 | 2018-08-12T04:23:23 | JavaScript | UTF-8 | Python | false | false | 2,833 | py | # 1、截屏,裁减
# 验证码位置选择
# 系统工具,元素位置
# location = img.location
# left = location['x']
# top = location['y']
# right = left + size['width']
# bottom = top + size['height']
# 2、右击另存为
# 另存为路径设置,和对话框处理,直接设置浏览器参数
# req = requests.get(url='https://verify.meituan.com/v2/captcha?request_code=45c4c73efcea42ed85a8f774f8e43bda&action=login',
# headers={'Date': 'Tue, 04 Dec 2018 14:22:15 GMT', 'M-TraceId':'659577734713269804'}) # buld post body data
# # 将获取到的图片二进制流写入本地文件
# with open('ttt.png', 'wb') as f:
# # 对于图片类型的通过r.content方式访问响应内容,将响应内容写入baidu.png中
# f.write(req.content)
from selenium import webdriver
import os
import requests
import time
from selenium import webdriver
# 1、截屏,裁减
# 验证码位置选择
# 系统工具,元素位置
# location = img.location
# left = location['x']
# top = location['y']
# right = left + size['width']
# bottom = top + size['height']
# 2、右击另存为
# 另存为路径设置,和对话框处理,直接设置浏览器参数
# req = requests.get(url='https://verify.meituan.com/v2/captcha?request_code=45c4c73efcea42ed85a8f774f8e43bda&action=login',
# headers={'Date': 'Tue, 04 Dec 2018 14:22:15 GMT', 'M-TraceId':'659577734713269804'}) # buld post body data
# # 将获取到的图片二进制流写入本地文件
# with open('ttt.png', 'wb') as f:
# # 对于图片类型的通过r.content方式访问响应内容,将响应内容写入baidu.png中
# f.write(req.content)
from PIL import Image
options = webdriver.ChromeOptions()
prefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': 'e:/login'}
options.add_experimental_option('prefs', prefs)
browser = webdriver.Chrome(executable_path="chromedriver.exe", chrome_options=options)
href = 'https://verify.meituan.com/v2/captcha?request_code=45c4c73efcea42ed85a8f774f8e43bda&action=login'
browser.get(href)
img = browser.find_element_by_tag_name('img')
# browser.switch_to.active_element()
# 保存验证码
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
action = ActionChains(browser).move_to_element(img)#移动到该元素
action.context_click(img)#右键点击该元素
'''无法操作系统菜单,只能是页面内的'''
time.sleep(1)
action.key_down(Keys.SHIFT).send_keys('v').key_up(Keys.SHIFT).perform()#点击键盘向下箭头
action.key_down(Keys.ENTER).key_up(Keys.ENTER).perform()
# action.perform()#执行保存
| [
"root@MiWiFi-R3-srv.(none)"
] | root@MiWiFi-R3-srv.(none) |
93e266b719d7e1c5ef5d818cc5b6c82aebc830e0 | 032fcfa68e5f60898414d691b4f1012ce95eb0d4 | /src/migrations/0005_bill_total_money.py | 5b3e9f18fed11401535dddfe36f4ecb7e91fbef9 | [] | no_license | codethoisinhvien/quanlynhahang | cefbf5c65dfd3c2a2c2972f525d61c7927665153 | 26c08a00b800ccb57fbf8f4b68b9a7ccf8814c58 | refs/heads/master | 2022-05-01T02:32:07.533397 | 2020-07-02T04:28:45 | 2020-07-02T04:28:45 | 237,424,713 | 0 | 0 | null | 2022-04-22T22:59:57 | 2020-01-31T12:22:14 | Python | UTF-8 | Python | false | false | 383 | py | # Generated by Django 2.2.11 on 2020-06-15 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0004_auto_20200607_1008'),
]
operations = [
migrations.AddField(
model_name='bill',
name='total_money',
field=models.IntegerField(default=0),
),
]
| [
"nguyentruonggiangptdq@gmail.com"
] | nguyentruonggiangptdq@gmail.com |
ef57d7fcd27a97d3e6735a48de43f28da2f7893b | dbd0ffd6169415943d63262f98f314543043c491 | /remote_transform.py | d31c5cd35d89808d8677f473609398293eebe114 | [] | no_license | eranheres/sagemaker-byom-example | b7e3ccdd80c9dbc5d52da17c9e9eadf4f04bed6d | cd6eec4e2d446ac85c2567340dbde1fa8de738a3 | refs/heads/main | 2023-02-24T18:53:59.899593 | 2021-01-26T12:55:38 | 2021-01-26T12:55:38 | 331,443,652 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | #!/usr/bin/env python3
import sagemaker
import boto3
import pprint
pp = pprint.PrettyPrinter(indent=4)
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker')['Role']['Arn']
model_name = "sagemaker-example-fixed"
# getting our model information (docker uri and model data)
client = boto3.client('sagemaker')
model_info = client.describe_model(ModelName=model_name)
print(model_info)
pp.pprint(model_info)
image_uri = model_info['PrimaryContainer']['Image']
model_data = model_info['PrimaryContainer']['ModelDataUrl']
# upload batch data for transforming
sess = sagemaker.Session()
BATCH_WORK_DIRECTORY = 'tmp/batch-data'
s3_batch_prefix = 'sagemaker-example/batch-data'
batch_input = sess.upload_data(BATCH_WORK_DIRECTORY, key_prefix=s3_batch_prefix)
model = sagemaker.model.Model(
image_uri=image_uri,
model_data=model_data,
role=role
)
# creating a transormer and transforming
print("creating transformer")
transformer = model.transformer(instance_count=1, instance_type='ml.m4.xlarge')
print("transforming")
transformer.transform(data=batch_input, data_type='S3Prefix', content_type='text/csv', split_type='Line')
print("waiting for termination")
transformer.wait()
| [
"eran.heres@gmail.com"
] | eran.heres@gmail.com |
eb9ff236f9d591cfb6a61d337418b89d5fa742b4 | 369b1b63bb639f5d7d0acec3a03dda7ed2ce5c7d | /restserver/common/response.py | 4b73b8ec7dcf116592b33638a34b6d274c7e09cb | [] | no_license | lafenicecc/chaincode-dev-env | 9787c32bf72cac89da7d41d4a56137a36c595abd | fe3ee08461840f089968cfa66f1390fba522ebca | refs/heads/master | 2020-05-29T11:02:08.582941 | 2017-08-20T12:55:38 | 2017-08-20T12:55:38 | 65,706,365 | 5 | 2 | null | 2017-07-24T07:39:56 | 2016-08-15T05:19:08 | Go | UTF-8 | Python | false | false | 860 | py | from flask import jsonify
CODE_OK = 200
CODE_CREATED = 201
CODE_NO_CONTENT = 204
CODE_BAD_REQUEST = 400
CODE_FORBIDDEN = 403
CODE_NOT_FOUND = 404
CODE_METHOD_NOT_ALLOWED = 405
CODE_NOT_ACCEPTABLE = 406
CODE_CONFLICT = 409
response_ok = {
"status": "OK",
"code": CODE_OK,
"error": "",
"data": {}
}
response_fail = {
"status": "FAIL",
"code": CODE_BAD_REQUEST,
"error": "",
"data": {}
}
def make_ok_resp(error="", data={}, code=CODE_OK):
response_ok['code'] = code
response_ok["error"] = error
response_ok["data"] = data
return jsonify(response_ok), CODE_OK
def make_fail_resp(error="Invalid request", data={},
code=CODE_BAD_REQUEST):
response_fail['code'] = code
response_fail["error"] = error
response_fail["data"] = data
return jsonify(response_fail), CODE_BAD_REQUEST
| [
"ck-cc@hotmail.com"
] | ck-cc@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.