blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f37b4202698b801244e4f37eb349143a2286421f | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/007_exceptions/_exercises/templates/GoCongr/035_warnings.py | 64f70487a6f6966148382366c83ea4f50b5aa248 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 682 | py | # # w____
# ________ w____
#
#
# ___ input_body_parameter name unit supposed_maximum
# parameter _ fl.. inp.. 'Enter your @ (in @): '.f.... n.. u...
# __ ? < _ 0:
# r____ V... n.. + ' cannot be negative')
# __ ? > s...
# w____.w... 'suspiciously large value of ' + n..
# r_ ?
#
#
# ___ input_mass
# r_ i... n... _'mass' u... _'kg' s.... _ 100
#
#
# ___ input_height
# r_ i... n.. _ 'height' u... _ 'm' s.... _ 2
#
#
# ___ calculate_bmi mass height
# r_ m... / h.. ** 2)
#
#
# ___ main
# mass _ i._m.
# height _ i._h.
# bmi _ c... mass height
# print('Your body mass index is', ?
#
#
# __ _______ __ ____
# ?
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
066185891cf7e7576cfef986ce1c0702a45d8e9a | 93c53bbc8c4e11341d2722bb4f81c02820040019 | /src/deepproblog/examples/Coins/data/render.py | 874f2a29b1335f0d6595520edb2dfcd00ab55fd0 | [
"Apache-2.0"
] | permissive | 22842219/deepproblog | bbcaa011a97416570a8cb9c8c206378e92864f74 | 6d38e783990551f4030780a1d69c7138fada2020 | refs/heads/master | 2021-12-02T20:53:54.277847 | 2021-08-23T19:32:06 | 2021-08-23T19:32:06 | 416,207,305 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import argparse
import os
import subprocess
# Command-line interface: one or more dataset names to render, plus an
# optional explicit path to the Blender binary.
parser = argparse.ArgumentParser(
    description="Render the image data for a given csv file."
)
parser.add_argument("set", nargs="+")
parser.add_argument("-b", "--blender_path", default=None)
if __name__ == "__main__":
    parsed = parser.parse_args()
    # Fall back to a "blender" executable on PATH when no explicit binary
    # was supplied via --blender_path.
    blender_path = parsed.blender_path
    if blender_path is None:
        blender_path = "blender"
    for s in parsed.set:
        print("Rendering ", s)
        path = os.path.dirname(os.path.abspath(__file__))
        # Square output resolution in pixels, forwarded to the render script.
        res = 512
        # Invoke Blender headless on the bundled scene; -b/-P appear to be
        # Blender's background-mode and run-script flags, and the arguments
        # after "--" are presumably parsed by render_script.py itself --
        # confirm against that script.
        subprocess.call(
            [
                blender_path,
                path + "/blender_files/scene.blend1",
                "-b",
                "-P",
                path + "/blender_files/render_script.py",
                "--",
                path,
                s,
                str(res),
            ],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT,
| [
"robin.manhaeve@cs.kuleuven.be"
] | robin.manhaeve@cs.kuleuven.be |
ecb2035d79c085ddb38a35c124bd8f85b3dffa78 | 7563b6c93cb3ff5d3f8177c2433e12a7770a6ae9 | /controllers/asylum.py | 5f0cf66ffea4651dced3b1d960237ef6bd30a6f7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mswdresden/eden | 53f8258b731edf13c83884d327a11c8d819c2487 | 3f753a20ce2b7cedd2c55770ed333c069df50cf1 | refs/heads/master | 2020-07-28T15:51:19.321264 | 2017-07-07T11:45:36 | 2017-07-07T11:45:36 | 73,409,134 | 0 | 0 | null | 2016-11-10T18:22:17 | 2016-11-10T18:22:15 | null | UTF-8 | Python | false | false | 3,383 | py | # -*- coding: utf-8 -*-
"""
Asylum Controllers
"""
# Name of the current controller (this file) and of the invoked function,
# as provided by the web2py request object.
module = request.controller
resourcename = request.function
# Bail out early when this deployment has the asylum module switched off.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# -------------------------------------------------------------------------
def index():
    """Render the application home page for this module."""
    nice_name = settings.modules[module].name_nice
    response.title = nice_name
    return dict(module_name=nice_name)
def person_rheader(r, tabs=None):
    """Build the resource header (rheader) shown above the person tabs.

    Args:
        r: the S3Request being rendered.
        tabs: optional list of (label, component) tab tuples; the default
            tab set is always used.  (The original mutable default ``[]``
            was replaced by ``None`` to avoid cross-call shared state.)

    Returns:
        DIV with the person summary table and tab navigation, or None for
        non-HTML representations and for list/create views (no record).
    """
    if r.representation != "html":
        # RHeader is a UI facility & so skip for other formats
        return None
    if r.record is None:
        # List or Create form: rheader makes no sense here
        return None

    tabs = [(T("Basic Details"), None),
            (T("Status"), "asylum_status")]
    rheader_tabs = s3_rheader_tabs(r, tabs)
    person = r.record
    rheader = DIV(TABLE(
        TR(
            TH("%s: " % T("Name")),
            person.name,
            TH("%s: " % T("First Name")),
            person.firstname,
        ),
        TR(
            TH("%s: " % T("Is this a status ...")),
            # NOTE(review): the removed commented-out alternatives suggest
            # this cell was meant to show the person's asylum status; a
            # constant 0 is currently passed -- confirm the intended value.
            s3db.asylum_person_represent(0),
        )
    ), rheader_tabs)
    return rheader
# -------------------------------------------------------------------------
def person():
    """REST controller for person records, using the status rheader."""
    # Python 2-only print statement converted to the function form, which
    # is valid on both Python 2 and 3.  NOTE(review): this looks like a
    # leftover debug trace -- consider removing it entirely.
    print('hallo msw (asylum person controller)')
    return s3_rest_controller(rheader=person_rheader)
# -------------------------------------------------------------------------
def status():
    """REST controller for asylum status records."""
    return s3_rest_controller()
# -------------------------------------------------------------------------
def msw():
    """Demo page: look up the client IP via s3db.asylum_ip_func and hand
    it to the view.

    The Python 2-only print statement was converted to the function form,
    the IP helper is now called once instead of twice, the redundant
    str() wrapper was dropped, and dead commented-out code was removed.
    """
    client_ip = s3db.asylum_ip_func()
    print("Your ip is, i'll send it to the view ... " + client_ip)
    return dict(bummi="Your ip isaaa " + client_ip)
# -----------------------------------------------------------
def display_form():
    """Show a minimal name form and flash the validation outcome."""
    name_field = INPUT(_name='name', requires=IS_NOT_EMPTY())
    submit_button = INPUT(_type='submit')
    form = FORM('Your Name:', name_field, submit_button)
    if form.accepts(request, session):
        flash_message = 'form accepted'
    elif form.errors:
        flash_message = 'form has errors'
    else:
        flash_message = 'please fill in the form correctly'
    response.flash = flash_message
    return dict(form=form, name="Katharina Witt")
# ------------------------------------------------------------
# Demo table: two user-entered integers plus a derived product column that
# is hidden from forms (readable=False, writable=False) and filled in by
# the onvalidation hook below.
db.define_table('numbers',
   Field('a', 'integer'),
   Field('b', 'integer'),
   Field('c', 'integer', readable=False, writable=False))
# NOTE(review): unused in the visible code; kept in case other parts of
# the file rely on it.
import time
def my_form_processing(form):
    """onvalidation hook: derive c = a * b, rejecting negative products."""
    product = form.vars.a * form.vars.b
    if product >= 0:
        form.vars.c = product
    else:
        form.errors.b = 'a*b cannot be negative'
def insert_numbers():
    """Serve the insert form for the numbers table.

    Validation and the derived column are handled by my_form_processing;
    on success the user is flashed and redirected back to this page.
    """
    form = SQLFORM(db.numbers)
    if form.process(onvalidation=my_form_processing).accepted:
        session.flash = 'record inserted'
        redirect(URL())
    return dict(form=form)
| [
"msw@3dd2.com"
] | msw@3dd2.com |
64d66805916ab184fcaef2fa588bfe9b5ab6d4d7 | 9792bdc5933a5ef0f886fa4e474a9f69e00b1bdb | /src/mem/ruby/SConscript | bbc2470e6aa5bc93a674def039b9892e9075bfe2 | [] | no_license | BurningAbys2/VIPS_self | 86285a21b5eda0f30415b832311ac197084b45fe | 5372336b3f7d73fd6bd26aacb6cbfbbe6274c637 | refs/heads/master | 2021-05-03T15:24:26.208044 | 2017-04-29T23:36:32 | 2017-04-29T23:36:32 | 62,321,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,814 | # -*- mode:python -*-
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
import sys
from os.path import basename, isdir, join as joinpath
import SCons
Import('*')
DebugFlag('ProtocolTrace')
DebugFlag('RubyCache')
DebugFlag('RubyCacheTrace')
DebugFlag('RubyDma')
DebugFlag('hxmDma')
DebugFlag('RubyGenerated')
DebugFlag('RubyMemory')
DebugFlag('RubyNetwork')
DebugFlag('RubyPort')
DebugFlag('RubyPrefetcher')
DebugFlag('RubyQueue')
DebugFlag('RubySequencer')
DebugFlag('RubyEvent')
DebugFlag('RubySlicc')
DebugFlag('hxmRubyPrivate')
DebugFlag('RubySystem')
DebugFlag('RubyTester')
DebugFlag('RubyStats')
DebugFlag('RubySynStats')
DebugFlag('RubyResourceStalls')
CompoundFlag('Ruby', [ 'RubyQueue', 'RubyNetwork', 'RubyTester',
'RubyGenerated', 'RubySlicc', 'RubySystem', 'RubyCache',
'RubyMemory', 'RubyDma', 'RubyPort', 'RubySequencer', 'RubyCacheTrace',
'RubyPrefetcher'])
if env['PROTOCOL'] == 'None':
Return()
def do_embed_text(target, source, env):
    """Convert a text file into a C fragment suitable for #include.

    Writes ``static const char *<name> = "..." ;`` to target[0], where
    <name> comes from source[1].get_contents() and the string body is the
    escaped, line-by-line contents of source[0].  Useful for embedding
    scripts and configuration files in object files.

    Args:
        target: SCons target nodes; target[0] is the output file.
        source: SCons source nodes; source[0] is the text file, source[1]
            carries the C variable name.
        env: SCons environment (unused).
    """
    # Characters that must be backslash-escaped inside a C string literal.
    # '?' is included to defuse accidental trigraph sequences.  (The
    # original list held the two-character string "\?", which a single
    # character could never equal, so '?' was silently never escaped.)
    escape = {"'", '"', "\\", "?"}

    # Read the text file line by line, converting it to a C string; the
    # with-statement closes both handles even on error (the original
    # leaked them on exceptions).
    with open(str(source[0]), 'r') as fin, open(str(target[0]), 'w') as fout:
        fout.write("static const char *%s =\n" % source[1].get_contents())
        for line in fin:
            fout.write('"')
            for char in line:
                if char == "\n":
                    # The newline is emitted as an explicit \n below.
                    break
                if char in escape:
                    fout.write("\\")
                fout.write(char)
            fout.write('\\n"\n')
        fout.write(";\n")
#
# Link includes
#
generated_dir = Dir('../protocol')
def MakeIncludeAction(target, source, env):
    """SCons action: write one #include line per source node into target[0].

    Replaces the Python 2-only ``file()`` builtin and ``print >>f``
    syntax (a SyntaxError on Python 3) with constructs valid on both.

    Args:
        target: SCons target nodes; target[0] is the header to write.
        source: SCons source nodes; each node's abspath gets an #include.
        env: SCons environment (unused).
    """
    with open(str(target[0]), 'w') as f:
        for s in source:
            f.write('#include "%s"\n' % str(s.abspath))
def MakeInclude(source):
    """Register an SCons command that mirrors *source* into the generated
    protocol directory as a one-line #include wrapper header.

    Relies on module-level SCons names (generated_dir, MakeAction,
    Transform, env) defined elsewhere in this SConscript.
    """
    # The generated header keeps the same basename under generated_dir.
    target = generated_dir.File(basename(source))
    include_action = MakeAction(MakeIncludeAction, Transform("MAKE INC", 1))
    env.Command(target, source, include_action)
MakeInclude('slicc_interface/AbstractEntry.hh')
MakeInclude('slicc_interface/AbstractCacheEntry.hh')
MakeInclude('slicc_interface/Message.hh')
MakeInclude('slicc_interface/NetworkMessage.hh')
MakeInclude('slicc_interface/RubyRequest.hh')
# External types
MakeInclude('common/Address.hh')
MakeInclude('common/DataBlock.hh')
MakeInclude('common/MachineID.hh')
MakeInclude('common/NetDest.hh')
MakeInclude('common/Set.hh')
MakeInclude('filters/GenericBloomFilter.hh')
MakeInclude('network/MessageBuffer.hh')
MakeInclude('structures/Prefetcher.hh')
MakeInclude('structures/CacheMemory.hh')
MakeInclude('structures/PageTableBuffer.hh')
MakeInclude('system/DMASequencer.hh')
MakeInclude('structures/DirectoryMemory.hh')
MakeInclude('structures/WireBuffer.hh')
MakeInclude('structures/PerfectCacheMemory.hh')
MakeInclude('structures/PersistentTable.hh')
MakeInclude('system/Sequencer.hh')
MakeInclude('structures/TBETable.hh')
MakeInclude('structures/TimerTable.hh')
| [
"heal@localhost.(none)"
] | heal@localhost.(none) | |
a50a9acd2b6bb3436866491f04bc2b1c6e3bdfcd | c8334686c9ec0cd78d3e72caeb31b660c59b718f | /predict_old.py | 70c41ce035fad4129e11a91b96538c2f1f568ee1 | [
"MIT"
] | permissive | feiliu23/picturesques.ai | 4d1ed7215545ccd3444c87cd3467eee8bfd5e45c | 261609c51118559ee3ce6b45a2bc7b5d9c73b34c | refs/heads/master | 2020-03-18T22:08:23.304008 | 2018-05-04T06:02:43 | 2018-05-04T06:02:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | import pandas as pd
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image
class ImagePredictor(object):
    """Scores images with a pre-trained torch model and ranks them.

    The model is expected to emit two-class logits; predict() returns the
    softmax probability of class 1.
    """

    def __init__(self, model_path):
        """Load the serialized model and build the input pipeline.

        Args:
            model_path: path to a torch-serialized model file.
        """
        self.model = torch.load(model_path)
        # NOTE(review): Resize(224) was commented out in favour of a fixed
        # 256 center crop; inputs smaller than 256x256 will fail -- confirm
        # upstream image sizes.
        self.transform = transforms.Compose(
            [transforms.CenterCrop(256),
             transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        self.use_gpu = torch.cuda.is_available()

    def predict(self, image_path):
        """Given an image path, return the predicted score of this image."""
        image = self.load_image(image_path)
        scores = self.model(image)
        # Manual softmax over the 1x2 logit row.
        scores = scores.exp() / (scores.exp().sum())
        # Bug fix: Tensor.numpy() raises on CUDA tensors, so move the
        # result to host memory first (a no-op on CPU).
        return scores.data.cpu().numpy()[0][1]

    def rank(self, image_path_list):
        """
        Given a list of path, return a list of indices that can sort the array in descending order
        See: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html
        """
        return np.argsort(np.array([-self.predict(x) for x in image_path_list]))

    def load_image(self, image_path):
        """Load an image file into a normalized 1x3x256x256 float tensor."""
        image = Image.open(image_path).convert('RGB')
        image = self.transform(image).float()
        image = Variable(image, requires_grad=True)
        image = image.unsqueeze(0)  # add batch dimension (VGG-style input)
        if self.use_gpu:
            return image.cuda()
        return image
if __name__ == '__main__':
m = ImagePredictor('cnn_model.pt')
print('Single Prediction: ', m.predict('images\\neg\\png0.jpg'))
image_list = ['images\\neg\\png0.jpg', 'images\\neg\\png1.jpg', 'images\\pos\\png11.jpg',
'images\\pos\\png18.jpg']
print('Ranking Prediction: ', m.rank(image_list)) | [
"cwang98@dons.usfca.edu"
] | cwang98@dons.usfca.edu |
1edb79a9fc5cdd76785d4f5fbdf777056346feff | 2bcc421ee345b00cf805c543b37d18b5d019dc04 | /adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/adafruit_io_simpletest.py | 13f48ce77609ae495c9aae8bea5cbbb6b5a5fc34 | [] | no_license | saewoonam/sc-current-source-titano | 5a1ad46889c1b09c168424901fd71cb4eab5c61b | 1c136aa8b61268d9ac0b5a682b30ece70ab87663 | refs/heads/main | 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,684 | py | # Example of using the Adafruit IO CircuitPython MQTT client
# to subscribe to an Adafruit IO feed and publish random data
# to be received by the feed.
#
# Example by Tony DiCola for Adafruit Industries
# Modified by Brent Rubell for Adafruit Industries, 2019
import time
from random import randint
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
import neopixel
import adafruit_minimqtt.adafruit_minimqtt as MQTT
from adafruit_io.adafruit_io import IO_MQTT
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# If you are using a board with pre-defined ESP32 Pins:
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
# If you have an externally connected ESP32:
# esp32_cs = DigitalInOut(board.D9)
# esp32_ready = DigitalInOut(board.D10)
# esp32_reset = DigitalInOut(board.D5)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(
board.NEOPIXEL, 1, brightness=0.2
) # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
# Uncomment below for an externally defined RGB LED
# import adafruit_rgbled
# from adafruit_esp32spi import PWMOut
# RED_LED = PWMOut.PWMOut(esp, 26)
# GREEN_LED = PWMOut.PWMOut(esp, 27)
# BLUE_LED = PWMOut.PWMOut(esp, 25)
# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Define callback functions which will be called when certain events happen.
# pylint: disable=unused-argument
def connected(client):
    """on_connect callback: announce the connection and subscribe to the
    DemoFeed feed via the Adafruit IO MQTT client passed in."""
    print("Connected to Adafruit IO! Listening for DemoFeed changes...")
    client.subscribe("DemoFeed")
def subscribe(client, userdata, topic, granted_qos):
    """on_subscribe callback: report the feed and the granted QoS level."""
    announcement = "Subscribed to {0} with QOS level {1}".format(topic, granted_qos)
    print(announcement)
def unsubscribe(client, userdata, topic, pid):
    """on_unsubscribe callback: report the feed and the message PID."""
    announcement = "Unsubscribed from {0} with PID {1}".format(topic, pid)
    print(announcement)
# pylint: disable=unused-argument
def disconnected(client):
    """on_disconnect callback: announce loss of the Adafruit IO link."""
    farewell = "Disconnected from Adafruit IO!"
    print(farewell)
# pylint: disable=unused-argument
def message(client, feed_id, payload):
    """on_message callback: print a subscribed feed's newly published value."""
    update = "Feed {0} received new value: {1}".format(feed_id, payload)
    print(update)
# Connect to WiFi
print("Connecting to WiFi...")
wifi.connect()
print("Connected!")
# Initialize MQTT interface with the esp interface
MQTT.set_socket(socket, esp)
# Initialize a new MQTT Client object
mqtt_client = MQTT.MQTT(
broker="io.adafruit.com",
username=secrets["aio_username"],
password=secrets["aio_key"],
)
# Initialize an Adafruit IO MQTT Client
io = IO_MQTT(mqtt_client)
# Connect the callback methods defined above to Adafruit IO
io.on_connect = connected
io.on_disconnect = disconnected
io.on_subscribe = subscribe
io.on_unsubscribe = unsubscribe
io.on_message = message
# Connect to Adafruit IO
print("Connecting to Adafruit IO...")
io.connect()
# Below is an example of manually publishing a new value to Adafruit IO.
# Timestamp of the last publish; 0 forces an immediate first publish.
last = 0
print("Publishing a new message every 10 seconds...")
while True:
    # Explicitly pump the message loop.
    io.loop()
    # Send a new message every 10 seconds.
    # NOTE(review): the interval below is 5 seconds, contradicting both the
    # comment above and the printed banner -- confirm the intended period.
    if (time.monotonic() - last) >= 5:
        value = randint(0, 100)
        print("Publishing {0} to DemoFeed.".format(value))
        io.publish("DemoFeed", value)
        last = time.monotonic()
| [
"nams@nist.gov"
] | nams@nist.gov |
eb65aa908bc3f8644df26af7356ffda6535785b4 | 2c77eb263a8ab47446dd218d63d67ab0ad362779 | /solarpv/utils.py | 41787a88a35d547f2e07bee6a3ed308ed2dfd1b1 | [
"MIT"
] | permissive | Lkruitwagen/solar-pv-global-inventory | 54bd6b09ef815d1bb723533ff675764f0b17bd4b | 9940a454de88a39ca92dbabf07e98d8623f0ec8b | refs/heads/master | 2023-09-06T07:58:34.519882 | 2021-11-25T08:55:36 | 2021-11-25T08:55:36 | 223,820,779 | 100 | 17 | null | null | null | null | UTF-8 | Python | false | false | 8,713 | py | import requests, json, os, logging, math
def download_file_from_google_drive(_id, destination):
    """Fetch a (possibly large) file from Google Drive by file id.

    Performs the usual two-step dance: an initial request, then -- if
    Drive answered with a virus-scan warning cookie -- a confirmed
    request, streaming the body to *destination* in chunks.
    """

    def _warning_token(response):
        """Return the download_warning cookie value, or None."""
        for cookie_name, cookie_value in response.cookies.items():
            if cookie_name.startswith('download_warning'):
                return cookie_value
        return None

    def _stream_to_disk(response, path):
        """Write the streamed response body to *path* in 32 KiB chunks."""
        with open(path, "wb") as sink:
            for chunk in response.iter_content(32768):
                # Keep-alive chunks arrive empty; skip them.
                if chunk:
                    sink.write(chunk)

    logging.info(f'Getting file id: {_id} from google drive')
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': _id}, stream=True)
    token = _warning_token(response)
    if token:
        response = session.get(URL, params={'id': _id, 'confirm': token}, stream=True)
    logging.info(f'Saving file to {destination}')
    _stream_to_disk(response, destination)
def exists_or_download(fpath):
    """Return *fpath*, downloading it from Google Drive first if absent.

    The drive id is looked up in ./drive_ids.json, which maps local file
    paths to Google Drive file ids.  Improvements over the original: the
    mapping file is opened with a context manager (the handle was leaked)
    and is only read when a download is actually needed.

    Args:
        fpath: local path of the wanted file.

    Returns:
        str: the same *fpath*, guaranteed to exist on success.

    Raises:
        KeyError: if *fpath* has no entry in drive_ids.json.
    """
    if os.path.exists(fpath):
        return fpath
    logging.info('No file found... downloading from drive.')
    with open('./drive_ids.json', 'r') as id_file:
        drive_ids = json.load(id_file)
    download_file_from_google_drive(drive_ids[fpath], fpath)
    return fpath
def exists_or_mkdir(fpath):
    """Create directory *fpath* (and any parents) unless it already exists."""
    if os.path.exists(fpath):
        return
    logging.info(f'Making new path: {fpath}')
    os.makedirs(fpath)
def V_inv(point1, point2, miles=False):
    """Vincenty inverse solution on the WGS-84 ellipsoid.

    Computes the geodesic between two (latitude, longitude) points given
    in degrees.

    Args:
        point1: (latitude, longitude) of the start point, degrees.
        point2: (latitude, longitude) of the end point, degrees.
        miles: if True, return the distance in miles instead of km.

    Returns:
        (distance, azimuth1, azimuth2): distance in kilometers (or miles)
        rounded to 6 decimals, plus the forward azimuths at each endpoint
        in degrees [0, 360).  Coincident points yield (0.0, 0, 0); None is
        returned when the iteration fails to converge (near-antipodal
        points).  The original docstring claimed a bare distance was
        returned; it has always been this 3-tuple.

    Doctest values (distance component):
        (0,0)->(0,1):  111.319491
        (0,0)->(1,0):  110.574389
    """
    # WGS-84 ellipsoid parameters.
    a = 6378137  # semi-major axis, meters
    f = 1 / 298.257223563  # flattening
    b = 6356752.314245  # semi-minor axis, meters; b = (1 - f) * a

    MILES_PER_KILOMETER = 0.621371
    MAX_ITERATIONS = 200
    CONVERGENCE_THRESHOLD = 1e-12

    # Short-circuit coincident points.
    if point1[0] == point2[0] and point1[1] == point2[1]:
        return 0.0, 0, 0

    # Reduced latitudes and longitude difference.
    U1 = math.atan((1 - f) * math.tan(math.radians(point1[0])))
    U2 = math.atan((1 - f) * math.tan(math.radians(point2[0])))
    L = math.radians(point2[1] - point1[1])
    Lambda = L

    sinU1, cosU1 = math.sin(U1), math.cos(U1)
    sinU2, cosU2 = math.sin(U2), math.cos(U2)

    for _ in range(MAX_ITERATIONS):
        sinLambda, cosLambda = math.sin(Lambda), math.cos(Lambda)
        sinSigma = math.sqrt((cosU2 * sinLambda) ** 2 +
                             (cosU1 * sinU2 - sinU1 * cosU2 * cosLambda) ** 2)
        if sinSigma == 0:
            # Coincident points detected during iteration.  Bug fix: the
            # original returned a bare 0.0 here, unlike the 3-tuple used
            # everywhere else.
            return 0.0, 0, 0
        cosSigma = sinU1 * sinU2 + cosU1 * cosU2 * cosLambda
        sigma = math.atan2(sinSigma, cosSigma)
        sinAlpha = cosU1 * cosU2 * sinLambda / sinSigma
        cosSqAlpha = 1 - sinAlpha ** 2
        try:
            cos2SigmaM = cosSigma - 2 * sinU1 * sinU2 / cosSqAlpha
        except ZeroDivisionError:
            # Equatorial geodesic: alpha == 90 degrees.
            cos2SigmaM = 0
        C = f / 16 * cosSqAlpha * (4 + f * (4 - 3 * cosSqAlpha))
        LambdaPrev = Lambda
        Lambda = L + (1 - C) * f * sinAlpha * (sigma + C * sinSigma *
                                               (cos2SigmaM + C * cosSigma *
                                                (-1 + 2 * cos2SigmaM ** 2)))
        if abs(Lambda - LambdaPrev) < CONVERGENCE_THRESHOLD:
            break  # successful convergence
    else:
        return None  # failure to converge

    # Ellipsoidal distance along the geodesic.
    uSq = cosSqAlpha * (a ** 2 - b ** 2) / (b ** 2)
    A = 1 + uSq / 16384 * (4096 + uSq * (-768 + uSq * (320 - 175 * uSq)))
    B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))
    deltaSigma = B * sinSigma * (cos2SigmaM + B / 4 * (cosSigma *
                 (-1 + 2 * cos2SigmaM ** 2) - B / 6 * cos2SigmaM *
                 (-3 + 4 * sinSigma ** 2) * (-3 + 4 * cos2SigmaM ** 2)))
    s = b * A * (sigma - deltaSigma)

    # Forward azimuth at point1, normalized to [0, 2*pi).
    num = math.cos(U2) * math.sin(Lambda)
    den = math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(Lambda)
    alpha1 = math.atan2(num, den)
    if alpha1 < 0:
        alpha1 += 2 * math.pi

    # Forward azimuth at point2, normalized to [0, 2*pi).
    num = math.cos(U1) * math.sin(Lambda)
    den = -1.0 * math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(Lambda)
    alpha2 = math.atan2(num, den)
    if alpha2 < 0:
        alpha2 += 2 * math.pi

    s /= 1000  # meters to kilometers
    if miles:
        s *= MILES_PER_KILOMETER  # kilometers to miles
    return round(s, 6), math.degrees(alpha1), math.degrees(alpha2)
def V_dir(point1, s, alpha1, miles=False):
    """Vincenty direct solution on the WGS-84 ellipsoid.

    Given a start point, an initial azimuth and a distance, compute the
    destination point and the final azimuth.  Dead commented-out debug
    prints were removed and the stray non-convergence print was replaced
    by a logging call, matching the rest of this module.

    Args:
        point1: (latitude, longitude) of the start point, degrees.
        s: distance to travel along the geodesic, in meters.
        alpha1: initial azimuth (bearing) at point1, degrees.
        miles: unused; kept for signature compatibility.

    Returns:
        ((lat2, lon2), azimuth2) in degrees, or None when the distance
        iteration fails to converge.
    """
    # WGS-84 ellipsoid parameters.
    a = 6378137  # semi-major axis, meters
    f = 1 / 298.257223563  # flattening
    b = 6356752.314245  # semi-minor axis, meters; b = (1 - f) * a

    MAX_ITERATIONS = 200
    CONVERGENCE_THRESHOLD = 1e-12

    alpha1 = math.radians(alpha1)
    U1 = math.atan((1.0 - f) * math.tan(math.radians(point1[0])))
    sigma1 = math.atan2(math.tan(U1), math.cos(alpha1))
    sinAlpha = math.cos(U1) * math.sin(alpha1)
    cosSqAlpha = 1.0 - sinAlpha ** 2
    uSq = cosSqAlpha * (a ** 2 - b ** 2) / (b ** 2)
    A = 1 + uSq / 16384.0 * (4096.0 + uSq * (-768.0 + uSq * (320.0 - 175 * uSq)))
    B = uSq / 1024 * (256 + uSq * (-128 + uSq * (74 - 47 * uSq)))

    # Iterate sigma, the angular distance on the auxiliary sphere.
    sigma = s / b / A
    for _ in range(MAX_ITERATIONS):
        sigma2m = 2 * sigma1 + sigma
        deltasigma = B * math.sin(sigma) * (
            math.cos(sigma2m) + 1.0 / 4 * B * (
                math.cos(sigma) * (-1 + 2 * (math.cos(sigma2m) ** 2)) -
                1.0 / 6 * B * math.cos(sigma2m) *
                (-3 + 4 * (math.sin(sigma) ** 2)) *
                (-3 + 4 * (math.cos(sigma2m) ** 2))))
        sigmaprev = sigma
        sigma = s / b / A + deltasigma
        if abs(sigma - sigmaprev) < CONVERGENCE_THRESHOLD:
            break  # successful convergence
    else:
        logging.warning('V_dir failed to converge')
        return None  # failure to converge

    # Destination latitude.
    num = math.sin(U1) * math.cos(sigma) + math.cos(U1) * math.sin(sigma) * math.cos(alpha1)
    den = (1.0 - f) * math.sqrt(
        sinAlpha ** 2 +
        (math.sin(U1) * math.sin(sigma) - math.cos(U1) * math.cos(sigma) * math.cos(alpha1)) ** 2)
    lat2 = math.atan2(num, den)

    # Longitude difference on the auxiliary sphere, then on the ellipsoid.
    num = math.sin(sigma) * math.sin(alpha1)
    den = math.cos(U1) * math.cos(sigma) - math.sin(U1) * math.sin(sigma) * math.cos(alpha1)
    Lambda = math.atan2(num, den)
    C = f / 16.0 * (cosSqAlpha * (4 + f * (4.0 - 3.0 * cosSqAlpha)))
    L = Lambda - (1.0 - C) * f * sinAlpha * (
        sigma + C * math.sin(sigma) * (
            math.cos(sigma2m) + C * math.cos(sigma) * (-1 + 2.0 * (math.cos(sigma2m) ** 2))))
    L2 = math.radians(point1[1]) + L

    # Final azimuth, normalized to [0, 2*pi).
    num = sinAlpha
    den = -1 * math.sin(U1) * math.sin(sigma) + math.cos(U1) * math.cos(sigma) * math.cos(alpha1)
    alpha2 = math.atan2(num, den)
    if alpha2 < 0:
        alpha2 += math.pi * 2

    return (math.degrees(lat2), math.degrees(L2)), math.degrees(alpha2)
def get_utm_zone(lat, lon):
    """Return the UTM longitude-zone number (as a string) for a lat/lon.

    Applies the two standard exceptions to the plain 6-degree bands: the
    widened zone 32 over south-west Norway, and the irregular zones
    31/33/35/37 over Svalbard (72-84 deg N).

    Note: the original's final ``return`` line was corrupted by dataset
    metadata fused onto it; this restores a syntactically valid return.
    The bitwise ``&`` between comparisons was also replaced with ``and``
    (identical on plain bools, and short-circuiting).

    Args:
        lat: latitude in degrees.
        lon: longitude in degrees.

    Returns:
        str: UTM zone number, e.g. '31'.
    """
    # Regular case: 6-degree bands counted eastwards from 180 deg W.
    zone = int((lon + 180) / 6) + 1

    # Norway exception: zone 32 is widened between 56N and 64N.
    if 56. <= lat < 64. and 3. <= lon < 12.:
        return '32'

    # Svalbard exceptions: only zones 31/33/35/37 exist between 72N-84N.
    if 72. <= lat < 84.:
        if 0. <= lon < 9.:
            return '31'
        if 9. <= lon < 21.:
            return '33'
        if 21. <= lon < 33.:
            return '35'
        if 33. <= lon < 42.:
            return '37'

    return str(zone)
"lucas.kruitwagen@gmail.com"
] | lucas.kruitwagen@gmail.com |
f49434213b1ef00aa5e79b01f4dfce6fdee2fa7a | 759d180ac42a74a9291a1fafd86f120226224f6e | /file.py | cf5c3ab8b666fe9248ddcab5e7f27ec562a97429 | [] | no_license | Iaraseverino/my-first-repo | 817bfe6782289b2bf651ca4bf8913902ea2fa0e6 | 804956b2b79513b590d9686a2c072e9ce3500521 | refs/heads/main | 2022-12-25T22:32:22.006835 | 2020-10-13T20:27:15 | 2020-10-13T20:27:15 | 302,570,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 10:17:10 2020
@author: Iari Severino
"""
| [
"iari_Severino@hotmail.com"
] | iari_Severino@hotmail.com |
2ee2d1a1527594c2438196891250d5c469b88f2d | 43ab9b064ab92ac8487bcaa5d0f6546b9483bbec | /python/mouse.py | 5fa7fad3df42cb7a5352029415bd2f6a837022da | [] | no_license | uhbad/micromouseIFC | fc52975d15ab2d1a67985af3e1e447a9c2e5a491 | fc767f29e46f8f1ef4dcc2c6cad7b8d4f3dbc376 | refs/heads/master | 2021-01-20T21:49:08.098996 | 2016-09-17T21:40:02 | 2016-09-17T21:40:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | class Mouse:
def __init__(self, start_wall, n):
self.position = #start position in maze
self.direction = #will be four different directions
self.n = n #number of squares in the maze
| [
"jon.hightower.310@gmail.com"
] | jon.hightower.310@gmail.com |
112fe187347b14db8e486b104480e002a756dd8c | 7ae32748fb910d2542e35c57543fc89f98cd2b1d | /tests/test_lib.py | e9e421020e8b554caa7f433988afc2ac71c66236 | [
"Apache-2.0"
] | permissive | sanjaymsh/dtfabric | 451c87d987f438fccfbb999079d2f55d01650b68 | 9e216f90b70d8a3074b2125033e0773e3e482355 | refs/heads/master | 2022-12-19T09:13:02.370724 | 2020-09-27T05:11:25 | 2020-09-27T05:11:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | # -*- coding: utf-8 -*-
"""Shared test case."""
from __future__ import unicode_literals
import os
import sys
import unittest
from dtfabric import reader
from dtfabric import registry
def skipUnlessHasTestFile(path_segments):  # pylint: disable=invalid-name
  """Decorator to skip a test if the test file does not exist.

  Args:
    path_segments (list[str]): path segments inside the test data directory.

  Returns:
    function: an identity decorator when the file exists (or skipping is
        globally disabled), otherwise a unittest.skip decorator.
  """
  path = os.path.join('test_data', *path_segments)
  skipping_disabled = getattr(unittest, 'fail_unless_has_test_file', False)
  if skipping_disabled or os.path.exists(path):
    return lambda function: function

  if sys.version_info[0] < 3:
    # Python 2 expects a byte-string skip message, Python 3 a str.
    path = path.encode('utf-8')

  message = 'missing test file: {0:s}'.format(path)
  return unittest.skip(message)
class BaseTestCase(unittest.TestCase):
  """Shared base class for dtfabric test cases.

  Provides helpers to locate files under the test data directory and to
  build data type definition registries from YAML definition files.
  """

  _TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')

  # Show full diff results, part of TestCase so does not follow our naming
  # conventions.
  maxDiff = None

  def _CreateDefinitionRegistryFromFile(self, path):
    """Creates a data type definition registry from a file.

    Args:
      path (str): path to the data definition file.

    Returns:
      DataTypeDefinitionsRegistry: the freshly filled registry, or None
          on error.
    """
    new_registry = registry.DataTypeDefinitionsRegistry()
    self._FillDefinitionRegistryFromFile(new_registry, path)
    return new_registry

  def _FillDefinitionRegistryFromFile(self, definitions_registry, path):
    """Fills a data type definition registry from a file.

    Args:
      definitions_registry (DataTypeDefinitionsRegistry): registry to fill.
      path (str): path to the data definition file.
    """
    yaml_reader = reader.YAMLDataTypeDefinitionsFileReader()
    with open(path, 'rb') as definition_file:
      yaml_reader.ReadFileObject(definitions_registry, definition_file)

  def _GetTestFilePath(self, path_segments):
    """Retrieves the path of a test file in the test data directory.

    Args:
      path_segments (list[str]): path segments inside the test data
          directory.

    Returns:
      str: path of the test file.
    """
    # os.path.join needs the segments unpacked, not passed as a list.
    return os.path.join(self._TEST_DATA_PATH, *path_segments)
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
5df1a79f2d4f00c0022fddc64d2a4fbb0d6b3bf8 | a3f1b4e8cd827421bba3c34031535702232eb419 | /public/neumeeditor/serializers/user.py | b4959b8d7df2e5e675e7af89ae3ceb87bfa1e326 | [] | permissive | jacobsanz97/cantus | dbb888b7d511abe63cc0c5e77b11381e8e895360 | b97033ca34fe1389a296560496d31c2f75c098a2 | refs/heads/master | 2020-08-22T12:33:25.852200 | 2019-03-11T20:44:45 | 2019-03-11T20:44:45 | 216,396,078 | 0 | 0 | MIT | 2019-10-20T16:58:22 | 2019-10-20T16:58:21 | null | UTF-8 | Python | false | false | 208 | py | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializes Django auth User objects, exposing only id and username."""
    class Meta:
        # Restrict the API representation to non-sensitive fields.
        model = User
        fields = ('id', 'username')
| [
"andrew.f.fogarty@gmail.com"
] | andrew.f.fogarty@gmail.com |
26903997659e0a6ffeafaf3ae4e966b68f912e5f | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/management/commands/update_course_outline.py | b3ba3bd289199b663c7d1951a01790bf3d31bc50 | [
"MIT",
"AGPL-3.0-only",
"AGPL-3.0-or-later"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 867 | py | """
Management command to create the course outline for a course. This is done
automatically when Studio publishes a course, but this command can be used to
do it manually for debugging, error recovery, or backfilling purposes.
Should be invoked from the Studio process.
"""
from django.core.management.base import BaseCommand
from opaque_keys.edx.keys import CourseKey
from ...tasks import update_outline_from_modulestore
class Command(BaseCommand):
"""
Invoke with:
python manage.py cms update_course_outline <course_key>
"""
help = "Updates a single course outline based on modulestore content."
def add_arguments(self, parser):
parser.add_argument('course_key')
def handle(self, *args, **options):
course_key = CourseKey.from_string(options['course_key'])
update_outline_from_modulestore(course_key)
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
b05483ba8c1ad06505e52f1248e4d3046941e926 | 0537a1dcfd7580ac4e8ee472c22ece352c010ef6 | /PlantaGUIFuentes/env/bin/flask | e0bddf37e3a08ae0babeac5103bd691c12a2537d | [] | no_license | CamiloSanchez0312/Proyecto2Complejidad | a8e935f0f613b93770f751a8d1589769cbad7fd4 | 9b072540838756759ee9a0ed978b980a1db746ba | refs/heads/master | 2023-01-02T19:55:44.663823 | 2020-10-31T04:24:56 | 2020-10-31T04:24:56 | 308,413,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | #!/home/sanrop/Complejidad/project2/PlantaGUIFuentes/Proyecto2Complejidad/PlantaGUIFuentes/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"santiago.19988@gmail.com"
] | santiago.19988@gmail.com | |
51c55ec4d3adb87bc769bf5e76a5abfeeda74e4f | 9e567b8241ce00e9d53843f5aba11c4a119b079f | /tags/v0_5_2/toolkits/basemap/lib/matplotlib/toolkits/basemap/greatcircle.py | 2e205689bff14ecd1a2a0c9c1f4120bfd5ffb277 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-us-govt-public-domain",
"MIT"
] | permissive | neilpanchal/matplotlib | 3d2a7133e858c4eefbb6c2939eb3f7a328b18118 | 7565d1f2943e0e7b4a3f11ce692dfb9b548d0b83 | refs/heads/master | 2020-06-11T09:20:43.941323 | 2011-01-21T21:50:16 | 2011-01-21T21:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,660 | py | import numarray as N
import math
__version__ = '1.0'
class GreatCircle:
"""
formula for perfect sphere from Ed Williams' 'Aviation Formulary'
(http://williams.best.vwh.net/avform.htm)
code for ellipsoid posted to GMT mailing list by Jim Leven in Dec 1999
Version: %s
Contact: Jeff Whitaker <jeffrey.s.whitaker@noaa.gov>
""" % __version__
def __init__(self,rmajor,rminor,lon1,lat1,lon2,lat2):
"""
Define a great circle by specifying:
rmajor - radius of major axis of ellipsoid
rminor - radius of minor axis of ellipsoid.
lon1 - starting longitude of great circle
lat1 - starting latitude
lon2 - ending longitude
lat2 - ending latitude
All must be given in degrees.
Instance variables:
distance - distance along great circle in radians.
lon1,lat1,lon2,lat2 - start and end points (in radians).
"""
# convert to radians from degrees.
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
self.a = rmajor
self.f = (rmajor-rminor)/rmajor
self.lat1 = lat1
self.lat2 = lat2
self.lon1 = lon1
self.lon2 = lon2
# distance along geodesic in meters.
d,a12,a21 = vinc_dist(self.f, self.a, lat1, lon1, lat2, lon2 )
self.distance = d
self.azimuth12 = a12
self.azimuth21 = a21
# great circle arc-length distance (in radians).
self.gcarclen = 2.*math.asin(math.sqrt((math.sin((lat1-lat2)/2))**2+\
math.cos(lat1)*math.cos(lat2)*(math.sin((lon1-lon2)/2))**2))
# check to see if points are antipodal (if so, route is undefined).
if self.gcarclen == math.pi:
self.antipodal = True
else:
self.antipodal = False
def points(self,npoints):
"""
compute arrays of npoints equally spaced
intermediate points along the great circle.
input parameter npoints is the number of points
to compute.
Returns lons, lats (lists with longitudes and latitudes
of intermediate points in degrees).
For example npoints=10 will return arrays lons,lats of 10
equally spaced points along the great circle.
"""
# must ask for at least 2 points.
if npoints <= 1:
raise ValueError,'npoints must be greater than 1'
elif npoints == 2:
return [math.degrees(self.lon1),math.degrees(self.lon2)],[math.degrees(self.lat1),math.degrees(self.lat2)]
# can't do it if endpoints are antipodal, since
# route is undefined.
if self.antipodal:
raise ValueError,'cannot compute intermediate points on a great circle whose endpoints are antipodal'
d = self.gcarclen
delta = 1.0/(npoints-1)
f = delta*N.arange(npoints) # f=0 is point 1, f=1 is point 2.
incdist = self.distance/(npoints-1)
lat1 = self.lat1
lat2 = self.lat2
lon1 = self.lon1
lon2 = self.lon2
# perfect sphere, use great circle formula
if self.f == 0.:
A = N.sin((1-f)*d)/math.sin(d)
B = N.sin(f*d)/math.sin(d)
x = A*math.cos(lat1)*math.cos(lon1)+B*math.cos(lat2)*math.cos(lon2)
y = A*math.cos(lat1)*math.sin(lon1)+B*math.cos(lat2)*math.sin(lon2)
z = A*math.sin(lat1) +B*math.sin(lat2)
lats=N.arctan2(z,N.sqrt(x**2+y**2))
lons=N.arctan2(y,x)
lons = map(math.degrees,lons.tolist())
lats = map(math.degrees,lats.tolist())
# use ellipsoid formulas
else:
latpt = self.lat1
lonpt = self.lon1
azimuth = self.azimuth12
lons = [math.degrees(lonpt)]
lats = [math.degrees(latpt)]
for n in range(npoints-2):
latptnew,lonptnew,alpha21=vinc_pt(self.f,self.a,latpt,lonpt,azimuth,incdist)
d,azimuth,a21=vinc_dist(self.f,self.a,latptnew,lonptnew,lat2,lon2)
lats.append(math.degrees(latptnew))
lons.append(math.degrees(lonptnew))
latpt = latptnew; lonpt = lonptnew
lons.append(math.degrees(self.lon2))
lats.append(math.degrees(self.lat2))
return lons,lats
#
# ---------------------------------------------------------------------
# | |
# | geodetic.py - a collection of geodetic functions |
# | |
# ---------------------------------------------------------------------
#
#
# ----------------------------------------------------------------------
# | Algrothims from Geocentric Datum of Australia Technical Manual |
# | |
# | http://www.anzlic.org.au/icsm/gdatum/chapter4.html |
# | |
# | This page last updated 11 May 1999 |
# | |
# | Computations on the Ellipsoid |
# | |
# | There are a number of formulae that are available |
# | to calculate accurate geodetic positions, |
# | azimuths and distances on the ellipsoid. |
# | |
# | Vincenty's formulae (Vincenty, 1975) may be used |
# | for lines ranging from a few cm to nearly 20,000 km, |
# | with millimetre accuracy. |
# | The formulae have been extensively tested |
# | for the Australian region, by comparison with results |
# | from other formulae (Rainsford, 1955 & Sodano, 1965). |
# | |
# | * Inverse problem: azimuth and distance from known |
# | latitudes and longitudes |
# | * Direct problem: Latitude and longitude from known |
# | position, azimuth and distance. |
# | * Sample data |
# | * Excel spreadsheet |
# | |
# | Vincenty's Inverse formulae |
# | Given: latitude and longitude of two points |
# | (phi1, lembda1 and phi2, lembda2), |
# | Calculate: the ellipsoidal distance (s) and |
# | forward and reverse azimuths between the points (alpha12, alpha21). |
# | |
# ----------------------------------------------------------------------
def vinc_dist( f, a, phi1, lembda1, phi2, lembda2 ) :
"""
Returns the distance between two geographic points on the ellipsoid
and the forward and reverse azimuths between these points.
lats, longs and azimuths are in radians, distance in metres
Returns ( s, alpha12, alpha21 ) as a tuple
"""
if (abs( phi2 - phi1 ) < 1e-8) and ( abs( lembda2 - lembda1) < 1e-8 ) :
return 0.0, 0.0, 0.0
two_pi = 2.0*math.pi
piD4 = two_pi/8.0
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan( phi1 )
TanU2 = (1-f) * math.tan( phi2 )
U1 = math.atan(TanU1)
U2 = math.atan(TanU2)
lembda = lembda2 - lembda1
last_lembda = -4000000.0 # an impossibe value
omega = lembda
# Iterate the following equations,
# until there is no significant change in lembda
while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :
sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \
pow( (math.cos(U1) * math.sin(U2) - \
math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )
Sin_sigma = math.sqrt( sqr_sin_sigma )
Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)
sigma = math.atan2( Sin_sigma, Cos_sigma )
Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
alpha = math.asin( Sin_alpha )
Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )
C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))
last_lembda = lembda
lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \
(Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))
u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)
A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))
delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \
(Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \
(B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \
(-3 + 4 * pow(Cos2sigma_m,2 ) )))
s = b * A * (sigma - delta_sigma)
alpha12 = math.atan2( (math.cos(U2) * math.sin(lembda)), \
(math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))
alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \
(-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return s, alpha12, alpha21
# END of Vincenty's Inverse formulae
#-------------------------------------------------------------------------------
# Vincenty's Direct formulae |
# Given: latitude and longitude of a point (phi1, lembda1) and |
# the geodetic azimuth (alpha12) |
# and ellipsoidal distance in metres (s) to a second point, |
# |
# Calculate: the latitude and longitude of the second point (phi2, lembda2) |
# and the reverse azimuth (alpha21). |
# |
#-------------------------------------------------------------------------------
def vinc_pt( f, a, phi1, lembda1, alpha12, s ) :
"""
Returns the lat and long of projected point and reverse azimuth
given a reference point and a distance and azimuth to project.
lats, longs and azimuths are passed in decimal degrees
Returns ( phi2, lambda2, alpha21 ) as a tuple
"""
two_pi = 2.0*math.pi
piD4 = math.pi/4.0
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan(phi1)
U1 = math.atan( TanU1 )
sigma1 = math.atan2( TanU1, math.cos(alpha12) )
Sinalpha = math.cos(U1) * math.sin(alpha12)
cosalpha_sq = 1.0 - Sinalpha * Sinalpha
u2 = cosalpha_sq * (a * a - b * b ) / (b * b)
A = 1.0 + (u2 / 16384) * (4096 + u2 * (-768 + u2 * \
(320 - 175 * u2) ) )
B = (u2 / 1024) * (256 + u2 * (-128 + u2 * (74 - 47 * u2) ) )
# Starting with the approximation
sigma = (s / (b * A))
last_sigma = 2.0 * sigma + 2.0 # something impossible
# Iterate the following three equations
# until there is no significant change in sigma
# two_sigma_m , delta_sigma
while ( abs( (last_sigma - sigma) / sigma) > 1.0e-9 ) :
two_sigma_m = 2 * sigma1 + sigma
delta_sigma = B * math.sin(sigma) * ( math.cos(two_sigma_m) \
+ (B/4) * (math.cos(sigma) * \
(-1 + 2 * math.pow( math.cos(two_sigma_m), 2 ) - \
(B/6) * math.cos(two_sigma_m) * \
(-3 + 4 * math.pow(math.sin(sigma), 2 )) * \
(-3 + 4 * math.pow( math.cos (two_sigma_m), 2 ))))) \
last_sigma = sigma
sigma = (s / (b * A)) + delta_sigma
phi2 = math.atan2 ( (math.sin(U1) * math.cos(sigma) + math.cos(U1) * math.sin(sigma) * math.cos(alpha12) ), \
((1-f) * math.sqrt( math.pow(Sinalpha, 2) + \
pow(math.sin(U1) * math.sin(sigma) - math.cos(U1) * math.cos(sigma) * math.cos(alpha12), 2))))
lembda = math.atan2( (math.sin(sigma) * math.sin(alpha12 )), (math.cos(U1) * math.cos(sigma) - \
math.sin(U1) * math.sin(sigma) * math.cos(alpha12)))
C = (f/16) * cosalpha_sq * (4 + f * (4 - 3 * cosalpha_sq ))
omega = lembda - (1-C) * f * Sinalpha * \
(sigma + C * math.sin(sigma) * (math.cos(two_sigma_m) + \
C * math.cos(sigma) * (-1 + 2 * math.pow(math.cos(two_sigma_m),2) )))
lembda2 = lembda1 + omega
alpha21 = math.atan2 ( Sinalpha, (-math.sin(U1) * math.sin(sigma) + \
math.cos(U1) * math.cos(sigma) * math.cos(alpha12)))
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return phi2, lembda2, alpha21
# END of Vincenty's Direct formulae
##---------------------------------------------------------------------------
# Notes:
#
# * "The inverse formulae may give no solution over a line
# between two nearly antipodal points. This will occur when
# lembda ... is greater than pi in absolute value". (Vincenty, 1975)
#
# * In Vincenty (1975) L is used for the difference in longitude,
# however for consistency with other formulae in this Manual,
# omega is used here.
#
# * Variables specific to Vincenty's formulae are shown below,
# others common throughout the manual are shown in the Glossary.
#
#
# alpha = Azimuth of the geodesic at the equator
# U = Reduced latitude
# lembda = Difference in longitude on an auxiliary sphere (lembda1 & lembda2
# are the geodetic longitudes of points 1 & 2)
# sigma = Angular distance on a sphere, from point 1 to point 2
# sigma1 = Angular distance on a sphere, from the equator to point 1
# sigma2 = Angular distance on a sphere, from the equator to point 2
# sigma_m = Angular distance on a sphere, from the equator to the
# midpoint of the line from point 1 to point 2
# u, A, B, C = Internal variables
#
#
# Sample Data
#
# Flinders Peak
# -37o57'03.72030"
# 144o25'29.52440"
# Buninyong
# -37o39'10.15610"
# 143o55'35.38390"
# Ellipsoidal Distance
# 54,972.271 m
#
# Forward Azimuth
# 306o52'05.37"
#
# Reverse Azimuth
# 127o10'25.07"
#
#
##*******************************************************************
# Test driver
if __name__ == "__main__" :
# WGS84
a = 6378137.0
b = 6356752.3142
f = (a-b)/a
print "\n Ellipsoidal major axis = %12.3f metres\n" % ( a )
print "\n Inverse flattening = %15.9f\n" % ( 1.0/f )
print "\n Test Flinders Peak to Buninyon"
print "\n ****************************** \n"
phi1 = -(( 3.7203 / 60. + 57) / 60. + 37 )
lembda1 = ( 29.5244 / 60. + 25) / 60. + 144
print "\n Flinders Peak = %12.6f, %13.6f \n" % ( phi1, lembda1 )
deg = int(phi1)
min = int(abs( ( phi1 - deg) * 60.0 ))
sec = abs(phi1 * 3600 - deg * 3600) - min * 60
print " Flinders Peak = %3i\xF8%3i\' %6.3f\", " % ( deg, min, sec ),
deg = int(lembda1)
min = int(abs( ( lembda1 - deg) * 60.0 ))
sec = abs(lembda1 * 3600 - deg * 3600) - min * 60
print " %3i\xF8%3i\' %6.3f\" \n" % ( deg, min, sec )
phi2 = -(( 10.1561 / 60. + 39) / 60. + 37 )
lembda2 = ( 35.3839 / 60. + 55) / 60. + 143
print "\n Buninyon = %12.6f, %13.6f \n" % ( phi2, lembda2 )
deg = int(phi2)
min = int(abs( ( phi2 - deg) * 60.0 ))
sec = abs(phi2 * 3600 - deg * 3600) - min * 60
print " Buninyon = %3i\xF8%3i\' %6.3f\", " % ( deg, min, sec ),
deg = int(lembda2)
min = int(abs( ( lembda2 - deg) * 60.0 ))
sec = abs(lembda2 * 3600 - deg * 3600) - min * 60
print " %3i\xF8%3i\' %6.3f\" \n" % ( deg, min, sec )
dist, alpha12, alpha21 = vinc_dist ( f, a, math.radians(phi1), math.radians(lembda1), math.radians(phi2), math.radians(lembda2) )
alpha12 = math.degrees(alpha12)
alpha21 = math.degrees(alpha21)
print "\n Ellipsoidal Distance = %15.3f metres\n should be 54972.271 m\n" % ( dist )
print "\n Forward and back azimuths = %15.6f, %15.6f \n" % ( alpha12, alpha21 )
deg = int(alpha12)
min = int( abs(( alpha12 - deg) * 60.0 ) )
sec = abs(alpha12 * 3600 - deg * 3600) - min * 60
print " Forward azimuth = %3i\xF8%3i\' %6.3f\"\n" % ( deg, min, sec )
deg = int(alpha21)
min = int(abs( ( alpha21 - deg) * 60.0 ))
sec = abs(alpha21 * 3600 - deg * 3600) - min * 60
print " Reverse azimuth = %3i\xF8%3i\' %6.3f\"\n" % ( deg, min, sec )
# Test the direct function */
phi1 = -(( 3.7203 / 60. + 57) / 60. + 37 )
lembda1 = ( 29.5244 / 60. + 25) / 60. + 144
dist = 54972.271
alpha12 = ( 5.37 / 60. + 52) / 60. + 306
phi2 = lembda2 = 0.0
alpha21 = 0.0
phi2, lembda2, alpha21 = vinc_pt ( f, a, math.radians(phi1), math.radians(lembda1), math.radians(alpha12), dist )
phi2 = math.degrees(phi2)
lembda2 = math.degrees(lembda2)
alpha21 = math.degrees(alpha21)
print "\n Projected point =%11.6f, %13.6f \n" % ( phi2, lembda2 )
deg = int(phi2)
min = int(abs( ( phi2 - deg) * 60.0 ))
sec = abs( phi2 * 3600 - deg * 3600) - min * 60
print " Projected Point = %3i\xF8%3i\' %6.3f\", " % ( deg, min, sec ),
deg = int(lembda2)
min = int(abs( ( lembda2 - deg) * 60.0 ))
sec = abs(lembda2 * 3600 - deg * 3600) - min * 60
print " %3i\xF8%3i\' %6.3f\"\n" % ( deg, min, sec )
print " Should be Buninyon \n"
print "\n Reverse azimuth = %10.6f \n" % ( alpha21 )
deg = int(alpha21)
min = int(abs( ( alpha21 - deg) * 60.0 ))
sec = abs(alpha21 * 3600 - deg * 3600) - min * 60
print " Reverse azimuth = %3i\xF8%3i\' %6.3f\"\n\n" % ( deg, min, sec )
# lat/lon of New York
lat1 = 40.78
lon1 = -73.98
# lat/lon of London.
lat2 = 51.53
lon2 = 0.08
print 'New York to London:'
gc = GreatCircle((2*a+b)/3.,(2*a+b)/3.,lon1,lat1,lon2,lat2)
print 'geodesic distance using a sphere with WGS84 mean radius = ',gc.distance
print 'lon/lat for 10 equally spaced points along geodesic:'
lons,lats = gc.points(10)
for lon,lat in zip(lons,lats):
print lon,lat
gc = GreatCircle(a,b,lon1,lat1,lon2,lat2)
print 'geodesic distance using WGS84 ellipsoid = ',gc.distance
print 'lon/lat for 10 equally spaced points along geodesic:'
lons,lats = gc.points(10)
for lon,lat in zip(lons,lats):
print lon,lat
| [
"(no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed"
] | (no author)@f61c4167-ca0d-0410-bb4a-bb21726e55ed |
0731338214b7d071ebe70967a84fe483b50203e3 | 2c957e97817d07f6a618140d374022328d51b840 | /newsWeb/newsWeb/urls.py | 1073ca5bcd50cd1d02ea1f2df213cb39d1b7e72f | [] | no_license | HorribleMe/thssxsy.github.io | a8906b0c220872b3f2773a93f96a6985e8d28477 | 00aee839697c19461b090a608127480f2fe6f677 | refs/heads/master | 2020-12-25T15:29:08.351945 | 2018-05-11T10:32:07 | 2018-05-11T10:32:07 | 62,387,566 | 0 | 3 | null | 2016-07-17T08:32:49 | 2016-07-01T11:12:15 | CSS | UTF-8 | Python | false | false | 1,202 | py | """newsWeb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('news.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^show/$', 'news.views.news_show', name='show'),
url(r'^account_info/', include('account.urls')),
url(r'^visit/$', 'account.views.visit', name='visit'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"pxxsy@163.com"
] | pxxsy@163.com |
13fd2f0bba875a9a3ee0e1e4dea1ec8de4320261 | 84a9e12041b41db2155d45339fff2499e4d5cd83 | /routes/api/v1/__init__.py | ac0ccc6258b5042c5b6961ece9b37fb44c80d8de | [] | no_license | hacker0limbo/watchlist | 32b01831a2147caf8b7662ceccc06c089a1b3884 | 91afbe677f099869827e262d5385caa10b3583e6 | refs/heads/master | 2023-05-24T16:44:03.435148 | 2019-11-17T11:45:31 | 2019-11-17T11:45:31 | 220,356,378 | 3 | 2 | null | 2023-05-01T21:16:59 | 2019-11-08T00:47:52 | Python | UTF-8 | Python | false | false | 158 | py | from flask import Blueprint
router = Blueprint('api_v1_bp', __name__)
# 需要在注册蓝图之后导入引用蓝图的包
from routes.api.v1 import movie
| [
"stephen.yin@outlook.com"
] | stephen.yin@outlook.com |
b0084b0539780db5582ce0d7f2cdd843f26384e9 | 6defeaa9e3eff61cd861c855ed2f65db2a457564 | /onmt/keyphrase/shrink_pred_files.py | b0f94a88ab0e0f5a110485f683a9c904dd885b63 | [
"MIT"
] | permissive | memray/OpenNMT-kpg-release | 50439d2a58d4499b3a4b1d1fdb586d266c4367e7 | d16bf09e21521a6854ff3c7fe6eb271412914960 | refs/heads/master | 2023-08-17T14:32:04.442881 | 2023-01-31T03:24:46 | 2023-01-31T03:24:46 | 213,238,221 | 222 | 34 | MIT | 2023-07-22T18:03:01 | 2019-10-06T20:23:17 | Jupyter Notebook | UTF-8 | Python | false | false | 6,961 | py | # -*- coding: utf-8 -*-
"""
Some pred files use up too much space, e.g. /zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2seq/meng17-one2seq-kp20k-topmodels/meng17-one2seq-fullbeam/meng17-one2seq-beam50-maxlen40/pred/kp20k-meng17-verbatim_prepend-rnn-BS64-LR0.05-Layer1-Dim150-Emb100-Dropout0.0-Copytrue-Reusetrue-Covtrue-PEfalse-Contboth-IF1_step_95000/kp20k.pred is 8.3GB, beam=10 size=2.0GB.
So this
"""
import json
import os
__author__ = "Rui Meng"
__email__ = "rui.meng@pitt.edu"
if __name__ == '__main__':
# root_path = ' /zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/transfer_exps/kp/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/transfer_exps/kp_o2o/'
# root_path = '/zfs1/hdaqing/rum20/kp/fairseq-kpg/exps/'
# root_path = '/zfs1/hdaqing/rum20/kp/fairseq-kpg/exps/kp_fewshot10k'
# root_path = '/zfs1/hdaqing/rum20/kp/transfer_exps/kp_fewshot-v2'
root_path = '/zfs1/hdaqing/rum20/kp/transfer_exps/bart_DAFT-v1-DA1e6_FT1e5'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2seq/meng17-one2seq-kp20k-v3/meng17-one2seq-fullbeam/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2seq/meng17-one2seq-kp20k-v2/meng17-one2seq-fullbeam/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2seq/meng17-one2seq-kp20k-topmodels/meng17-one2seq-fullbeam/meng17-one2seq-beam50-maxlen40/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2one/meng17-one2one-kp20k-v3/meng17-one2one-fullbeam/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/keyphrase/meng17-one2one/'
# root_path = '/zfs1/pbrusilovsky/rum20/kp/OpenNMT-kpg/output/order_matters/transformer/meng17-one2seq-beam50-maxlen40/'
print(root_path)
dataset_line_counts = {
'kp20k': 19987,
# 'kp20k_valid2k': 2000,
'inspec': 500,
'krapivin': 460,
'nus': 211,
'semeval': 100,
# 'duc': 308,
'kp20k_test': 19987,
'openkp_test': 6614,
'kptimes_test': 10000,
'jptimes_test': 10000,
'stackex_test': 16000,
'kp20k_valid2k_test': 2000,
'openkp_valid2k_test': 2000,
'kptimes_valid2k_test': 2000,
'stackex_valid2k_test': 2000,
}
total_size_shrinked = 0
for root, dirs, files in os.walk(root_path, topdown=True):
for filename in files:
# print()
# print('-=' * 50)
# print(filename)
# print('-=' * 50)
'''
Delete report
'''
if filename.endswith('.report'):
dataset_name = filename[:-7].split('-')[-1][5:]
if dataset_name in dataset_line_counts:
report_path = os.path.join(root, filename)
print('Deleting .report: [%s] %s' % (dataset_name, report_path))
ori_size = os.stat(report_path).st_size // 1024 // 1024
print('\t file size = %d MB' % (ori_size))
total_size_shrinked += ori_size
os.remove(report_path)
if filename.endswith('.report.txt'):
dataset_name = filename[:-11]
if dataset_name in dataset_line_counts:
report_path = os.path.join(root, filename)
print('Deleting .report: [%s] %s' % (dataset_name, report_path))
ori_size = os.stat(report_path).st_size // 1024 // 1024
print('\t file size = %d MB' % (ori_size))
total_size_shrinked += ori_size
os.remove(report_path)
'''
Reduce .pred file size
'''
if not filename.endswith('.pred'):
continue
dataset_name = filename[:-5].split('-')[-1][5:]
if dataset_name not in dataset_line_counts: continue
pred_path = os.path.join(root, filename)
print('Shrinking .pred: [%s] %s' % (dataset_name, pred_path))
ori_size = os.stat(pred_path).st_size // 1024 // 1024
print('\t file size = %d MB' % (ori_size))
# ensure the pred is complete
with open(pred_path, 'r') as pred_file:
lines = [l if lid==0 else '' for lid, l in enumerate(pred_file)]
if len(lines) != dataset_line_counts[dataset_name]:
# print('Prediction ongoing, skip!')
continue
pred_dict = json.loads(lines[0])
# not a model output
if 'attns' not in pred_dict:
continue
# indicating it's already shrinked, skip
if pred_dict['src'] == None:
# if pred_dict['attns'] == None and pred_dict['dup_pred_tuples'] == None:
# print('This pred file has been shrinked, skip!')
continue
tmp_pred_path = pred_path + '.tmp'
tmp_pred_file = open(tmp_pred_path, 'w')
with open(pred_path, 'r') as pred_file:
for lid, line in enumerate(pred_file):
try:
pred_dict = json.loads(line)
except:
tmp_pred_file.write(line.strip() + '\n')
print("Error occurs while loading line %d in %s" % (lid, pred_path))
continue
# for k,v in pred_dict.items():
# print('%s' % k)
pred_dict['src'] = None
pred_dict['preds'] = None
# pred_dict['pred_scores'] = None
pred_dict['attns'] = None
pred_dict['copied_flags'] = None
pred_dict['ori_pred_sents'] = None
pred_dict['ori_pred_scores'] = None
pred_dict['ori_preds'] = None
pred_dict['dup_pred_tuples'] = None
tmp_pred_file.write(json.dumps(pred_dict)+'\n')
# tmp_pred_file.close()
print('\tDumped to: ' + pred_path + '.tmp')
new_size = os.stat(tmp_pred_path).st_size // 1024 // 1024
print('\t new file size = %d MB' % (new_size))
print('\t reduced size = %d MB' % (ori_size-new_size))
total_size_shrinked += (ori_size - new_size)
# replace the original file to release space
os.remove(pred_path)
os.rename(tmp_pred_path, pred_path)
print('Total shrinked size = %d MB' % (total_size_shrinked))
| [
"memray0@gmail.com"
] | memray0@gmail.com |
f6373c989bd22ce2c8959f9b63d9482b3dba981a | 271dbb5f0c23ae40f19a8df7dd3f15a44fbe5ae1 | /it-king/day01/while.py | 5c0bdb341ea377ada180d290ccf004639cd7d5df | [] | no_license | obligate/python3-king | a4d1c5c145c3b1c42efe059cf2bbd797d0b3c528 | 2b31400468c7a2621f29f24f82e682eb07c0e17d | refs/heads/master | 2020-05-02T11:45:16.218771 | 2019-03-27T08:05:39 | 2019-03-27T08:05:39 | 177,938,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | # Author: Peter
count = 0
while True:
print('count:', count)
count = count + 1
| [
"peter@tidebuy.net"
] | peter@tidebuy.net |
d2fe1e73f2006461b52cbe899998417ea9ba8635 | 8e36722e7df7c34c8a2492398cda49454ca1e0c1 | /blog_project/settings.py | b5606080ac6918df9fa8827f3149e80316cd74da | [] | no_license | sabinbhattaraii/bolg_app | 02447674fec490caa92dda8658519dd77a070adf | 193dbf373cc161d2414930fd55cd369e3f634208 | refs/heads/master | 2022-12-14T10:03:02.837462 | 2020-09-13T06:34:34 | 2020-09-13T06:34:34 | 295,089,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | py | """
Django settings for blog_project project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3lb86#alen#4*_%ux+%lvmbt7p2mez!53mf6rhf3tmg+q5_ucg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'accounts.apps.AccountsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,"static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home' | [
"sabin.bhattarai2012@gmail.com"
] | sabin.bhattarai2012@gmail.com |
7585e048831c6dbf45a703831dfbe6264e32231b | 53db8482926ba5bf6ee9b86337ec021855f861e6 | /testsuite2/min.py | 6747558168de22b686a6cbcef985768072b5afa7 | [] | no_license | nilimsarma/piethon | 012e1de5d705d90a3639a4957eaf2b866c0d494c | 5667901e15e1d5cdd80122848e8bf1af06a4d404 | refs/heads/master | 2021-01-10T06:24:25.203977 | 2015-11-29T11:01:21 | 2015-11-29T11:01:21 | 47,059,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | def main():
x=-1
y=7
z=2
if x<=y:
if y<=z:
print x
print y
print z
else:
if x<=z:
print x
print z
print y
else:
print z
print x
print y
end
end
else:
if z<=y:
print z
print x
print y
end
end
if y<=x:
if x<=z:
print y
print x
print z
else:
if y<=z:
print y
print z
print x
end
end
end
end
| [
"nilim.ch.sarma@gmail.com"
] | nilim.ch.sarma@gmail.com |
75fa9eb0e5a538923378f956a3d163065a1d0871 | c866fd1690f34fca4b7cfb68f31f1fa96995d562 | /accounts/models.py | 78d6f21b97cae508a0899152fe97125720bf3cfb | [] | no_license | graynneji/OnlineStore | a1e9fa2f9803f5579716cedfd7310530ed3a2ada | 545758bbf398b2d481487d21426cc4aee0b1f4b2 | refs/heads/master | 2023-07-17T23:02:13.201463 | 2021-08-29T19:03:40 | 2021-08-29T19:03:40 | 394,409,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,329 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
# Create your models here.
class MyAccountManager(BaseUserManager):
#creating the user
def create_user(self,first_name,last_name,username,email,password=None):
if not email:
raise ValueError('User Must have an email address')
if not username:
raise ValueError('User must have a username')
#this is login
user = self.model(
email = self.normalize_email(email), # what normalize email address does is if you enter capital letter email it will turn it to small letters
username = username,
first_name = first_name,
last_name = last_name,
)
user.set_password(password)
user.save(using=self._db)
return user
#creating super user
def create_superuser(self, first_name, last_name,email,username,password):
user = self.create_user(
email = self.normalize_email(email),
username =username,
password = password,
first_name = first_name,
last_name = last_name,
)
#set permission to true for superuser
user.is_admin = True
user.is_active = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class Account(AbstractBaseUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=50, unique=True)
email = models.EmailField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# Required
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
objects = MyAccountManager()
def __str__(self):
return self.email
def has_perm(self,perm,obj=None):
return self.is_admin
def has_module_perms(self,add_label):
return True
| [
"graynneji@outlook.com"
] | graynneji@outlook.com |
7bc6e0de57886169f7e3a66b1b4d421fad48b590 | 1a3a985eca5f52d312dc1f19154c6f28f0011b2d | /tests/test_store.py | 23cdd5f5bc8a0bc0500117adb5572a057723de17 | [
"BSD-3-Clause"
] | permissive | chrisbrake/PythonSandbox | f2441ca4866f1cbe1f3b1a6bf3b0e9fa5652a431 | 8cd2ea847676d6a300b55c560f49cd980f760b00 | refs/heads/master | 2021-06-06T06:47:37.738105 | 2020-02-17T04:41:01 | 2020-02-17T04:41:01 | 99,748,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | from os import remove
from tempfile import mkstemp
from unittest import TestCase
from store import store
class TestStore(TestCase):
def setUp(self):
(_, self.tmp_file) = mkstemp()
def tearDown(self):
remove(self.tmp_file)
def test_get_exclusive_lock_success(self):
with open(self.tmp_file, 'w') as f:
store.get_exclusive_lock(f, timeout=1)
f.write('test data')
| [
"chris.brake@gmail.com"
] | chris.brake@gmail.com |
08323bda50e0dab41f2e989939243cd515c6a5a6 | 5756536ee020ddb96f015de778e367829a3ad44e | /src/SPSSProcessor/__init__.py | 9b77736f158a722de9cf2da918f083ad46e86de9 | [
"MIT"
] | permissive | razvan-cretu/SPSSProcessor | 614db9f0e159dcbbaff00e9ea8ad8927355422e3 | 86dc35e60f1e6fae2c0df3d84110db5d3f187dc7 | refs/heads/main | 2023-07-05T23:40:58.247320 | 2021-08-10T12:39:27 | 2021-08-10T12:39:27 | 390,399,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | from .Processor import SavFile | [
"razvan.cretu@dynata.com"
] | razvan.cretu@dynata.com |
a513630f1ca0daea326fbcf05af83a602d42a158 | 3713b023f5c5784f628043d9c86b88ee3ff43e92 | /main_file_original.py | 4f8b4aeaf6ab3bb522cef7ae4ca081e3ed6d1730 | [] | no_license | aman1931998/Auto-Boosting-CSGO | bad234e20ca0c695a4e0cbe7fbb832ac0ac8af15 | 512ecdb550a1d3d6a94bc457f8809a5f6db14277 | refs/heads/main | 2023-08-27T15:41:59.783349 | 2021-09-21T04:08:08 | 2021-09-21T04:08:08 | 401,274,264 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,215 | py | import time
import beepy
import shutil, os
import pyautogui as pg
import keyboard as kb
from time import sleep
import numpy as np
from PIL import Image, ImageGrab, ImageOps
import pickle, cv2
import psutil
import pyperclip as pc
from positions import *
from functions import *
from major_functions import *
import argparse
import sys
from dynamic_data_functions import load_old_account_database, load_current_mm_batch, load_mm_batches_index, load_winner_index
from loading_functions import load_account_database
#%% Parsing arguments for running main script.
parser = argparse.ArgumentParser()
parser.add_argument("--clear_old_instance", type = str, help = "Clear old instances", default = True)
parser.add_argument("--after_launch_timeout", type = int, help = "Time to wait after launching panel(s)", default = 150)
parser.add_argument("--launch_timeout", type = float, help = "Time to wait after launching 1 panel", default = 0.5)
parser.add_argument("--untrusted", type = bool, help = "Launch in untrusted mode", default = False)
parser.add_argument("--map_name", type = str, help = "Select the map name", default = "anubis")
parser.add_argument("--match_output", type = str, help = "Match outcome", default = "tie")
parser.add_argument("--winner", type = str, help = "Winner batch", default = "upper")
parser.add_argument("--winner_score", type = str, nargs = 2, help = "Set the score for the match w.r.t. winning lobby", default = "16 4")
parser.add_argument("--current_score", type = str, nargs = 2, help = "Current Score", default = "0 0")
# parser.add_argument("--max_matches", type = int, help = "Number pf matches to play.", default = 4)
args = parser.parse_args()
print("args", args)
#%% Config settings
clear_old_instance = bool(args.clear_old_instance) # Whether to clear old instances.
launch_timeout = float(args.launch_timeout) # Timeout after launching a panel
after_launch_timeout = int(args.after_launch_timeout) # Timeout after launching all panels
untrusted = False #bool(args.untrusted) # Launch in Trusted mode or not.
map_name = str(args.map_name).lower() # Name of the map to play
match_output = str(args.match_output).lower() # Output of the match. Eg: 16 14 or 16 0 or 15 15
winner = str(args.winner).lower() # Winner lobby [upper or lower]
print(clear_old_instance, launch_timeout, after_launch_timeout, untrusted, map_name, match_output, winner)
try: winner_score = list(map(int, str(args.winner_score).split()))
except: winner_score = list(map(int, args.winner_score))
try: current_score = list(map(int, str(args.current_score).split()))
except: current_score = list(map(int, args.current_score))
#%% Config [Default]
# clear_old_instance = True #False in match 2
# after_launch_timeout = 150
# launch_timeout = 1
# untrusted = False
# map_name = "anubis"
# match_output = 'winlose'
# winner = 'upper' # or 'u'
# current_map = 'anubis'
# winner_score = [16, 4] #[15, 15] #or [16, 0]
# current_score = [0, 0] # or [4, 2]
#%% Paths [Default]
friend_code_dict_path = os.path.join('dynamic', "friend_codes.pkl") # Path to friend-codes file.
from loading_functions import load_mm_rank_database, load_pr_rank_database
mm_rank_database = load_mm_rank_database()
pr_rank_database = load_pr_rank_database()
account_data = load_account_database()
mm_batch = load_current_mm_batch()
winner_index = int(load_winner_index())
mm_batch['winner'] = mm_batch['winner'][winner_index]
t1_initial_time = time.time()
print("Getting acccount details") # Getting 10 SteamIDs, Usernames, Passwords for this batch.
USERNAME_UPPER, PASSWORD_UPPER, STEAM_ID_UPPER, USERNAME_LOWER, PASSWORD_LOWER, STEAM_ID_LOWER = get_accounts()
print("Clearing earlier instances and panels.")
if not clear_old_instance:
try:
PIDs = load_PIDs()
for key in PIDs.keys(): #key = list(PIDs.keys())[0]
if type(PIDs[key]) == list:
clear_old_instance = True
except:
clear_old_instance = True
if not clear_old_instance:
PIDs = load_PIDs() #TODO Check if we can remove this fn to avoid r/w calls.
# Check what else is neeeded for continuingg.
else:
cleaner()
cleaner()
PIDs = {"u1": [], "u2": [], "u3": [], "u4": [], "u5": [], "l1": [], "l2": [], "l3": [], "l4": [], "l5": [] }
#%% Launching panels.
print("Launching panels.")
for i in range(5):#i = 0
PIDs = get_panel_pids(USERNAME_UPPER[i], PASSWORD_UPPER[i], STEAM_ID_UPPER[i],
"u" + str(i+1), PIDs, launch_timeout = launch_timeout,
trusted_mode = not untrusted, map_name = map_name, clear_old_instance = False)
print(PIDs)
PIDs = get_panel_pids(USERNAME_LOWER[i], PASSWORD_LOWER[i], STEAM_ID_LOWER[i],
"l" + str(i+1), PIDs, launch_timeout = launch_timeout,
trusted_mode = not untrusted, map_name = map_name, clear_old_instance = False)
print(PIDs)
print("Saving PIDs...")
PIDs = save_PIDs(PIDs)
#%% Checking and getting panels ready.
print("Waiting for %d seconds for panels to load and start checking..."%(after_launch_timeout))
time.sleep(after_launch_timeout)
panels_to_fix = ['u1', 'u2', 'u3', 'u4', 'u5', 'l1', 'l2', 'l3', 'l4', 'l5']
panels_ready, exit_count_, max_exit_count = [], 0, 6 #### Main settings ['u1', 'u2', 'u3', 'u4', 'u5', 'l1', 'l2', 'l3', 'l4', 'l5']
panels_launch_successful = False
while not panels_launch_successful:
exit_count_ += 1
if exit_count_ == max_exit_count:
os.system("ipconfig /release"); time.sleep(0.1); os.system("ipconfig /flushdns"); time.sleep(0.1); os.system("ipconfig /renew"); time.sleep(0.1)
accept_args = get_accept_args()
runfile('main_file.py', accept_args)
sys.exit(0)
# panels_to_check = panels_to_fix
panels_to_fix = check_launched_panel_wrapper(checker_image = None, panels_to_check = panels_to_fix)
if panels_to_fix == []:
print("Panels Launch Successful.")
panels_launch_successful = True
print("Panels to fix!!!!: ", *panels_to_fix)
PIDs = kill_PIDs(PIDs, panels_to_fix)
print(PIDs)
for panel in panels_to_fix:
panel_top_left_x, panel_top_left_y, (username, password, steamid) = get_top_left_position_from_panel_name(panel, include_account_details = True)
PIDs = get_panel_pids(username, password, steamid, panel, PIDs, launch_timeout = launch_timeout, trusted_mode = not untrusted, map_name = map_name)
print("Saving PIDs")
print(PIDs)
PIDs = save_PIDs(PIDs)
print(PIDs)
panels_error = []
tt1 = time.time()
#time.sleep(60)
for panel in ['u1', 'u2', 'u3', 'u4', 'u5', 'l1', 'l2', 'l3', 'l4', 'l5']:
if panel in panels_to_fix:
continue
if panel in panels_ready:
continue
pos_x, pos_y = get_top_left_position_from_panel_name(panel, include_account_details = False)
index = CSGO_UPPER_POS_X.index(pos_x)
click_only(CSGO_TITLE_BAR_X[index] + pos_x, CSGO_TITLE_BAR_Y[index] + pos_y, 0.2, 2)
if check_launched_panel(POS_X = pos_x, POS_Y = pos_y):
vac_signature_error_op = ready_panel(pos_x, pos_y, untrusted_check = untrusted, untrusted_check_times = 1, panel_number = panel, do = True)
if not vac_signature_error_op:
panels_ready.append(panel)
else:
panels_error.append(panel)
tt2 = time.time()
if (tt2-tt1) < after_launch_timeout and panels_to_fix != []: ### added
time.sleep(after_launch_timeout - (tt2-tt1))
print("!!!!!!Panels to fix: ", *panels_error)
PIDs = kill_PIDs(PIDs, panels_error)
print(PIDs)
panels_to_fix += panels_error
t2_time_to_launch = time.time()
print("Time taken to launch and get panels ready. %.2f"%(t2_time_to_launch - t1_initial_time))
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
#%% Checks whether any one lobby is already created or not.
# Case 1: Fresh panels are loaded: both lobbies need to be created
# Case 2: Panels already loaded and last match was 16:0, then 1 lobby is already ready we need second only.
# Case 3: Panels are ready but last match resulted in both lobby disconnects.
# Case N: Try to do this:
# When match is over
# Wait for some time [Formality wait]
# Disconnect the loosing team immediately and start making new lobby
# By this time, another lobby should be ready already.
friend_code_dict = load_friend_code_dict_file(friend_code_dict_path)
upper_batch = [CSGO_UPPER_POS_X, CSGO_UPPER_POS_Y, USERNAME_UPPER]
lower_batch = [CSGO_LOWER_POS_X, CSGO_LOWER_POS_Y, USERNAME_LOWER]
# get rank snippets
# right_visible_arrangement()
# get_rank_snippets(all_panels = True)
# #%% Cooldown check
# if cd_check_wrapper(all_panels = True):
# #runfile('new_driver_code.py')
# runfile('driver_code.py')
# sys.exit(0)
# Getting panels ready
after_lobby_blue_check_wrapper(arrangement_needed = True, checker_image = None, ignore_first_attempt = True)
# for batch in [upper_batch, lower_batch]: #batch = upper_batch.copy()
# POS_X, POS_Y, USERNAME = batch
# create_lobby(POS_X, POS_Y, USERNAME)
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
restart_if_panel_not_responding()
t4_lobbies_created = time.time()
print("Lobbies created, time taken: %.2f"%(t4_lobbies_created - t2_time_to_launch))
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
time.sleep(1)
right_visible_arrangement(include_play_button = True)
start_mm_search(arrangement_needed = False)
#%%
from dynamic_data_functions import reset_match_verification, verify_match_completion, toggle_main_completion
from helper_functions import generate_unique_mismatchID, generate_unique_matchID
mm_mismatchID = generate_unique_mismatchID(include_prefix = True)
mismatch_data = {}
search_details = {"search_start_time": None,
"search_error_count": 0,
"search_mismatchID": None,
"search_end_time": None}
search_start_time = datetime.now()
search_details['search_start_time'] = search_start_time
search_mismatchID = mm_mismatchID
search_details['search_mismatchID'] = search_mismatchID
vac_max_count = 5
vac_count = 0
while True:
vac_status = check_green_mm_search_wrapper()
if vac_status:
print("VAC STATUS: %s"%(vac_status))
vac_count+=1
start_mm_search(arrangement_needed = False)
else:
print("VAC Status successful.")
break
if vac_count == vac_max_count:
print("VAC Error: Relaunching panels.")
time.sleep(2)
accept__args = get_accept_args()
runfile('main_file.py', accept__args)
# time.sleep(5)
restart_if_panel_not_responding()
#avast_popup(test_image = None, checker_image = avast_popup_checker_image, cancel_match_ = False)
#%%
#%%
from logging_functions import log_current_mismatch_details, log_current_match_details, update_account_data_completed
#TODO LOOP IT FAILED TO REACH CHECK
while True:
panels_with_failed_connection = failed_to_reach_servers_check_wrapper(arrangement_needed = False, checker_image = None, checker_full_image = None)
if panels_with_failed_connection == []:
print("No Connection Errors.")
break
else:
log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = False, total_search_time = 0)
accept__args = get_accept_args()
runfile('main_file.py', accept__args)
sys.exit(0)
#%%
#%%
while True:
terminate_and_matches_played, mismatch_data = auto_accept_check(mismatch_data)
search_details['search_end_time'] = datetime.now()
# If match is not found after a given time.
if terminate_and_matches_played == False:
# TODO function to add a set of
# TODO!!!!!!!!!!!!!!!!!!!!!!!!
#TODO add mismatch log function
log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = False, total_search_time = (datetime.now() - search_start_time).seconds)
break
#sys.exit(0)
search_details['search_error_count'] = len(mismatch_data.keys())
search_end_time = search_details['search_end_time']
match_found = True
# add log mismatch fn (with match_)fpund = True
# see whether we have to add it here or after connection check.
time.sleep(5)
# TODO UPDATE AND CHANGE
print("Waiting for panels to connect to server.")
reconnection_output = map_loading_check_wrapper(map_name = map_name, method = 'all', max_time = 30)
if type(reconnection_output) != bool:
log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = False, total_search_time = (datetime.now() - search_start_time).seconds)
cancel_match()
failsafe = True
#log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = match_found, total_search_time = (search_end_time - search_start_time).seconds)
match_id = generate_unique_matchID(include_prefix = True)
match_time_details = {}
match_time_details['match_start_time'] = datetime.now()
print("Waiting for Warmup to end with extra 10 seconds time gap.")
time.sleep(60 + 5 + 15 + 5)
reset_match_verification()
accept_args = "--map_name %s --match_output %s --winner %s --winner_score %d %d --current_score %d %d"%(map_name, match_output, winner, winner_score[0], winner_score[1], current_score[0], current_score[1])
runfile('ingame_script.py', args = accept_args)
after_match_cleanup(0)
match_end_time = datetime.now()
cooldown_details = {"team1": [], "team2": []}
for i in range(4, -1, -1):
cd_op = cd_check(CSGO_UPPER_POS_X[i], CSGO_UPPER_POS_Y[i])
if cd_op != None:
cd_time = match_end_time
else:
cd_time = None
cd_data = {"type": cd_op, "time": match_end_time}
cooldown_details['team1'].insert(0, cd_data)
cd_op = cd_check(CSGO_LOWER_POS_X[i], CSGO_LOWER_POS_Y[i])
if cd_op != None:
cd_time = match_end_time
else:
cd_time = None
cd_data = {"type": cd_op, "time": match_end_time}
cooldown_details['team2'].insert(0, cd_data)
# TODO
status = verify_match_completion()
if not status:
log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = False, total_search_time = (datetime.now() - search_start_time).seconds)
break
log_current_mismatch_details(mm_mismatchID, mm_batch, mismatch_data, match_found = match_found, total_search_time = (search_end_time - search_start_time).seconds)
match_time_details['match_end_time'] = match_end_time
xp_gained_details = {"team1_xp_gained": [], "team2_xp_gained": []}
from loading_functions import get_xp_gained_for_next_week_of_accounts, get_match_number_of_accounts, get_week_match_count_of_accounts, get_week_number_of_accounts
from helper_functions import calculate_xp_gained
team1, team2 = get_xp_gained_for_next_week_of_accounts([USERNAME_UPPER, USERNAME_LOWER])
team1 = [calculate_xp_gained(i, rounds_won = winner_score[0] if winner == 'upper' else winner_score[1]) for i in team1]
team2 = [calculate_xp_gained(i, rounds_won = winner_score[0] if winner == 'lower' else winner_score[1]) for i in team2]
xp_gained_details['team1_xp_gained'] = team1
xp_gained_details['team2_xp_gained'] = team2
team1 = {"username": mm_batch['batch_1'],
"mm_rank_update": [],
"pr_rank_update": [],
"match_number": [],
"week_number": [],
"week_match_count": []}
team2 = {"username": mm_batch['batch_2'],
"mm_rank_update": [],
"pr_rank_update": [],
"match_number": [],
"week_number": [],
"week_match_count": []}
team1['match_number'], team2['match_number'] = get_match_number_of_accounts([USERNAME_UPPER, USERNAME_LOWER])
team1['match_number'] = [str(int(i) + 1) for i in team1['match_number']]
team2['match_number'] = [str(int(i) + 1) for i in team2['match_number']]
team1['week_number'], team2['week_number'] = get_week_number_of_accounts([USERNAME_UPPER, USERNAME_LOWER])
team1['week_match_count'], team2['week_match_count'] = get_week_match_count_of_accounts([USERNAME_UPPER, USERNAME_LOWER])
team1['week_match_count'] = [str(int(i) + 1) for i in team1['week_match_count']]
team2['week_match_count'] = [str(int(i) + 1) for i in team2['week_match_count']]
from capture_functions import get_mm_rank_snippet, get_pr_rank_snippet
from image_functions import identify_mm_rank, identify_pr_rank
for i in range(5): #i = 0
if cooldown_details['team1'][i]['type'] is None:
team1['mm_rank_update'].append(account_data[mm_batch['batch_1'][i]]['MM_Rank'])
else:
team1['mm_rank_update'].append(identify_mm_rank(rank_snippet = get_mm_rank_snippet(CSGO_UPPER_POS_X[i], CSGO_UPPER_POS_Y[i], return_numpy_object = True), mm_rank_database = mm_rank_database))
if cooldown_details['team2'][i]['type'] is None:
team2['mm_rank_update'].append(account_data[mm_batch['batch_2'][i]]['MM_Rank'])
else:
team2['mm_rank_update'].append(identify_mm_rank(rank_snippet = get_mm_rank_snippet(CSGO_LOWER_POS_X[i], CSGO_LOWER_POS_Y[i], return_numpy_object = True), mm_rank_database = mm_rank_database))
if cooldown_details['team1'][i]['type'] is None:
team1['pr_rank_update'].append(account_data[mm_batch['batch_1'][i]]['PR_Rank'])
else:
team1['pr_rank_update'].append(identify_pr_rank(rank_snippet = get_pr_rank_snippet(CSGO_UPPER_POS_X[i], CSGO_UPPER_POS_Y[i], return_numpy_object = True), mm_rank_database = mm_rank_database))
if cooldown_details['team2'][i]['type'] is None:
team2['pr_rank_update'].append(account_data[mm_batch['batch_2'][i]]['PR_Rank'])
else:
team2['pr_rank_update'].append(identify_pr_rank(rank_snippet = get_pr_rank_snippet(CSGO_LOWER_POS_X[i], CSGO_LOWER_POS_Y[i], return_numpy_object = True), mm_rank_database = mm_rank_database))
from helper_functions import get_current_week_details
log_current_match_details(match_id = match_id,
team1 = team1,
team2 = team2,
time_stamp = match_time_details['match_start_time'],
search_details = search_details,
match_time_details = match_time_details, xp_gained_details = xp_gained_details)
# TODO: Cooldown_details
update_account_data_completed(mm_batch = mm_batch,
match_id = match_id,
team1 = team1,
team2 = team2,
time_stamp = match_time_details['match_start_time'],
xp_gained_details = xp_gained_details,
cooldown_details = cooldown_details,
week_index = get_current_week_details(include_datetime_obj = False))
toggle_main_completion()
#%%
#%%
#%%
#%%
if cd_check_wrapper(True):
#runfile('new_driver_code.py')
runfile('driver_code.py')
sys.exit(0)
# create_lobby(CSGO_UPPER_POS_X, CSGO_UPPER_POS_Y, USERNAME_UPPER)
# create_lobby(CSGO_LOWER_POS_X, CSGO_LOWER_POS_Y, USERNAME_LOWER)
# right_visible_arrangement(include_play_button = True)
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
# restart_if_panel_not_responding()
# after_lobby_blue_check_wrapper(arrangement_needed = True, checker_image = None)
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
# right_visible_arrangement(include_play_button = True)
# start_mm_search(arrangement_needed = False)
# identify_and_clear_errors(all_panels = True, error_coordinates = error_coordinates, error_with_numpy_images = error_with_numpy_images,
# error_ok_button = error_ok_button)
# time.sleep(5)
# FIALED TO REACH CHECK
# runfile('driver_code.py')
| [
"noreply@github.com"
] | noreply@github.com |
5a3533fe380107f7a518cfd59cc2bc0bf7a77c6a | 7556542c8c6ae157542300ce45388a8cb0213edb | /cocitation/co-citation-finding.py | 7e0a03491b4cf421e14f206531faccb9b8550960 | [
"Apache-2.0"
] | permissive | hyyc116/Therapies_finding | 2229f567c157d17a7ed947d62a78d3487151540c | 1ee36190e5b85ac89d2836c67ab60c1168c3b1b0 | refs/heads/master | 2021-01-17T12:46:32.491077 | 2017-04-06T20:28:45 | 2017-04-06T20:28:45 | 84,074,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | #coding:utf-8
import sys
sys.path.append(".")
sys.path.append("..")
from tools.xml_parser import *
reload(sys)
sys.setdefaultencoding('utf-8')
import re
from collections import defaultdict
import json
#Get references
def parse_references_with_index(indexpath):
count =0
for path in open(indexpath):
count+=1
if not path.strip().endswith('.nxml'):
continue
if count%100==1:
sys.stderr.write('{:}\n'.format(count))
path = path.strip()
doc = parse_doc(path)
titles = []
for title in parse_pmc_references(doc):
titles.append(title)
headers = re.sub(r"\s+",' ','. '.join(titles)+".")
doi = parse_pmc_doi(doc)
print doi+"\t"+headers.encode('utf-8')
#get body text
def parse_indexes(indexpath,nplist):
count=0
find_doc_count=0
tf_dic=defaultdict(list)
for path in open(indexpath):
count+=1
if not path.strip().endswith('.nxml'):
continue
if count%10==1:
sys.stderr.write('PROGRESS:{:},'.format(count))
sys.stderr.write('find {:} docs.\n'.format(find_doc_count))
path = path.strip()
content = open(path).read().lower()
if "parkinson's disease" not in content:
continue
find_doc_count+=1
content = parse_body_abstext(path)
content = re.sub(r'\s+'," ",content.replace('-'," ").lower())
for np in nplist:
if np in content:
tf_dic[np].append(path)
open("parkinson-tf.dict",'w').write(json.dumps(tf_dic))
for np in tf_dic.keys():
print np+"\t"+str(len(set(tf_dic[np])))
def parse_body_abstext(path):
doc = parse_doc(path)
content = doc.select('sec p')
# abstext = doc.select('abstract')[0].get_text()
ps=[]
for p in content:
ps.append(re.sub(r'\s+'," ",p.get_text()))
return " ".join(ps)
def score_therapies(df_path,tf_path):
df_dict=defaultdict(int)
tf_dict = defaultdict(int)
for line in open(df_path):
splits = line.split("\t")
therapy = re.sub(r'\s+'," ",splits[0].replace("-"," "))
df_dict[therapy]=int(splits[2])
for line in open(tf_path):
splits = line.split("\t")
tf_dict[splits[0]] = int(splits[1])
results=defaultdict(float)
for t in df_dict.keys():
tf = tf_dict.get(t,0.5)
results[t]=df_dict[t]/float(tf)
for k,v in sorted(results.items(),key=lambda x:x[1],reverse=True):
print "{:}\t{:.5f}\t{:}\t{:}".format(k,v,df_dict[k],tf_dict.get(k,0.5))
if __name__=="__main__":
clas = sys.argv[1]
if clas=='ref':
parse_references_with_index(sys.argv[1])
elif clas=='tf':
indexpath=sys.argv[2]
dfpath=sys.argv[3]
nplist = [re.sub(r'\s+'," ",line.strip().split('\t')[0].replace("-"," ")) for line in open(dfpath)]
parse_indexes(indexpath,nplist)
elif clas=='score':
score_therapies(sys.argv[2],sys.argv[3])
| [
"hyyc116@gmail.com"
] | hyyc116@gmail.com |
403c3b7201f5d83d0a46a8a9b9532c309c084f3f | 4bd3d4acf9f050d8efcc9a59c00fc2d2d7fea306 | /dein/.cache/init.vim/temp/16196/20180821130948/rplugin/python3/sosowa_scraper/driver.py | 56655f4e201b1cb10b555f03853b5c65bc0c4e3c | [
"MIT"
] | permissive | sy4may0/neovim-init.vim | 817f3102e1e2ed36e18bf47c472564dcf94a4ddd | 25aacb7a1f0902a57a4fb48422a35e04881af88b | refs/heads/master | 2021-06-05T12:29:00.761671 | 2018-08-21T12:32:42 | 2018-08-21T12:32:42 | 111,979,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import sosowa_requester
import sys
sr1 = sosowa_requester.sosowa_requester(sys.argv[1])
p = sr1.get_sosowa_product_list(50)
sr1.get_sosowa_article(p['1204215351'])
sr1.get_sosowa_article(p['1204215351'])
article = sr1.get_sosowa_article(p['1204215351'])
array = article.get_article('content')
for i in array:
print(i+"EOF")
| [
"sy4may0@hundred-jpy.cc"
] | sy4may0@hundred-jpy.cc |
b4fdc7658ac4704ce5381aaa8c4af1d7403d5ace | efb9e219ddee84c70f47fb9b768544e2212b625f | /venv/Scripts/easy_install-3.7-script.py | 7d00d3b6ee4ce5709369d6deb017aaa613d84e56 | [] | no_license | dennohgitau/loginpy | 0a8d80dae5f2f588ed4b0c09f683133975202daa | e524dacd9bdf4f97b61fbbed46b9371506c68192 | refs/heads/main | 2023-03-12T06:53:32.878987 | 2022-05-17T07:50:54 | 2022-05-17T07:50:54 | 493,147,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | #!C:\Users\GFITAU\PycharmProjects\LoginSQL\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"denisgitaustudy@gmail.com"
] | denisgitaustudy@gmail.com |
d4a331fd126d3de9e4c2126c8127d132a767d784 | 501176c17ecfda9fc2641c407b044b51364afa8e | /BootCamp/python/example/example.py | c4a02a67d17d6beae597df85db0c307a24e907bd | [] | no_license | melissa-koi/betterbuys | fcc6d6bfc1f37a644258d7bcf52eb93597674fd6 | edc40636c14ee341835bd8f77fd9ae91767b220a | refs/heads/main | 2023-05-26T13:59:59.503289 | 2021-06-10T05:35:43 | 2021-06-10T05:35:43 | 375,577,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | # import sys
#
# name = sys.argv[1]
# print("How old are you?")
# age = int(input())
#
# print(name)
# print(age)
# height = 69
# if height > 70:
# print("You are really tall")
# elif height > 60:
# print("You are of average height")
# else:
# print("You are really short")
# name = ""
# list_a = []
#
# if list_a:
# print("True")
# else:
# print("False")
# list_a = range(0, 5)
# print(list_a)
# for i in range(0, 7):
# print("I would love " + str(i) + " cookies")
# numbers = [1, 2, 3, 4, 5]
# for i in numbers:
# if i % 2 == 0:
# print(i)
# players = 11
# while players >= 5:
# print("The remaining players are ", players)
# players -= 1
# number = 0
# while True:
# print("I love candy " + str(number))
# number += 1
# if number == 7:
# break
# numTaken = [3, 5, 7, 11, 13]
# print("Available Numbers")
#
# for i in range(1, 21):
# if i in numTaken:
# continue # or break
# print(i)
# my_list = []
# my_other_list = list()
# list_a = ["a", "b", "c", "d"] # list of strings
# list_b = [1, 2, 3, 4, 5, 6] # list of numbers
# list_c = [1, "west", 34, "longitude"] # mixed list
# list_d = [ ["a","b","c","d"],[1,2,3,4,5,6],[1,"west",34,"longitude"]] # nested list
#
# list_a.extend(list_b)
# print(list_a)
# print(list_b)
# my_cat = {'name': 'Mr.Sniffles', 'age': 18, 'color': 'black'}
#
# print(my_cat['name'])
# print(my_cat)
#
# print(list(my_cat.keys()))
# print("Enter a string")
# input_string = input()
# characters = {}
#
# for character in input_string:
# characters.setdefault(character, 0)
# characters[character] = characters[character] + 1
#
# print(characters)
# print('What is your name?')
# name = input()
# print('How old are you?')
# age = input()
# print(f"My name is {name} and i am {age} years old")
# name = "James"
# age = 19
# weight = '79' # Kilograms
#
# age_weight_ratio = int(weight)/age
# age_weight_ratio2 = float(weight)/age
#
# print(age_weight_ratio)
# print(age_weight_ratio2)
def fun_a(a=1, b=4):
print(a + b)
fun_a()
def fun_b():
pass
def fun_c(a, b):
return a + b
sum = fun_c(5, 8)
print(sum)
| [
"melissawangui3@gmail.com"
] | melissawangui3@gmail.com |
b88abcb14ac3398093095546610832e7d0670631 | acb3c776bb796858710cd03c9f6b42bfaf0c8b55 | /accounts/forms.py | 6f67f1301b03d925267d847df780edbe90c22477 | [] | no_license | Code-Institute-Submissions/DjangoMilestoneProject | 9f89d85358daa7f9ce43216db88b8ed3788ef2ae | 505d2f116cd90596d557ae8e00e7c130e8919c95 | refs/heads/master | 2022-12-19T10:15:25.767129 | 2020-08-27T15:04:37 | 2020-08-27T15:04:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,892 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
from .models import UserProfile, UserFootball
# Login Information
class UserLoginForm(forms.Form):
"""Form to be used to log users in"""
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
# Registration Information
class UserRegistrationForm(UserCreationForm):
"""Form used to register a new user"""
password1 = forms.CharField(
label="Password",
widget=forms.PasswordInput)
password2 = forms.CharField(
label="Password Confirmation",
widget=forms.PasswordInput)
class Meta:
model = User
fields = ['email', 'username', 'password1', 'password2']
def clean_email(self):
email = self.cleaned_data.get('email')
username = self.cleaned_data.get('username')
if User.objects.filter(email=email).exclude(username=username):
raise forms.ValidationError(u'Email address must be unique')
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if not password1 or not password2:
raise ValidationError("Please confirm you password")
if password1 != password2:
raise ValidationError("Passwords must match")
return password2
# Profile Information
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
exclude = ('user',)
def __init__(self, *args, **kwargs):
"""
Add placeholders and classes, remove auto-generated
labels and set autofocus on first field
"""
super().__init__(*args, **kwargs)
placeholders = {
'first_name': 'First Name',
'last_name': 'Last Name',
'default_phone_number': 'Phone Number',
'default_country': 'Country',
'default_postcode': 'Postal Code',
'default_town_or_city': 'Town or City',
'default_street_address1': 'Street Address 1',
'default_street_address2': 'Street Address 2',
'default_county': 'County',
}
for field in self.fields:
if field == 'default_phone_number':
self.fields[field].widget.attrs['autofocus'] = True
if self.fields[field].required:
placeholder = {placeholders[field]}
else:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields[field].widget.attrs['class'] = 'border-black rounded-0'
self.fields[field].label = False
# Football Fan Information
class UserFootballForm(forms.ModelForm):
class Meta:
model = UserFootball
exclude = ('user',)
def __init__(self, *args, **kwargs):
"""
Add placeholders and classes, remove auto-generated
labels and set autofocus on first field
"""
super().__init__(*args, **kwargs)
placeholders = {
'club': 'Football Club',
'favorite_player': 'Football Idol',
'favorite_shirts': 'Favorite Shirts',
'size': 'Shirt Size',
}
for field in self.fields:
if field == 'club':
self.fields[field].widget.attrs['autofocus'] = True
if self.fields[field].required:
placeholder = {placeholders[field]}
else:
placeholder = placeholders[field]
self.fields[field].widget.attrs['placeholder'] = placeholder
self.fields[field].widget.attrs['class'] = 'border-black rounded-0'
self.fields[field].label = False
| [
"felipelitran@gmail.com"
] | felipelitran@gmail.com |
5f3b77b8ea7b33ecee62dff19499387e3da1e40e | 63f85ffae77a564ca296777b294ab3e4d2957ce9 | /tfSeq2SeqModels/decoders/transformer_decoder.py | 78583e920e9c7fd481e1b006c929fe6d30e9bc45 | [] | no_license | chenxinglili/eastonCode | 0c89789e2656b9236d773424973d933ac9045697 | 0334a41f6df7bb38a18a6918dfebd189c64395e8 | refs/heads/master | 2020-06-26T10:11:02.112948 | 2019-07-02T13:42:36 | 2019-07-02T13:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,892 | py | '''@file rnn_decoder.py
the while_loop implementation'''
import tensorflow as tf
import logging
from .rna_decoder import RNADecoder
from tfSeq2SeqModels.tools.utils import dense
from tfModels.tensor2tensor import common_attention
from ..tools.utils import residual, multihead_attention, ff_hidden
inf = 1e10
class Transformer_Decoder(RNADecoder):
"""ctc's `decoder` where the acoustic model is trained with ctc and the distribution
is shrinked without blank frams. The decoder operates on the shrinked distribution
which is a sequence labeling problem and the training targets are generated by
OCD.
"""
def __init__(self, args, is_train, global_step, embed_table=None, name=None):
super().__init__(args, is_train, global_step, embed_table, name)
# use decoder heres
self.num_blocks = args.model.decoder.num_blocks
self.num_cell_units = args.model.decoder.num_cell_units
self.attention_dropout_rate = args.model.decoder.attention_dropout_rate if is_train else 0.0
self.residual_dropout_rate = args.model.decoder.residual_dropout_rate if is_train else 0.0
self.num_heads = args.model.decoder.num_heads
self.size_embedding = args.model.decoder.size_embedding
self._ff_activation = (lambda x, y: x * tf.sigmoid(y)) \
if args.model.decoder.activation == 'glu' else tf.nn.relu # glu
self.softmax_temperature = args.model.decoder.softmax_temperature
self.lambda_lm = self.args.lambda_lm
def decode(self, encoded, len_encoded, decoder_input):
"""
used for MLE training
"""
decoder_output = self.decoder_impl(decoder_input, encoded, len_encoded)
logits = tf.layers.dense(
decoder_output,
self.args.dim_output,
use_bias=False,
name='decoder_fc')
preds = tf.to_int32(tf.argmax(logits, axis=-1))
return logits, preds
def decoder_with_caching(self, encoded, len_encoded):
"""
gread search, used for self-learning training or infer
"""
batch_size = tf.shape(encoded)[0]
token_init = tf.fill([batch_size, 1], self.start_token)
logits_init = tf.zeros([batch_size, 1, self.dim_output], dtype=tf.float32)
finished_init = tf.zeros([batch_size], dtype=tf.bool)
len_decoded_init = tf.ones([batch_size], dtype=tf.int32)
cache_decoder_init = tf.zeros([batch_size, 0, self.num_blocks, self.num_cell_units])
encoder_padding = tf.equal(tf.sequence_mask(len_encoded, maxlen=tf.shape(encoded)[1]), False) # bool tensor
encoder_attention_bias = common_attention.attention_bias_ignore_padding(encoder_padding)
def step(i, preds, cache_decoder, logits, len_decoded, finished):
preds_emb = self.embedding(preds)
decoder_input = preds_emb
decoder_output, cache_decoder = self.decoder_with_caching_impl(
decoder_input,
cache_decoder,
encoded,
encoder_attention_bias)
cur_logit = tf.layers.dense(
inputs=decoder_output[:, -1, :],
units=self.dim_output,
activation=None,
use_bias=False,
name='decoder_fc')
cur_ids = tf.to_int32(tf.argmax(cur_logit, -1))
preds = tf.concat([preds, cur_ids[:, None]], axis=1)
logits = tf.concat([logits, cur_logit[:, None]], 1)
# Whether sequences finished.
has_eos = tf.equal(cur_ids, self.end_token)
finished = tf.logical_or(finished, has_eos)
len_decoded += 1-tf.to_int32(finished)
return i+1, preds, cache_decoder, logits, len_decoded, finished
def not_finished(i, preds, cache, logit, len_decoded, finished):
return tf.logical_and(
tf.reduce_any(tf.logical_not(finished)),
tf.less(
i,
tf.reduce_min([tf.shape(encoded)[1], self.args.max_len]) # maxlen = 25
)
)
i, preds, cache_decoder, logits, len_decoded, finished = tf.while_loop(
cond=not_finished,
body=step,
loop_vars=[0, token_init, cache_decoder_init, logits_init, len_decoded_init, finished_init],
shape_invariants=[tf.TensorShape([]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None, None, None]),
tf.TensorShape([None, None, self.dim_output]),
tf.TensorShape([None]),
tf.TensorShape([None])]
)
# len_decoded = tf.Print(len_decoded, [finished], message='finished: ', summarize=1000)
len_decoded -= 1-tf.to_int32(finished) # for decoded length cut by encoded length
logits = logits[:, 1:, :]
preds = preds[:, 1:]
not_padding = tf.sequence_mask(len_decoded, dtype=tf.int32)
preds = tf.multiply(tf.to_int32(preds), not_padding)
return logits, preds, len_decoded
def decoder_with_caching_impl(self, decoder_input, decoder_cache, encoder_output, encoder_attention_bias):
# Positional Encoding
decoder_input += common_attention.add_timing_signal_1d(decoder_input)
# Dropout
decoder_output = tf.layers.dropout(decoder_input,
rate=self.residual_dropout_rate,
training=self.is_train)
new_cache = []
# rest block with residual
for i in range(self.num_blocks):
with tf.variable_scope("block_{}".format(i)):
# Multihead Attention (self-attention)
# the caching_impl only need to calculate decoder_output[:, -1:, :] !
decoder_output = residual(decoder_output[:, -1:, :],
multihead_attention(
query_antecedent=decoder_output,
memory_antecedent=None,
bias=None,
total_key_depth=self.num_cell_units,
total_value_depth=self.num_cell_units,
num_heads=self.num_heads,
dropout_rate=self.attention_dropout_rate,
num_queries=1,
output_depth=self.num_cell_units,
name="decoder_self_attention",
summaries=False),
dropout_rate=self.residual_dropout_rate)
# Multihead Attention (vanilla attention)
decoder_output = residual(decoder_output,
multihead_attention(
query_antecedent=decoder_output,
memory_antecedent=encoder_output,
bias=encoder_attention_bias,
total_key_depth=self.num_cell_units,
total_value_depth=self.num_cell_units,
output_depth=self.num_cell_units,
num_heads=self.num_heads,
dropout_rate=self.attention_dropout_rate,
num_queries=1,
name="decoder_vanilla_attention",
summaries=False),
dropout_rate=self.residual_dropout_rate)
# Feed Forward
decoder_output = residual(decoder_output,
ff_hidden(
decoder_output,
hidden_size=4 * self.num_cell_units,
output_size=self.num_cell_units,
activation=self._ff_activation),
dropout_rate=self.residual_dropout_rate)
decoder_output = tf.concat([decoder_cache[:, :, i, :], decoder_output], axis=1)
new_cache.append(decoder_output[:, :, None, :])
new_cache = tf.concat(new_cache, axis=2) # [batch_size, n_step, num_blocks, num_hidden]
return decoder_output, new_cache
def decoder_impl(self, decoder_input, encoder_output, len_encoded):
# encoder_padding = tf.equal(tf.reduce_sum(tf.abs(encoder_output), axis=-1), 0.0)
encoder_padding = tf.equal(tf.sequence_mask(len_encoded, maxlen=tf.shape(encoder_output)[1]), False) # bool tensor
# [-0 -0 -0 -0 -0 -0 -0 -0 -0 -1e+09] the pading place is -1e+09
encoder_attention_bias = common_attention.attention_bias_ignore_padding(encoder_padding)
decoder_output = self.embedding(decoder_input)
# Positional Encoding
decoder_output += common_attention.add_timing_signal_1d(decoder_output)
# Dropout
decoder_output = tf.layers.dropout(decoder_output,
rate=self.residual_dropout_rate,
training=self.is_train)
# Bias for preventing peeping later information
self_attention_bias = common_attention.attention_bias_lower_triangle(tf.shape(decoder_input)[1])
# Blocks
for i in range(self.num_blocks):
with tf.variable_scope("block_{}".format(i)):
# Multihead Attention (self-attention)
decoder_output = residual(decoder_output,
multihead_attention(
query_antecedent=decoder_output,
memory_antecedent=None,
bias=self_attention_bias,
total_key_depth=self.num_cell_units,
total_value_depth=self.num_cell_units,
num_heads=self.num_heads,
dropout_rate=self.attention_dropout_rate,
output_depth=self.num_cell_units,
name="decoder_self_attention",
summaries=False),
dropout_rate=self.residual_dropout_rate)
# Multihead Attention (vanilla attention)
decoder_output = residual(decoder_output,
multihead_attention(
query_antecedent=decoder_output,
memory_antecedent=encoder_output,
bias=encoder_attention_bias,
# bias=None,
total_key_depth=self.num_cell_units,
total_value_depth=self.num_cell_units,
output_depth=self.num_cell_units,
num_heads=self.num_heads,
dropout_rate=self.attention_dropout_rate,
name="decoder_vanilla_attention",
summaries=False),
dropout_rate=self.residual_dropout_rate)
# Feed Forward
decoder_output = residual(decoder_output,
ff_hidden(
decoder_output,
hidden_size=4 * self.num_cell_units,
output_size=self.num_cell_units,
activation=self._ff_activation),
dropout_rate=self.residual_dropout_rate)
return decoder_output
def beam_decode_rerank(self, encoded, len_encoded):
"""
beam search rerank at end with language model integration (self-attention model)
the input to te score is <sos> + tokens !!!
"""
beam_size = self.beam_size
batch_size = tf.shape(len_encoded)[0]
# beam search Initialize
# repeat each sample in batch along the batch axis [1,2,3,4] -> [1,1,2,2,3,3,4,4]
encoded = tf.tile(encoded[:, None, :, :],
multiples=[1, beam_size, 1, 1]) # [batch_size, beam_size, *, hidden_units]
encoded = tf.reshape(encoded,
[batch_size * beam_size, -1, encoded.get_shape()[-1].value])
len_encoded = tf.reshape(tf.tile(len_encoded[:, None], multiples=[1, beam_size]), [-1]) # [batch_size * beam_size]
# [[<S>, <S>, ..., <S>]], shape: [batch_size * beam_size, 1]
token_init = tf.fill([batch_size * beam_size, 1], self.args.sos_idx)
logits_init = tf.zeros([batch_size * beam_size, 0, self.dim_output], dtype=tf.float32)
len_decoded_init = tf.ones_like(len_encoded, dtype=tf.int32)
# the score must be [0, -inf, -inf, ...] at init, for the preds in beam is same in init!!!
scores_init = tf.constant([0.0] + [-inf] * (beam_size - 1), dtype=tf.float32) # [beam_size]
scores_init = tf.tile(scores_init, multiples=[batch_size]) # [batch_size * beam_size]
finished_init = tf.zeros_like(scores_init, dtype=tf.bool)
cache_decoder_init = tf.zeros([batch_size*beam_size,
0,
self.num_blocks,
self.num_cell_units])
if self.lm:
cache_lm_init = tf.zeros([batch_size*beam_size,
0,
self.lm.args.model.decoder.num_blocks,
self.lm.args.model.decoder.num_cell_units])
else:
cache_lm_init = tf.zeros([0, 0, 0, 0])
# collect the initial states of lstms used in decoder.
base_indices = tf.reshape(tf.tile(tf.range(batch_size)[:, None], multiples=[1, beam_size]), shape=[-1])
encoder_padding = tf.equal(tf.sequence_mask(len_encoded, maxlen=tf.shape(encoded)[1]), False) # bool tensor
encoder_attention_bias = common_attention.attention_bias_ignore_padding(encoder_padding)
def step(i, preds, scores, cache_decoder, cache_lm, logits, len_decoded, finished):
"""
the cache has no specific shape, so no can be put in the all_states
"""
preds_emb = self.embedding(preds)
decoder_input = preds_emb
decoder_output, cache_decoder = self.decoder_with_caching_impl(
decoder_input,
cache_decoder,
encoded,
encoder_attention_bias)
cur_logit = tf.layers.dense(
inputs=decoder_output[:, -1, :],
units=self.dim_output,
activation=None,
use_bias=False,
name='decoder_fc')
logits = tf.concat([logits, cur_logit[:, None]], 1)
z = tf.nn.log_softmax(cur_logit) # [batch*beam, size_output]
# the langueage model infer
if self.args.model.shallow_fusion:
assert self.lm
preds_emb = self.lm.decoder.embedding(preds)
with tf.variable_scope(self.args.top_scope, reuse=True):
with tf.variable_scope(self.args.lm_scope):
lm_output, cache_lm = self.lm.decoder.decoder_with_caching_impl(preds_emb, cache_lm)
logit_lm = dense(
inputs=lm_output[:, -1, :],
units=self.dim_output,
kernel=tf.transpose(self.lm.decoder.fully_connected),
use_bias=False)
z_lm = self.lambda_lm * tf.nn.log_softmax(logit_lm) # [batch*beam, size_output]
else:
z_lm = tf.zeros_like(z)
# rank the combined scores
next_scores, next_preds = tf.nn.top_k(z+z_lm, k=beam_size, sorted=True)
next_preds = tf.to_int32(next_preds)
# beamed scores & Pruning
scores = scores[:, None] + next_scores # [batch_size * beam_size, beam_size]
scores = tf.reshape(scores, shape=[batch_size, beam_size * beam_size])
_, k_indices = tf.nn.top_k(scores, k=beam_size)
k_indices = base_indices * beam_size * beam_size + tf.reshape(k_indices, shape=[-1]) # [batch_size * beam_size]
# Update scores.
scores = tf.reshape(scores, [-1])
scores = tf.gather(scores, k_indices)
# Update predictions.
next_preds = tf.reshape(next_preds, shape=[-1])
next_preds = tf.gather(next_preds, indices=k_indices)
# k_indices: [0~batch*beam*beam], preds: [0~batch*beam]
# preds, cache_lm, cache_decoder: these data are shared during the beam expand among vocab
preds = tf.gather(preds, indices=k_indices // beam_size)
cache_lm = tf.gather(cache_lm, indices=k_indices // beam_size)
cache_decoder = tf.gather(cache_decoder, indices=k_indices // beam_size)
preds = tf.concat([preds, next_preds[:, None]], axis=1) # [batch_size * beam_size, i]
has_eos = tf.equal(next_preds, self.end_token)
finished = tf.logical_or(finished, has_eos)
len_decoded += 1-tf.to_int32(finished)
# i = tf.Print(i, [i], message='i: ', summarize=1000)
return i+1, preds, scores, cache_decoder, cache_lm, logits, len_decoded, finished
def not_finished(i, preds, scores, cache_decoder, cache_lm, logit, len_decoded, finished):
# i = tf.Print(i, [i], message='i: ', summarize=1000)
return tf.logical_and(
tf.reduce_any(tf.logical_not(finished)),
tf.less(
i,
tf.reduce_min([tf.shape(encoded)[1], self.args.max_len]) # maxlen = 100
)
)
_, preds, scores_am, _, _, logits, len_decoded, finished = tf.while_loop(
cond=not_finished,
body=step,
loop_vars=[0, token_init, scores_init, cache_decoder_init, cache_lm_init, logits_init, len_decoded_init, finished_init],
shape_invariants=[tf.TensorShape([]),
tf.TensorShape([None, None]),
tf.TensorShape([None]),
tf.TensorShape([None, None, None, None]),
tf.TensorShape([None, None, None, None]),
tf.TensorShape([None, None, self.dim_output]),
tf.TensorShape([None]),
tf.TensorShape([None])]
)
# [batch_size * beam_size, ...]
len_decoded -= 1-tf.to_int32(finished) # for decoded length cut by encoded length
preds = preds[:, 1:]
not_padding = tf.sequence_mask(len_decoded, dtype=tf.int32)
preds *= not_padding
# [batch_size , beam_size, ...]
if self.args.model.rerank:
assert self.lm
with tf.variable_scope(self.args.top_scope, reuse=True):
with tf.variable_scope(self.args.lm_scope):
scores_lm, distribution = self.lm.decoder.score(preds, len_decoded)
scores_lm = self.args.lambda_rerank * scores_lm
else:
scores_lm = tf.zeros_like(scores_am)
scores = scores_am + scores_lm
# tf.nn.top_k is used to sort `scores`
scores_sorted, sorted = tf.nn.top_k(tf.reshape(scores, [batch_size, beam_size]),
k=beam_size,
sorted=True)
sorted = base_indices * beam_size + tf.reshape(sorted, shape=[-1]) # [batch_size * beam_size]
# [batch_size * beam_size, ...]
logits_sorted = tf.gather(logits, sorted)
preds_sorted = tf.gather(preds, sorted)
len_decoded_sorted = tf.gather(len_decoded, sorted)
scores_lm_sorted = tf.gather(scores_lm, sorted)
scores_am_sorted = tf.gather(scores_am, sorted)
# [batch_size, beam_size, ...]
scores_lm_sorted = tf.reshape(scores_lm_sorted, shape=[batch_size, beam_size])
scores_am_sorted = tf.reshape(scores_am_sorted, shape=[batch_size, beam_size])
preds_sorted = tf.reshape(preds_sorted, shape=[batch_size, beam_size, -1]) # [batch_size, beam_size, max_length]
logits_sorted = tf.reshape(logits_sorted, [batch_size, beam_size, -1, self.dim_output])
len_decoded_sorted = tf.reshape(len_decoded_sorted, [batch_size, beam_size])
# return logits, final_preds, len_encoded
return [logits_sorted, preds_sorted, len_decoded_sorted, scores_am_sorted, scores_lm_sorted], preds_sorted[:, 0, :], len_decoded_sorted[:, 0]
def forward(self, i, preds, state_decoder):
"""
self.cell
self.encoded
"""
prev_emb = self.embedding(preds[:, -1])
decoder_input = tf.concat([self.encoded[:, i, :], prev_emb], axis=1)
decoder_input.set_shape([None, self.num_cell_units_en+self.size_embedding])
with tf.variable_scope(self.name or 'decoder', reuse=True):
with tf.variable_scope("decoder_lstms"):
output_decoder, state_decoder = tf.contrib.legacy_seq2seq.rnn_decoder(
decoder_inputs=[decoder_input],
initial_state=state_decoder,
cell=self.cell)
cur_logit = tf.layers.dense(
inputs=output_decoder[0],
units=self.dim_output,
activation=None,
use_bias=False,
name='fully_connected'
)
cur_ids = tf.to_int32(tf.argmax(cur_logit, -1))
return cur_ids, state_decoder
| [
"yicheng1994@outlook.com"
] | yicheng1994@outlook.com |
26f14b35c752a7d1fd10c8f885a2e3131898d6cf | db0dee282250f796b80f4a401313e88d9d916d88 | /blocks.py | d1ce28abe22860443a14238617822d9a35e8122d | [] | no_license | KoSTyA-bel/- | ee1e5b212556d27ff4cd95dcf7b4ba002c2f354e | 7fbbd09faa1665c8d7f715dca126eb617bd3a852 | refs/heads/main | 2023-01-29T15:29:43.378391 | 2020-12-13T17:05:00 | 2020-12-13T17:05:00 | 319,144,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from pygame import *
from settings import *
import os
class Platform(sprite.Sprite):
def __init__(self, x, y, way = "%s/block/block.png" % ICON_DIR):
sprite.Sprite.__init__(self)
self.image = Surface((PLATFORM_WIDTH, PLATFORM_HEIGHT))
self.image.fill(Color(PLATFORM_COLOR))
self.image = image.load(way).convert_alpha()
self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT)
class BlockDie(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image = image.load("%s/block/die.png" % ICON_DIR).convert_alpha()
class End(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image = image.load("%s/block/win.png" % ICON_DIR).convert_alpha()
class Half(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image = image.load("%s/block/H.png" % ICON_DIR).convert_alpha()
self.rect = Rect(x, y, PLATFORM_WIDTH, PLATFORM_HEIGHT / 2)
class Magnit(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image = image.load("%s/block/magnit.png" % ICON_DIR).convert_alpha()
class Coin(Platform):
def __init__(self, x, y):
Platform.__init__(self, x, y)
self.image = image.load("block/coin.png")
self.rect = Rect(x + 6, y + 6, PLATFORM_WIDTH-16, PLATFORM_HEIGHT)
# class Movable(Platform):
# def __init__(self, x, y):
# Platform.__init__(self, x, y)
# self.image = image.load("block/move.png")
# self.xvel = 0 #скорость перемещения. 0 - стоять на месте
# self.yvel = 0 # скорость вертикального перемещения
# self.onGround = False # На земле ли я?
# def move(self, xvel, platforms):
# self.rect.x += xvel
# def getY(self):
# return(self.rect.y)
| [
"kostyafedorakin@gmail.com"
] | kostyafedorakin@gmail.com |
02aa59d26fc48158bc4766c151f95dc07c225252 | 09cb3293dc340e9fddc09967a0ea508baaaeaa0f | /venv/Scripts/django-admin.py | 837d0d9d7292c77d91588931818ad72488fdbd5e | [] | no_license | sreekanth9393/djangoproject5 | f9a4aa03fd13231b1416f2b7492b4c32fb9f8d41 | db7270c274bdde80de48d20faf2f0800625d5e95 | refs/heads/master | 2020-09-10T23:48:20.024736 | 2019-11-15T13:18:28 | 2019-11-15T13:18:28 | 221,869,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | #!C:\Pycharm\PycharmProjects\djangoproject5\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"sreekanthsomavarapu@gmail.com"
] | sreekanthsomavarapu@gmail.com |
2102df986d73ba8bded087840712c105503e1d9e | 1e660c91d0ae300ad6907a97941441fc8e73d5dc | /api/models/mixins.py | aa77a920c2b5911c3ee17653ec4e9346cb85c4ce | [] | no_license | SEUNAGBEYE/Stocky | 55d65e8ba7e7ff5228863e3c242c6499b2078ca7 | b2129b0a166a08d14c809cf4e0d711a7c469c91c | refs/heads/develop | 2023-02-23T11:26:46.160005 | 2019-04-01T04:11:06 | 2019-04-01T04:11:06 | 178,017,757 | 0 | 0 | null | 2023-02-07T22:21:11 | 2019-03-27T15:00:34 | Python | UTF-8 | Python | false | false | 2,735 | py | """Module for generic model operations mixin."""
from .config import db
class ModelMixin:
"""Mixin class with generic model operations."""
def save(self):
"""
Save a model instance
"""
db.session.add(self)
db.session.commit()
return self
def update_(self, **kwargs):
"""
Updates a record
Args:
kwargs (dict): Key-value pair of the attributes to update
Returns:
(dict) The updated record
"""
for field, value in kwargs.items():
setattr(self, field, value)
db.session.commit()
@classmethod
def get(cls, id):
"""
Gets a record by id
Args:
id (int): Unique identifier for the recod
Returns:
(dict) The found record
"""
return cls.query.get(id)
@classmethod
def get_or_404(cls, id):
"""
Gets a record or return 404
Args:
id (int): Unique identifier for the recod
Returns:
(dict) The found record
Raises:
(exception) Not found exeption if the record does not exist
"""
record = cls.get(id)
if not record:
raise ValidationError(
{
'message':
f'{re.sub(r"(?<=[a-z])[A-Z]+",lambda x: f" {x.group(0).lower()}" , cls.__name__)} not found' # noqa
},
404)
return record
def delete(self):
"""
Soft delete a model instance.
"""
pass
@classmethod
def count(cls):
"""
Returns the number of records that satify a query
"""
return cls.query.count()
@classmethod
def find_or_create(cls, data, **kwargs):
"""
Finds a model instance or creates it
Args:
data (dict): details of the record to be created
Returns:
(dict) The found record or newly created record
"""
instance = cls.query.filter_by(**kwargs).first()
if not instance:
instance = cls(**data).save()
return instance
@classmethod
def bulk_create(cls, objects):
"""
Saves a list of records (dict) to database
Args:
objects (list): List of records to be saved to database
Returns:
(list): A list of the newly created records
"""
resource_list = [cls(**item) for item in objects]
db.session.add_all(resource_list)
db.session.commit()
return resource_list
| [
"agbeyeseun1@gmail.com"
] | agbeyeseun1@gmail.com |
5cc2a666123a92f9eb91a1cc6b9b54c6c187a56c | 59c9c3b48fc42796b025d76a3f2bbca437be2b35 | /youkou_djT/apps/doc/migrations/0001_initial.py | 8c9fa8352e400ba0c383dbf6d5e0faa0abf2a041 | [] | no_license | hx123456666/youkou | f1b76a011e8445443730cc0c84ae85bc281f3ec0 | ee67b5e5c4555dc8a423325501b64645283ef5dc | refs/heads/master | 2020-04-16T14:03:35.810152 | 2019-03-21T03:10:27 | 2019-03-21T03:10:27 | 165,653,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | # Generated by Django 2.1.2 on 2019-03-19 05:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Doc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='逻辑删除')),
('file_url', models.URLField(help_text='文件url', verbose_name='文件url')),
('title', models.CharField(help_text='文档标题', max_length=150, verbose_name='文档标题')),
('desc', models.TextField(help_text='文档描述', verbose_name='文档描述')),
('image_url', models.URLField(default='', help_text='图片url', verbose_name='图片url')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
'db_table': 'tb_docs',
},
),
]
| [
"huangxiang@163.con"
] | huangxiang@163.con |
aa0da87f9190e8296e72752194ba5b8957bb36fa | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/meetup/2b4a2462e86149f3a94264f7c35aef7a.py | ac0930b773da25cb6f1e91324fa9ea02ed62294a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 614 | py | from calendar import monthrange
from datetime import date
def meetup_day(year, month, day_of_the_week, which):
month_length = monthrange(year, month)[1]
days_in_month = (date(year, month, day)
for day in range(1, month_length + 1))
candidates = [date_
for daye_ in days_in_month
if day_name(date_) == day_of_the_week]
if which == 'teenth':
return next(d for d in candidates if 13 <= d.day <= 19)
if which == 'last':
return candidates[-1]
return candidates[int(which[0]) - 1 ]
def day_name(date_):
return date_.strftime('%A')
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
080ddfede231890852c5760f01660aee75767770 | 3e5d982a3b50ab9b3c9513061c3da1d3c9fbc06c | /model.py | 9d6c35900343991b19a04c5c9724aa496b6d30ef | [] | no_license | zhulingchen/my-pix2pix | 2e7347d7e5d7758c68c13e63040f9417ef5f19a3 | aaa89ff663847fb44e31f69187fd77167d023cd3 | refs/heads/main | 2022-12-31T01:25:46.345864 | 2020-10-28T03:50:14 | 2020-10-28T03:50:14 | 302,352,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,846 | py | import os
import yaml
import time
from datetime import datetime
import warnings
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import torchvision.transforms as transforms
from torchsummary import summary
import numpy as np
from PIL import Image
from dataset import *
def get_norm_layer(name):
    """Look up a 2D normalization layer class by name.

    Parameters:
        name (str): normalization name, case-insensitive ('batch' or 'instance')
    Returns:
        the nn.Module subclass (nn.BatchNorm2d or nn.InstanceNorm2d)
    Raises:
        NotImplementedError: for any unsupported name
    """
    supported = {'batch': nn.BatchNorm2d, 'instance': nn.InstanceNorm2d}
    key = name.lower()
    if key not in supported:
        raise NotImplementedError('Normalization layer name {:s} is not supported.'.format(key))
    return supported[key]
def get_gan_loss(name, device):
    """Return a GAN criterion ``loss(input, target)`` selected by *name*.

    The returned callable takes a tensor of discriminator outputs and a
    scalar target (bool/int/float, real-vs-fake) and returns a scalar
    loss tensor.

    Parameters:
        name (str): loss name, case-insensitive ('vanilla' or 'wgangp')
        device (torch.device): device the broadcast target tensor is moved to
    Raises:
        NotImplementedError: for any unsupported loss name
    """
    kind = name.lower()
    if kind == 'vanilla':
        # non-saturating GAN loss: BCE-with-logits against a constant label
        def vanilla_gan_loss(input, target):
            assert isinstance(input, torch.Tensor) and isinstance(target, (bool, int, float))
            labels = torch.tensor(target).expand_as(input).float().to(device)
            return F.binary_cross_entropy_with_logits(input, labels)
        return vanilla_gan_loss
    if kind == 'wgangp':
        # Wasserstein critic loss: negate the mean score for "real" targets
        def wasserstein_loss(input, target):
            assert isinstance(input, torch.Tensor) and isinstance(target, (bool, int, float))
            mean_score = input.mean()
            return -mean_score if bool(target) else mean_score
        return wasserstein_loss
    raise NotImplementedError('GAN loss name {:s} is not supported.'.format(kind))
def denormalize_image(image):
    """Convert a CHW tensor with values in [-1, 1] into an HWC uint8 array.

    Parameters:
        image (torch.Tensor): image tensor of shape (C, H, W), typically a
            Tanh-scaled generator output in [-1, 1]
    Returns:
        np.ndarray: uint8 image of shape (H, W, C) with values in [0, 255]
    """
    assert isinstance(image, torch.Tensor)
    array_chw = image.cpu().numpy()
    array_hwc = np.transpose(array_chw, (1, 2, 0))
    rescaled = (array_hwc + 1) / 2.0 * 255.0
    return rescaled.astype(np.uint8)
class LayerNormWrapper(nn.Module):
    """A wrapper module of nn.LayerNorm that uses input shapes during the forward process.

    Unlike nn.LayerNorm, no normalized shape is fixed at construction time:
    every forward call normalizes over all non-batch dimensions of its input.
    """
    def __init__(self, eps=1e-5):
        # eps: small constant added inside the normalization for numerical stability
        super(LayerNormWrapper, self).__init__()
        self.eps = eps

    def forward(self, input):
        # normalize over every dimension except the leading (batch) dimension
        normalized_shape = input.shape[1:]
        return F.layer_norm(input, normalized_shape, eps=self.eps)
class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.
    + -------------------identity--------------------
    |-- downsampling -- |submodule| -- upsampling --|

    Each block wraps a deeper ``submodule`` between one strided conv
    (downsampling) and one transposed conv (upsampling). The forward pass
    concatenates the block's input with its output along the channel axis,
    except for the outermost block, which returns the output directly.
    """
    def __init__(self, n_outer_channels, n_inner_channels, n_input_channels=None,
                 submodule=None, outermost=False, innermost=False, norm_layer='batch_norm', use_dropout=False):
        """Construct a Unet submodule with skip connections.
        Parameters:
            n_outer_channels (int): the number of filters in the outer conv layer
            n_inner_channels (int): the number of filters in the inner conv layer
            n_input_channels (int): the number of channels in input images/features
                (defaults to n_outer_channels when None)
            submodule (UnetSkipConnectionBlock): previously defined submodules
            outermost (bool): if this module is the outermost module
            innermost (bool): if this module is the innermost module
            norm_layer (str): normalization layer name
            use_dropout (bool): if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        norm_layer = get_norm_layer(norm_layer)
        # conv bias is presumably skipped before BatchNorm2d because the norm's
        # affine shift absorbs it; InstanceNorm2d keeps the bias
        use_bias = (norm_layer == nn.InstanceNorm2d)
        if n_input_channels is None:
            n_input_channels = n_outer_channels
        # 4x4 kernel with stride 2 and padding 1 halves the spatial size going
        # down (and doubles it in the matching ConvTranspose2d going up)
        downconv = nn.Conv2d(n_input_channels, n_inner_channels,
                             kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(n_inner_channels)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(n_outer_channels)
        if outermost:
            # outermost block: no normalization on either end; Tanh maps the
            # final output into [-1, 1]
            upconv = nn.ConvTranspose2d(n_inner_channels * 2, n_outer_channels, # in_channels is doubled because of the previous concatenation
                                        kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            # innermost block (bottleneck): no submodule below, so the upconv
            # input channels are NOT doubled
            upconv = nn.ConvTranspose2d(n_inner_channels, n_outer_channels,
                                        kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            # intermediate block: full down/norm path, recursive submodule,
            # full up/norm path
            upconv = nn.ConvTranspose2d(n_inner_channels * 2, n_outer_channels, # in_channels is doubled because of the previous concatenation
                                        kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            model = down + [submodule] + up
            # dropout (if enabled) is appended after the up path of this block
            if use_dropout:
                model += [nn.Dropout(0.5)]
        self.model = nn.Sequential(*model)
    def forward(self, x):
        # add skip connections by concatenation on the channel axis in the non-outermost blocks
        return self.model(x) if self.outermost else torch.cat([x, self.model(x)], 1)
class Pix2pixGenerator(nn.Module):
    """Define a Unet-based generator"""
    def __init__(self, n_input_channels, n_output_channels, num_downs,
                 n_first_conv_filters=64, norm_layer='batch_norm', use_dropout=False):
        """Construct a U-net generator
        Parameters:
            n_input_channels (int): the number of channels in input images
            n_output_channels (int): the number of channels in output images
            num_downs (int): the number of downsamplings in UNet.
                For example, if |num_downs| == 7, image of size 128x128 will
                become of size 1x1 at the bottleneck
            n_first_conv_filters (int): the number of filters in the outermost
                (first) conv layer; deeper layers scale this up to 8x
            norm_layer (str): normalization layer name
            use_dropout (bool): if use dropout layers in the intermediate blocks
        Construct the U-net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(Pix2pixGenerator, self).__init__()
        # add the innermost layer (bottleneck)
        unet_block = UnetSkipConnectionBlock(n_first_conv_filters * 8, n_first_conv_filters * 8,
                                             innermost=True, norm_layer=norm_layer)
        # add intermediate layers with n_first_conv_filters * 8 filters
        # (num_downs - 5 of them; 5 downsamplings are accounted for by the
        # innermost block, the three widening blocks and the outermost block)
        for _ in range(num_downs - 5):
            unet_block = UnetSkipConnectionBlock(n_first_conv_filters * 8, n_first_conv_filters * 8,
                                                 submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the number of filters from n_first_conv_filters * 8 to n_first_conv_filters
        unet_block = UnetSkipConnectionBlock(n_first_conv_filters * 4, n_first_conv_filters * 8,
                                             submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(n_first_conv_filters * 2, n_first_conv_filters * 4,
                                             submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(n_first_conv_filters, n_first_conv_filters * 2,
                                             submodule=unet_block, norm_layer=norm_layer)
        # add the outermost layer (maps back to n_output_channels with Tanh)
        self.model = UnetSkipConnectionBlock(n_output_channels, n_first_conv_filters, n_input_channels=n_input_channels,
                                             submodule=unet_block, outermost=True, norm_layer=norm_layer)
    def forward(self, input_src):
        """Run the U-net on a batch of source images and return the translation."""
        return self.model(input_src)
class Pix2pixDiscriminator(nn.Module):
    """PatchGAN discriminator for pix2pix: scores overlapping patches of a pair."""
    def __init__(self, n_input_channels, loss_type='vanilla', n_first_conv_filters=64, n_layers=3, norm_layer='batch_norm'):
        """Construct the PatchGAN discriminator.

        Parameters:
            n_input_channels (int): number of channels in the concatenated input
            loss_type (str): GAN loss name; 'wgangp' swaps in LayerNormWrapper
            n_first_conv_filters (int): number of filters in the first conv layer
            n_layers (int): number of conv layers after the first one
            norm_layer (str): normalization layer name
        """
        super(Pix2pixDiscriminator, self).__init__()
        norm_layer = get_norm_layer(norm_layer)
        use_bias = (norm_layer == nn.InstanceNorm2d)
        nf = n_first_conv_filters
        layers = [nn.Conv2d(n_input_channels, nf, kernel_size=4, stride=2, padding=1),
                  nn.LeakyReLU(0.2, True)]
        # gradually increase the filter count, capped at 8x
        prev_mult, mult = 1, 1
        for n in range(1, n_layers + 1):
            mult = min(2 ** n, 8)
            layers.append(nn.Conv2d(nf * prev_mult, nf * mult,
                                    kernel_size=4, stride=2 if n < n_layers else 1,
                                    padding=1, bias=use_bias))
            # wgangp cannot use batch norm inside the critic, so wrap layer norm instead
            layers.append(LayerNormWrapper() if loss_type == 'wgangp' else norm_layer(nf * mult))
            layers.append(nn.LeakyReLU(0.2, True))
            prev_mult = mult
        # final conv produces a 1-channel patch prediction map
        layers.append(nn.Conv2d(nf * mult, 1, kernel_size=4, stride=1, padding=1))
        self.model = nn.Sequential(*layers)
    def forward(self, input_src, input_tgt):
        """Score a (source, target) pair; the two are concatenated on channels."""
        combined = torch.cat([input_src, input_tgt], dim=1)
        return self.model(combined)
class Pix2pixGAN():
    """Define a Pix2pix GAN: wires up the generator/discriminator, the data
    pipeline, the training loop, and test-time inference."""
    def __init__(self, args):
        """Construct a Pix2pix GAN
        Parameters:
            args (argparse.Namespace): argument list; uses .config (path to a
                YAML file), .dataset (dataset name), .mode ('train' or not)
                and, in test mode, .input (list of image paths)
        """
        self.device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
        # self.config holds the config *path* here; __load_config replaces it with the parsed dict
        self.config = args.config
        self.dataset = args.dataset
        self.is_train = (args.mode == 'train')
        self.__load_config()
        self.__build_generator()
        if self.is_train:
            self.__load_dataset()
            self.__build_discriminator()
            self.gan_loss = get_gan_loss(self.config['loss'], self.device)
            self.l1_loss = nn.L1Loss()
            self.opt_g = torch.optim.Adam(self.generator.parameters(),
                                          lr=self.config['lr_g'],
                                          betas=(self.config['beta1'], self.config['beta2']))
            self.opt_d = torch.optim.Adam(self.discriminator.parameters(),
                                          lr=self.config['lr_d'],
                                          betas=(self.config['beta1'], self.config['beta2']))
        else:
            self.test_images_path = [os.path.normpath(i) for i in args.input]
    def __init_weights(self, net, type='normal', gain=0.02):
        """Initialize network weights
        Parameters:
            net (network) -- network to be initialized
            type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
            gain (float) -- scaling factor for normal, xavier and orthogonal.
        Initialization type 'normal' was used in the original pix2pix and CycleGAN paper. But xavier and kaiming might
        work better for some applications. Feel free to try yourself.
        """
        def init_func(m): # define the initialization function
            classname = m.__class__.__name__
            if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if type == 'normal':
                    init.normal_(m.weight.data, 0.0, gain)
                elif type == 'xavier':
                    init.xavier_normal_(m.weight.data, gain=gain)
                elif type == 'kaiming':
                    init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif type == 'orthogonal':
                    init.orthogonal_(m.weight.data, gain=gain)
                else:
                    raise NotImplementedError('initialization method [%s] is not implemented' % type)
                if hasattr(m, 'bias') and m.bias is not None:
                    init.constant_(m.bias.data, 0.0)
            elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
                init.normal_(m.weight.data, 1.0, gain)
                init.constant_(m.bias.data, 0.0)
        # apply the initialization function <init_func>
        net.apply(init_func)
    def __load_config(self):
        """Parse the YAML config file; self.config becomes the config dict."""
        with open(self.config, 'r') as f:
            self.config = yaml.safe_load(f)
    def __load_image_transforms(self):
        """Return (source, target) transforms: resize to the configured size,
        convert to tensor, and normalize each channel to [-1, 1]."""
        transforms_src = transforms.Compose([transforms.ToPILImage(),
                                             transforms.Resize((self.config['image_rows'], self.config['image_cols'])),
                                             transforms.ToTensor(),
                                             transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        transforms_tgt = transforms.Compose([transforms.ToPILImage(),
                                             transforms.Resize((self.config['image_rows'], self.config['image_cols'])),
                                             transforms.ToTensor(),
                                             transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        return transforms_src, transforms_tgt
    def __load_dataset(self):
        """Build the train (and optional validation) dataloaders from
        datasets/<name>/train and datasets/<name>/val; sets self.use_val."""
        train_dataset_dir = 'datasets/{:s}/train'.format(self.dataset)
        val_dataset_dir = 'datasets/{:s}/val'.format(self.dataset)
        if not os.path.exists(train_dataset_dir):
            raise ValueError('Train image directory {:s} does not exist.'.format(train_dataset_dir))
        if not os.path.exists(val_dataset_dir):
            self.use_val = False
            warnings.warn('Validation image directory {:s} does not exist.'.format(val_dataset_dir))
        train_dataset = Pix2pixDataset(train_dataset_dir, *self.__load_image_transforms())
        # NOTE(review): this assert iterates the whole dataset up front, which can be slow for large sets.
        assert all(s[0].shape == s[1].shape for s in train_dataset) and (len(set(s[0].shape for s in train_dataset)) == 1), \
            "The shape of all source and target images must be the same."
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.config['batch_size'], shuffle=True, num_workers=0)
        print('Loaded {:d} training samples from {:s} '\
              '(batch size: {:d}, number of batches: {:d})'.format(len(train_dataset), train_dataset_dir,
                                                                   self.config['batch_size'], len(self.train_dataloader)))
        if os.path.exists(val_dataset_dir):
            self.use_val = True
            val_dataset = Pix2pixDataset(val_dataset_dir, *self.__load_image_transforms())
            assert all(s[0].shape == s[1].shape for s in val_dataset) and (len(set(s[0].shape for s in val_dataset)) == 1), \
                "The shape of all source and target images must be the same."
            self.val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=True, num_workers=0)
            print('Loaded {:d} validation samples from {:s} ' \
                  '(batch size: {:d}, number of batches: {:d})'.format(len(val_dataset), val_dataset_dir,
                                                                       1, len(self.val_dataloader)))
    def __build_generator(self):
        """Instantiate, weight-initialize, and summarize the U-net generator."""
        self.generator = Pix2pixGenerator(n_input_channels=self.config['image_chns'],
                                          n_output_channels=self.config['image_chns'],
                                          num_downs=self.config['generator_downsamplings'],
                                          n_first_conv_filters=self.config['generator_first_conv_filters'],
                                          norm_layer=self.config['norm_layer'],
                                          use_dropout=self.config['use_dropout'])
        # initialize network weights
        print('Initialize generator network with {:s}'.format(self.config['init_type']))
        self.__init_weights(self.generator, self.config['init_type'], self.config['init_gain'])
        self.generator.to(self.device)
        print('Pix2pix generator architecture')
        summary(self.generator, (self.config['image_chns'], self.config['image_rows'], self.config['image_cols']),
                device='cuda' if 'cuda' in str(self.device) else 'cpu')
    def __build_discriminator(self):
        """Instantiate, weight-initialize, and summarize the PatchGAN
        discriminator; its input is the source/target pair (2x channels)."""
        self.discriminator = Pix2pixDiscriminator(n_input_channels=2 * self.config['image_chns'],
                                                  loss_type=self.config['loss'],
                                                  n_first_conv_filters=self.config['discriminator_first_conv_filters'],
                                                  n_layers=self.config['discriminator_conv_layers'],
                                                  norm_layer=self.config['norm_layer'])
        # initialize network weights
        print('Initialize discriminator network with {:s}'.format(self.config['init_type']))
        self.__init_weights(self.discriminator, self.config['init_type'], self.config['init_gain'])
        self.discriminator.to(self.device)
        print('Pix2pix discriminator architecture')
        summary(self.discriminator, [(self.config['image_chns'], self.config['image_rows'], self.config['image_cols'])] * 2,
                device='cuda' if 'cuda' in str(self.device) else 'cpu')
    def __get_gradient_penalty_loss(self, real, fake, constant=1.0):
        """WGAN-GP gradient penalty: mean squared deviation of the critic's
        gradient norm from `constant`, measured on random interpolations
        between `real` and `fake` samples."""
        batch_size = real.shape[0]
        alpha = torch.rand(batch_size, 1, 1, 1)
        alpha = alpha.expand_as(real).to(self.device)
        interpolated = alpha * real + (1 - alpha) * fake
        interpolated.requires_grad_(True)
        dummy = torch.empty(batch_size, 0, self.config['image_rows'], self.config['image_cols']).to(self.device) # to fit the discriminator input argument list
        disc_interpolated = self.discriminator(interpolated, dummy)
        grad_interpolated = torch.autograd.grad(outputs=disc_interpolated, inputs=interpolated,
                                                grad_outputs = torch.ones_like(disc_interpolated),
                                                create_graph = True, retain_graph = True, only_inputs = True)[0]
        grad_interpolated = grad_interpolated.view(batch_size, -1) # flat the data
        # small epsilon keeps sqrt differentiable at zero
        grad_norm = torch.sqrt(torch.sum(grad_interpolated ** 2, dim=1) + 1e-16)
        return torch.mean((grad_norm - constant) ** 2)
    def train(self):
        """Adversarial training loop: per batch, update the discriminator then
        (every `dg_train_ratio` batches) the generator; periodically save
        validation outputs and model checkpoints."""
        train_start_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        for epoch in range(self.config['epochs']):
            epoch_start_time = time.time()
            # train each epoch
            for batch, (real_src, real_tgt, _) in enumerate(self.train_dataloader):
                real_src = real_src.to(self.device)
                real_tgt = real_tgt.to(self.device)
                # generate fake target
                fake_tgt = self.generator(real_src)
                # update discriminator
                for param in self.discriminator.parameters(): # enable backprop for discriminator
                    param.requires_grad = True
                self.opt_d.zero_grad() # clear discriminator gradients
                pred_fake = self.discriminator(real_src, fake_tgt.detach()) # discriminate fake; stop backprop to the generator
                loss_d_fake = self.gan_loss(pred_fake, False) # discriminator loss on fake
                pred_real = self.discriminator(real_src, real_tgt) # discriminate real
                loss_d_real = self.gan_loss(pred_real, True) # discriminator loss on real
                loss_d = loss_d_fake + loss_d_real # total discriminator loss
                if self.config['loss'] == 'wgangp': # add gradient penalty for wgangp
                    loss_gp = self.config['lambda_gp'] * self.__get_gradient_penalty_loss(real=torch.cat([real_src, real_tgt], dim=1),
                                                                                          fake=torch.cat([real_src, fake_tgt.detach()], dim=1))
                    loss_d += loss_gp
                loss_d.backward()
                self.opt_d.step() # update discriminator weights
                # update generator
                if (batch + 1) % self.config['dg_train_ratio'] == 0:
                    for param in self.discriminator.parameters(): # disable backprop for discriminator
                        param.requires_grad = False
                    self.opt_g.zero_grad() # clear generator gradients
                    pred_fake = self.discriminator(real_src, fake_tgt) # discriminate fake
                    loss_g_gan = self.gan_loss(pred_fake, True) # gan loss on fake; let discriminator think fake_tgt is real
                    loss_g_l1 = self.config['lambda_l1'] * F.l1_loss(fake_tgt, real_tgt) # weighted L1-loss
                    loss_g = loss_g_gan + loss_g_l1
                    loss_g.backward()
                    self.opt_g.step() # update generator weights
            # print end-of-epoch log message
            # NOTE(review): loss_g is only bound after at least one generator update has
            # happened this run; a dg_train_ratio larger than the batch count of the first
            # epoch would raise NameError here -- confirm against the configs in use.
            log_message = 'Epoch {:d} / {:d}: \t Elapsed Time: {:.4f} sec \t'.format(epoch + 1, self.config['epochs'],
                                                                                     time.time() - epoch_start_time)
            log_message += 'G_loss: {:.4f}\t'.format(loss_g.item())
            log_message += 'D_loss: {:.4f}'.format(loss_d.item())
            if self.config['loss'] == 'wgangp':
                log_message += ' (GP_loss: {:.4f})'.format(loss_gp.item())
            print(log_message)
            # save validation results
            if ((epoch + 1) % self.config['val_freq'] == 0) and self.use_val:
                self.__save_val(train_start_time, epoch + 1)
            # save models
            if ((epoch + 1) % self.config['save_freq'] == 0) or (epoch == self.config['epochs'] - 1):
                self.save_models(train_start_time, epoch + 1)
    def __save_val(self, tag=None, epoch=None):
        """Run the generator on one validation sample and save a side-by-side
        image (source | generated | target) under datasets/<name>/val_output."""
        val_output_dir = 'datasets/{:s}/val_output/{:s}'.format(self.dataset, tag) if tag is not None \
            else 'datasets/{:s}/val_output'.format(self.dataset)
        if not os.path.exists(val_output_dir):
            os.makedirs(val_output_dir)
        # take a sample to validate the generator
        real_src, real_tgt, real_path = next(iter(self.val_dataloader)) # batch dimension shape is 1
        with torch.no_grad():
            fake_tgt = self.generator(real_src.to(self.device))
        # denormalize images
        real_src = denormalize_image(real_src[0])
        fake_tgt = denormalize_image(fake_tgt[0])
        real_tgt = denormalize_image(real_tgt[0])
        # prepare output filename
        real_path = real_path[0]
        real_filename = real_path.split(os.sep)[-1]
        real_filename_base, real_filename_ext = os.path.splitext(real_filename)
        if epoch is not None:
            real_filename_base = 'epoch_{:d}_{:s}'.format(epoch, real_filename_base)
        # save numpy array as an image
        val_output_image = np.concatenate([real_src, fake_tgt, real_tgt], axis=1)
        val_output_image = Image.fromarray(val_output_image, 'RGB')
        val_output_path = os.path.join(os.path.normpath(val_output_dir), real_filename_base + real_filename_ext)
        val_output_image.save(val_output_path)
        print('Validation is saved to {:s}.'.format(val_output_path))
    def save_models(self, tag=None, epoch=None):
        """Save generator and discriminator state dicts under
        datasets/<name>/model[/<tag>]; models are moved to CPU for saving and
        then back to self.device."""
        model_dir = 'datasets/{:s}/model/{:s}'.format(self.dataset, tag) if tag is not None \
            else 'datasets/{:s}/model'.format(self.dataset)
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        generator_model_filename = 'generator_epoch_{:d}.pth'.format(epoch) if epoch is not None \
            else 'generator.pth'
        discriminator_model_filename = 'discriminator_epoch_{:d}.pth'.format(epoch) if epoch is not None \
            else 'discriminator.pth'
        generator_model_path = os.path.join(os.path.normpath(model_dir), generator_model_filename)
        discriminator_model_path = os.path.join(os.path.normpath(model_dir), discriminator_model_filename)
        torch.save(self.generator.cpu().state_dict(), generator_model_path)
        torch.save(self.discriminator.cpu().state_dict(), discriminator_model_path)
        self.generator.to(self.device)
        self.discriminator.to(self.device)
        print('Generator model is saved to {:s}.'.format(generator_model_path))
        print('Discriminator model is saved to {:s}'.format(discriminator_model_path))
    def test(self):
        """Run the generator on the configured input images and save each
        (source | generated) pair under datasets/<name>/test_output."""
        test_output_dir = 'datasets/{:s}/test_output'.format(self.dataset)
        if not os.path.exists(test_output_dir):
            os.makedirs(test_output_dir)
        transforms_src, _ = self.__load_image_transforms()
        # load test source images
        images, images_path = [], []
        for image_path in self.test_images_path:
            image = cv2.imread(image_path)
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # cv2.imread works with the BGR order
                image = transforms_src(image).unsqueeze(0).to(self.device)
                images.append(image)
                images_path.append(image_path)
                print('Loaded source image file {:s}'.format(image_path))
            else:
                warnings.warn('Source image file {:s} was not loaded.'.format(image_path))
        images_src = torch.cat(images, dim=0)
        # generate target images and save
        with torch.no_grad():
            images_tgt = self.generator(images_src)
        for image_src, image_tgt, image_path in zip(images_src, images_tgt, images_path):
            # denormalize images
            image_src = denormalize_image(image_src)
            image_tgt = denormalize_image(image_tgt)
            # save numpy array as an image
            image = np.concatenate([image_src, image_tgt], axis=1)
            image = Image.fromarray(image, 'RGB')
            test_output_path = os.path.join(os.path.normpath(test_output_dir), image_path.split(os.sep)[-1])
            image.save(test_output_path)
            print('Saved target image file {:s}'.format(test_output_path))
    def load_models(self, generator_model_filename=None):
        """Load generator weights from datasets/<name>/model; defaults to
        'generator.pth' when no filename is given."""
        model_dir = 'datasets/{:s}/model'.format(self.dataset)
        if generator_model_filename is None:
            generator_model_filename = 'generator.pth'
        generator_model_path = os.path.join(os.path.normpath(model_dir), generator_model_filename)
        assert os.path.isfile(generator_model_path), "Generator model file must exist."
        state_dict = torch.load(generator_model_path, map_location=self.device)
        self.generator.load_state_dict(state_dict)
        print('Loaded generator model {:s}'.format(generator_model_path))
"zhulingchen@gmail.com"
] | zhulingchen@gmail.com |
971a7cfbc531597d27cc374ab49b3c2f655f988d | 90ed257f4e193b0b19e5bcb9d4a384b0cf6e6d3f | /MUSEUMS/spiders/collection62.py | 585e5c6d5b10bd99623f8bd781a67858ab1c5af1 | [] | no_license | BUCT-CS1701-SE-Design/webDataCollectionSystem | adc8ca97dda48c508909e73c02bb6622b93534b8 | f653b973b265d52e2ba4711b689c2de637a2cf8b | refs/heads/master | 2022-08-22T14:16:54.857847 | 2020-05-17T07:33:38 | 2020-05-17T07:33:38 | 256,792,222 | 1 | 1 | null | 2020-05-17T01:27:22 | 2020-04-18T15:49:35 | Python | UTF-8 | Python | false | false | 1,468 | py | # -*- coding: utf-8 -*-
import scrapy
from MUSEUMS.items import collection75Item
class Collection62Spider(scrapy.Spider):
    """Scrapes collection items for museum #62 from mtybwg.org.cn."""
    custom_settings = {
        'ITEM_PIPELINES': {'MUSEUMS.pipelines.Collection75Pipeline': 4},
    }
    name = 'collection62'
    allowed_domains = ['mtybwg.org.cn']
    start_urls = ['http://www.mtybwg.org.cn/cangpin.aspx']
    def parse(self, response):
        """Follow each listing entry on the index page to its detail listing."""
        for entry in response.xpath("//div[@class='rightcon']/ul/li"):
            detail_url = entry.xpath("./a/@href").extract_first()
            yield scrapy.Request(
                detail_url,
                callback=self.parse_deatil,
            )
    def parse_deatil(self, response):
        """Build one item per collection entry, then fetch its introduction page."""
        base = 'http://www.mtybwg.org.cn'
        for entry in response.xpath("//div[@class='rightcon']/ul/li"):
            item = collection75Item()
            item["museumID"] = 62
            item["collectionName"] = entry.xpath("./a[@class='tag2']/text()").extract_first()
            item["collectionImage"] = base + entry.xpath("./a/img/@src").extract_first()
            intro_url = base + entry.xpath("./a/@href").extract_first()
            yield scrapy.Request(
                intro_url,
                callback=self.parse_deatil2,
                meta={"item": item},
            )
    def parse_deatil2(self, response):
        """Attach the introduction text and emit the finished item."""
        item = response.meta["item"]
        item["collectionIntroduction"] = response.xpath("//div[@class='pluscon']/ul/text()").extract_first()
        yield item
| [
"1975188506@qq.com"
] | 1975188506@qq.com |
5870288f5584f73653b7b45cce7b9396d8c5ec26 | dc8d778a655c6b9ebd9567acf9a01748b90b64c4 | /djtest/urls.py | 1623b4f51681275becd0941cb90efaccb2f37aa7 | [] | no_license | calebrash/intro-to-django | 536ff48a11ca42ca3efe616c6fef3b420974c000 | 947e1a0e784c95ae5c2b9f6a6398942e93a74786 | refs/heads/master | 2021-07-07T11:15:19.231243 | 2017-10-01T18:57:07 | 2017-10-01T18:57:07 | 105,465,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from django.conf.urls import url
from django.contrib import admin
from djtest.views import CustomersListView, CustomerEditView
# URL routing table: customer list/edit pages plus the built-in admin site.
urlpatterns = [
    # List all customers at /customers/.
    url(r'^customers/$', CustomersListView.as_view(), name='customers_list'),
    # Name url params by prefixing a pattern with `?P<...>`
    # Edit one customer; `customer_id` is passed to the view as a keyword arg.
    url(r'^customers/(?P<customer_id>\d+)/$', CustomerEditView.as_view(), name='customers_edit'),
    url(r'^admin/', admin.site.urls),
]
| [
"caleb.s.rash@gmail.com"
] | caleb.s.rash@gmail.com |
4f5328545ef679a7331bf4b9f101c85ed9f7de58 | d362cc4e2c703a19f405fa539660b2b5a88a338e | /pk_clean.py | b0bcb449f896949872233177e9b710908445eeff | [] | no_license | armykongtap/Poker-NN | ce994c78a68f7134f46194e90fab06da3cc010bc | 5b9643e76cf24d9680461ff1bc2d1cd9e73ec21b | refs/heads/master | 2023-04-28T01:05:31.348453 | 2021-05-01T16:47:14 | 2021-05-01T16:47:14 | 363,431,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,646 | py | import glob
import pandas as pd
from name_dict import NAME_DICT
BB_SIZE = 4
all_files = glob.glob("poker_log/*.csv")
li = []
for f in all_files:
df = pd.read_csv(f, index_col="order")
li.append(df)
df = pd.concat(li, axis=0)
df = df.sort_index()
df = df.reset_index(drop=True)
df = df[["entry"]]
#%%
# Round No
df["round_no"] = df["entry"].str.contains("-- starting hand").cumsum()
#%%
# Pre-flop
df["is_small_blind"] = df["entry"].str.contains("posts a small blind of")
df["is_open_flop"] = df["entry"].str.contains("Flop:")
df.loc[df["is_small_blind"], "is_preflop"] = True
df.loc[df["is_open_flop"], "is_preflop"] = False
df["is_preflop"] = df["is_preflop"].fillna(method="pad").fillna(False)
df = df.drop(columns=["is_small_blind", "is_open_flop"])
#%%
# Player name
df["player_name"] = df["entry"].str.extract(r"\"(\S+) @ \S+\"", expand=False)
df["player_name"] = df["player_name"].replace(NAME_DICT)
assert set(df["player_name"].dropna()).issubset(
set(NAME_DICT.values())
), "Please add more name dict"
#%%
# Stack
stack = df.set_index("round_no")
is_stack = stack["entry"].str.contains("Player stacks:")
stack = stack.loc[is_stack, "entry"].str.extractall(
r"\"(?P<player_name>\S+) @ \S+\" \((?P<stack>\d+)\)"
)
stack["stack"] = pd.to_numeric(stack["stack"])
stack["stack"] = stack["stack"] / BB_SIZE
stack["player_name"] = stack["player_name"].replace(NAME_DICT)
stack = stack.reset_index("round_no")
stack = stack.reset_index(drop=True)
df = df.merge(stack, on=["player_name", "round_no"], how="left", validate="m:1")
# Drop less than 3 player round
player_no = stack.groupby("round_no").count()
drop_round = set(player_no[player_no["player_name"] < 3].index)
df = df[~df["round_no"].isin(drop_round)]
#%%
# Position
position = df.copy()
position["position"] = df["entry"].str.extract(
r"(small blind|big blind|dealer)", expand=False
)
position = position[["player_name", "round_no", "position"]].dropna()
position = position.drop_duplicates(
["round_no", "position"], keep="first"
) # Sit while playing would pay SB and BB
df = df.merge(position, on=["player_name", "round_no"], how="left", validate="m:1")
is_name = df["player_name"].notna()
df.loc[is_name] = df.loc[is_name].fillna({"position": "middle position"})
#%%
# Action
df["action"] = df["entry"].str.extract(
r"(calls \d+|bets \d+|raises to \d+|checks|folds)"
)
df["sizing"] = pd.to_numeric(df["action"].str.extract(r"(\d+)", expand=False))
df["sizing"] = df["sizing"] / BB_SIZE
df["action"] = df["action"].str.extract(r"(call|bet|raise|check|fold)")
is_action = df["action"].notna()
df.loc[is_action] = df.loc[is_action].fillna({"sizing": 0})
#%%
# Hand
hand = df.copy()
hand["hand"] = (
hand["entry"]
.str.extract(r"(shows a .*)", expand=False)
.str.split("shows a ")
.str[-1]
.str[:-1]
)
hand = hand[["round_no", "player_name", "hand"]].dropna()
hand[["hand1", "hand2"]] = hand["hand"].str.split(",", expand=True)
hand["hand1"] = hand["hand1"].str.strip()
hand["hand2"] = hand["hand2"].str.strip()
hand["hand1_rank"] = hand["hand1"].str[:-1]
hand["hand1_suit"] = hand["hand1"].str[-1]
hand["hand2_rank"] = hand["hand2"].str[:-1]
hand["hand2_suit"] = hand["hand2"].str[-1]
hand[["hand1_rank", "hand2_rank"]] = hand[["hand1_rank", "hand2_rank"]].replace(
{"A": "14", "J": "11", "Q": "12", "K": "13"}
)
hand["hand1_rank"] = pd.to_numeric(hand["hand1_rank"])
hand["hand2_rank"] = pd.to_numeric(hand["hand2_rank"])
hand[["hand1_suit", "hand2_suit"]] = hand[["hand1_suit", "hand2_suit"]].replace(
{"♠": "spade", "♥": "heart", "♦": "diamond", "♣": "club"}
)
hand = hand[
["round_no", "player_name", "hand1_rank", "hand1_suit", "hand2_rank", "hand2_suit"]
]
hand = hand.drop_duplicates()
df = df.merge(hand, on=["player_name", "round_no"], how="left", validate="m:1")
#%%
# Export for NN
out = df.loc[
df["is_preflop"],
[
"player_name",
"stack",
"position",
"action",
"sizing",
"hand1_rank",
"hand1_suit",
"hand2_rank",
"hand2_suit",
],
]
out = out.dropna().reset_index(drop=True)
out["is_connect"] = (out["hand1_rank"] - out["hand2_rank"]).abs().isin({1, 12})
out["is_suit"] = out["hand1_suit"] == out["hand2_suit"]
out["is_premium"] = (out["hand1_rank"] >= 10) & (out["hand2_rank"] >= 10)
out["is_pocket"] = out["hand1_rank"] == out["hand2_rank"]
out = out[
[
"player_name",
"stack",
"position",
"action",
"sizing",
"is_connect",
"is_suit",
"is_premium",
"is_pocket",
]
]
out.to_csv("pk_pre_flop_clean.csv")
| [
"army_kongtap@hotmail.com"
] | army_kongtap@hotmail.com |
6c8b408cdbc0e12de0c454ed763f230c7914bf78 | a7b7b12020bd9868966c926eb75761971454e469 | /case_study/product_manager.py | bc2e08c34877f778ca180fa5daaa22e80d623b88 | [
"LicenseRef-scancode-other-permissive"
] | permissive | OpenStackUser/ApiUsecase | b9790b976376cc0d2147f5fa2594ae940eaca2bf | 425dd7c4c1b39e47056299130c00152a18ed87cd | refs/heads/main | 2023-04-01T14:59:48.395037 | 2021-04-12T06:05:38 | 2021-04-12T06:05:38 | 357,075,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | """
Manages the business logic for products and calls through
to the PersistanceManager.
"""
import os.path
import json
import jsonschema
from copy import deepcopy
from case_study.persistance_manager import PersistanceManager
from case_study.product_service_manager import ProductServiceManager
from case_study.errors import ProductNotFoundError, InvalidRequestError, ErrorResponse
from case_study.utils import load_json
class ProductManager(object):
    """
    Manages the business logic for products: validation, enrichment with the
    product-name service, and persistence via the PersistanceManager.
    """
    def __init__(self, config, logger):
        """
        Args:
            config(dict): application configuration; expects the "database",
                "product_endpoint" and "product_endpoint_exclude_fields" keys
            logger(logging.Logger)
        """
        self.config = config
        self.logger = logger
        db_config = self.config.get("database")
        self.persistance_manager = PersistanceManager(self.logger, db_config)
        endpoint = self.config.get('product_endpoint')
        qs = self.config.get('product_endpoint_exclude_fields')
        self.product_service_manager = ProductServiceManager(endpoint, qs)
    def get_product(self, product_id):
        """
        Queries for a product from the data store and enriches it with the
        display name from the product service when one is available.
        Args:
            product_id(int)
        Returns:
            dict: see ../schemas/product.json
        Raises:
            case_study.errors.ProductNotFoundError
        """
        product = self.persistance_manager.get_product_by_id(product_id)
        if not product:
            raise ProductNotFoundError('product with id "{}" was not found'.format(product_id))
        # Internal storage id is not part of the public schema; tolerate its
        # absence rather than crashing on a malformed stored document.
        product.pop('_id', None)
        name = self.product_service_manager.fetch_name(product_id)
        if name:
            product["name"] = name
        return product
    def persist_product(self, product_id, product):
        """
        Validates and persists a product to the data store.
        Args:
            product_id(int): id from the request URL; must match the document's id
            product(str): JSON-encoded product document (see ../schemas/product.json)
        Returns:
            dict: the persisted product, enriched with the service name
        Raises:
            case_study.errors.InvalidRequestError: malformed JSON, schema
                violation, or id mismatch
            case_study.errors.ErrorResponse: unexpected internal failure
        """
        try:
            loaded_product = self.validate_product(product)
        except jsonschema.ValidationError as err:
            raise InvalidRequestError(err.message)
        except ValueError:
            # json.JSONDecodeError subclasses ValueError: the request body was
            # not valid JSON, which is a client error, not a server error.
            raise InvalidRequestError('request body is not valid JSON')
        except Exception:
            # Unlikely case, e.g. the schema file is missing or itself flawed.
            self.logger.exception('failed to validate product document')
            raise ErrorResponse('internal server error')
        if loaded_product["id"] != product_id:
            raise InvalidRequestError('provided document id "{}" did not match id in request URL "{}"'.format(loaded_product["id"], product_id))
        # deepcopy so the persistence layer cannot mutate the returned document
        self.persistance_manager.upsert_product(deepcopy(loaded_product))
        name = self.product_service_manager.fetch_name(product_id)
        if name:
            loaded_product["name"] = name
        return loaded_product
    def validate_product(self, product):
        """
        Parses and schema-validates a JSON-encoded product.
        Args:
            product(str): JSON-encoded product document
        Returns:
            dict: the decoded product
        Raises:
            ValueError: if *product* is not valid JSON
            jsonschema.ValidationError: if it violates ../schemas/product.json
        """
        schema = load_json("schemas/product.json")
        loaded_product = json.loads(product)
        jsonschema.validate(loaded_product, schema)
        return loaded_product
"noreply@github.com"
] | noreply@github.com |
741b779ce9481454a5c5158b3007bb89e6179542 | 69f73c3bd8721b53f99e9785395ed0dc78ac50d6 | /lambdas/WaterPumpControl/water_pump.py | 0cd26c9dc07726306072c1c707e86b69448898be | [] | no_license | renatogp/plant-watering | bb572264291e5ff92ac2bb3204e382d96f2ee7ba | 62c809620fae5b6f0b77f008eae8061495bc6951 | refs/heads/master | 2022-08-12T14:40:02.652316 | 2018-08-11T23:39:21 | 2018-08-11T23:39:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | import sys
import time
import os
import logging
import RPi.GPIO as GPIO
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vendored/'))
import greengrasssdk
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class WaterPumpControl:
    """Drives the water pump relay attached to a Raspberry Pi GPIO pin.

    The relay input is active-low: driving the pin LOW turns the pump on,
    HIGH turns it off.
    """
    WATER_PUMP_PIN = 18
    RELEASE_DURATION = 4 # seconds
    def __init__(self):
        """Configure the pin for output with the pump off (pin HIGH)."""
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup([self.WATER_PUMP_PIN], GPIO.OUT, initial=GPIO.HIGH)
    def on(self):
        """Energize the pump (drive the pin LOW)."""
        logging.info('Turning on pump')
        GPIO.output([self.WATER_PUMP_PIN], GPIO.LOW)
    def off(self):
        """De-energize the pump (drive the pin HIGH)."""
        logging.info('Turning off pump')
        GPIO.output([self.WATER_PUMP_PIN], GPIO.HIGH)
    def release(self, duration=None):
        """Run the pump for the given number of seconds (default RELEASE_DURATION)."""
        seconds = duration if duration else self.RELEASE_DURATION
        logging.info('Releasing water for {} seconds'.format(seconds))
        self.on()
        time.sleep(seconds)
        self.off()
| [
"renatogp@8c859043562e.ant.amazon.com"
] | renatogp@8c859043562e.ant.amazon.com |
8238dc74c2a9928551a4937f868e2c50ed6a4df1 | b6d0b8e46e27f5874dc3f0631e896773cc0668bf | /server_test.py | 77dba639c352fe12d41e54b02368a68d77cfa4e6 | [] | no_license | unoken77/raspi_people_counting | f023ac283f3aea5ecaa035fbe6f310692899428f | 98d3e4c13d6ca1ab570fabbceaa8afb977b33d24 | refs/heads/master | 2020-04-20T05:08:11.714391 | 2019-02-01T05:34:23 | 2019-02-01T05:34:23 | 168,647,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | # coding: UTF-8
#!/usr/bin/python3 # This is server.py file
import socket
import cv2
import numpy
# Experimental TCP server: replies with the current people count (from a text
# file) or a downscaled webcam frame, depending on the command received.
# NOTE(review): socket.recv returns bytes on Python 3, so the string
# comparisons against "number"/"camera" below only match on Python 2 --
# confirm the target interpreter before relying on this script.
path="/home/pi/Desktop/current_number_of_people.txt"
print('start server')
# create a socket object
serversocket = socket.socket(
    socket.AF_INET, socket.SOCK_STREAM)
# get local machine name
#host = socket.gethostname()
host = "192.168.1.59"
count=0
port = 9999
# bind to the port
serversocket.bind((host, port))
s='test'
send_pic=None
# queue up to 5 requests
serversocket.listen(5)
print('waiting connection...')
# accept a single client once; the loop below reuses this one connection
clientsocket, addr = serversocket.accept()
print("Got a connection from %s" % str(addr))
while True:
    # establish a connection
    #clientsocket, addr = serversocket.accept()
    #print("Got a connection from %s" % str(addr))
    #count+=1
    #msg = 'Thank you for connecting'+str(count) + "\r\n"
    #msg=clientsocket.recv(1024)
    print('before msg')
    msg=clientsocket.recv(1024)
    if msg =="number":
        print('here')
        # NOTE(review): this second recv appears to wait for a follow-up
        # message before replying -- confirm the client protocol.
        msg=clientsocket.recv(1024)
        with open(path) as f:
            s=f.read()
        clientsocket.send(s.encode('ascii'))
    elif msg == "camera":
        #msg=clientsocket.recv(921600)
        cap= cv2.VideoCapture(1)
        # Grab one frame from the webcam with OpenCV
        ret, frame = cap.read()
        frame=cv2.resize(frame, dsize=(200,200))
        # send the raw 200x200 BGR frame bytes over the socket
        frame=frame.tostring()
        clientsocket.send(frame)
        cap.release()
        #order=
        #clientsocket.send(s.encode('ascii'))
        #clientsocket.close()
# unreachable: the while loop above never breaks
clientsocket.close()
| [
"newunkn@gmail.com"
] | newunkn@gmail.com |
0c4a7d551e3a7f1c8a4a1b022e83f04c3d0d6d30 | e82e2305a4cde3d104770dcec688fbe7a2a91795 | /manage.py | 66eb5556ca857a49dcc8a3f932ac7457ca643e32 | [] | no_license | Ads7/analytics_vidhya | 66e983e5d7d90550ad58aa620242abfcbd96010b | dcb7b35031a6fdd074040d4d72a72f953f3f2c30 | refs/heads/master | 2021-01-20T15:41:37.761594 | 2018-03-31T21:46:55 | 2018-03-31T21:46:55 | 60,894,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "analytics_vidhya.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"amandeep.singh@industrybuying.com"
] | amandeep.singh@industrybuying.com |
45b5e85c59a7a5d95cbf4ae6752016b80d21347e | 1fafbcc2b1c8fb013bf00652ed64cb8c9417aab7 | /lms/extractors/textfile.py | 26088b9d27bba82b0604329a35642249e48b95f7 | [
"BSD-3-Clause"
] | permissive | Liad-n/lms | 7e3458091d9146939cac921bb42966237f9a3ef5 | b933e445efbc49532e7ceeeac05666d0b191a502 | refs/heads/master | 2022-12-18T08:27:13.310345 | 2020-09-25T14:24:20 | 2020-09-25T14:24:20 | 299,038,142 | 0 | 0 | BSD-3-Clause | 2020-09-27T13:28:12 | 2020-09-27T13:28:12 | null | UTF-8 | Python | false | false | 1,375 | py | from typing import Iterator, List, Tuple
from lms.extractors.base import Extractor, File
from lms.models.errors import BadUploadFile
# Byte values considered "text": a handful of control characters (bell,
# backspace, tab, newline, form feed, carriage return, escape) plus every
# byte in 0x20-0xFF except DEL (0x7f). Used to sniff whether raw bytes
# look like a text file.
TEXTCHARS = set(bytes(
    {7, 8, 9, 10, 12, 13, 27}
    | set(range(0x20, 0x100)) - {0x7f},
))
class Textfile(Extractor):
    """Extractor for a single plain-text source-file upload."""
    ALLOWED_EXTENSIONS = {'css', 'html', 'js', 'py', 'sql'}
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Split "name.ext" once from the right; a dotless filename yields ext ''.
        self.filename_no_ext, _, self.ext = self.filename.rpartition('.')
    def can_extract(self) -> bool:
        """Accept only whitelisted extensions whose content looks textual."""
        if self.ext not in self.ALLOWED_EXTENSIONS:
            return False
        if isinstance(self.file_content, str):
            return True
        # Bytes payloads must consist solely of text-like byte values.
        return all(c in TEXTCHARS for c in self.file_content)
    def get_exercise(self, to_extract: str) -> Tuple[int, List[File]]:
        """Resolve the exercise id from the content or, failing that, the filename.

        Returns a (exercise_id, [File]) pair; raises BadUploadFile when no id
        can be resolved from either source.
        """
        # NOTE(review): assumes base-class _clean returns (exercise_id, cleaned
        # content) and can parse an id out of a filename -- confirm in base.py.
        exercise_id, content = self._clean(to_extract)
        if self.filename and not exercise_id:
            # No id inside the content; fall back to the filename and keep the
            # upload text untouched.
            exercise_id, _ = self._clean(self.filename_no_ext)
            content = to_extract
        if not exercise_id:
            raise BadUploadFile("Can't resolve exercise id", self.filename)
        return (exercise_id, [File(f'/main.{self.ext}', content)])
    def get_exercises(self) -> Iterator[Tuple[int, List[File]]]:
        """Yield at most one (exercise_id, files) pair when extraction succeeds."""
        exercise_id, files = self.get_exercise(self.file_content)
        if exercise_id and files and files[0].code:
            yield (exercise_id, files)
| [
"noreply@github.com"
] | noreply@github.com |
cae313cb0b5b88d0581dc335b04490f26ee686f4 | eb971e6bf2f599a584fc748d0fdf33ad2105f84b | /flaskenv/lib/python2.7/abc.py | f346b6bff00f379f75f5d44cd9b4ce216a4fad6d | [] | no_license | BradZzz/flask-epp | c7ccbd52144a4315bb2fcc37ceca02a184b49667 | c055fc42fc8f22c84441784dee15ff1f5fc2d6e4 | refs/heads/master | 2020-12-30T11:51:44.417526 | 2017-05-25T03:50:05 | 2017-05-25T03:50:05 | 91,532,121 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /Users/Mauve3/anaconda/lib/python2.7/abc.py | [
"mauvemoonman@gmail.com"
] | mauvemoonman@gmail.com |
804d141858f3dc22514a6b54505eebf25a0e5c38 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/models/run_locally.runfiles/pypi__tensorboard_1_12_1/tensorboard/plugins/image/images_plugin.py | 0f7921e81efab27bdfd237ae85d1b4f5e13351ae | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__tensorboard_1_12_1/tensorboard/plugins/image/images_plugin.py | [
"ruchika.kharwar@gmail.com"
] | ruchika.kharwar@gmail.com |
d8090fe067b3f020942f08e80a83d04761dcfa45 | 2a4b1e7b438af4fc905486dd3e6c4f9b33209a19 | /core/dataloader.py | 9b1f520184f06f0b3c9029b8e2b284166a658fa6 | [] | no_license | ywx980615/fracture_identification | 9b6d0d81b9e39204ee892075b3494f8b282f6c9d | 2d5f9f72927724a917d2882e5775e044d09d9ad9 | refs/heads/master | 2022-04-14T07:01:55.179989 | 2020-03-30T03:58:20 | 2020-03-30T03:58:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,897 | py | import numpy as np
import pandas as pd
import matplotlib.image as mpimg
class DataLoader_for_training():
    """Builds (patch, label) training pairs from an image and its labelled mask.

    Each training sample is the flattened (2*sample_size_x+1) x
    (2*sample_size_y+1) window of the original image; the target is the mask
    value near the window centre, rescaled from 0..255 to 0..1.
    """
    def __init__(self, original_picture, labled_picture, sample_size_x = 5,sample_size_y = 5,data_type = 'uint16'):
        # NOTE(review): the indexing below assumes both files load as 2-D
        # (single-channel) arrays — confirm; RGB input would break it.
        self.original_data = mpimg.imread(original_picture)
        self.labled_data = mpimg.imread(labled_picture)
        self.size_X = len(self.original_data[0,:])  # number of columns
        self.size_Y = len(self.original_data[:,0])  # number of rows
        self.sample_size_x = sample_size_x
        self.sample_size_y = sample_size_y
        self.data_type = data_type
    def generate_training_data(self):
        """Return (train_x, train_y): flattened windows and per-window labels."""
        length_x = self.size_X - 2*self.sample_size_x -1
        length_y = self.size_Y - 2*self.sample_size_y -1
        train_x = np.zeros((length_x*length_y,(2*self.sample_size_x+1)*(2*self.sample_size_y+1))).astype(self.data_type)
        train_y = np.zeros((length_x*length_y,1))
        index = 0
        for i in range(length_x):
            for j in range(length_y):
                # NOTE(review): sample_size_x is applied along rows (j) and
                # sample_size_y along columns (i) — the x/y naming looks
                # swapped relative to the axes; harmless while both are equal.
                data = self.original_data[j:j+2*self.sample_size_x+1,i:i+2*self.sample_size_y+1].flatten()
                train_x[index,:] = data
                # NOTE(review): the window centre is (j+sample_size_x,
                # i+sample_size_y); the extra +1 reads the pixel one step past
                # the centre — confirm whether this off-by-one is intentional.
                train_y[index]=self.labled_data[j+self.sample_size_x+1,i+self.sample_size_y+1]
                index = index + 1
        print('data_have_been_generated')
        # Mask values are presumably 0/255; dividing by 255 maps them to 0/1
        # before the integer cast — TODO confirm the mask is 8-bit.
        train_y = (train_y /255).astype(self.data_type)
        return train_x, train_y
class DataLoader_for_predict():
    """Extracts sliding-window samples from an image and runs a model over them."""
    def __init__(self, original_picture, sample_size_x = 5,sample_size_y = 5, data_type = 'uint16'):
        # NOTE(review): assumes a 2-D (single-channel) image — confirm.
        self.original_data = mpimg.imread(original_picture)
        self.size_X = len(self.original_data[0,:])  # number of columns
        self.size_Y = len(self.original_data[:,0])  # number of rows
        self.sample_size_x = sample_size_x
        self.sample_size_y = sample_size_y
        self.data_type = data_type
    def generate_predict_data(self):
        """Return every flattened window, one per row of a 2-D array."""
        length_x = self.size_X - 2*self.sample_size_x -1
        length_y = self.size_Y - 2*self.sample_size_y -1
        test_x = np.zeros((length_x*length_y,(2*self.sample_size_x+1)*(2*self.sample_size_y+1)))
        index = 0
        for i in range(length_x):
            for j in range(length_y):
                data = self.original_data[j:j+2*self.sample_size_x+1,i:i+2*self.sample_size_y+1].flatten()
                test_x[index,:] = data
                index = index + 1
        return test_x.astype(self.data_type)
    def generate_predict_lable(self,model):
        """Predict one class per window and reshape the result to image layout.

        `model` is assumed to expose a Keras-style predict() returning one
        score vector per input row — TODO confirm.
        """
        length_x = self.size_X - 2*self.sample_size_x -1
        length_y = self.size_Y - 2*self.sample_size_y -1
        test_x = self.generate_predict_data()
        test_y = model.predict(test_x)
        # argmax picks the most likely class for each window.
        test_y = np.argmax(test_y,axis = 1).reshape((length_x,length_y))
        #test_y = test_y[:,0].reshape((length_x,length_y))
        return test_y.T
| [
"menghan@menghandeMacBook-Pro.local"
] | menghan@menghandeMacBook-Pro.local |
d9099d9c18ae134d0ac17bc15493596d72137bcf | 9a10a4fc5ed7407d183291a72891207e3057d9ab | /app/api/category_routes.py | fe9766fd15d0a8ee64759e1aa0797283b3bdcee5 | [] | no_license | natoh19/sophora | f7a1980990575b26ee9a56054d1d16adc2332ccf | 0408fa0eccaa60a8ed19509058075b2620e8e3f9 | refs/heads/main | 2023-07-01T05:16:50.629663 | 2021-08-08T14:15:07 | 2021-08-08T14:15:07 | 372,988,268 | 1 | 0 | null | 2021-08-08T14:15:08 | 2021-06-01T23:46:52 | Python | UTF-8 | Python | false | false | 477 | py | from flask import Blueprint, jsonify
from flask_login import login_required
from app.models import Category
category_routes = Blueprint('categories', __name__, url_prefix = '/api/categories')
@category_routes.route('')
def categories():
    """List every category as ``{"categories": [serialized category, ...]}``."""
    records = Category.query.all()
    payload = {"categories": [record.to_dict() for record in records]}
    return payload
@category_routes.route('/<int:id>')
def category(id):
    """Return one category by primary key, serialized via its to_dict()."""
    category = Category.query.get(id)
    # NOTE(review): when no row matches `id`, query.get presumably returns
    # None and to_dict() raises — consider an explicit 404 response.
    return category.to_dict()
| [
"natoh18@gmail.com"
] | natoh18@gmail.com |
94c1d5ff28daf72fe0e07b61a78378da4e476e6a | cadf9ca46531c2fed3ed0f9f982f4b35b2e58fb9 | /main.py | b5da6472df7b19ed447546d652695c378252a963 | [] | no_license | EliNovakova/flash-cards | 1f9876c7e18755e17444a458d98e8f13cb57dbd9 | b0a6ef67fa9af14d3d7b7f02c7ef5027b0fdd0c5 | refs/heads/main | 2023-09-01T04:13:34.872881 | 2021-10-05T22:15:17 | 2021-10-05T22:15:17 | 413,541,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,775 | py | from tkinter import *
import pandas
from random import randint, choice
BACKGROUND_COLOR = "#B1DDC6"
current_card = {}
words_to_learn_dict = {}
# ---------------------------- Code and functions ------------------------------- #
try: # tries to oped csv with words we have yet to learn (exists only if we played before)
words_to_learn_dataframe = pandas.read_csv("data/words_to_learn.csv")
except FileNotFoundError: # if file doesn't exist, opens csv with all the words
all_words_dataframe = pandas.read_csv("data/french_words.csv") # opens csv, reads it and creates dataframe
words_to_learn_dict = all_words_dataframe.to_dict(orient="records") # dataframe to list of dicts, orient helps us to display it nicely as in one card
else: # creates list of dicts from dataframe if file with words yet to learn exists
words_to_learn_dict = words_to_learn_dataframe.to_dict(orient="records")
def next_card():
"""Randomly generates next card with French word."""
global current_card, flip_timer
window.after_cancel(flip_timer) # every time we get a next card, the timer stops and then resets (we create it again at the end of the function)
current_card = choice(words_to_learn_dict) # random choice of a card
french_word = current_card["French"] # gets hold of the French word on the card
canvas.itemconfig(card_title, text="French", fill="black") # changes text on canvas (on the actual card)
canvas.itemconfig(card_word, text=french_word, fill="black") # changes text on canvas (on the actual card) to chosen French word
canvas.itemconfig(canvas_image, image=card_front_img) # changes canvas image to a front of a card
flip_timer = window.after(3000, func=flip_card) # we create a timer
def flip_card():
"""Flips card to display the word in English."""
canvas.itemconfig(canvas_image, image=card_back_img) # changes canvas image to a back of a card
canvas.itemconfig(card_title, text="English", fill="white") # changes text on canvas ("English")
canvas.itemconfig(card_word, text=current_card["English"], fill="white") # changes text on canvas to a chosen English translation
def is_known():
    """Removes a card with a word the user knows from the list.

    Also persists the remaining words so the next session resumes where
    this one left off.
    """
    words_to_learn_dict.remove(current_card)  # removes current card
    # NOTE(review): once the last word is removed, next_card()'s
    # random.choice() on the now-empty list raises IndexError — confirm
    # whether an "all done" state is needed.
    next_card()  # gives us another card
    data = pandas.DataFrame(words_to_learn_dict)  # creates dataframe from list of dicts
    data.to_csv("data/words_to_learn.csv", index=False)  # saves it as csv, index False doesn't add index to it
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Flash cards")
window.config(padx=50, pady=50, bg=BACKGROUND_COLOR)
flip_timer = window.after(3000, func=flip_card) # establishes the timer for the first time
canvas = Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)
card_front_img = PhotoImage(file="images/card_front.png")
card_back_img = PhotoImage(file="images/card_back.png")
canvas_image = canvas.create_image(400, 263, image=card_front_img)
card_title = canvas.create_text(400, 150, text="", font=("Arial", 40, "italic"))
card_word = canvas.create_text(400, 263, text="", font=("Arial", 60, "bold"))
canvas.grid(row=0, column=0, columnspan=2)
right_img = PhotoImage(file="images/right.png")
right_button = Button(image=right_img, highlightthickness=0, command=is_known)
right_button.grid(row=1, column=1)
wrong_img = PhotoImage(file="images/wrong.png")
wrong_button = Button(image=wrong_img, highlightthickness=0, command=next_card)
wrong_button.grid(row=1, column=0)
next_card() # we have to call it here so the moment we run the code card is already randomly chosen and displayed
window.mainloop() | [
"eli.novakova@seznam.cz"
] | eli.novakova@seznam.cz |
15e7f63643d166a7973ca8d358d2510577297cd3 | 0feeafb8e94cce131eee30e93e5f2f547b7936e2 | /Checkpoints/Sprint 5/Payroll.py | 01778cd9013a9729ed89495b11c19cbafa8ca82c | [] | no_license | danieljohnson107/EmpDat-Payroll | 2c4f87b8667c25329a5a6227fe6e9b1e3dc57b57 | f00b90392527c2070624f26583b8e271ff53043b | refs/heads/main | 2023-05-03T15:18:47.691610 | 2021-05-05T01:13:28 | 2021-05-05T01:13:28 | 332,270,381 | 1 | 1 | null | 2021-04-10T04:49:56 | 2021-01-23T17:39:22 | Python | UTF-8 | Python | false | false | 11,225 | py | from abc import ABC, abstractmethod
import os, os.path
PAY_LOGFILE = "paylog.txt"
employees = []
global current_emp
def load_employees():
    """Loads employee data into memory and creates an instance of the employee object for each entry.

    Reads employees.csv (skipping the header row), builds one Employee per
    row in the global `employees` list, then snapshots the loaded data to
    employees.csv.old.
    """
    with open("employees.csv", "r") as emp_file:
        first_line = True
        for line in emp_file:
            if first_line:
                # Skip the CSV header row.
                first_line = False
                continue
            tmp = line[:-1].split(",")
            # Column 0 is an unused index; the remaining columns map onto
            # the Employee constructor in order.
            employees.append(Employee(tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], int(tmp[8]), int(tmp[9]),
                                      float(tmp[10]), float(tmp[11]), float(tmp[12]), tmp[13], int(tmp[14]),
                                      tmp[15], tmp[16]))
    # Create the .old snapshot at the same time.  A context manager replaces
    # the previous manual open()/close() pair so the handle is released even
    # if one of the writes fails part-way through.
    with open("employees.csv.old", "w") as old:
        for i in employees:
            old.write(f"0,"
                      f"{i.emp_id},"
                      f"{i.first_name},"
                      f"{i.last_name},"
                      f"{i.address},"
                      f"{i.address2},"
                      f"{i.city},"
                      f"{i.state},"
                      f"{i.postal_code},"
                      f"{class_number(i.class_text)},"
                      f"{i.salary},"
                      f"{i.commission},"
                      f"{i.hourly},"
                      f"{i.password},"
                      f"{i.access},"
                      f"{i.phone_number},"
                      f"{i.department}\n")
def authenticate(emp_id, password):
    """Check a login attempt and remember who is logging in (global current_emp).

    Returns True/False for a normal password check.  When the employee has
    no password set (stored as the literal string "None") it returns that
    string instead — note "None" is a truthy value, so callers must compare
    against it explicitly rather than testing the result as a boolean.
    """
    global current_emp
    current_emp = emp_id
    employee = find_employee_by_id(emp_id)
    # NOTE(review): find_employee_by_id returns False for an unknown id, in
    # which case the attribute access below raises AttributeError.
    # Make sure the password isn't blank
    if employee.password == "None":
        return employee.password
    # Check the password
    if employee.password == password:
        return True
    else:
        return False
def user_exists(emp_id):
    """Return True when an employee with this id is already loaded."""
    return any(emp.emp_id == emp_id for emp in employees)
def change_password(emp_id, value):
    """Validate and set a first-time password for an employee.

    Returns:
        False  -- the employee already has a password set.
        "Fail" -- the candidate password is too weak.
        True   -- the password was stored and persisted via write_out().

    A valid password is at least 8 characters long and contains at least one
    digit, one uppercase letter and one character from `special_chars`.
    """
    employee = find_employee_by_id(emp_id)
    # Passwords can only be set once; "None" is the sentinel for "not set".
    if employee.password != "None":
        return False
    special_chars = ["!", "@", "#", "$", "%", "^", "&", "*", "(", ")", "-", "+", "?", "_", "=", ",", "<", ">", "/", "'",
                     '"', " "]
    # Replaced the previous try/int(...) counting loop with direct str
    # predicates; str.isdigit() is marginally broader than int() (it also
    # accepts e.g. superscript digits), which only relaxes the check.
    has_digit = any(c.isdigit() for c in value)
    has_upper = any(c.isupper() for c in value)
    has_special = any(c in special_chars for c in value)
    if len(value) >= 8 and has_upper and has_special and has_digit:
        employee.password = value
        write_out()
        return True
    else:
        return "Fail"
def process_timecards():
"""Processes time cards for hourly employees"""
with open("timecards.csv", "r") as time_file:
for line in time_file:
emp_time = line[:-1].split(",")
emp = find_employee_by_id(emp_time[0])
if isinstance(emp.classification, Hourly):
for hours in emp_time[1:]:
emp.classification.add_timecard(float(hours))
def process_receipts():
"""Processes reciepts for commissioned employees"""
with open("receipts.csv", "r") as receipts_file:
for line in receipts_file:
emp_receipts = line[:-1].split(",")
emp = find_employee_by_id(emp_receipts[0])
if isinstance(emp.classification, Commissioned):
for receipt in emp_receipts[1:]:
emp.classification.add_receipt(float(receipt))
def run_payroll():
"""Runs payroll for all employees"""
if os.path.exists(PAY_LOGFILE): # pay_log_file is a global variable holding ‘payroll.txt’
os.remove(PAY_LOGFILE)
for emp in employees: # employees is the global list of Employee objects
emp.issue_payment() # issue_payment calls a method in the classification
# object to compute the pay, which in turn invokes
# the pay method.
def find_employee_by_id(id):
    """Return the Employee whose emp_id equals `id`, or False when absent.

    The falsy sentinel is False (not None) to match existing callers.
    """
    return next((emp for emp in employees if emp.emp_id == id), False)
def get_profile(emp_id):
i = find_employee_by_id(emp_id)
data = [i.emp_id, i.first_name, i.last_name, i.address, i.address2, i.city, i.state, i.postal_code, i.class_text,
i.salary, i.commission, i.hourly, i.password, i.access, i.phone_number, i.department]
# Check the data for any none values
for i in range(len(data)):
if data[i] == 'nan':
data[i] = ""
return data
def save_profile(emp_id, first_name, last_name, address, address2, city, state, postal_code, classification, salary,
                 hourly, password, access, phone_number, department):
    """Overwrite every stored attribute of an employee and persist to disk.

    Returns True on success, False on any failure — e.g. an unknown emp_id,
    where find_employee_by_id returns False and the attribute assignments
    below raise AttributeError.
    """
    employee = find_employee_by_id(emp_id)
    try:
        # assign the values to the employee record
        employee.emp_id = emp_id
        employee.first_name = first_name
        employee.last_name = last_name
        employee.address = address
        employee.address2 = address2
        employee.city = city
        employee.state = state
        employee.postal_code = postal_code
        employee.classification = classification
        employee.salary = salary
        employee.hourly = hourly
        employee.password = password
        employee.access = access
        employee.phone_number = phone_number
        employee.department = department
        # Keep the human-readable label in sync with the numeric code.
        if classification == 1:
            employee.class_text = "Salaried"
        elif classification == 2:
            employee.class_text = "Commissioned"
        else:
            employee.class_text = "Hourly"
        write_out()
        return True
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without masking interpreter-level signals.
        return False
def new_user(emp_id, first_name, last_name, address, address2, city, state, postal_code, classification, salary,
hourly, password, access, phone_number, department, commission=""):
new_employee = Employee(emp_id, first_name, last_name, address, address2, city, state, postal_code, classification,
salary, commission, hourly, password, access, phone_number, department)
employees.append(new_employee)
write_out()
def write_out():
""" Function to write all user data to employees.csv """
with open("employees.csv", "w") as new_data:
new_data.write(",id,first_name,last_name,address,address2,city,state,zip,classification,salary,commission,"
"hourly,password,access,phone_number,department\n")
for i in employees:
new_data.write(f"0,"
f"{i.emp_id},"
f"{i.first_name},"
f"{i.last_name},"
f"{i.address},"
f"{i.address2},"
f"{i.city},"
f"{i.state},"
f"{i.postal_code},"
f"{class_number(i.class_text)},"
f"{i.salary},"
f"{i.commission},"
f"{i.hourly},"
f"{i.password},"
f"{i.access},"
f"{i.phone_number},"
f"{i.department}\n")
def class_number(classification):
    """Map a classification label to its numeric CSV code (as a string).

    "Salaried" -> "1", "Commissioned" -> "2", anything else -> "3" (hourly).
    """
    codes = {"Salaried": "1", "Commissioned": "2"}
    return codes.get(classification, "3")
class Employee:
"""Defines an Employee object
Required Params: emp_id, first_name, last_name, address, address2, city, state, postal_code, classification, salary,
commission, hourly, password, access, phone_number, department
"""
def __init__(self, emp_id, first_name, last_name, address, address2, city, state, postal_code, classification,
salary, commission, hourly, password, access, phone_number, department):
self.emp_id = emp_id
self.first_name = first_name
self.last_name = last_name
self.address = address
self.address2 = address2
self.city = city
self.state = state
self.postal_code = postal_code
self.classification = classification
self.class_text = ""
self.salary = salary
self.commission = commission
self.hourly = hourly
self.password = password
self.access = access
self.phone_number = phone_number
self.department = department
if classification == 1:
self.class_text = "Salaried"
self.classification = Salaried(salary)
elif classification == 2:
self.class_text = "Commissioned"
self.classification = Commissioned(salary, commission)
else:
self.class_text = "Hourly"
self.classification = Hourly(hourly)
def make_hourly(self, hourly_rate):
"""Sets the Employee classification to hourly"""
self.classification = Hourly(hourly_rate)
def make_salaried(self, salary):
"""Sets the Employee classification to salaried"""
self.classification = Salaried(salary)
def make_commissioned(self, salary, commission_rate):
"""Sets the Employee classification to commissioned"""
self.classification = Commissioned(salary, commission_rate)
def issue_payment(self):
"""Issues payment to employee"""
pay = self.classification.compute_pay()
if pay > 0:
with open(PAY_LOGFILE, "a") as paylog:
print("Mailing", f"{pay:.2f}", "to", self.first_name, self.last_name,
"at", self.address, self.city, self.state, self.postal_code, file=paylog)
class Classification(ABC):
@abstractmethod
def compute_pay(self):
pass
class Hourly(Classification):
    """Hourly-paid employee: pay = total logged hours * hourly rate."""
    def __init__(self, hourly_rate):
        self.hourly_rate = hourly_rate
        self.timecard = []
    def add_timecard(self, hours):
        """Record one timecard entry (hours worked)."""
        self.timecard.append(hours)
    def compute_pay(self):
        """Return pay for all logged hours; the timecard is cleared afterwards."""
        total_hours = sum(self.timecard)
        self.timecard.clear()
        return round(total_hours * self.hourly_rate, 2)
class Salaried(Classification):
    """Defines methods for salaried Employees"""
    def __init__(self, salary):
        # Annual salary; one pay period is 1/24th of it (semi-monthly).
        self.salary = salary
    def compute_pay(self):
        # 24 pay periods per year, rounded to cents.
        return round(self.salary/24, 2)
class Commissioned(Salaried):
"""Defines methods for commissioned Employees"""
def __init__(self, salary, commission_rate):
super().__init__(salary)
self.commission_rate = commission_rate
self.receipts = []
def add_receipt(self, amount):
self.receipts.append(amount)
def compute_pay(self):
pay = round((sum(self.receipts)*self.commission_rate/100)+self.salary/24, 2)
self.receipts.clear()
return pay | [
"65976231+easton57@users.noreply.github.com"
] | 65976231+easton57@users.noreply.github.com |
7030e71e24a4720cdc6d450aec15704ef1bfc65f | d3849a750a204cf6866da40df592d1ccdeccc738 | /E-Docs/edocs/doclocker_app/form.py | 02255b713fa941a7d5afe90b067d5e47eb05745e | [] | no_license | barrett70/mywork | 2001e8db76b24ce73f9fd7eef2111be594b49706 | 48349ae4a3927026cec0848dd2994add62119f1d | refs/heads/master | 2022-11-28T20:29:15.858849 | 2020-08-14T10:13:46 | 2020-08-14T10:13:46 | 287,505,931 | 0 | 0 | null | 2020-08-14T10:22:19 | 2020-08-14T10:22:18 | null | UTF-8 | Python | false | false | 808 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Images
class RegisterForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )
class ContactForm(forms.Form):
contact_name = forms.CharField(required=True)
contact_email = forms.EmailField(required=True)
content = forms.CharField(
required=True,
widget=forms.TextInput()
) | [
"56765836+annanyasharma@users.noreply.github.com"
] | 56765836+annanyasharma@users.noreply.github.com |
4a7348702e26e42b79d656ecb439f92cd86c5ccd | 579168f4cfebaed6dd0e6833f230774605003f46 | /students/Russell_Large/template_student/lesson03/assignment/tests/test_gradel03.py | 1601ce415c896ff1390bdf148e91c3e06cf4f24e | [] | no_license | Washirican/Python220A_2019 | e9abd8a5c2151d509618bcadd3e2454a90959e85 | 46d6282518f02029a556e94e607612a47daf675a | refs/heads/master | 2020-05-04T19:32:43.827706 | 2019-06-09T15:07:57 | 2019-06-09T15:07:57 | 179,398,542 | 2 | 0 | null | 2019-04-04T01:28:08 | 2019-04-04T01:28:08 | null | UTF-8 | Python | false | false | 6,837 | py |
""" This is an integration test module """
import pytest
import sys
import os
import peewee
# dynamically connect to the database
# as long as data, src, and tests are all located
# in the same directory.
db_folder = os.getcwd()
db_location = str(db_folder[:-6] + '\src')
input_data = str(db_folder[:-6] + '\data\customer.csv')
sys.path.append(db_location)
import basic_operations as l
@pytest.fixture
def _add_customers():
return [
("123", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("456", "Name", "Lastname", "Address", "phone", "email", "inActive", 10),
("123", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("789", "Name", "Lastname", "Address", "phone", "email", "Active", 0),
("345", "Name", "Lastname", "Address", "phone", "email", "Active", -10),
("0123", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("777", "Name", "Lastname", "Address", "phone", "email", "Active", 999)
]
@pytest.fixture
def _search_customers():
return [
("998", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("997", "Name", "Lastname", "Address", "phone", "email", "inActive", 10),
("999", "Name", "Lastname", "Address", "phone", "email", "inActive", 120)
]
@pytest.fixture
def _delete_customers():
return [
("898", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("897", "Name", "Lastname", "Address", "phone", "email", "inActive", 10)
]
@pytest.fixture
def _list_active_customers():
return [
("598", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("597", "Name", "Lastname", "Address", "phone", "email", "inActive", 10),
("596", "Name", "Lastname", "Address", "phone", "email", "inActive", 99),
("595", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("594", "Name", "Lastname", "Address", "phone", "email", "Active", 10),
("593", "Name", "Lastname", "Address", "phone", "email", "Active", 99)
]
@pytest.fixture
def _update_customer_credit():
return [
("798", "Name", "Lastname", "Address", "phone", "email", "Active", 999),
("797", "Name", "Lastname", "Address", "phone", "email", "inActive", 10),
("796", "Name", "Lastname", "Address", "phone", "email", "inActive", -99)
]
@pytest.fixture
def _data():
return input_data
def test_add_customer(_add_customers):
""" additions """
for customer in _add_customers:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
added = l.search_customer(customer[0])
assert added['cust_name'] == customer[1]
assert added['cust_last_name'] == customer[2]
assert added['cust_email'] == customer[5]
assert added['cust_phone'] == customer[4]
for customer in _add_customers:
l.delete_customer(customer[0])
def test_search_customer(_search_customers):
""" search """
for customer in _search_customers:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
result = l.search_customer(102910)
assert result == None
result = l.search_customer(_search_customers[2][0])
assert result['cust_name'] == _search_customers[2][1]
assert result['cust_last_name'] == _search_customers[2][2]
assert result['cust_email'] == _search_customers[2][5]
assert result['cust_phone'] == _search_customers[2][4]
for customer in _search_customers:
l.delete_customer(customer[0])
def test_delete_customer(_delete_customers):
""" delete """
for customer in _delete_customers:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
response = l.delete_customer(customer[0])
assert response is True
deleted = l.search_customer(customer[0])
assert deleted == None
def test_update_customer_credit(_update_customer_credit):
""" update """
for customer in _update_customer_credit:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
l.update_customer_credit("798", 0)
l.update_customer_credit("797", 1000)
l.update_customer_credit("797", -42)
l.update_customer_credit("796", 500)
for customer in _update_customer_credit:
l.delete_customer(customer[0])
def test_list_active_customers(_list_active_customers):
""" Actives """
for customer in _list_active_customers:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
actives = l.list_active_customers()
assert actives == 4
for customer in _list_active_customers:
l.delete_customer(customer[0])
def test_load_csv(_data):
test = l.load_customer_data(_data)
ct = 0
cust_id_list = []
for customer in test:
if ct < 40:
l.add_customer(customer[0],
customer[1],
customer[2],
customer[3],
customer[4],
customer[5],
customer[6],
customer[7]
)
cust_id_list.append(customer[0])
ct += 1
else:
break
actives = l.list_active_customers()
assert actives == 30
for customer in cust_id_list:
l.delete_customer(customer)
| [
"objectivejoe@gmail.com"
] | objectivejoe@gmail.com |
e7c11fda978a4aa5af634d07bc50e343bfb341d1 | 6baf0e8e1c0c9ab73b02e4b1568ee9a014dc0aba | /print_iterations.py | 104a5e2c97a26fdc426502d5f70c8e392e4c18a6 | [] | no_license | santoshgurujula/PythonChallenges | 096660a324f20c8c099d02423594a692ecfe72b2 | 5492ad6a5dfd47f47b4a83463f82e08641a11e2f | refs/heads/master | 2020-12-14T23:51:25.500724 | 2020-01-21T02:24:33 | 2020-01-21T02:24:33 | 234,916,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | itr=int(input())
# Top half of the diamond: row i gets (itr - i) leading spaces followed by
# (2*i - 1) asterisks — same output as the original per-character prints.
for row in range(1, itr + 1):
    print(' ' * (itr - row) + '*' * (2 * row - 1))
for i in range(itr-1,0,-1):
for k in range(itr-i):
print(' ',end='')
for j in range(2*i-1):
print('*',end='')
print() | [
"santoshgurujula@gmail.com"
] | santoshgurujula@gmail.com |
1f610a86d05507f68c3c3904b86faec44aca7e42 | 7a013424c82b71bc82aa312e0165a1af4170ac23 | /ABC/ABC169/D.py | 31bd058beee4eb1ee3d717253e4cd1cf3efb5c3b | [] | no_license | kikugawa-shoma/Atcoder | fe3405e36dd3e4e25127b6110d6009db507e7095 | 7299116b7beb84815fe34d41f640a2ad1e74ba29 | refs/heads/master | 2020-12-21T19:10:12.471507 | 2020-10-10T16:38:18 | 2020-10-10T16:38:18 | 236,531,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | N = int(input())
def factorization(n):
    """Return the prime factorization of n as [[prime, exponent], ...].

    Quirk preserved from the original: factorization(1) returns [[1, 1]]
    (the caller special-cases N == 1 before using the result).
    """
    factors = []
    remainder = n
    limit = int(-(-n ** 0.5 // 1))  # ceil(sqrt(n)) via the double-negation trick
    for candidate in range(2, limit + 1):
        if remainder % candidate == 0:
            exponent = 0
            while remainder % candidate == 0:
                exponent += 1
                remainder //= candidate
            factors.append([candidate, exponent])
    # Whatever is left above sqrt(n) is a single prime factor.
    if remainder != 1:
        factors.append([remainder, 1])
    # Only reachable for n == 1 (no factor was ever appended).
    if not factors:
        factors.append([n, 1])
    return factors
divs = factorization(N)
n = len(divs)
ans = 0
# Greedy: for a prime factor p^num we can pick p^1, p^2, ... in turn; the
# k-th pick costs k from the exponent budget, so each prime contributes the
# largest k with 1 + 2 + ... + k <= num.
for i in range(n):
    div,num = divs[i]
    cnt = 0
    now = 1
    while 1:
        if num >= now:
            num -= now
            now += 1
            cnt += 1
        else:
            break
    ans += cnt
if N == 1:
    # factorization(1) returns [[1, 1]], which the loop above would count
    # as one pick, so N == 1 is answered separately before printing.
    print(0)
    exit()
print(ans)
| [
"kikugawa.s.shukatsu@gmail.com"
] | kikugawa.s.shukatsu@gmail.com |
e0fbc37d5143b688f711c1d71c2e67bc78828cdb | 37dae42b2fa33b43c09b92507d20af49bcce2038 | /my_scraper/middlewares.py | c503f62feee9b1958e24dceab92f7d6c82859674 | [] | no_license | kirimaks/angel_scraper | 7a337f051557adb2b21dde7eb45286c2d508c2eb | e95ae4a13c47ddcd984b564aeb63498ebf9110ee | refs/heads/master | 2020-09-13T07:38:42.064149 | 2016-09-11T21:36:48 | 2016-09-11T21:36:48 | 67,125,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | from scrapy.exceptions import IgnoreRequest
from my_scraper.tools.JujuTools import broken_links
class JuJuMiddleware(object):
    """Scrapy downloader middleware that drops responses for known-broken links."""
    def process_response(self, request, response, spider):
        """Raise IgnoreRequest for URLs in `broken_links`; pass others through."""
        if response.url in broken_links:
            # NOTE(review): these prints look like leftover debug output —
            # consider using the spider's logger instead.
            print("\n\n\n\n **** ")
            print("URL: ", response.url)
            print("Ignore??\n\n\n")
            raise IgnoreRequest
        return response
| [
"kirimaks@yahoo.com"
] | kirimaks@yahoo.com |
93beede19fb04c811fd459c013a68d27685e1150 | 5136e9cd01069f1ecf3184976e05a3b597914f68 | /tests/kafka_check/test_replication_factor.py | dd2fafbcb8995d43897b12e2b85ad45cad357420 | [
"Apache-2.0"
] | permissive | mborst/kafka-utils | f2326d2b12b9e5bd1353adbd90a07b0df4455f5d | 6970ee835ed0e8946c5d67b0d8511e5746b1fb82 | refs/heads/master | 2020-07-18T22:18:09.566351 | 2019-09-03T18:18:07 | 2019-09-03T18:18:07 | 206,323,378 | 0 | 0 | Apache-2.0 | 2019-09-04T13:16:38 | 2019-09-04T13:16:38 | null | UTF-8 | Python | false | false | 6,529 | py | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from kafka.common import PartitionMetadata
from pytest import fixture
from kafka_utils.kafka_check.commands.replication_factor import _find_topics_with_wrong_rp
from kafka_utils.kafka_check.commands.replication_factor import _prepare_output
TOPICS_STATE = {
'topic_0': {
0: PartitionMetadata(
topic='topic_0',
partition=0,
leader=170396635,
replicas=(170396635, 170398981, 170396665),
isr=(170398981, 170396635),
error=0,
),
},
'topic_1': {
0: PartitionMetadata(
topic='topic_1',
partition=0,
leader=170396635,
replicas=(170396635, 170398981),
isr=(170396635, 170398981),
error=0,
),
},
}
TOPICS_WITH_WRONG_RP = [
{
'min_isr': 3,
'topic': 'topic_0',
'replication_factor': 3,
},
{
'min_isr': 3,
'topic': 'topic_1',
'replication_factor': 2,
},
]
@fixture
def mock_zk():
return mock.Mock()
def test_find_topics_with_wrong_rp_empty():
    """An empty topic map yields no findings; zk is never consulted, so None is safe."""
    result = _find_topics_with_wrong_rp(
        topics={},
        zk=None,
        default_min_isr=None,
    )
    assert result == []
@mock.patch(
'kafka_utils.kafka_check.commands.replication_factor.get_min_isr',
return_value=1,
autospec=True,
)
def test_find_topics_with_wrong_rp_ok(mock_min_isr, mock_zk):
result = _find_topics_with_wrong_rp(
topics=TOPICS_STATE,
zk=mock_zk,
default_min_isr=None,
)
calls = [mock.call(mock_zk, 'topic_0'), mock.call(mock_zk, 'topic_1')]
mock_min_isr.assert_has_calls(calls, any_order=True)
assert result == []
@mock.patch(
'kafka_utils.kafka_check.commands.replication_factor.get_min_isr',
return_value=None,
autospec=True,
)
def test_find_topics_with_wrong_rp_without_min_isr_in_zk_use_default(mock_min_isr, mock_zk):
result = _find_topics_with_wrong_rp(
topics=TOPICS_STATE,
zk=mock_zk,
default_min_isr=1,
)
calls = [mock.call(mock_zk, 'topic_0'), mock.call(mock_zk, 'topic_1')]
mock_min_isr.assert_has_calls(calls, any_order=True)
assert result == []
@mock.patch(
'kafka_utils.kafka_check.commands.replication_factor.get_min_isr',
return_value=None,
autospec=True,
)
def test_find_topics_with_wrong_rp_not_empty_with_default_min_isr(mock_min_isr, mock_zk):
result = _find_topics_with_wrong_rp(
topics=TOPICS_STATE,
zk=mock_zk,
default_min_isr=2,
)
topic1 = {
'replication_factor': 2,
'min_isr': 2,
'topic': 'topic_1',
}
calls = [mock.call(mock_zk, 'topic_0'), mock.call(mock_zk, 'topic_1')]
mock_min_isr.assert_has_calls(calls, any_order=True)
assert result == [topic1]
@mock.patch(
    'kafka_utils.kafka_check.commands.replication_factor.get_min_isr',
    return_value=3,
    autospec=True,
)
def test_find_topics_with_wrong_rp_returns_all_topics(mock_min_isr, mock_zk):
    """With min ISR 3 from ZK, every topic in TOPICS_STATE is reported."""
    result = _find_topics_with_wrong_rp(
        topics=TOPICS_STATE,
        zk=mock_zk,
        default_min_isr=1,
    )
    mock_min_isr.assert_has_calls(
        [mock.call(mock_zk, name) for name in ('topic_0', 'topic_1')],
        any_order=True,
    )

    # Order of the reported topics is not part of the contract,
    # so compare after sorting each list by a canonical key.
    def by_items(entry):
        return sorted(entry.items())

    assert sorted(result, key=by_items) == sorted(TOPICS_WITH_WRONG_RP, key=by_items)
def test_prepare_output_ok_no_verbose():
    """Non-verbose OK output: message plus a zero counter, no topic list."""
    assert _prepare_output([], False, -1) == {
        'message': 'All topics have proper replication factor.',
        'raw': {
            'topics_with_wrong_replication_factor_count': 0,
        },
    }
def test_prepare_output_ok_verbose():
    """Verbose OK output additionally carries an (empty) topic list."""
    assert _prepare_output([], True, -1) == {
        'message': 'All topics have proper replication factor.',
        'raw': {
            'topics_with_wrong_replication_factor_count': 0,
            'topics': [],
        },
    }
def test_prepare_output_critical_no_verbose():
    """Non-verbose critical output: message and count only, no details."""
    assert _prepare_output(TOPICS_WITH_WRONG_RP, False, -1) == {
        'message': '2 topic(s) have replication factor lower than specified min ISR + 1.',
        'raw': {
            'topics_with_wrong_replication_factor_count': 2,
        },
    }
def test_prepare_output_critical_verbose():
    """Verbose critical output lists every offending topic (-1 = no head limit)."""
    expected = {
        'message': '2 topic(s) have replication factor lower than specified min ISR + 1.',
        # One human-readable line per offending topic.
        'verbose': (
            "Topics:\n"
            "replication_factor=3 is lower than min_isr=3 + 1 for topic_0\n"
            "replication_factor=2 is lower than min_isr=3 + 1 for topic_1"
        ),
        'raw': {
            'topics_with_wrong_replication_factor_count': 2,
            'topics': [
                {
                    'min_isr': 3,
                    'topic': 'topic_0',
                    'replication_factor': 3,
                },
                {
                    'min_isr': 3,
                    'topic': 'topic_1',
                    'replication_factor': 2,
                }
            ],
        }
    }
    assert _prepare_output(TOPICS_WITH_WRONG_RP, True, -1) == expected
def test_prepare_output_critical_verbose_with_head_limit():
    """A head limit of 1 truncates both the verbose text and the raw topic list."""
    expected = {
        'message': '2 topic(s) have replication factor lower than specified min ISR + 1.',
        # Only the first topic is shown; the header reflects the limit.
        'verbose': (
            "Top 1 topics:\n"
            "replication_factor=3 is lower than min_isr=3 + 1 for topic_0"
        ),
        'raw': {
            'topics_with_wrong_replication_factor_count': 2,
            'topics': [
                {
                    'min_isr': 3,
                    'topic': 'topic_0',
                    'replication_factor': 3,
                },
            ],
        }
    }
    assert _prepare_output(TOPICS_WITH_WRONG_RP, True, 1) == expected
| [
"alp@yelp.com"
] | alp@yelp.com |
# Read a dictionary of word pairs and answer one synonym query.
# Input format: first line n, then n lines of "word synonym",
# then a single query word.
with open('input.txt', 'r', encoding='utf8') as f:
    synonyms = {}
    n = int(f.readline())
    for _ in range(n):
        word, synonym = f.readline().split()
        # Map both directions so either member of a pair can be queried.
        synonyms[word] = synonym
        synonyms[synonym] = word
    print(synonyms[f.readline().strip()])
| [
"71695356+se2313se@users.noreply.github.com"
] | 71695356+se2313se@users.noreply.github.com |
134c1cf4545f15d63a56bc24d9af8ce38c60fb6c | 8ddca08ac2a57be4705d7bd319795dc622c1df8a | /tests/__init__.py | 2a12b0f46c1f0f999ffb803ef2109fc78d057000 | [
"Apache-2.0"
] | permissive | sirpengi/msgpack-python-pure | 723047d11e4eaec1304fd84ebaa9d25176382902 | a67e6a143059ae1504bcc08572d55cfe377855e7 | refs/heads/master | 2021-01-16T21:54:02.436529 | 2012-07-14T05:33:09 | 2012-07-14T05:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/bin/env/python
# -*- coding: utf-8 -*-
# Test-suite bootstrap (Python 2 -- note the print *statement* below).
# Puts the project root on sys.path so the modules under test are
# importable, then re-exports the individual test cases for nose.
import sys
from os.path import join,dirname
sys.path.append(join(dirname(sys.argv[0]), '..'))
print join(dirname(sys.argv[0]), '..')
from test_case import *
from tests.test_except import *
from tests.test_main import *
if __name__ == '__main__':
    import nose
    nose.main()
"fukuda@gmail.com"
] | fukuda@gmail.com |
c938955194dc89416c1f590325a2b127455eb227 | ecd7302e7fc521b1b9afbbb5c4e947552273b47a | /nets/MobileNetV2.py | 9c965bbb498dde422e73c06b42868f9ace6eba3e | [
"MIT"
] | permissive | lbf4616/PixelLink-with-MobileNet-V2 | 7762f4ec00b591405c418fd4ab287a58f1ef288d | 94b0f68141ac43c3248ec6c14d39f34c22e765f0 | refs/heads/master | 2020-07-03T09:00:30.657402 | 2019-08-28T04:02:09 | 2019-08-28T04:02:09 | 201,859,433 | 15 | 7 | null | null | null | null | UTF-8 | Python | false | false | 4,731 | py | import tensorflow as tf
import conv_blocks as ops
# Shorthand aliases used throughout this module.
slim = tf.contrib.slim
expand_input = ops.expand_input_by_factor
def basenet(inputs, fatness=32, dilation=True):
    """MobileNetV2 backbone network.

    Builds the inverted-residual stack and records the intermediate
    activations that the detection head taps into.

    Args:
        inputs: input image batch tensor (NHWC).
        fatness: kept for signature compatibility with the original VGG
            backbone; the MobileNetV2 layer widths below do not use it.
        dilation: kept for signature compatibility; unused here.

    Returns:
        (net, end_points): the final 1280-channel feature map and a dict
        of named intermediate activations ('conv1' ... 'fc5').
    """
    # End_points collect relevant activations for external use.
    end_points = {}
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
            padding='SAME', activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
        # Stem: stride-2 conv, then the first bottleneck (expansion 1).
        net = slim.conv2d(inputs, 32, [3, 3], stride=2)
        net = ops.expanded_conv(net, expansion_size=expand_input(1, divisible_by=1), num_outputs=16, stride=1, normalizer_fn=slim.batch_norm)
        end_points['conv1'] = net
        print(net)
        # Stage 2: 24 channels, downsample once.
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=24, stride=2, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=24, stride=1, normalizer_fn=slim.batch_norm)
        end_points['conv2'] = net
        print(net)
        # Stage 3: 32 channels, downsample once.
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=32, stride=2, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=32, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=32, stride=1, normalizer_fn=slim.batch_norm)
        end_points['conv3'] = net
        print(net)
        # Stage 4: 64 then 96 channels, downsample once.
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=64, stride=2, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=64, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=64, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=64, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=96, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=96, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=96, stride=1, normalizer_fn=slim.batch_norm)
        end_points['conv4'] = net
        print(net)
        # Stage 5: 160/320 channels plus the final 1x1 projection to 1280.
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=160, stride=2, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=160, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=160, stride=1, normalizer_fn=slim.batch_norm)
        net = ops.expanded_conv(net, expansion_size=expand_input(6), num_outputs=320, stride=1, normalizer_fn=slim.batch_norm)
        net = slim.conv2d(net, 1280, [1, 1], stride=1)
        end_points['fc5'] = net
        print(net)
    # Dead commented-out VGG-16 code removed; trailing semicolon removed.
    return net, end_points
| [
"810804616@qq.com"
] | 810804616@qq.com |
15ebe1a3991b7c2926af485aac68c164facd7718 | adbf09a31415e6cf692ff349bd908ea25ded42a8 | /widgets/hello.py | 1f431dbfab5106918d3f455f654bdbbf17576618 | [] | no_license | cmulliss/gui_python | 53a569f301cc82b58880c3c0b2b415fad1ecc3f8 | 6c83d8c2e834464b99024ffd8cf46ac4e734e7a4 | refs/heads/main | 2023-08-12T22:33:01.596005 | 2021-10-11T12:35:41 | 2021-10-11T12:35:41 | 408,176,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import tkinter as tk
from tkinter import ttk
# main window is going to be called root
# Tk() creates the application's main window object
# .pack() is the geometry manager that places the label in the window
root = tk.Tk()
root.title("hello World")
ttk.Label(root, text="Hello World", padding=(30, 10)).pack()
# mainloop() starts the Tk event loop; it blocks until the window closes
root.mainloop()
| [
"cmulliss@gmail.com"
] | cmulliss@gmail.com |
ed91abd135830dd7d436149ede1b3294b705034b | b89d2baf79f7c64ae9ff51a755ca9d10a2104b0b | /cootalk/src/fetch_config.py | 079b85fda58624b5bdcdbda19314d4fe4aa4ba4a | [] | no_license | Git-liuliang/config_diff_fileupload | dfe4fca90f6b0f8cf1e062754aa741980f4f9db0 | a4401b0b5f62f4ff7a575e906f1aab9b7184aab4 | refs/heads/master | 2020-03-07T21:38:49.031051 | 2018-04-02T09:11:08 | 2018-04-02T09:11:08 | 127,732,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,580 | py | #! /usr/bin/python
from ansible.inventory import Inventory
from ansible.playbook import PlayBook
from ansible import callbacks
from ansible import utils
import time,os
import logging
from cootalk.conf import mylogging
from cootalk.src import Myexception
# Project root (two directory levels above this file); used below to
# locate the conf/ and outfile/ directories.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Module-level logger, configured once at import time by the project config.
logger = logging.getLogger(__name__)
mylogging.load_my_logging_cfg()
class PlaybookRunnerCallbacks(callbacks.PlaybookRunnerCallbacks):
    """Runner callbacks that collect problem results from a playbook run.

    Problems (failed, unreachable, suspicious-ok, skipped) are pushed onto
    the class-level ``error`` list, which is shared by every instance and
    read back by ``core()`` after the run finishes.
    """

    error = []  # class-level accumulator, shared across all instances

    def __init__(self, stats, verbose=None):
        super(PlaybookRunnerCallbacks, self).__init__(stats, verbose)

    def on_ok(self, host, host_result):
        super(PlaybookRunnerCallbacks, self).on_ok(host, host_result)
        # A 'msg' key on an otherwise-ok result signals a soft failure
        # (e.g. the remote file could not be fetched) -- record it too.
        if host_result.get('msg'):
            PlaybookRunnerCallbacks.error.append(host_result)
            logger.warning('===%s====host=%s===result=%s'%(host_result.get('msg'),host,host_result.get('file')))
        else:
            logger.info('===on_ok====host=%s===result=%s'%(host,host_result.get('item')))

    def on_unreachable(self, host, results):
        super(PlaybookRunnerCallbacks, self).on_unreachable(host, results)
        PlaybookRunnerCallbacks.error.append(results)
        logger.warning('===on_unreachable====host=%s===result=%s'%(host,results))

    def on_failed(self, host, results, ignore_errors=False):
        super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors)
        PlaybookRunnerCallbacks.error.append(results)
        # Fixed log label: this handler previously logged 'on_unreachable'.
        logger.warning('===on_failed====host=%s===result=%s'%(host,results))

    def on_skipped(self, host, item=None):
        super(PlaybookRunnerCallbacks, self).on_skipped(host, item)
        # Bug fix: the original appended the undefined name ``results``
        # here, raising NameError whenever a task was skipped.  Record
        # the skipped host/item pair instead.
        PlaybookRunnerCallbacks.error.append({'host': host, 'item': item})
        logger.warning("this task does not execute,please check parameter or condition.")
class PlaybookCallbacks(callbacks.PlaybookCallbacks):
    """Playbook-level callbacks; logs a completion message after stats."""

    def __init__(self, verbose=False):
        super(PlaybookCallbacks, self).__init__(verbose)

    def on_stats(self, stats):
        super(PlaybookCallbacks, self).on_stats(stats)
        # Typo fixed in the log message ('palybook' -> 'playbook').
        logger.info("playbook executes completed====")
class PlayUbook(object):
    # NOTE(review): the class name looks like a typo for "PlayBook"; kept
    # as-is because renaming would break existing callers.
    """Thin wrapper that runs the fetch playbook against an inventory.

    host_dir: path to the ansible inventory file.
    yaml_dir: path to the playbook YAML file.
    getfile_path: local directory passed to the play as extra var 'dir'.
    """
    def __init__(self,host_dir,yaml_dir,getfile_path):
        self.host_dir = host_dir
        self.yaml_dir = yaml_dir
        self.getfile_path = getfile_path
    def playnow(self):
        """Run the playbook (200 forks) and emit the stats summary."""
        inventory = Inventory(self.host_dir)
        stats = callbacks.AggregateStats()
        playbook_cb = PlaybookCallbacks()
        runner_cb = PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
        results = PlayBook(playbook=self.yaml_dir, stats=stats, callbacks=playbook_cb, runner_callbacks=runner_cb,
                           inventory=inventory, forks=200, extra_vars={"dir": self.getfile_path})
        res = results.run()
        playbook_cb.on_stats(results.stats)
def core():
    """Run the fetch playbook with the project's default paths.

    Returns the shared PlaybookRunnerCallbacks.error list collected
    during the run.
    """
    inventory_path = os.path.join(BASE_DIR, 'conf', 'hosts')
    playbook_path = os.path.join(BASE_DIR, 'conf', 'key')
    print(inventory_path)
    inventory = Inventory(inventory_path)
    stats = callbacks.AggregateStats()
    playbook_cb = PlaybookCallbacks()
    runner_cb = PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
    # Local directory the fetched remote files are written into.
    fetch_dir = os.path.join(BASE_DIR, 'outfile', 'remote_file')
    playbook = PlayBook(
        playbook=playbook_path,
        stats=stats,
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        inventory=inventory,
        forks=200,
        extra_vars={"dir": fetch_dir},
    )
    playbook.run()
    playbook_cb.on_stats(playbook.stats)
    return PlaybookRunnerCallbacks.error
# Allow running this module directly for a manual fetch.
if __name__ == '__main__':
    core()
| [
"894513081@qq.com"
] | 894513081@qq.com |
79060db8148d189e49d71a2fcde2a58110cad683 | d4f05d51568bfda9fb964deba92d9fd599a3dcde | /desing_pattern/factory_method/idcard.py | d696179da0e2206fdb2814b3f87a9e6356415882 | [] | no_license | Fullmoon8507/PythonPracticeProject | 44beba7ce783e5e22429516d39ee96adc1ead785 | 57454099ad67bfe4431ee997fada640fde6ccecc | refs/heads/master | 2020-04-16T23:29:58.907552 | 2017-05-06T07:27:35 | 2017-05-06T07:27:35 | 53,178,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from product import Product
class IDCard(Product):
    """Concrete Product of the Factory Method pattern: an owner-bound ID card."""
    def __init__(self, owner):
        # Name-mangled attribute keeps the owner private to this class.
        self.__owner = owner
        # "Creating <owner>'s card"
        print(self.__owner + 'のカードを作成します')
    def use(self):
        """Announce that the owner's card is being used ("using <owner>'s card")."""
        print(self.__owner + 'のカードを使います')
    def get_owner(self):
        """Return the name of the owner this card was issued to."""
        return self.__owner
"you@example.com"
] | you@example.com |
8a4d5bed883776ebcd3fcc904288d9add338fef0 | 584f7b51d7cd529448e2fc0147557e26931ab17e | /test_Begin_dtype.py | 94c25b201a1b4bb74e965f1d89a9301ac63f4647 | [
"BSD-3-Clause"
] | permissive | opticspy/lightpipes | 8ca0d2221a1b893de5e51fec9061e90b9145f5f8 | f4ffdedb3ab2f9b5ae5a9a8e37985d2a7f8bb2ef | refs/heads/master | 2023-09-04T19:07:11.376631 | 2023-09-04T15:24:55 | 2023-09-04T15:24:55 | 80,127,706 | 191 | 55 | BSD-3-Clause | 2023-08-23T00:45:33 | 2017-01-26T15:39:28 | Python | UTF-8 | Python | false | false | 572 | py | #! /usr/bin/env python
"""
Script to test the Begin command with dtype option.
"""
from LightPipes import *
import numpy as np
import sys
wavelength = 500*nm
size = 25*mm
N = 3000
N2=int(N/2)
w0=2*mm
print("LightPipes version = ", LPversion)
print("without dtype option:")
F=Begin(size,wavelength,N)
print("type of F:",F._dtype)
print("size of F.field: ",sys.getsizeof(F.field)/1e9," Gbyte")
print("\n")
print("with dtype option:")
F=Begin(size,wavelength,N,dtype=np.complex64)
print("type of F:",F._dtype)
print("size of F.field: ",sys.getsizeof(F.field)/1e9," Gbyte")
| [
"fred511949@gmail.com"
] | fred511949@gmail.com |
a64c44e8646a8722fd0cbdc5d2b44681652d31e3 | c90b3cb32b5c8b7ac55519931081c4d56edcd06f | /app/views.py | cdfd5aef52fffb764107749fe5f9e911ce46358c | [] | no_license | SShanshina/django-2-landing | 5e39111c271f45f66a2b34bcf80a2e00a167fb63 | 96176382ea606f7a83b693ebabe65251444ec357 | refs/heads/master | 2023-03-02T16:41:11.985470 | 2021-02-13T09:53:27 | 2021-02-13T09:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | from collections import Counter
from django.shortcuts import render
# For debugging the A/B-testing mechanism these counters serve as the
# store for the number of impressions (shows) and click-throughs.
# Do not do this in real projects: the in-memory counters reset to
# zero every time the application restarts.
counter_show = Counter()
counter_click = Counter()
def index(request):
    """Main page; records a click-through for the landing variant
    named by the ``from-landing`` GET parameter."""
    source = request.GET.get('from-landing')
    print(source)
    counter_click[source] += 1
    print(f'Количество переходов: original - {counter_click["original"]}, test - {counter_click["test"]}')
    return render(request, 'index.html')
def landing(request):
    """Landing page for the A/B test.

    The GET parameter ``ab-test-arg`` selects the variant: 'original'
    renders landing.html, 'test' renders landing_alternate.html.  Each
    render of a known variant counts as one impression.

    Bug fix: the original returned None (an HTTP 500 in Django) when the
    parameter was missing or unrecognised; now the original page is
    served in that case, without being counted.
    """
    variant = request.GET.get('ab-test-arg')
    print(variant)
    if variant == 'original':
        counter_show[variant] += 1
        print(f'Количество показов: original - {counter_show["original"]}, test - {counter_show["test"]}')
        return render(request, 'landing.html')
    elif variant == 'test':
        counter_show[variant] += 1
        print(f'Количество показов: original - {counter_show["original"]}, test - {counter_show["test"]}')
        return render(request, 'landing_alternate.html')
    # Unknown or absent variant: fall back to the original page, uncounted.
    return render(request, 'landing.html')
def stats(request):
    """Show the conversion rate (clicks / impressions) for each variant.

    Bug fixes relative to the original:
    - it computed shows/clicks, the inverse of the conversion rate its
      own comment describes (clicks-to-shows ratio);
    - it raised ZeroDivisionError before any traffic was recorded.
      Variants with no impressions now report a conversion of 0.
    """
    original_result = (
        counter_click['original'] / counter_show['original']
        if counter_show['original'] else 0.0
    )
    print(original_result)
    test_result = (
        counter_click['test'] / counter_show['test']
        if counter_show['test'] else 0.0
    )
    print(test_result)
    return render(request, 'stats.html', context={
        'test_conversion': round(test_result, 2),
        'original_conversion': round(original_result, 2),
    })
| [
"s.shanshina@gmail.com"
] | s.shanshina@gmail.com |
23d2de297f217b04686d2baf5f2e92f9b908f62f | 02af768853257bb60de8d6e6dca8778c07d976db | /xgboost-classifier.py | fee6085989b7cfa02cdfd78f60d075a5034a4c46 | [
"MIT"
] | permissive | saksham-mittal/CS6510-Kaggle-Challenge | 2bc976ddf8dd692f22a7921942e304ce71ab8cd9 | 01cf220a826649fc7341c057a2175c98acf025ba | refs/heads/master | 2020-05-09T18:22:34.839861 | 2019-04-14T17:22:38 | 2019-04-14T17:22:38 | 181,339,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,587 | py | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
training_set = pd.read_csv("train.csv")
# Extracting labels from training set
training_labels = training_set['pricing_category']
# print(training_labels)
# Dropping the last column and id from training set
training_set = training_set.drop(labels='pricing_category', axis=1)
training_set = training_set.drop(labels='id', axis=1)
# print(training_set)
# Filling nan taxi_types with new class 'O'
training_set['taxi_type'].fillna('O', inplace=True)
# Filling nan customer_scores with mean of the attribute
training_set['customer_score'].fillna(training_set['customer_score'].mean(), inplace=True)
# Filling nan customer_score_confidence with new class 'O'
training_set['customer_score_confidence'].fillna('O', inplace=True)
# Filling nan months_of_activity with 0
training_set['months_of_activity'].fillna(0.0, inplace=True)
# One hot encoding the 'sex' attribute
labelEnc = LabelEncoder()
male = labelEnc.fit_transform(training_set['sex'])
oneHotEnc = OneHotEncoder(categorical_features=[0])
male = oneHotEnc.fit_transform(male.reshape(-1, 1)).toarray()
# Inserting the one hot encoding attribute and dropping the 'sex' attribute
training_set = training_set.drop(labels='sex', axis=1)
training_set.insert(training_set.shape[1], "male", male[:, 0], True)
training_set.insert(training_set.shape[1], "female", male[:, 1], True)
# Encoding taxi_type
training_temp = {}
for i in range(len(training_set.taxi_type.unique())):
training_temp["taxi_type_{}".format(sorted(training_set.taxi_type.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['taxi_type']):
training_temp['taxi_type_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='taxi_type', axis=1)
# For trying label encoding only
# training_set['taxi_type'] = labelEnc.fit_transform(training_set['taxi_type'])
# Encoding customer_score_confidence
training_temp = {}
for i in range(len(training_set.customer_score_confidence.unique())):
training_temp["customer_score_confidence_{}".format(sorted(training_set.customer_score_confidence.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['customer_score_confidence']):
training_temp['customer_score_confidence_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='customer_score_confidence', axis=1)
# For trying label encoding only
# training_set['customer_score_confidence'] = labelEnc.fit_transform(training_set['customer_score_confidence'])
# Encoding drop_location_type
training_temp = {}
for i in range(len(training_set.drop_location_type.unique())):
training_temp["drop_location_type_{}".format(sorted(training_set.drop_location_type.unique())[i])] = np.zeros(training_set.shape[0], dtype="float32")
for i, taxi in enumerate(training_set['drop_location_type']):
training_temp['drop_location_type_{}'.format(taxi)][i] = 1.0
for key in training_temp.keys():
training_set[key] = training_temp[key]
training_set = training_set.drop(labels='drop_location_type', axis=1)
# print(training_set)
training_set1 = training_set
# Replacing nan in annon_var_1 with mean
training_set['anon_var_1'].fillna(training_set['anon_var_1'].mean(), inplace=True)
# print(training_set)
# Trying dropping the anon_var_1 attribute in training_set1
training_set1 = training_set1.drop(labels='anon_var_1', axis=1)
"""
Doing the same preprocessing for the test data
"""
test_set = pd.read_csv("test.csv")
test_id = test_set['id']
test_id = np.asarray(test_id)
# Dropping id column
test_set = test_set.drop(labels='id', axis=1)
test_set['taxi_type'].fillna('O', inplace=True)
test_set['customer_score'].fillna(test_set['customer_score'].mean(), inplace=True)
test_set['customer_score_confidence'].fillna('O', inplace=True)
test_set['months_of_activity'].fillna(0.0, inplace=True)
labelEnc = LabelEncoder()
male = labelEnc.fit_transform(test_set['sex'])
oneHotEnc = OneHotEncoder(categorical_features=[0])
male = oneHotEnc.fit_transform(male.reshape(-1, 1)).toarray()
test_set = test_set.drop(labels='sex', axis=1)
test_set.insert(test_set.shape[1], "male", male[:, 0], True)
test_set.insert(test_set.shape[1], "female", male[:, 1], True)
test_temp = {}
for i in range(len(test_set.taxi_type.unique())):
test_temp["taxi_type_{}".format(sorted(test_set.taxi_type.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['taxi_type']):
test_temp['taxi_type_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='taxi_type', axis=1)
# test_set['taxi_type'] = labelEnc.fit_transform(test_set['taxi_type'])
test_temp = {}
for i in range(len(test_set.customer_score_confidence.unique())):
test_temp["customer_score_confidence_{}".format(sorted(test_set.customer_score_confidence.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['customer_score_confidence']):
test_temp['customer_score_confidence_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='customer_score_confidence', axis=1)
# test_set['customer_score_confidence'] = labelEnc.fit_transform(test_set['customer_score_confidence'])
test_temp = {}
for i in range(len(test_set.drop_location_type.unique())):
test_temp["drop_location_type_{}".format(sorted(test_set.drop_location_type.unique())[i])] = np.zeros(test_set.shape[0], dtype="float32")
for i, taxi in enumerate(test_set['drop_location_type']):
test_temp['drop_location_type_{}'.format(taxi)][i] = 1.0
for key in test_temp.keys():
test_set[key] = test_temp[key]
test_set = test_set.drop(labels='drop_location_type', axis=1)
test_set1 = test_set
# print(test_set)
test_set['anon_var_1'].fillna(test_set['anon_var_1'].mean(), inplace=True)
test_set1 = test_set1.drop(labels='anon_var_1', axis=1)
# For finiding error on part of train data
# X_train, X_test, y_train, y_test = train_test_split(training_set, training_labels, test_size=0.2, random_state=42)
"""
Preprocessing complete
"""
# Fit a multi-class (3 price categories) XGBoost classifier on the full
# training set, report training-set MSE, predict the test set, and write
# the Kaggle submission file.
xg_classify = XGBClassifier(objective='multi:softmax', num_class=3, colsample_bytree=0.8, subsample=0.8, scale_pos_weight=1, learning_rate=0.06, max_depth=5, n_estimators=500, gamma=5)
# Trying data normalization
# sc = StandardScaler()
# sc.fit_transform(training_set)
# sc.fit_transform(test_set)
xg_classify.fit(training_set, training_labels)
print("Data fitting completed")
# Mean Squared Error on the training data
print("mse =", mean_squared_error(training_labels, xg_classify.predict(training_set)))
ans = xg_classify.predict(test_set)
print("Data prediction completed")
# print(test_id.shape)
# print(ans.shape)
# a = accuracy_score(y_test, ans)
# print("mean squeared : " , a)
print(ans)
# Writing output to the csv; labels are written with a trailing '.0'
# to match the expected 'pricing_category' format.
with open("output-xgboost.csv", "w") as fp:
    fp.write("id,pricing_category\n")
    for i in range(test_id.shape[0]):
        fp.write("{},{}.0\n".format(test_id[i], ans[i]))
| [
"mittalsaksham01@gmail.com"
] | mittalsaksham01@gmail.com |
b3a084f648e66397103782e5c2052e5bf1d8441c | ac6279d1894f1dec8ea5f484afc2d22b665370cc | /train_MTL_uncertainty.py | 6cfac276a21b36b51d57ca8a4c8a8eb2eb7508f6 | [] | no_license | Benaziza-Sidi/MultiTask-Learning-for-image-Super-Resolution | 64535124ec6ddd4bbf1d8b1ee08a04c8097bde92 | de8414d41ecc8e7c1b695b0d5488d4a6caae8c3c | refs/heads/main | 2023-04-21T23:19:15.216935 | 2021-05-06T03:10:55 | 2021-05-06T03:10:55 | 364,654,152 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,333 | py | import argparse
import os
import copy
import torch
from torch import nn
import torchvision
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from models_ResNet_MLT import ResNetSR
from datasets_MLT import TrainDataset
from utils import AverageMeter, calc_psnr
import math
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import matplotlib.pyplot as plt
import numpy as np
def train_val_dataset(dataset, val_split=0.2):
train_idx, val_idx = train_test_split(list(range(len(dataset))), test_size=val_split)
datasets = {}
datasets['train'] = Subset(dataset, train_idx)
datasets['val'] = Subset(dataset, val_idx)
return datasets
class MultiTaskLossWrapper(nn.Module):
def __init__(self, task_num, device, model):
super(MultiTaskLossWrapper, self).__init__()
self.device = device
self.model = model
self.task_num = task_num
self.log_vars = nn.Parameter(torch.zeros((task_num),requires_grad=True, device=device))
def forward(self, inputs, hr_loss, hq_loss):
precision1 = torch.exp(-self.log_vars[0])
loss_hr = torch.sum(precision1 * hr_loss + self.log_vars[0],-1)
precision2 = torch.exp(-self.log_vars[1])
loss_hq = torch.sum(precision2 * hq_loss + self.log_vars[1], -1)
loss = loss_hr + loss_hq
return loss, self.log_vars.data.tolist()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--hr-train-file', type=str, required=True)
parser.add_argument('--hq-train-file')
parser.add_argument('--test-file', type=str, required=True)
parser.add_argument('--outputs-dir', type=str, required=True)
parser.add_argument('--scale', type=int, default=2)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument('--num-epochs', type=int, default=50)
parser.add_argument('--num-workers', type=int, default=8)
parser.add_argument('--seed', type=int, default=123)
args = parser.parse_args()
args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))
writer = SummaryWriter("runs/ResNetSR")
if not os.path.exists(args.outputs_dir):
os.makedirs(args.outputs_dir)
cudnn.benchmark = True
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.manual_seed(args.seed)
print(device)
print(torch.backends.cudnn.deterministic)
print(torch.backends.cudnn.benchmark)
model = ResNetSR().to(device=device,dtype=torch.float32)
summary(model,(1,256,256))
criterion = nn.MSELoss()
mtl = MultiTaskLossWrapper(task_num=2,device=device,model = model)
optimizer = optim.Adam([
{'params': mtl.parameters() , 'lr': args.lr * 0.1}
], lr=args.lr)
print('number of trainable parameters = : ' + str(sum(p.numel() for p in mtl.parameters() if p.requires_grad)))
train_val_set = TrainDataset(args.hr_train_file,args.test_file,args.hq_train_file,'transpose')
datasets = train_val_dataset(train_val_set)
train_dataloader = DataLoader(dataset=datasets['train'],
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True)
eval_dataloader= DataLoader(dataset=datasets['val'],batch_size=1,shuffle=False)
best_weights = copy.deepcopy(model.state_dict())
best_epoch = 0
best_psnr = 0.0
for epoch in range(args.num_epochs):
model.train()
epoch_losses = AverageMeter()
with tqdm(total=(len(train_dataloader) - len(train_dataloader) % args.batch_size)) as t:
t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))
for batch_idx, data in enumerate(train_dataloader):
inputs, hr_labels, hq_labels = data
#load the data into the cuda:0 device
inputs = inputs.to(device=device,dtype=torch.float32)
hr_labels = hr_labels.to(device=device,dtype=torch.float32)
hq_labels = hq_labels.to(device=device,dtype=torch.float32)
hr_preds, hq_preds = model(inputs)
hr_loss = criterion(hr_preds, hr_labels)
hq_loss = criterion(hq_preds, hq_labels)
loss , log_vars = mtl(inputs,hr_loss,hq_loss)
loss = loss.to(device)
epoch_losses.update(loss.item(), len(inputs))
optimizer.zero_grad()
loss.backward()
optimizer.step()
t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
t.update(len(inputs))
writer.add_scalar('training_loss',epoch_losses.avg,epoch)
torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))
model.eval()
hr_epoch_psnr = AverageMeter()
hq_epoch_psnr = AverageMeter()
hq_eval_losses = AverageMeter()
hr_eval_losses = AverageMeter()
eval_losses = AverageMeter()
print('[hr_weight, hq_weight] = ' + str(log_vars))
for data in eval_dataloader:
inputs, hr_labels, hq_labels = data
inputs = inputs.to(device=device,dtype=torch.float32)
hr_labels = hr_labels.to(device=device,dtype=torch.float32)
hq_labels = hq_labels.to(device=device,dtype=torch.float32)
with torch.no_grad():
hr_preds,hq_preds = model(inputs)
hr_eval_loss = criterion(hr_preds,hr_labels)
hq_eval_loss = criterion(hq_preds,hq_labels)
eval_losses.update(hr_eval_loss.item() + hq_eval_loss.item(), len(inputs))
hr_epoch_psnr.update(calc_psnr(hr_preds,hr_labels), len(inputs))
hq_epoch_psnr.update(calc_psnr(hq_preds,hq_labels), len(inputs))
writer.add_scalar('eval_loss',eval_losses.avg,epoch)
print('HR eval psnr: {:.2f}'.format(hr_epoch_psnr.avg))
print('HQ eval psnr :{:.2f}'.format(hq_epoch_psnr.avg))
writer.add_scalar('hr_psnr_eval',hr_epoch_psnr.avg,epoch)
writer.add_scalar('hq_psnr_eval',hq_epoch_psnr.avg,epoch)
hr_pred_grid=torchvision.utils.make_grid(hr_preds)
hq_pred_grid=torchvision.utils.make_grid(hq_preds)
writer.add_image('HR prediction epoch : ' + str(epoch),hr_pred_grid)
writer.add_image('HQ prediction epoch : ' + str(epoch),hq_pred_grid)
writer.close()
# best epoch choice is dependant on what output is to be optimized
if hr_epoch_psnr.avg > best_psnr:
best_epoch = epoch
best_psnr = hr_epoch_psnr.avg
best_weights = copy.deepcopy(model.state_dict())
print('best epoch: {}, hr_psnr: {:.2f}'.format(best_epoch, best_psnr))
torch.save(best_weights, os.path.join(args.outputs_dir, 'best.pth'))
| [
"noreply@github.com"
] | noreply@github.com |
787485ffad6e919c7f32f1053d53ecb96369920e | 4ab67b7b1b2f81e2c4db1a6948c606de046f5cff | /src/style_transfer.py | 00aec6cca95f0e5d3fb7884872cb415953228d2e | [] | no_license | klaudialemiec/style-transfer | 06c7fb56a401cea4a35d5b464360e82a71c256f5 | a3a8530de6147e5344aeb6ef34023ec2bfe350b8 | refs/heads/master | 2023-03-05T15:02:25.239286 | 2021-02-23T19:59:31 | 2021-02-23T19:59:31 | 341,357,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,846 | py | import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
import numpy as np
from PIL import Image
def load_img(path_to_img):
max_dim = 512
img = tf.io.read_file(path_to_img)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
img = img[tf.newaxis, :]
return img
def image_to_tenforlow_image(image):
max_dim = 512
img = np.array(image)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
img = img[tf.newaxis, :]
return img
def tensor_to_image(tensor):
tensor = tensor * 255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor) > 3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return Image.fromarray(tensor)
def clip_0_1(image):
return tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=1.0)
def style_content_loss(
outputs,
targets,
num_layers,
content_weight=1e4,
style_weight=1e-2,
):
style_outputs = outputs["style"]
content_outputs = outputs["content"]
style_target = targets["style"]
content_target = targets["content"]
style_num_layers = num_layers["style"]
content_num_layers = num_layers["content"]
style_loss = tf.add_n(
[
tf.reduce_mean((style_outputs[name] - style_target[name]) ** 2)
for name in style_outputs.keys()
]
)
style_loss *= style_weight / style_num_layers
content_loss = tf.add_n(
[
tf.reduce_mean((content_outputs[name] - content_target[name]) ** 2)
for name in content_outputs.keys()
]
)
content_loss *= content_weight / content_num_layers
loss = style_loss + content_loss
return loss
def train_step(
extractor, image, optimizer, targets, num_layers, total_variation_weight=30
):
with tf.GradientTape() as tape:
outputs = extractor(image)
loss = style_content_loss(outputs, targets, num_layers)
loss += total_variation_weight * tf.image.total_variation(image)
grad = tape.gradient(loss, image)
optimizer.apply_gradients([(grad, image)])
image.assign(clip_0_1(image))
def gram_matrix(input_tensor):
result = tf.linalg.einsum("bijc,bijd->bcd", input_tensor, input_tensor)
input_shape = tf.shape(input_tensor)
num_locations = tf.cast(input_shape[1] * input_shape[2], tf.float32)
return result / (num_locations)
| [
"kl.lemiec@gmail.com"
] | kl.lemiec@gmail.com |
5c5787103c69797520000d729173623065a66de2 | f928e56e6c7bcba99b7514a3f8d340adb8539275 | /peky.py | 9febe2b776ba249b2dd12d63e859613a1c88aff6 | [] | no_license | hg570820/python_projects_git | 3a374ec832a5abd94049d0a7fad18405e39eceb4 | 151791db7ef72d46481ef14c8738e20a4235824a | refs/heads/master | 2020-04-03T14:21:49.983469 | 2018-12-02T09:59:27 | 2018-12-02T09:59:27 | 155,319,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,631 | py | from turtle import*
def nose(x, y): # 鼻子
penup() # 提起笔
goto(x, y) # 定位
pendown() # 落笔,开始画
setheading(-30) # 将乌龟的方向设置为to_angle/为数字(0-东、90-北、180-西、270-南)
begin_fill() # 准备开始填充图形
a = 0.4
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a = a+0.08
left(3) # 向左转3度
forward(a) # 向前走a的步长
else:
a = a-0.08
left(3)
forward(a)
end_fill() # 填充完成
penup()
setheading(90)
forward(25)
setheading(0)
forward(10)
pendown()
pencolor(255, 155, 192) # 画笔颜色
setheading(10)
begin_fill()
circle(5)
color(160, 82, 45) # 返回或设置pencolor和fillcolor
end_fill()
penup()
setheading(0)
forward(20)
pendown()
pencolor(255, 155, 192)
setheading(10)
begin_fill()
circle(5)
color(160, 82, 45)
end_fill()
def head(x, y): # 头
color((255, 155, 192), "pink")
penup()
goto(x, y)
setheading(0)
pendown()
begin_fill()
setheading(180)
circle(300, -30)
circle(100, -60)
circle(80, -100)
circle(150, -20)
circle(60, -95)
setheading(161)
circle(-300, 15)
penup()
goto(-100, 100)
pendown()
setheading(-30)
a = 0.4
for i in range(60):
if 0 <= i < 30 or 60 <= i < 90:
a = a+0.08
lt(3) # 向左转3度
fd(a) # 向前走a的步长
else:
a = a-0.08
lt(3)
fd(a)
end_fill()
def ears(x, y): # 耳朵
color((255, 155, 192), "pink")
penup()
goto(x, y)
pendown()
begin_fill()
setheading(100)
circle(-50, 50)
circle(-10, 120)
circle(-50, 54)
end_fill()
penup()
setheading(90)
forward(-12)
setheading(0)
forward(30)
pendown()
begin_fill()
setheading(100)
circle(-50, 50)
circle(-10, 120)
circle(-50, 56)
end_fill()
def eyes(x, y): # 眼睛
color((255, 155, 192), "white")
penup()
setheading(90)
forward(-20)
setheading(0)
forward(-95)
pendown()
begin_fill()
circle(15)
end_fill()
color("black")
penup()
setheading(90)
forward(12)
setheading(0)
forward(-3)
pendown()
begin_fill()
circle(3)
end_fill()
color((255, 155, 192), "white")
penup()
seth(90)
forward(-25)
seth(0)
forward(40)
pendown()
begin_fill()
circle(15)
end_fill()
color("black")
penup()
setheading(90)
forward(12)
setheading(0)
forward(-3)
pendown()
begin_fill()
circle(3)
end_fill()
def cheek(x, y): # 腮
color((255, 155, 192))
penup()
goto(x, y)
pendown()
setheading(0)
begin_fill()
circle(30)
end_fill()
def mouth(x, y): # 嘴
color(239, 69, 19)
penup()
goto(x, y)
pendown()
setheading(-80)
circle(30, 40)
circle(40, 80)
def setting(): # 参数设置
pensize(4)
hideturtle() # 使乌龟无形(隐藏)
colormode(255) # 将其设置为1.0或255.随后 颜色三元组的r,g,b值必须在0 .. cmode范围内
color((255, 155, 192), "pink")
setup(840, 500)
speed(10)
def main():
setting() # 画布、画笔设置
nose(-100, 100) # 鼻子
head(-69, 167) # 头
ears(0, 160) # 耳朵
eyes(0, 140) # 眼睛
cheek(80, 10) # 腮
mouth(-20, 30) # 嘴
done()
if __name__ == '__main__':
main()
| [
"391507059@qq.com"
] | 391507059@qq.com |
1a8e25cb83cbd84173b2fe52e27ff3b97302604e | b0feea0b9a1c7d270b17c12fdf1837e30bbd4db0 | /curb_challenge.py | 3fda1a861a02b5f109fe01f36282fe7f17f06518 | [] | no_license | aryasabeti/curbside-challenge | 083db3ab6854cccd43c13685c31e8f15f63e100f | f742e3a35f5998ca84212f8cc103c58eff621f18 | refs/heads/master | 2021-05-31T09:59:38.097154 | 2015-11-07T17:27:10 | 2015-11-07T17:27:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | import requests
import json
import itertools
def dict_keys_to_lower(d):
return {str(key).lower():value for key, value in d.items()}
def listify(list_or_single):
is_list = isinstance(list_or_single, list)
return list_or_single if is_list else [list_or_single]
def curb_api(endpoint, curb_headers = {}):
return requests.get('http://challenge.shopcurbside.com/' + endpoint, headers = curb_headers).text
def session_generator():
for i in itertools.count():
if(i % 10 == 0):
session = curb_api('get-session')
yield session
sessions = session_generator()
def get_response(endpoint):
response_text = curb_api(endpoint, {'session': next(sessions)})
return dict_keys_to_lower(json.loads(response_text))
def get_secret(endpoint):
response = get_response(endpoint)
if('secret' in response):
return response['secret']
else:
next_endpoints = listify(response['next'])
return ''.join(map(get_secret, next_endpoints))
if __name__ == '__main__':
print(get_secret('start'))
| [
"ariasabeti@gmail.com"
] | ariasabeti@gmail.com |
e1385f734a2a8cabe2dc74cbf093d982cd961bdb | 0959af52fb425a3c16b77166dafafc104cf576ca | /base/configs/mofcom/settings.py | 984806899a34862634727d38e874658375eeae28 | [] | no_license | njunth/Crawler | d62e161ebf2fceefed3c976ac460090d6284f620 | de0aa7321728d776915577827d719302f2cd1ed5 | refs/heads/master | 2021-09-16T21:03:38.905615 | 2018-06-25T04:36:37 | 2018-06-25T04:36:37 | 109,373,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | # -*- coding: utf-8 -*-
# Scrapy settings for mySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'mySpider'
SPIDER_MODULES = ['mySpider.spiders']
NEWSPIDER_MODULE = 'mySpider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'mySpider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
#FEED_EXPORT_ENCODING = 'utf-8'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'mySpider.middlewares.MyspiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'mySpider.pipelines.JsonWithEncodingPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"2364684794@qq.com"
] | 2364684794@qq.com |
50010f17037285bb9727e06c89112bd0a9b7a023 | 72320ffc0c89b3f61bcf40110e673c59940056ea | /setup.py | ece907ed6a9fd87152d76600eeb8fb6e5c237523 | [
"MIT"
] | permissive | Darkman/Rebrand-Blizzard-App | 8e789f8bc8e3d681a2e5bd9c04cc613f126b9012 | 256d69ea657bcd5cc4b38011e47cf6e45884075e | refs/heads/master | 2021-01-23T03:33:54.262006 | 2017-03-28T06:02:00 | 2017-03-28T06:02:00 | 86,091,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,355 | py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='rebrand-blizzard-app',
version='0.0.1',
description='A tool to rebrand the Blizzard App back to Battle.net.',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Darkman/Rebrand-Blizzard-App',
author='Caleb Pineur',
author_email='caleb.pineur@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project?
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Topic :: Games/Entertainment',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='rebrand battle.net blizzard app',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['PyYAML', 'psutil'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'rebrand-blizzard-app': [
'resources/avatars/*.png',
'resources/icons/*.png',
'resources/images/*.png',
'resources/images/addfrienddialog/*.png',
'logs/log_config.yaml'
],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'rebrand = rebrand-blizzard-app.main:main',
],
},
)
| [
"caleb.pineur@gmail.com"
] | caleb.pineur@gmail.com |
9f723479ced7eb5ee2e1e8c07253a875324fe98a | 67567552292a8747b2491c8de456570e0a684bb3 | /login/locust_file.py | 55d36b385661fc31365e86b36df1ad755a0c9f04 | [] | no_license | Nkr1shna/plalyst | 91c74b73eeff56f0cac071c2baee49da1b2d4b2b | c08de7be1fa453da22219d4a1f053db4e3d05537 | refs/heads/master | 2021-01-21T08:08:37.158388 | 2017-05-01T22:57:13 | 2017-05-01T22:57:13 | 83,337,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
def on_start(self):
self.login()
def login(self):
# GET login page to get csrftoken from it
response = self.client.get('http://localhost:8000/login/')
csrftoken = response.cookies['csrftoken']
# POST to login page with csrftoken
self.client.post('http://localhost:8000/login/',
{'username': 'gouthu123', 'password': 'gouthu123'},
headers={'X-CSRFToken': csrftoken})
@task
def index(self):
self.client.get('/')
@task
def register(self):
response = self.client.get('http://localhost:8000/register/')
csrftoken = response.cookies['csrftoken']
self.client.post('http://localhost:8000/register/',
{'username':'achyuth','email':'achyut@gmail.com','password':'achyuth'},
headers={'X-CSRFToken': csrftoken})
class WebsiteUser(HttpLocust):
task_set = UserBehavior
| [
"noreply@github.com"
] | noreply@github.com |
82e4befdb0ca44be5a7027e1fb020349ea55ed08 | cd8a35c735aa9f08d9a25995a53c1cb144fe2b29 | /kirby/cory/indexer.py | 49fff2aebee34aec07103747b204ed7a17907455 | [] | no_license | jxieeducation/Quick-Hackathon-Side-Project-Experiments-2014 | d684a234815d3992caaccb2878f42f2e847021fc | 343caf0ede537060c85a3b0dabb97a5ed090e0e0 | refs/heads/master | 2021-01-10T09:30:10.917732 | 2015-12-01T04:01:53 | 2015-12-01T04:01:53 | 47,163,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | import requests, json
from requests.auth import HTTPBasicAuth
import sys, getopt
auths = []
auths.append(HTTPBasicAuth('godzillabitch', 'godzillabitch123'))
auths.append(HTTPBasicAuth('ankitsmarty', 'ankitsmarty123'))
auths.append(HTTPBasicAuth('yasmite', 'yasmite123'))
auths.append(HTTPBasicAuth('qwertybitch', 'qwertybitch123'))
auths.append(HTTPBasicAuth('andrewnoobee', 'andrewnoobee123'))
auths.append(HTTPBasicAuth('threelegged', 'threelegged123'))
auths.append(HTTPBasicAuth('ninewindows', 'ninewindows123'))
auths.append(HTTPBasicAuth('twittch', 'twittch123'))
auths.append(HTTPBasicAuth('phenoixtt', 'phenoixtt123'))
auths.append(HTTPBasicAuth('bootcampee', 'bootcampee123'))
auths.append(HTTPBasicAuth('lolmoob', 'lolmoob123'))
auths.append(HTTPBasicAuth('biggyt', 'biggyt123'))
auths.append(HTTPBasicAuth('tomatodude', 'tomatodude123'))
auths.append(HTTPBasicAuth('potatodude', 'potatodude123'))
auths.append(HTTPBasicAuth('friedricedude', 'friedricedude123'))
auths.append(HTTPBasicAuth('chowmeindude', 'chowmeindude123'))
auths.append(HTTPBasicAuth('goodolddude', 'goodolddude123'))
auths.append(HTTPBasicAuth('cryingdude', 'cryingdude123'))
auths.append(HTTPBasicAuth('sennheiserdude', 'sennheiserdude123'))
auths.append(HTTPBasicAuth('beatsdude', 'beatsdude123'))
auths.append(HTTPBasicAuth('bosedude', 'bosedude123'))
auths.append(HTTPBasicAuth('sonydude', 'sonydude123'))
auths.append(HTTPBasicAuth('bobdylann', 'bobdylann123'))
auths.append(HTTPBasicAuth('godizllll', 'godizllll123'))
auths.append(HTTPBasicAuth('jliiii', 'jliiii123'))
auths.append(HTTPBasicAuth('sssssst', 'sssssst123'))
auths.append(HTTPBasicAuth('wwwtt', 'wwwtt123'))
auths.append(HTTPBasicAuth('uoftttt', 'uoftttt123'))
auths.append(HTTPBasicAuth('vanyee', 'vanyee123'))
auths.append(HTTPBasicAuth('yammmettt', 'yammmettt123'))
auth = auths.pop()
def run(num=0):
since = num
scanned_repos = []
while 1:
while 1:
try:
r = requests.get("https://api.github.com/repositories?since=" + str(since), auth= auth)
break
except Exception as e:
auth = auths.pop()
repos = r.json()
for repo in repos:
# print repo['id']
if(repo['id'] in scanned_repos):
continue
language_checker = repo['url'] + "/languages"
while 1:
try:
r = requests.get(language_checker, auth=auth)
languages = r.json()
break
except Exception as e:
auth = auths.pop()
if "Python" in languages.keys():
# if "C" in languages.keys() or "C++" in languages.keys():
r = requests.get(repo['url'], auth= auth)
repo_info = r.json()
if repo_info['size'] <= 1500:
print repo['html_url']
with open("output.txt", "a") as myfile:
myfile.write(repo['html_url'] + "\n")
scanned_repos.append(repo['id'])
since = repos[len(repos) - 1]['id']
if __name__ == "__main__":
if len(sys.argv) == 2:
run(sys.argv[1])
else:
run() | [
"jason.xie@tubemogul.com"
] | jason.xie@tubemogul.com |
68cc05791599dc07362eb92d86edb5a8e01cf008 | 842532af0167dcedcdb1c99393a34f44a0dcaa57 | /KeyWord-Args.py | d3c45a08e33b08306ab709bdbc0ebc6464280386 | [] | no_license | AlBannaTechno/WorkingWithDecorator- | 16d1c73074429716ebceeb4d6853c163351141aa | dad9fea2d7503eadad9f92dfd8641ab4e3582cb7 | refs/heads/master | 2021-08-26T03:53:46.436702 | 2017-11-21T14:38:46 | 2017-11-21T14:38:46 | 111,560,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | def kwArgs(**multikargs):# we can also use predefine object kwargs
for key in multikargs:
print(str(key) +" : " + str(multikargs[key]))
#kwArgs(a=23,b=343,c="3432")
kwArgs()# we also can no passing any thing | [
"Al_Banna_Techno@yahoo.com"
] | Al_Banna_Techno@yahoo.com |
4fc6b19544b0a448d25c536c50a7bd9a9b72336d | 4dcee7dff58a6f0364283787aa7ad1dff16721e1 | /load_model_bert.py | 6cb6cae267ddd4f18678607ac7e645f9fd5ac9c4 | [] | no_license | karthikpuranik11/Masked-LM | ead8bcb5bcaedb8b62b627cc6dab2ce3c5fefcbe | bb049e493bc9968e3c50cac1fe88ebe7c436523f | refs/heads/main | 2023-03-18T21:51:27.842906 | 2021-03-07T17:37:54 | 2021-03-07T17:37:54 | 342,780,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from transformers import BertForMaskedLM
import torch
import torch.nn as nn
class BertPred(nn.Module):
def __init__(self):
super().__init__()
self.bert = BertForMaskedLM.from_pretrained('bert-base-uncased')
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
return self.bert(input_ids=input_ids,labels=labels)
model = BertPred()
model.load_state_dict(torch.load('/path/for/your/saved.bin'))
model.eval() | [
"noreply@github.com"
] | noreply@github.com |
4b17aba114901975c98a5376e5dca216bb72055d | 46fd6a08143d78da4a97e9e3577946ac78c43dc4 | /cerberos/urls.py | 62903c9b86d863603bf500b551d9c461fa6d9ddf | [
"BSD-3-Clause"
] | permissive | jlovison/cerberos | a65edfed5beeca5e6f5ca631dd0749a75c4f941d | 8da69ebdcf134a45e3efbf7827cb9b89f37eca2c | refs/heads/master | 2021-01-21T00:01:10.809572 | 2013-01-20T11:57:14 | 2013-01-20T11:57:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from cerberos.views import *
urlpatterns = patterns('',
)
| [
"adrian@adrima.es"
] | adrian@adrima.es |
56132f0702fab91eda672f8348039b347ca7ceec | 8169081e3ed51fb3c4bdb59856e839b9155b7020 | /python/src/input.py | a3c74316aca1125dbf07a7de3dbb3ea170dc34f4 | [] | no_license | ri003074/HackerRank | 941653d487d9625f0e420de237eea3a0b9ba143b | 5d624a859d5153d3dfb41698443ff92c6f24ddb1 | refs/heads/main | 2023-02-11T21:24:47.421957 | 2020-12-26T02:25:42 | 2020-12-26T02:25:42 | 323,825,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | # val = input()
# val_list = val.split(" ")
# poly = int(input())
# x = val_list[0]
# k = val_list[1]
# print(x)
# print(k)
# result = 0
# for i in range(poly):
# result += x ** (poly - i)
# print(result)
x, k = map(int, input().split())
print(input() == int(k))
print(x)
print(k)
| [
"ri003074@gmail.com"
] | ri003074@gmail.com |
922acf210b4651d39ea9cd56ca4897e55cfa3cc3 | 323ed7b6ba9efc34ee59f5d41e4bde27c57a344d | /fibonacci/fib.py | 77944ce7d8e25b690f5b857b0957c50bd5513c31 | [
"MIT"
] | permissive | jmusila/simple-logic-tests | 89b7711bfd5dbac72687a5b2af5ad20d3f69e686 | 508b0af93e99e3645887fc229718e162ff0c91a0 | refs/heads/master | 2023-04-11T11:43:04.149404 | 2019-11-14T12:43:16 | 2019-11-14T12:43:16 | 215,355,409 | 0 | 0 | MIT | 2021-04-20T18:49:35 | 2019-10-15T17:16:04 | Python | UTF-8 | Python | false | false | 332 | py | """ A fibonnaci is a series of numbers
where each member is formed from the sum of the last two numbers """
def fibonacci(n):
if(n <= 1):
return n
else:
return(fibonacci(n-1)+fibonacci(n-2))
n = int(input("Enter number of terms:"))
print("Fibonacci sequence:")
for i in range(n):
print (fibonacci(i))
| [
"jonathanmusila6@gmail.com"
] | jonathanmusila6@gmail.com |
6f4f8fbbcb0e5c348c98918c383284323a004ea4 | e7a2670b983ae37b4a73ec9db4ce1c7967ae635c | /benchexec/cgroups.py | 5ca4adef56608a870834b68bf6bdf2aaaeb73312 | [
"Apache-2.0"
] | permissive | zmanchun/benchexec | 89bba7b908b1782ad8c771c61ce529ced1c6bce6 | 92427e52840184d51bb88af79e2c10ee5c5fb145 | refs/heads/master | 2021-01-17T17:22:27.917477 | 2017-01-07T16:47:44 | 2017-01-07T16:47:44 | 43,826,931 | 0 | 0 | null | 2015-10-07T15:49:57 | 2015-10-07T15:49:57 | null | UTF-8 | Python | false | false | 15,618 | py | # BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2015 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
# THIS MODULE HAS TO WORK WITH PYTHON 2.7!
import logging
import os
import shutil
import signal
import tempfile
import time
from benchexec import util
# Public API of this module.
__all__ = [
    'find_my_cgroups',
    'find_cgroups_of_process',
    'BLKIO',
    'CPUACCT',
    'CPUSET',
    'FREEZER',
    'MEMORY',
]

# Prefix for the names of the child cgroups that BenchExec creates.
CGROUP_NAME_PREFIX='benchmark_'

# Names of the cgroup subsystems (controllers) used by BenchExec.
BLKIO = 'blkio'
CPUACCT = 'cpuacct'
CPUSET = 'cpuset'
FREEZER = 'freezer'
MEMORY = 'memory'

# All subsystem names we recognize when parsing cgroup mount options.
ALL_KNOWN_SUBSYSTEMS = set([
    # cgroups for BenchExec
    BLKIO, CPUACCT, CPUSET, FREEZER, MEMORY,
    # other cgroups users might want
    'cpu', 'devices', 'net_cls', 'net_prio', 'hugetlb', 'perf_event', 'pids',
])
def find_my_cgroups(cgroup_paths=None):
    """
    Return a Cgroup object with the cgroups of the current process.
    Note that it is not guaranteed that all subsystems are available
    in the returned object, as a subsystem may not be mounted.
    Check with "subsystem in <instance>" before using.
    A subsystem may also be present but we do not have the rights to create
    child cgroups, this can be checked with require_subsystem().
    @param cgroup_paths: If given, use this instead of reading /proc/self/cgroup.
    """
    logging.debug('Analyzing /proc/mounts and /proc/self/cgroup for determining cgroups.')
    cgroup_info = _find_own_cgroups() if cgroup_paths is None \
        else _parse_proc_pid_cgroup(cgroup_paths)
    my_cgroups = dict(cgroup_info)

    subsystem_dirs = {}
    for subsystem, mount in _find_cgroup_mounts():
        # Skip mount points where we do not have any access,
        # e.g. because a parent directory has insufficient permissions
        # (lxcfs mounts cgroups under /run/lxcfs in such a way).
        if os.access(mount, os.F_OK):
            subsystem_dirs[subsystem] = os.path.join(mount, my_cgroups[subsystem])

    return Cgroup(subsystem_dirs)
def find_cgroups_of_process(pid):
    """
    Return a Cgroup object that represents the cgroups of a given process.
    @param pid: id of the process whose cgroups shall be determined
    """
    proc_cgroup_file = '/proc/{}/cgroup'.format(pid)
    with open(proc_cgroup_file, 'rt') as proc_cgroup:
        return find_my_cgroups(proc_cgroup)
def _find_cgroup_mounts():
    """
    Determine from /proc/mounts where each cgroup subsystem is mounted.
    @return a generator of tuples (subsystem, mountpoint)
    """
    try:
        with open('/proc/mounts', 'rt') as mounts:
            for line in mounts:
                # /proc/mounts lines: device mountpoint fstype options ...
                fields = line.split(' ')
                if fields[2] != 'cgroup':
                    continue
                mountpoint = fields[1]
                for option in fields[3].split(','):
                    # mount options include the names of the attached subsystems
                    if option in ALL_KNOWN_SUBSYSTEMS:
                        yield (option, mountpoint)
    except IOError:
        logging.exception('Cannot read /proc/mounts')
def _find_own_cgroups():
    """
    For all subsystems, return the information in which (sub-)cgroup this process is in.
    (Each process is in exactly one cgroup in each hierarchy.)
    @return a generator of tuples (subsystem, cgroup)
    """
    try:
        with open('/proc/self/cgroup', 'rt') as own_cgroup_file:
            for entry in _parse_proc_pid_cgroup(own_cgroup_file):
                yield entry
    except IOError:
        logging.exception('Cannot read /proc/self/cgroup')
def _parse_proc_pid_cgroup(content):
"""
Parse a /proc/*/cgroup file into tuples of (subsystem,cgroup).
@param content: An iterable over the lines of the file.
@return: a generator of tuples
"""
for ownCgroup in content:
#each line is "id:subsystem,subsystem:path"
ownCgroup = ownCgroup.strip().split(':')
try:
path = ownCgroup[2][1:] # remove leading /
except IndexError:
raise IndexError("index out of range for " + str(ownCgroup))
for subsystem in ownCgroup[1].split(','):
yield (subsystem, path)
def kill_all_tasks_in_cgroup(cgroup, kill_process_fn):
    """
    Send signals to all processes in the given cgroup until its tasks file
    is empty, freezing the cgroup (if the freezer subsystem is attached to
    this hierarchy) while reading and signalling the processes.
    @param cgroup: path of the cgroup directory whose tasks shall be killed
    @param kill_process_fn: callable taking (pid, signal) that delivers the signal
    """
    tasksFile = os.path.join(cgroup, 'tasks')
    freezer_file = os.path.join(cgroup, 'freezer.state')

    def try_write_to_freezer(content):
        # Freezing stops the tasks, so they cannot spawn new children or
        # exit/restart between reading the tasks file and sending the signal.
        try:
            util.write_file(content, freezer_file)
        except IOError:
            pass # expected if freezer not enabled, we try killing without it

    i = 0
    while True:
        i += 1
        # TODO We can probably remove this loop over signals and just send
        # SIGKILL. We added this loop when killing sub-processes was not reliable
        # and we did not know why, but now it is reliable.
        for sig in [signal.SIGKILL, signal.SIGINT, signal.SIGTERM]:
            try_write_to_freezer('FROZEN')
            with open(tasksFile, 'rt') as tasks:
                task = None
                for task in tasks:
                    task = task.strip()
                    if i > 1:
                        logging.warning('Run has left-over process with pid %s '
                                        'in cgroup %s, sending signal %s (try %s).',
                                        task, cgroup, sig, i)
                    kill_process_fn(int(task), sig)

                if task is None:
                    return # No process was hanging, exit
            try_write_to_freezer('THAWED')
            # wait for the process to exit, this might take some time;
            # back off a little longer with each retry
            time.sleep(i * 0.5)
def remove_cgroup(cgroup):
    """
    Delete the given cgroup directory, which must not contain any tasks anymore.
    Failures are logged as warnings instead of raising.
    """
    if not os.path.exists(cgroup):
        logging.warning('Cannot remove CGroup %s, because it does not exist.', cgroup)
        return
    assert os.path.getsize(os.path.join(cgroup, 'tasks')) == 0
    # rmdir sometimes fails spuriously while the kernel still considers the
    # cgroup busy, so make a second attempt before giving up.
    last_error = None
    for _attempt in range(2):
        try:
            os.rmdir(cgroup)
            return
        except OSError as e:
            last_error = e
    logging.warning("Failed to remove cgroup %s: error %s (%s)",
                    cgroup, last_error.errno, last_error.strerror)
def _register_process_with_cgrulesengd(pid):
"""Tell cgrulesengd daemon to not move the given process into other cgroups,
if libcgroup is available.
"""
# Logging/printing from inside preexec_fn would end up in the output file,
# not in the correct logger, thus it is disabled here.
from ctypes import cdll
try:
libcgroup = cdll.LoadLibrary('libcgroup.so.1')
failure = libcgroup.cgroup_init()
if failure:
pass
#print('Could not initialize libcgroup, error {}'.format(success))
else:
CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1
failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN)
if failure:
pass
#print('Could not register process to cgrulesndg, error {}. '
# 'Probably the daemon will mess up our cgroups.'.format(success))
except OSError:
pass
#print('libcgroup is not available: {}'.format(e.strerror))
class Cgroup(object):
def __init__(self, cgroupsPerSubsystem):
assert set(cgroupsPerSubsystem.keys()) <= ALL_KNOWN_SUBSYSTEMS
assert all(cgroupsPerSubsystem.values())
self.per_subsystem = cgroupsPerSubsystem # update self.paths on every update to this
self.paths = set(cgroupsPerSubsystem.values()) # without duplicates
def __contains__(self, key):
return key in self.per_subsystem
    def __getitem__(self, key):
        """Return the cgroup directory for the given subsystem (KeyError if absent)."""
        return self.per_subsystem[key]
    def __str__(self):
        # Show the set of distinct cgroup directories this instance manages.
        return str(self.paths)
def require_subsystem(self, subsystem):
"""
Check whether the given subsystem is enabled and is writable
(i.e., new cgroups can be created for it).
Produces a log message for the user if one of the conditions is not fulfilled.
If the subsystem is enabled but not writable, it will be removed from
this instance such that further checks with "in" will return "False".
@return A boolean value.
"""
if not subsystem in self:
logging.warning('Cgroup subsystem %s is not enabled. Please enable it with '
'"sudo mount -t cgroup none /sys/fs/cgroup".',
subsystem)
return False
try:
test_cgroup = self.create_fresh_child_cgroup(subsystem)
test_cgroup.remove()
except OSError as e:
self.paths = set(self.per_subsystem.values())
logging.warning('Cannot use cgroup hierarchy mounted at {0} for subsystem {1}, '
'reason: {2}. '
'If permissions are wrong, please run "sudo chmod o+wt \'{0}\'".'
.format(self.per_subsystem[subsystem], subsystem, e.strerror))
del self.per_subsystem[subsystem]
return False
return True
    def create_fresh_child_cgroup(self, *subsystems):
        """
        Create child cgroups of the current cgroup for at least the given subsystems.
        @param subsystems: names of subsystems, all of which must be present in this instance
        @return: A Cgroup instance representing the new child cgroup(s).
        """
        assert set(subsystems).issubset(self.per_subsystem.keys())
        createdCgroupsPerSubsystem = {}
        # Several subsystems may be mounted in the same hierarchy (same parent
        # directory); create only one child directory per distinct parent.
        createdCgroupsPerParent = {}
        for subsystem in subsystems:
            parentCgroup = self.per_subsystem[subsystem]
            if parentCgroup in createdCgroupsPerParent:
                # reuse already created cgroup
                createdCgroupsPerSubsystem[subsystem] = createdCgroupsPerParent[parentCgroup]
                continue

            # mkdtemp gives us a uniquely-named directory, which the kernel
            # turns into a fresh child cgroup of parentCgroup.
            cgroup = tempfile.mkdtemp(prefix=CGROUP_NAME_PREFIX, dir=parentCgroup)
            createdCgroupsPerSubsystem[subsystem] = cgroup
            createdCgroupsPerParent[parentCgroup] = cgroup

            # add allowed cpus and memory to cgroup if necessary
            # (otherwise we can't add any tasks)
            def copy_parent_to_child(name):
                shutil.copyfile(os.path.join(parentCgroup, name), os.path.join(cgroup, name))
            try:
                copy_parent_to_child('cpuset.cpus')
                copy_parent_to_child('cpuset.mems')
            except IOError:
                # expected to fail if cpuset subsystem is not enabled in this hierarchy
                pass

        return Cgroup(createdCgroupsPerSubsystem)
def add_task(self, pid):
    """
    Add a process to the cgroups represented by this instance.
    @param pid: the process id to move into each cgroup.
    """
    # Presumably registers the pid with the cgrulesengd daemon so it is not
    # moved elsewhere afterwards (helper defined elsewhere in this module).
    _register_process_with_cgrulesengd(pid)
    for cgroup in self.paths:
        # Writing a pid into the 'tasks' file moves the process into the cgroup.
        with open(os.path.join(cgroup, 'tasks'), 'w') as tasksFile:
            tasksFile.write(str(pid))
def get_all_tasks(self, subsystem):
    """
    Yield the PID (as int) of every process currently in this cgroup
    for the given subsystem, read from the cgroup's 'tasks' file.
    """
    tasks_path = os.path.join(self.per_subsystem[subsystem], 'tasks')
    with open(tasks_path, 'r') as tasks_file:
        for entry in tasks_file:
            yield int(entry)
def kill_all_tasks(self, kill_process_fn):
    """
    Kill all tasks in this cgroup forcefully.
    @param kill_process_fn: callable used to deliver the kill signal to a PID
        (passed through to the module-level kill_all_tasks_in_cgroup helper).
    """
    for cgroup in self.paths:
        kill_all_tasks_in_cgroup(cgroup, kill_process_fn)
def kill_all_tasks_recursively(self, kill_process_fn):
    """
    Kill all tasks in this cgroup and all its children cgroups forcefully.
    Additionally, the children cgroups will be deleted.
    """
    def kill_all_tasks_in_cgroup_recursively(cgroup):
        # Child cgroups show up as sub-directories of the cgroup directory.
        files = [os.path.join(cgroup,f) for f in os.listdir(cgroup)]
        subdirs = filter(os.path.isdir, files)
        for subCgroup in subdirs:
            # Depth-first: kill/remove the deepest children before their parent
            # (a cgroup directory can only be removed when it has no children).
            kill_all_tasks_in_cgroup_recursively(subCgroup)
            remove_cgroup(subCgroup)
        kill_all_tasks_in_cgroup(cgroup, kill_process_fn)
    for cgroup in self.paths:
        kill_all_tasks_in_cgroup_recursively(cgroup)
def has_value(self, subsystem, option):
    """
    Return True if the given option file exists in the given subsystem,
    regardless of whether it is readable, writable, or both.
    Pass the option name without the subsystem prefix.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    # Option files are named "<subsystem>.<option>", e.g. "cpu.shares".
    option_path = os.path.join(self.per_subsystem[subsystem], subsystem + '.' + option)
    return os.path.isfile(option_path)
def get_value(self, subsystem, option):
    """
    Read the given value from the given subsystem.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    @return the file content as returned by util.read_file.
    """
    assert subsystem in self, 'Subsystem {} is missing'.format(subsystem)
    # Option files are named "<subsystem>.<option>", e.g. "cpuacct.usage".
    return util.read_file(self.per_subsystem[subsystem], subsystem + '.' + option)
def get_file_lines(self, subsystem, option):
    """
    Yield the lines (newline included) of the given option file
    from the given subsystem.
    Pass the option name without the subsystem prefix.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    option_path = os.path.join(self.per_subsystem[subsystem], subsystem + '.' + option)
    with open(option_path) as option_file:
        yield from option_file
def get_key_value_pairs(self, subsystem, filename):
    """
    Read the lines of the given file from the given subsystem
    and split the lines into key-value pairs.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    @return whatever util.read_key_value_pairs_from_file returns for the file.
    """
    assert subsystem in self
    return util.read_key_value_pairs_from_file(self.per_subsystem[subsystem], subsystem + '.' + filename)
def set_value(self, subsystem, option, value):
    """
    Write the given value for the given subsystem.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    @param value: converted with str() before writing.
    """
    assert subsystem in self
    util.write_file(str(value), self.per_subsystem[subsystem], subsystem + '.' + option)
def remove(self):
    """
    Remove all cgroups this instance represents from the system.
    This instance is afterwards not usable anymore!
    """
    for cgroup in self.paths:
        remove_cgroup(cgroup)
    # Drop the attributes so any later use fails loudly instead of acting
    # on deleted cgroups.
    del self.paths
    del self.per_subsystem
def read_cputime(self):
    """
    Read the cputime usage of this cgroup. CPUACCT cgroup needs to be available.
    @return cputime usage in seconds
    """
    # 'cpuacct.usage' reports nanoseconds; convert to seconds.
    return float(self.get_value(CPUACCT, 'usage'))/1000000000 # nano-seconds to seconds
def read_allowed_memory_banks(self):
    """Get the list of all memory banks allowed by this cgroup.

    Parses the 'cpuset.mems' value (e.g. "0-1,3") via util.parse_int_list.
    """
    return util.parse_int_list(self.get_value(CPUSET, 'mems'))
| [
"uni@philippwendler.de"
] | uni@philippwendler.de |
57670a608951f0aff03719d78f74fed6b1982d32 | 0e030501f9ca9d7274f9fb4a387deb9a7bf9036b | /message/server.py | b5209b5abf39320d3ee564b28dd2dde0f2f85a8a | [
"MIT"
] | permissive | stilvoid/microservices-workshop | 71ae765cad812af36a2e608eb5b3c8fda26792f4 | 0cfe8f2206bcc3bd333266d9f0d46908651c34ce | refs/heads/master | 2016-09-05T17:08:41.470039 | 2015-08-03T13:25:36 | 2015-08-03T13:25:36 | 39,803,714 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from bottle import *
@get("/messages")
def get_messages(mongodb):
    """GET /messages: return a canned list of message records.

    Placeholder implementation — the mongodb plugin argument is not used yet.
    """
    sample_messages = []
    for idx in ("1", "2"):
        sample_messages.append({
            "id": "message id " + idx,
            "user": "user id " + idx,
            "room": "room id " + idx,
            "text": "message text " + idx,
        })
    return {"messages": sample_messages}
@post("/messages")
def create_message(mongodb):
    """POST /messages: pretend to create a message and echo a canned record.

    Placeholder implementation — the mongodb plugin argument is not used yet.
    """
    response = {"id": "room id", "user": "user id"}
    response["room"] = "room id"
    response["text"] = "message text"
    return response
| [
"steve.engledow@proxama.com"
] | steve.engledow@proxama.com |
31be2ad10c3145355839be95755ae95341a9054d | f7b123a3e0f84a787734258f26004bcdb8b439e4 | /Python/scripts/utils/save_pretrained_weight.py | 687a84b437f4cf30a8d7e5d85fe57ebcdbdfbd82 | [] | no_license | teodortotev/3D-Object-Pose-Recovery | ca26d65420487e921daa62a65284edc0e2bbb019 | 19f7e4f77b22104c950ba116c7c823e2409334b4 | refs/heads/master | 2020-12-18T05:07:01.454692 | 2020-05-31T12:17:38 | 2020-05-31T12:17:38 | 235,317,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | import torch
import torchvision as torchvision
def save_pretrained_weight(save_path="/home/teo/storage/Data/pretrained_weight_DeepLab101"):
    """Download the DeepLabV3-ResNet101 pretrained weights, strip the final
    classifier layer, and save the remaining state dict to disk.

    The 21-class output head ('classifier.4.*') is removed so the checkpoint
    can later be loaded into a model with a different number of classes.

    @param save_path: destination passed to torch.save. Generalized from the
        previously hard-coded path; the default keeps the old behavior.
    """
    # A FCN-ResNet101 variant was used here previously; kept DeepLabV3 as the
    # active choice and dropped the dead commented-out alternatives.
    model = torchvision.models.segmentation.deeplabv3_resnet101(
        pretrained=True, progress=True, num_classes=21, aux_loss=None)
    state_dict = model.state_dict()
    del state_dict['classifier.4.weight']
    del state_dict['classifier.4.bias']
    torch.save(state_dict, save_path)
if __name__ == '__main__':
save_pretrained_weight()
| [
"teo@anubis01.eng.ox.ac.uk"
] | teo@anubis01.eng.ox.ac.uk |
773863c3bd0ed062f97b61301d61c084eefefa43 | 8d99c81acb90c23c37f5ec2eba2509cfe9d872b5 | /Codigo.py | 463e645a26566bc8ed642f6c7ead27692b7901c3 | [] | no_license | lalzatem/Proyecto_Conmutacion | 69b7f1f00973ca8d00bb911c35af5b1c8cc86737 | 3e2ae4a2fb60f071dd39fe7582761fc65957cefd | refs/heads/master | 2020-05-27T18:26:50.959111 | 2019-05-17T13:15:08 | 2019-05-17T13:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,227 | py | """
Importacion de las librerias necesarias
"""
import csv
import time
import os
import threading
from tkinter import font
from tkinter import *
import tkinter as tk
from tkinter import filedialog
from tkinter import messagebox
import serial
"""
Creacion de variables globales para poder usarlas en cualquier metodo
Ruta1: String donde se almacena la ruta del archivo1 que queremos abrir
Ruta2: String donde se almacena la ruta del archivo2 que queremos abrir
Filas_totales: entero donde se almacena el tamaño total del archivo
Generado: es una variable tipo booleano que nos dice si la tabla fue creada por primera vez o no
Tabla: es un diccionario donde se almacena toda la informacion de los archivos
Base: creacion de la interfaz grafica
frame: creacion de un frame
frame2: creacion de un frame
Letra: para darle tipo de letra y el tamaño
ModificacionOld: creacion de un arreglo que almacena fechas de modificacion de los archivos
"""
Ruta1=""
Ruta2=""
Filas_totales=0
Generado=False
Tabla={}
Base=Tk()
frame = Frame(Base)
frame2 = Frame(Base)
Letra = font.Font(family="Times", size=10, weight='bold')
ModificacionOld=[0,0]
"""
Creacion del metodo principal, donde llamamos a el metodo Interface
"""
def main():
    """Program entry point: build the Tk interface and start its event loop."""
    Interface()
"""
Creacion del metodo Arduino que recibe dos parametros, el valor que se encuentra en el pedido y en modulo.
Me permite establecer la comunicacion con el arduino, el cual recibe el valor del swiche y eso se
ve reflejado en la accion de actualizar o no la tabla
"""
def Arduino(Valor, Modulo):
    """Open the Arduino on COM15, read one line and map it to a bool.

    Returns True for "1", False for "0", and None for any other value.
    Note: the Valor/Modulo parameters are currently unused (kept for callers).

    BUG FIX: the original called arduino.close() after the return statements,
    so it was unreachable and the serial port was never closed; the port is
    now closed in a finally block on every path.
    """
    arduino = serial.Serial('COM15', 9600)
    try:
        time.sleep(2)  # give the board time to reset after opening the port
        rawString = arduino.readline().decode('ascii')
        rawString = (int)(rawString)
    finally:
        arduino.close()
    if rawString == 1:
        return True
    elif rawString == 0:
        return False
"""
Creacion del metodo Verificar que revisa si ha sido cambiada la fecha y la almacena en el arreglo ModificacioOld
para luego cambiar este valor en la actualizacion
"""
def Verificar():
    """Return True when the mtime of 'moduloa.txt' changed since the last call.

    The previous modification time is kept in the module-level list
    ModificacionOld ([0] = last seen, [1] = current probe). The very first
    call only records the initial mtime and returns False.
    """
    if(ModificacionOld[0]==0):
        ModificacionOld[0]=os.path.getmtime("moduloa.txt")
        return False
    else:
        ModificacionOld[1]=(os.path.getmtime("moduloa.txt"))
        if(ModificacionOld[0]!=ModificacionOld[1]):
            ModificacionOld[0]=ModificacionOld[1]
            return True
        else:
            return False
"""
Creacion de nuestra interfaz grafica, con frames, y botones.
"""
def Interface():
    """Build the main window: two frames, file pickers for the two CSVs
    (orders and equivalences) and the 'Generar' button, then run mainloop.
    """
    global Ruta1,Ruta2,Filas_totales,Generado,frame,Letra
    Ruta1=StringVar()
    Ruta2=StringVar()
    Base.title("Menu")
    Base.resizable(False,False)
    frame.config(width=300,height=300,relief="sunken",bd=25)
    frame2.config(width=300,height=300,relief="sunken",bd=25,bg="#808080")
    frame2.pack(side=BOTTOM,anchor=SW)
    frame.pack(side=RIGHT,anchor=NW)
    Base.configure(width=800, height=800)
    Grid.rowconfigure(frame, 0, weight=0)
    Grid.columnconfigure(frame,0, weight=0)
    # NOTE(review): .grid() returns None, so Boton1/Boton2/... and the labels
    # below all hold None; the widgets still exist, only the references are lost.
    Boton1=(Button(frame, text="Abrir",command=lambda:Abrir_Tablas(2),width=9, height=2,bg="snow4",borderwidth=5)).grid(row=0,column=2,sticky="nsew")
    Boton2=(Button(frame, text="Abrir",command=lambda:Abrir_Tablas(1),width=9, height=2,bg="snow4",borderwidth=5)).grid(row=1,column=2,sticky="nsew")
    Boton3=(Button(frame, text="Generar",command=lambda:Ruta_especifica(),width=6, height=1,bg="snow4",borderwidth=5)).grid(row=2,column=1,sticky="nsew")
    Label_1=Label(frame, text="Pedidos",width=10, height=2,font=Letra).grid(row=0,column=0,sticky="nsew")
    Label_2=Label(frame, text="Equivalencia",width=10, height=2,font=Letra).grid(row=1,column=0,sticky="nsew")
    entry_1=Label(frame,textvariable=Ruta2).grid(row=0,column=1,sticky="nsew")
    entry_2=Label(frame,textvariable=Ruta1).grid(row=1,column=1,sticky="nsew")
    Base.mainloop()
"""
Creacion de la tabla con la informacion de las tablas que se muestra en el frame de la parte inferior
"""
def Tabla_Grafico():
    """Draw the results table in frame2: a header row (grid row 5), one row
    per entry of the global dict Tabla (from grid row 7 down), and swap the
    'Generar' button for an 'Actualizar' button.
    """
    global Filas_totales,Letra
    if(Filas_totales>0):
        # Header cells, white-on-black, all on grid row 5.
        Label_2=Label(frame2, text="Total",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=8,sticky="nsew")
        Label_3=Label(frame2, text="Pedido",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=0,sticky="nsew")
        Label_4=Label(frame2, text="Modulo",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=1,sticky="nsew")
        Label_5=Label(frame2, text="Posicion",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=2,sticky="nsew")
        Label_6=Label(frame2, text="Referencia",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=3,sticky="nsew")
        Label_7=Label(frame2, text="Cantidad",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=4,sticky="nsew")
        Label_8=Label(frame2, text="Numero",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=5,sticky="nsew")
        Label_9=Label(frame2, text="Fecha",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=6,sticky="nsew")
        Label_11=Label(frame2, text="Hora",width=10, height=2,font=Letra,fg="white",bg="black",relief="solid",borderwidth=1).grid(row=5,column=7,sticky="nsew")
        # Data rows start at grid row 7; one Label per cell of each Tabla row.
        Auxiliar=7
        for Indices in Tabla:
            Auxiliar2=0
            for Valores in Tabla[Indices]:
                cell=Label(frame2,width=10,text=Valores,relief="solid",borderwidth=1)
                cell.grid(row=Auxiliar,column=Auxiliar2)
                Auxiliar2+=1
            Auxiliar+=1
        # Replace 'Generar' with 'Actualizar' (starts the background watcher).
        Boton3=(tk.Button(frame, text="Actualizar",command=lambda:prueba(),width=6, height=1,bg="snow4",borderwidth=5)).grid(row=2,column=1,sticky="nsew")
"""
Creacion de los hilos
"""
def prueba():
    """Start the background watcher thread that polls for file changes."""
    hilo2 = threading.Thread(target=prueba2)
    hilo2.start()
"""
Creacion de los hilos
"""
def prueba2():
    """Poll every 2 seconds forever; refresh the table whenever
    'moduloa.txt' has been modified (detected by Verificar()).
    """
    while True:
        time.sleep(2)
        if Verificar():
            Actualizar()
"""
Metodo que actualiza la tabla
"""
def Actualizar():
    """Re-read the hardware state via Tiempo() and redraw every data cell
    of the table in frame2 (data rows start at grid row 7, below the header).
    """
    Tiempo()
    Auxiliar=7
    for Indices in Tabla:
        Auxiliar2=0
        for Valores in Tabla[Indices]:
            cell=Label(frame2,text=Valores,width=10,relief="solid",borderwidth=1)
            cell.grid(row=Auxiliar,column=Auxiliar2)
            Auxiliar2+=1
        Auxiliar+=1
"""
Metodo para poder abrir los archivos que se necesitan
"""
def Abrir_Tablas(a):
    """Show a CSV file-open dialog and store the chosen path.

    a == 1 selects the equivalences file (stored in Ruta1); any other value
    selects the orders file (stored in Ruta2).
    """
    global Ruta1, Ruta2
    if (a==1) :
        Archivo= filedialog.askopenfilename(title="Abrir",initialdir="C:\\Users\james\Desktop\Programacion\Conmutacion\Parcial",filetypes=(("Ficheros de CSV","*.csv"),("Todos los archivos","*.*")))
        Ruta1.set(Archivo)
    else:
        Archivo= filedialog.askopenfilename(title="Abrir",initialdir="C:\\Users\james\Desktop\Programacion\Conmutacion\Parcial",filetypes=(("Ficheros de CSV","*.csv"),("Todos los archivos","*.*")))
        Ruta2.set(Archivo)
"""
Metodo que selecciona la ruta especifica en el arreglo ya seleccionado
"""
def Ruta_especifica():
    """Validate that both CSV paths were chosen (and differ), reduce each to
    its basename, then load them via Leer_Tablas. Does nothing once the
    table has already been generated (Generado is True).
    """
    global Ruta1, Ruta2
    if Generado==False:
        if((Ruta1.get()=="" or Ruta2.get()=="")or(Ruta1.get()==Ruta2.get())):
            messagebox.showinfo("Informe error","Ingresa ambas direcciones")
        else:
            # Keep only the last path component (the file name).
            Ruta1_especifica=Ruta1.get().split("/")
            Ruta2_especifica=Ruta2.get().split("/")
            Ruta1_especifica=Ruta1_especifica[len(Ruta1_especifica)-1]
            Ruta2_especifica=Ruta2_especifica[len(Ruta2_especifica)-1]
            Leer_Tablas(Ruta2_especifica,Ruta1_especifica)
"""
Metodo que lee los archivos seleccionados
"""
def Leer_Tablas(Ruta_pedidos,Ruta_equivalencias):
    """Load the equivalences CSV and the orders CSV and merge them into the
    global dict Tabla (keyed by running row number). On the first successful
    load, draw the table (Tabla_Grafico) and mark Generado.

    @param Ruta_pedidos: orders CSV; columns include Pedido, Modulo, Posicion,
        Referencia, Cantidad (read via DictReader).
    @param Ruta_equivalencias: CSV whose first two columns map a Posicion to
        its equivalent value (header row skipped).
    """
    global Tabla,Filas_totales,Generado
    Lista_AUX=[]
    with open(Ruta_equivalencias) as csv_File:
        csv_reader = csv.reader(csv_File,delimiter=',')
        next(csv_reader,None)  # skip the header row
        for row in csv_reader:
            Lista_AUX.append([row[0],row[1]])
    with open(Ruta_pedidos) as File:
        reader = csv.DictReader(File)
        for row in reader:
            Filas_totales+=1
            for Union in Lista_AUX:
                if Union[0]==row['Posicion']:
                    if Filas_totales not in Tabla:
                        # Row layout: [Pedido, Modulo, Posicion, Referencia,
                        # Cantidad, equivalence, date, time, total placeholder].
                        Tabla[Filas_totales]=[row['Pedido']]+[row['Modulo']]+[row['Posicion']]+[row['Referencia']]+[row['Cantidad']]+[Union[1]]+[0]+[0]+[0]
    if(Generado==False):
        Generado=True
        Tabla_Grafico()
"""
Metodo que obtiene la fecha y hora del sistema cuando se hace alguna actualizacion
"""
def Tiempo():
    """Read 'moduloa.txt' (second line holds a module and a value, separated
    by a tab); if the Arduino confirms the switch state, stamp the matching
    Tabla row with the current time (index 7) and date (index 6).
    """
    # NOTE(review): the file handle is never closed; consider a with-block.
    Archivo=open('moduloa.txt')
    Temporal=Archivo.read()
    Temporal=Temporal.splitlines()
    Temporal=Temporal[1].split('\t')
    Modulo=Temporal[0].strip()
    Valor=Temporal[1].strip()
    if Arduino(Valor,Modulo):
        for Modulos in Tabla:
            if Modulo==Tabla[Modulos][1] and Tabla[Modulos][0]==Valor:
                print(Valor)
                print(Tabla[Modulos][0])
                Tabla[Modulos][7]= time.strftime("%X")
                Tabla[Modulos][6]= time.strftime("%d/%m/%y")
"jamesmoralesmoreno@gmail.com"
] | jamesmoralesmoreno@gmail.com |
7041d1c2dc96f6974d2adb1f519283cf5f336461 | ad7e963d5393f7c74a6cf7b9dbb868ca17f30d1e | /vsco.py | 268fb87588cad1cf78dcef69e713b434a3a98806 | [] | no_license | trudypainter/vsco-zine | 5d5a3b8aaa7b4d81649e3c0e6d01ff9443e8e8a2 | acf5f2f94f52f12a2dbdab134316968b7dc46d17 | refs/heads/main | 2023-01-29T20:48:52.504960 | 2020-12-10T02:33:29 | 2020-12-10T02:33:29 | 319,768,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,455 | py | from datetime import datetime
import requests, json, time, datetime
################################
## CONSTANTS FOR VSCO CLASS ##
################################
visitvsco = {
"Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"en-US,en;q=0.9",
"Connection":"keep-alive",
"Host":"vsco.co",
"Upgrade-Insecure-Requests":"1",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
}
visituserinfo = {
"Accept":"*/*",
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"en-US,en;q=0.9",
"Connection":"keep-alive",
"Host":"vsco.co",
"Referer":"http://vsco.co/bob/images/1",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36",
}
media = {
"Accept":"*/*",
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"en-US,en;q=0.9",
"Connection":"keep-alive",
"Host":"vsco.co",
"Referer":"http://vsco.co/bob/images/1",
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36",
"X-Client-Build":"1",
"X-Client-Platform":"web",
}
################################
## VSCO CLASS ##
################################
class VSCO:
    """Minimal scraper for a public VSCO profile's media listing.

    NOTE: the constructor performs several network requests (via requests)
    to obtain the session cookie, the site id, and up to 1000 media entries.
    """
    def __init__(self, username):
        # INITIALIZATION COOKIES/UID
        self.username = username
        self.session = requests.Session()
        # Priming request: sets the 'vs' cookie used as uid below.
        self.session.get("http://vsco.co/content/Static/userinfo?callback=jsonp_%s_0" %(str(round(time.time()*1000))),\
            headers=visituserinfo)
        self.uid = self.session.cookies.get_dict()['vs']
        res = self.session.get("http://vsco.co/ajxp/%s/2.0/sites?subdomain=%s" % (self.uid,self.username))
        self.siteid = res.json()["sites"][0]["id"]
        # GET JSON OF ALL IMAGE INFO (first page, up to 1000 entries)
        self.mediaurl = "http://vsco.co/ajxp/%s/2.0/medias?site_id=%s" %(self.uid,self.siteid)
        self.image_json = self.session.get(self.mediaurl,params={"size":1000,"page":1},\
            headers=media).json()["media"]
    def __iter__(self):
        # Iterate over the raw media dicts fetched at construction time.
        for image in self.image_json:
            yield image
    def __getitem__(self, ix):
        # Index into the cached media list.
        return self.image_json[ix]
"tpainter@mit.edu"
] | tpainter@mit.edu |
79821afc06e9fe4955277c7a4ff993e631cd78f5 | a95d2c0042729211a8df5eb58128bb4a7a4899e1 | /GoogleMusicOffline-win32-x64/resources/app/server.py | c5ea3f22fb660aeba1e7b3f6b960f8e0c93451c4 | [
"MIT"
] | permissive | Kronos3/SMM_Bins | 9bb231f90be3087f0c99618be3656a57fe7dbd94 | b0aef3a0e36614071702d8375956244e6af1d742 | refs/heads/master | 2021-01-22T23:33:35.593084 | 2017-03-23T02:02:41 | 2017-03-23T02:02:41 | 85,652,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# server.py
#
# Copyright 2016 Andrei Tumbar <atuser@Kronos>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import time
start_time = time.time()
import json, traceback, sys, os, platform, socketserver, socket
sys.path.append("./deps")
sys.path.append(".")
if sys.platform == 'win32':
os.environ["PATH"] += os.pathsep + 'deps/ffmpeg'
from src import handler, gmusic
from http.server import HTTPServer
false = False
true = True
class cfg:
    """Static server configuration (used instead of a config file)."""
    domain = "localhost"
    log = "srv.log"          # log file name
    root = "."               # working directory the server chdirs into
    subdomains = {}
    port = 8000
    #ssl = {'cert':'cert.pem', 'key':'key.pem'}
    ssl = False              # SSL disabled; dict of cert/key paths when enabled
    # NOTE(review): redirect/forcewww are the *strings* 'False', not booleans —
    # confirm how consumers test them before changing.
    redirect = 'False'
    forcewww = 'False'
    ip = "localhost"
#def main():
# s = server.WebServerSSL ((cfg.ip, cfg.port), s_handler.WebHandler, cfg.ssl)
# s.configure (cfg)
# s.serve_forever ()
# Pick the TCPServer flavor per platform: Windows has no fork(), so threads
# are used there; elsewhere each request is handled in a forked child.
# (Refactored: the two previously duplicated class bodies are now one class
# with a platform-selected base.)
if sys.platform == "win32":
    _BaseTCPServer = socketserver.ThreadingTCPServer
else:
    _BaseTCPServer = socketserver.ForkingTCPServer

class HTTPMain(_BaseTCPServer):
    allow_reuse_address = 1  # allow quick restart on the same port

    def server_bind(self):
        """Bind the socket and record the resolved server name and port."""
        socketserver.TCPServer.server_bind(self)
        host, port = self.server_address[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port
def run(server_class=HTTPMain, handler_class=handler.postHandler, serve=True, gui=False, debug=False):
    """Create the HTTP server, restore a saved Google Music login if present,
    optionally launch the Electron GUI for the current platform, and serve.

    @param serve: when False, set everything up but do not enter serve_forever.
    @param gui: when True, spawn the bundled Electron binary in the background.
    @param debug: when True, swap in the (insecure) debug request handler.
    """
    server_address = (cfg.ip, cfg.port)
    if debug:
        print ("\nEntered debug\nNote: Debug is insecure\nUSE AT YOUR OWN RISK\n\n")
        handler_class = handler.postHandlerDebug
    httpd = server_class(server_address, handler_class)
    if os.path.isfile('.token'):
        with open ('.token', 'r') as f:
            # SECURITY: eval() on the token file executes arbitrary code if the
            # file is tampered with — consider a safe serialization instead.
            ret = gmusic.load_login (*eval(f.read()))
            f.close()
        if ret:
            gmusic.write_data ()
    handler.MainRHandler.is_logged_in = gmusic.load_oauth_login ()
    if (not os.path.exists ('data')):
        os.mkdir ('data')
    if gui:
        # Launch the matching Electron bundle in the background; only 64-bit
        # builds are shipped.
        if sys.platform == "linux" or sys.platform == "linux2":
            if platform.architecture()[0] == '64bit':
                os.system ("./bin-lin64/electron . &")
            elif platform.architecture()[0] == '32bit':
                #os.system ("./linux32-bin/electron . &")
                raise OSError('32-bit operating systems are not supported yet')
        elif sys.platform == "darwin":
            os.system ("open ./bin-mac64/Electron.app . &")
        elif sys.platform == "win32":
            if platform.architecture()[0] == '64bit':
                os.system ("START /B .\\bin-win64\\electron.exe .")
            elif platform.architecture()[0] == '32bit':
                raise OSError('32-bit operating systems are not supported yet')
    if serve:
        print ("Started server on %s at port %s" % (cfg.ip, cfg.port))
        httpd.serve_forever()
def main (argv):
    """Parse the simple command-line flags and start the server.

    Flags: --test (set up but do not serve), --gui (launch Electron GUI),
    --debug (use the insecure debug handler).
    """
    os.chdir(cfg.root)
    s = True
    g = False
    d = False
    if '--test' in argv:
        s = False
    if '--gui' in argv:
        g = True
    if '--debug' in argv:
        d = True
    run (serve=s, gui=g, debug=d)
if __name__ == "__main__":
    try:
        main(sys.argv)
        sys.exit(0)
    except SystemExit:
        sys.exit(0)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt;
        # narrowed to Exception so Ctrl-C still terminates the process.
        traceback.print_exc(file=sys.stdout)
| [
"dovakhiin1359@gmail.com"
] | dovakhiin1359@gmail.com |
dd7bda05324df1c30a70004bdcf169a29b9a972f | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-SpriteKit/PyObjCTest/test_skview.py | 96b626096078794678e9693ea10f2b0c41775b58 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,319 | py | import sys
from PyObjCTools.TestSupport import *
import objc
if sys.maxsize > 2 ** 32:
import SpriteKit
class TestSKViewHelper(SpriteKit.NSObject):
    # Stand-in object whose selector signature is introspected by the
    # SKViewDelegate protocol checks in TestSKView below.
    def view_shouldRenderAtTime_(self, v, t):
        return 1
class TestSKView(TestCase):
    """PyObjC metadata tests for SpriteKit.SKView: verify that ObjC BOOL
    arguments/results are bridged as Python booleans per macOS version.
    """
    @min_os_level("10.9")
    def testMethods10_9(self):
        # Properties introduced in 10.9: setter takes BOOL, getter returns BOOL.
        self.assertArgIsBOOL(SpriteKit.SKView.setPaused_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.isPaused)
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsFPS_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsFPS)
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsDrawCount_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsDrawCount)
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsNodeCount_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsNodeCount)
        self.assertArgIsBOOL(SpriteKit.SKView.setAsynchronous_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.isAsynchronous)
        self.assertArgIsBOOL(SpriteKit.SKView.setIgnoresSiblingOrder_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.ignoresSiblingOrder)
    @min_os_level("10.10")
    def testMethods10_10(self):
        # Properties introduced in 10.10.
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsFields_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsFields)
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsPhysics_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsPhysics)
        self.assertArgIsBOOL(SpriteKit.SKView.setShowsQuadCount_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.showsQuadCount)
        self.assertArgIsBOOL(SpriteKit.SKView.setAllowsTransparency_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.allowsTransparency)
        self.assertArgIsBOOL(SpriteKit.SKView.setShouldCullNonVisibleNodes_, 0)
        self.assertResultIsBOOL(SpriteKit.SKView.shouldCullNonVisibleNodes)
    @min_sdk_level("10.12")
    def testProtocols(self):
        # SKViewDelegate protocol (10.12 SDK): check the delegate callback's
        # BOOL result and double (CFTimeInterval) time argument.
        objc.protocolNamed("SKViewDelegate")
        self.assertResultIsBOOL(TestSKViewHelper.view_shouldRenderAtTime_)
        self.assertArgHasType(
            TestSKViewHelper.view_shouldRenderAtTime_, 1, objc._C_DBL
        )
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
3acdc6cbfdcc762fcc68330c815650f4c5ff865b | 7fc0279ca5427a0beb9361419f469fed85b199f5 | /UDP/tsUclient.py | 09fa28ebf07fe1acead529fec00a270241834766 | [] | no_license | huzai9527/python_network | 5a4665dea06d46a9fa1345a10d2307dfd1885be2 | fecdb66aa4bb558f0fd03b9de90a5593b8326172 | refs/heads/master | 2022-11-27T16:23:12.504636 | 2020-07-24T07:51:01 | 2020-07-24T07:51:01 | 278,091,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | """
创建客户端
cs = socket() 创建客户端套接字
comm_loop:
cs.sendto()/cs.recvfrom() 对话(接受/发送)
cs.close()
"""
from socket import *
# Simple UDP echo client: read lines from stdin, send each datagram to the
# server, print the reply. An empty input line or an empty reply stops the loop.
HOST = '192.168.0.117'
PORT = 23456
BUFSIZE = 1024
ADDR = (HOST, PORT)
udpClientSock = socket(AF_INET, SOCK_DGRAM)
while True:
    data = input('> ')
    if not data:
        break
    udpClientSock.sendto(data.encode(), ADDR)
    # recvfrom also rebinds ADDR to the peer address that answered.
    data, ADDR = udpClientSock.recvfrom(BUFSIZE)
    if not data:
        break
    # NOTE(review): data is bytes here, so this prints e.g. b'...'.
    print(data)
udpClientSock.close()
"369212851@qq.com"
] | 369212851@qq.com |
c787115220d439682a1c8835e8413108a2beffe9 | 3ca0d23d0d10dd0333f62373fd558bff3edea237 | /analisis/bigml_clm_v02.py | 7927c97fc453826fbfef8c9c938216ef79b457e0 | [] | no_license | charlielm49/07-2017JAN30-printro | ebb51acf5e3267745ced514c1a5cae5f6fe07ea7 | b95d02d3f9c6f5e4edb312a66e7f9653f3c0eca3 | refs/heads/master | 2021-01-17T14:59:57.484038 | 2017-03-14T21:57:05 | 2017-03-14T21:57:05 | 84,100,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import pymysql.cursors
import glob
import os
BIGML_USERNAME="charlielm1015"
BIGML_API_KEY="90bb088a4d01d81953df8aecfb4ac3a5850423ed"
import pprint
from bigml.api import BigML
api = BigML(BIGML_USERNAME, BIGML_API_KEY)
# Base data directory plus the id of the current worker process (AWS layout).
path1 = "/opt/aws-ml/.data/worker/"
path2 = "5318f322-6e90-4952-b62f-19f5e21c3720"
path = path1 + path2
# BUG FIX: the original ran os.system("cd " + path), which changes directory
# only inside a throw-away subshell and leaves this process's cwd untouched,
# so the relative 'full_f.csv' below would not resolve. Change this
# process's working directory instead.
os.chdir(path)

# Upload the CSV, build a dataset and a model, then request one prediction.
source = api.create_source('full_f.csv')
api.ok(source)  # to ensure that objects are finished before using them
dataset = api.create_dataset(source)
api.ok(dataset)
model = api.create_model(dataset)
api.ok(model)
prediction = api.create_prediction(model, \
    {'8919-forum-discussion_view': 5, '8919-forum-post_created': 2.5})
| [
"charlielm1015@hotmail.com"
] | charlielm1015@hotmail.com |
3753166f54ef09a7824e45477bb2a35d976c2953 | aa2e2765185122be8f5cff48c7fbce999f02435a | /script/mnist_bnlr.py | 46463a23764446b0b7091f2888bc262ae926fe9d | [] | no_license | Lightmann/BatchNormGD | ee904a944a757438040c9203163a2d108da556c0 | 22225684cc3525073ca8ecf4712fa4226f39743c | refs/heads/master | 2020-05-20T11:15:27.161145 | 2019-05-08T06:37:32 | 2019-05-08T06:37:32 | 185,545,206 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from ModelAndTest import *
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST once, then run five independent learning-rate sweeps; each run
# resets the TF graph, sweeps 40 log-spaced rates, and saves the results.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
for i in range(5):
    tf.reset_default_graph()  # start each repetition from a fresh graph
    model = Model_mnist_bn()
    taskname = 'mnist_bnlr_T%d' % i
    #tensorboard_dir = '/home/lightmann/Results/%s/' % taskname
    tensorboard_dir = '/hpctmp/matcyon/Results/%s/' % taskname
    test = Test()
    # Sweep learning rates 1e-3 .. 1e3 (40 points), 600 steps each.
    test.test_lr(model=model, dataset=mnist,
                 lr_list=np.logspace(-3,3,40), max_step=600,
                 logdir=tensorboard_dir)
    test.value_check()
    data_save([test.lr_list,test.value_history_np], '%s.dat'%taskname)
"noreply@github.com"
] | noreply@github.com |
4ca23ff6fdd410a150963f2a0aeabf250674f2a8 | 15e5cfb245e9f6159c930dcebe149984f837c44c | /Project/keraspatal/layers/embeddings.py | e6578f7e03c92bf58007b4d3ce43c233490cf939 | [] | no_license | Paxanator/Neural-Net-Project | 626a958fadc00c9c08a26b16663dc0253a90dfc4 | 2365055b18f9f0ca31cd7b84d02531fbe43e66a3 | refs/heads/master | 2021-01-25T05:15:31.981655 | 2016-02-20T20:44:56 | 2016-02-20T20:44:56 | 42,752,307 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | from __future__ import absolute_import
import theano
import theano.tensor as T
from .. import activations, initializations, regularizers, constraints
from ..layers.core import Layer, MaskedLayer
from ..utils.theano_utils import sharedX
from ..constraints import unitnorm
class Embedding(Layer):
    '''
    Turn positive integers (indexes) into dense vectors of fixed size.
    eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
    @input_dim: size of vocabulary (highest input integer + 1)
    @output_dim: size of dense representation
    @mask_zero: when True, index 0 is treated as padding and masked out.
    '''
    def __init__(self, input_dim, output_dim, init='uniform',
                 W_regularizer=None, activity_regularizer=None, W_constraint=None,
                 mask_zero=False, weights=None):
        super(Embedding, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input = T.imatrix()
        # W is the (vocab_size, output_dim) embedding matrix — the only parameter.
        self.W = self.init((self.input_dim, self.output_dim))
        self.mask_zero = mask_zero
        self.params = [self.W]
        self.W_constraint = constraints.get(W_constraint)
        self.constraints = [self.W_constraint]
        self.regularizers = []
        self.W_regularizer = regularizers.get(W_regularizer)
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)
        if weights is not None:
            self.set_weights(weights)
    def get_output_mask(self, train=None):
        X = self.get_input(train)
        if not self.mask_zero:
            return None
        else:
            # Mask is 1 where the index is non-zero, 0 at padding positions.
            return T.ones_like(X) * (1 - T.eq(X, 0))
    def get_output(self, train=False):
        X = self.get_input(train)
        # Embedding lookup via advanced indexing: rows of W selected by index.
        out = self.W[X]
        return out
    def get_config(self):
        # Serializable description used to re-create the layer.
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "output_dim": self.output_dim,
                "init": self.init.__name__,
                "activity_regularizer": self.activity_regularizer.get_config() if self.activity_regularizer else None,
                "W_regularizer": self.W_regularizer.get_config() if self.W_regularizer else None,
                "W_constraint": self.W_constraint.get_config() if self.W_constraint else None}
class WordContextProduct(Layer):
    '''
    This layer turns a pair of words (a pivot word + a context word,
    ie. a word from the same context, or a random, out-of-context word),
    indentified by their index in a vocabulary, into two dense reprensentations
    (word representation and context representation).
    Then it returns activation(dot(pivot_embedding, context_embedding)),
    which can be trained to encode the probability
    of finding the context word in the context of the pivot word
    (or reciprocally depending on your training procedure).
    The layer ingests integer tensors of shape:
    (nb_samples, 2)
    and outputs a float tensor of shape
    (nb_samples, 1)
    The 2nd dimension encodes (pivot, context).
    input_dim is the size of the vocabulary.
    For more context, see Mikolov et al.:
    Efficient Estimation of Word reprensentations in Vector Space
    http://arxiv.org/pdf/1301.3781v3.pdf
    '''
    def __init__(self, input_dim, proj_dim=128,
                 init='uniform', activation='sigmoid', weights=None):
        super(WordContextProduct, self).__init__()
        self.input_dim = input_dim
        self.proj_dim = proj_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input = T.imatrix()
        # two different embeddings for pivot word and its context
        # because p(w|c) != p(c|w)
        self.W_w = self.init((input_dim, proj_dim))
        self.W_c = self.init((input_dim, proj_dim))
        self.params = [self.W_w, self.W_c]
        if weights is not None:
            self.set_weights(weights)
    def get_output(self, train=False):
        X = self.get_input(train)
        # Column 0 holds pivot indices, column 1 context indices.
        w = self.W_w[X[:, 0]]  # nb_samples, proj_dim
        c = self.W_c[X[:, 1]]  # nb_samples, proj_dim
        # Row-wise dot product of the two embeddings, reshaped to (nb_samples, 1).
        dot = T.sum(w * c, axis=1)
        dot = theano.tensor.reshape(dot, (X.shape[0], 1))
        return self.activation(dot)
    def get_config(self):
        # Serializable description used to re-create the layer.
        return {"name": self.__class__.__name__,
                "input_dim": self.input_dim,
                "proj_dim": self.proj_dim,
                "init": self.init.__name__,
                "activation": self.activation.__name__}
| [
"jpb2184@columbia.edu"
] | jpb2184@columbia.edu |
bd3c00409bd74afaf9970fc289a89f285e58e5f0 | 52fe3a40bca17da79bf3e974c3f74d111c311125 | /DBA-master/image_train.py | e8657b3514d2f2a0976c06b6575ac1e02840185f | [] | no_license | wangyongkang-xie/Theroy_DetectAcc | db143f3fe687b29d0b690194cb3a2a55e70b0c19 | 23893a32ebc9313321f5e31caf7e073eec88e488 | refs/heads/master | 2023-01-27T21:19:14.340309 | 2020-12-15T07:07:49 | 2020-12-15T07:07:49 | 321,581,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,825 | py | import utils.csv_record as csv_record
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import main
import test
import copy
import config
def ImageTrain(helper, start_epoch, local_model, target_model, is_poison,agent_name_keys):
    """Run one aggregation round of local training for every selected agent.

    For each agent in ``agent_name_keys`` the global ``target_model`` weights
    are copied into ``local_model`` and trained locally.  Agents listed in
    ``helper.params['adversary_list']`` run the poisoned training procedure
    (class loss + distance-to-global loss, optional scaling) during their
    configured poison epochs when ``is_poison`` is set.

    Returns:
        (epochs_submit_update_dict, num_samples_dict): per-agent lists of
        model-weight deltas (or accumulated raw gradients when the
        aggregation method is FoolsGold), and per-agent dataset sizes.
    """
    epochs_submit_update_dict = dict()
    num_samples_dict = dict()
    # Count how many of this round's participants are adversaries (logging only).
    current_number_of_adversaries=0
    for temp_name in agent_name_keys:
        if temp_name in helper.params['adversary_list']:
            current_number_of_adversaries+=1
    for model_id in range(helper.params['no_models']):
        epochs_local_update_list = []
        last_local_model = dict()
        client_grad = [] # only works for aggr_epoch_interval=1
        # Snapshot the current global weights; deltas are computed against this.
        # NOTE(review): calls state_dict() once per tensor inside the loop --
        # `data.clone()` would avoid the repeated lookups.
        for name, data in target_model.state_dict().items():
            last_local_model[name] = target_model.state_dict()[name].clone()
        agent_name_key = agent_name_keys[model_id]
        ## Synchronize LR and models
        model = local_model
        model.copy_params(target_model.state_dict())
        optimizer = torch.optim.SGD(model.parameters(), lr=helper.params['lr'],
                                    momentum=helper.params['momentum'],
                                    weight_decay=helper.params['decay'])
        model.train()
        # adversarial_index selects which local trigger pattern this agent uses;
        # -1 means the global (combined) pattern.
        adversarial_index= -1
        localmodel_poison_epochs = helper.params['poison_epochs']
        if is_poison and agent_name_key in helper.params['adversary_list']:
            for temp_index in range(0, len(helper.params['adversary_list'])):
                if int(agent_name_key) == helper.params['adversary_list'][temp_index]:
                    adversarial_index= temp_index
                    localmodel_poison_epochs = helper.params[str(temp_index) + '_poison_epochs']
                    main.logger.info(
                        f'poison local model {agent_name_key} index {adversarial_index} ')
                    break
            if len(helper.params['adversary_list']) == 1:
                adversarial_index = -1 # the global pattern
        for epoch in range(start_epoch, start_epoch + helper.params['aggr_epoch_interval']):
            # Frozen copy of the global weights, used for distance regularization.
            target_params_variables = dict()
            for name, param in target_model.named_parameters():
                target_params_variables[name] = last_local_model[name].clone().detach().requires_grad_(False)
            if is_poison and agent_name_key in helper.params['adversary_list'] and (epoch in localmodel_poison_epochs):
                main.logger.info('poison_now')
                poison_lr = helper.params['poison_lr']
                internal_epoch_num = helper.params['internal_poison_epochs']
                step_lr = helper.params['poison_step_lr']
                poison_optimizer = torch.optim.SGD(model.parameters(), lr=poison_lr,
                                                   momentum=helper.params['momentum'],
                                                   weight_decay=helper.params['decay'])
                # Decay the poison LR at 20% and 80% of the internal epochs.
                scheduler = torch.optim.lr_scheduler.MultiStepLR(poison_optimizer,
                                                                 milestones=[0.2 * internal_epoch_num,
                                                                             0.8 * internal_epoch_num], gamma=0.1)
                temp_local_epoch = (epoch - 1) *internal_epoch_num
                for internal_epoch in range(1, internal_epoch_num + 1):
                    temp_local_epoch += 1
                    _, data_iterator = helper.train_data[agent_name_key]
                    poison_data_count = 0
                    total_loss = 0.
                    correct = 0
                    dataset_size = 0
                    dis2global_list=[]
                    for batch_id, batch in enumerate(data_iterator):
                        # get_poison_batch stamps the agent's trigger onto part of the batch.
                        data, targets, poison_num = helper.get_poison_batch(batch, adversarial_index=adversarial_index,evaluation=False)
                        poison_optimizer.zero_grad()
                        dataset_size += len(data)
                        poison_data_count += poison_num
                        output = model(data)
                        class_loss = nn.functional.cross_entropy(output, targets)
                        distance_loss = helper.model_dist_norm_var(model, target_params_variables)
                        # Lmodel = αLclass + (1 − α)Lano; alpha_loss =1 fixed
                        loss = helper.params['alpha_loss'] * class_loss + \
                               (1 - helper.params['alpha_loss']) * distance_loss
                        loss.backward()
                        # get gradients
                        if helper.params['aggregation_methods']==config.AGGR_FOOLSGOLD:
                            # FoolsGold consumes accumulated raw gradients instead of weight deltas.
                            for i, (name, params) in enumerate(model.named_parameters()):
                                if params.requires_grad:
                                    if internal_epoch == 1 and batch_id == 0:
                                        client_grad.append(params.grad.clone())
                                    else:
                                        client_grad[i] += params.grad.clone()
                        poison_optimizer.step()
                        total_loss += loss.data
                        pred = output.data.max(1)[1] # get the index of the max log-probability
                        correct += pred.eq(targets.data.view_as(pred)).cpu().sum().item()
                        if helper.params["batch_track_distance"]:
                            # we can calculate distance to this model now.
                            temp_data_len = len(data_iterator)
                            distance_to_global_model = helper.model_dist_norm(model, target_params_variables)
                            dis2global_list.append(distance_to_global_model)
                            model.track_distance_batch_vis(vis=main.vis, epoch=temp_local_epoch,
                                                           data_len=temp_data_len,
                                                           batch=batch_id,distance_to_global_model= distance_to_global_model,
                                                           eid=helper.params['environment_name'],
                                                           name=str(agent_name_key),is_poisoned=True)
                    if step_lr:
                        scheduler.step()
                        main.logger.info(f'Current lr: {scheduler.get_lr()}')
                    acc = 100.0 * (float(correct) / float(dataset_size))
                    total_l = total_loss / dataset_size
                    main.logger.info(
                        '___PoisonTrain {} , epoch {:3d}, local model {}, internal_epoch {:3d}, Average loss: {:.4f}, '
                        'Accuracy: {}/{} ({:.4f}%), train_poison_data_count: {}'.format(model.name, epoch, agent_name_key,
                                                                                       internal_epoch,
                                                                                       total_l, correct, dataset_size,
                                                                                       acc, poison_data_count))
                    csv_record.train_result.append(
                        [agent_name_key, temp_local_epoch,
                         epoch, internal_epoch, total_l.item(), acc, correct, dataset_size])
                    if helper.params['vis_train']:
                        model.train_vis(main.vis, temp_local_epoch,
                                        acc, loss=total_l, eid=helper.params['environment_name'], is_poisoned=True,
                                        name=str(agent_name_key) )
                    num_samples_dict[agent_name_key] = dataset_size
                    if helper.params["batch_track_distance"]:
                        main.logger.info(
                            f'MODEL {model_id}. P-norm is {helper.model_global_norm(model):.4f}. '
                            f'Distance to the global model: {dis2global_list}. ')
                # internal epoch finish
                main.logger.info(f'Global model norm: {helper.model_global_norm(target_model)}.')
                main.logger.info(f'Norm before scaling: {helper.model_global_norm(model)}. '
                                 f'Distance: {helper.model_dist_norm(model, target_params_variables)}')
                if not helper.params['baseline']:
                    # Scale the malicious update away from the global weights
                    # (model replacement) before submitting it.
                    main.logger.info(f'will scale.')
                    epoch_loss, epoch_acc, epoch_corret, epoch_total = test.Mytest(helper=helper, epoch=epoch,
                                                                                   model=model, is_poison=False,
                                                                                   visualize=False,
                                                                                   agent_name_key=agent_name_key)
                    csv_record.test_result.append(
                        [agent_name_key, epoch, epoch_loss, epoch_acc, epoch_corret, epoch_total])
                    epoch_loss, epoch_acc, epoch_corret, epoch_total = test.Mytest_poison(helper=helper,
                                                                                          epoch=epoch,
                                                                                          model=model,
                                                                                          is_poison=True,
                                                                                          visualize=False,
                                                                                          agent_name_key=agent_name_key)
                    csv_record.posiontest_result.append(
                        [agent_name_key, epoch, epoch_loss, epoch_acc, epoch_corret, epoch_total])
                    clip_rate = helper.params['scale_weights_poison']
                    main.logger.info(f"Scaling by {clip_rate}")
                    for key, value in model.state_dict().items():
                        target_value = last_local_model[key]
                        new_value = target_value + (value - target_value) * clip_rate
                        model.state_dict()[key].copy_(new_value)
                    distance = helper.model_dist_norm(model, target_params_variables)
                    main.logger.info(
                        f'Scaled Norm after poisoning: '
                        f'{helper.model_global_norm(model)}, distance: {distance}')
                    csv_record.scale_temp_one_row.append(epoch)
                    csv_record.scale_temp_one_row.append(round(distance, 4))
                    if helper.params["batch_track_distance"]:
                        temp_data_len = len(helper.train_data[agent_name_key][1])
                        model.track_distance_batch_vis(vis=main.vis, epoch=temp_local_epoch,
                                                       data_len=temp_data_len,
                                                       batch=temp_data_len-1,
                                                       distance_to_global_model=distance,
                                                       eid=helper.params['environment_name'],
                                                       name=str(agent_name_key), is_poisoned=True)
                distance = helper.model_dist_norm(model, target_params_variables)
                main.logger.info(f"Total norm for {current_number_of_adversaries} "
                                 f"adversaries is: {helper.model_global_norm(model)}. distance: {distance}")
            else:
                # Benign local training (also used by adversaries outside poison epochs).
                temp_local_epoch = (epoch - 1) * helper.params['internal_epochs']
                for internal_epoch in range(1, helper.params['internal_epochs'] + 1):
                    temp_local_epoch += 1
                    _, data_iterator = helper.train_data[agent_name_key]
                    total_loss = 0.
                    correct = 0
                    dataset_size = 0
                    dis2global_list = []
                    for batch_id, batch in enumerate(data_iterator):
                        optimizer.zero_grad()
                        data, targets = helper.get_batch(data_iterator, batch,evaluation=False)
                        dataset_size += len(data)
                        output = model(data)
                        loss = nn.functional.cross_entropy(output, targets)
                        loss.backward()
                        # get gradients
                        if helper.params['aggregation_methods'] == config.AGGR_FOOLSGOLD:
                            for i, (name, params) in enumerate(model.named_parameters()):
                                if params.requires_grad:
                                    if internal_epoch == 1 and batch_id == 0:
                                        client_grad.append(params.grad.clone())
                                    else:
                                        client_grad[i] += params.grad.clone()
                        optimizer.step()
                        total_loss += loss.data
                        pred = output.data.max(1)[1] # get the index of the max log-probability
                        correct += pred.eq(targets.data.view_as(pred)).cpu().sum().item()
                        if helper.params["vis_train_batch_loss"]:
                            cur_loss = loss.data
                            temp_data_len = len(data_iterator)
                            model.train_batch_vis(vis=main.vis,
                                                  epoch=temp_local_epoch,
                                                  data_len=temp_data_len,
                                                  batch=batch_id,
                                                  loss=cur_loss,
                                                  eid=helper.params['environment_name'],
                                                  name=str(agent_name_key) , win='train_batch_loss', is_poisoned=False)
                        if helper.params["batch_track_distance"]:
                            # we can calculate distance to this model now
                            temp_data_len = len(data_iterator)
                            distance_to_global_model = helper.model_dist_norm(model, target_params_variables)
                            dis2global_list.append(distance_to_global_model)
                            model.track_distance_batch_vis(vis=main.vis, epoch=temp_local_epoch,
                                                           data_len=temp_data_len,
                                                           batch=batch_id,distance_to_global_model= distance_to_global_model,
                                                           eid=helper.params['environment_name'],
                                                           name=str(agent_name_key),is_poisoned=False)
                    acc = 100.0 * (float(correct) / float(dataset_size))
                    total_l = total_loss / dataset_size
                    main.logger.info(
                        '___Train {}, epoch {:3d}, local model {}, internal_epoch {:3d}, Average loss: {:.4f}, '
                        'Accuracy: {}/{} ({:.4f}%)'.format(model.name, epoch, agent_name_key, internal_epoch,
                                                           total_l, correct, dataset_size,
                                                           acc))
                    csv_record.train_result.append([agent_name_key, temp_local_epoch,
                                                    epoch, internal_epoch, total_l.item(), acc, correct, dataset_size])
                    if helper.params['vis_train']:
                        model.train_vis(main.vis, temp_local_epoch,
                                        acc, loss=total_l, eid=helper.params['environment_name'], is_poisoned=False,
                                        name=str(agent_name_key))
                    num_samples_dict[agent_name_key] = dataset_size
                    if helper.params["batch_track_distance"]:
                        main.logger.info(
                            f'MODEL {model_id}. P-norm is {helper.model_global_norm(model):.4f}. '
                            f'Distance to the global model: {dis2global_list}. ')
            # test local model after internal epoch finishing
            epoch_loss, epoch_acc, epoch_corret, epoch_total = test.Mytest(helper=helper, epoch=epoch,
                                                                           model=model, is_poison=False, visualize=True,
                                                                           agent_name_key=agent_name_key)
            csv_record.test_result.append([agent_name_key, epoch, epoch_loss, epoch_acc, epoch_corret, epoch_total])
            if is_poison:
                if agent_name_key in helper.params['adversary_list'] and (epoch in localmodel_poison_epochs):
                    epoch_loss, epoch_acc, epoch_corret, epoch_total = test.Mytest_poison(helper=helper,
                                                                                          epoch=epoch,
                                                                                          model=model,
                                                                                          is_poison=True,
                                                                                          visualize=True,
                                                                                          agent_name_key=agent_name_key)
                    csv_record.posiontest_result.append(
                        [agent_name_key, epoch, epoch_loss, epoch_acc, epoch_corret, epoch_total])
                # test on local triggers
                if agent_name_key in helper.params['adversary_list']:
                    if helper.params['vis_trigger_split_test']:
                        model.trigger_agent_test_vis(vis=main.vis, epoch=epoch, acc=epoch_acc, loss=None,
                                                     eid=helper.params['environment_name'],
                                                     name=str(agent_name_key) + "_combine")
                    epoch_loss, epoch_acc, epoch_corret, epoch_total = \
                        test.Mytest_poison_agent_trigger(helper=helper, model=model, agent_name_key=agent_name_key)
                    csv_record.poisontriggertest_result.append(
                        [agent_name_key, str(agent_name_key) + "_trigger", "", epoch, epoch_loss,
                         epoch_acc, epoch_corret, epoch_total])
                    if helper.params['vis_trigger_split_test']:
                        model.trigger_agent_test_vis(vis=main.vis, epoch=epoch, acc=epoch_acc, loss=None,
                                                     eid=helper.params['environment_name'],
                                                     name=str(agent_name_key) + "_trigger")
            # update the model weight
            local_model_update_dict = dict()
            for name, data in model.state_dict().items():
                local_model_update_dict[name] = torch.zeros_like(data)
                local_model_update_dict[name] = (data - last_local_model[name])
                last_local_model[name] = copy.deepcopy(data)
            if helper.params['aggregation_methods'] == config.AGGR_FOOLSGOLD:
                epochs_local_update_list.append(client_grad)
            else:
                epochs_local_update_list.append(local_model_update_dict)
        epochs_submit_update_dict[agent_name_key] = epochs_local_update_list
    return epochs_submit_update_dict, num_samples_dict
| [
"17326992704@163.com"
] | 17326992704@163.com |
8624f48b298a2fab6ca583a37d126b42d754d93b | 92bd1040bf0ccbbbd9bea43c766d756abacc6439 | /step_2/mes/clock_institution.py | 4bc21471b5e4bcdd45559b97f2b4802338714af6 | [] | no_license | gmucsn/mTree_clock_auction_tutorial | f0f5074b9f5f64ebe961276a8d77f75ee5d87c2e | 3a67c02976e742e76f31fc6f3809dce491efb506 | refs/heads/main | 2023-04-04T06:07:59.752069 | 2021-04-09T07:07:14 | 2021-04-09T07:07:14 | 355,942,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | from mTree.microeconomic_system.environment import Environment
from mTree.microeconomic_system.institution import Institution
from mTree.microeconomic_system.agent import Agent
from mTree.microeconomic_system.directive_decorators import *
from mTree.microeconomic_system.message import Message
import math
import random
import logging
import time
import datetime
@directive_enabled_class
class ClockInstitution(Institution):
    """mTree institution for the clock auction.

    Currently a stub: it holds no state and defines no message-handling
    directives yet.
    """
    def __init__(self):
        pass
"skunath@local"
] | skunath@local |
87df33662bfa4926caa32f3b3fb0907ed1ddbc37 | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /experiments/state_distance/optimal_control_with_q.py | e6785e1a4453bcf63958d1b547ffd1074ec35676 | [
"MIT"
] | permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,763 | py | """
Choose action according to
a = argmax_{a, s'} r(s, a, s') s.t. Q(s, a, s') = 0
where r is defined specifically for the reacher env.
"""
import argparse
import joblib
import numpy as np
from railrl.state_distance.policies import (
SoftOcOneStepRewardPolicy,
TerminalRewardSampleOCPolicy,
ArgmaxQFPolicy,
PseudoModelBasedPolicy,
SamplePolicyPartialOptimizer)
from railrl.samplers.util import rollout
from railrl.torch.pytorch_util import set_gpu_mode
from railrl.core import logger
def experiment(variant):
    """Placeholder experiment entry point; unused — the ``__main__`` block
    below performs the rollouts directly."""
    pass
if __name__ == "__main__":
    # Load a saved snapshot (env + Q-function), build the requested
    # optimal-control policy, then roll it out forever, decrementing tau
    # each rollout (optionally cycling back to the initial tau).
    parser = argparse.ArgumentParser()
    parser.add_argument('file', type=str,
                        help='path to the snapshot file')
    parser.add_argument('--H', type=int, default=500,
                        help='Max length of rollout')
    parser.add_argument('--num_rollouts', type=int, default=1,
                        help='Number of rollouts per eval')
    parser.add_argument('--gpu', action='store_true')
    parser.add_argument('--argmax', action='store_true')
    parser.add_argument('--hide', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('--planh', type=int, default=1,
                        help='Planning horizon')
    parser.add_argument('--discount', type=float, help='Discount Factor')
    parser.add_argument('--weight', type=float, default=1.,
                        help='Constraint penalty weight')
    parser.add_argument('--nsamples', type=int, default=100,
                        help='Number of samples for optimization')
    parser.add_argument('--ngrad', type=int, default=0,
                        help='Number of gradient steps for respective policy.')
    parser.add_argument('--mb', action='store_true',
                        help='Use (pseudo-)model-based policy')
    parser.add_argument('--partial', action='store_true',
                        help='Use partial state optimizer')
    parser.add_argument('--grid', action='store_true',
                        help='Sample actions from a grid')
    # NOTE: --dt is parsed but currently unused below.
    parser.add_argument('--dt', help='decrement tau', action='store_true')
    parser.add_argument('--cycle', help='cycle tau', action='store_true')
    args = parser.parse_args()
    data = joblib.load(args.file)
    print("Done loading")
    env = data['env']
    qf = data['qf']
    if args.gpu:
        # Bug fix: `ptu` was referenced below but never imported, causing a
        # NameError whenever --gpu was passed.
        from railrl.torch import pytorch_util as ptu
        set_gpu_mode(True)
        qf.to(ptu.device)
    qf.train(False)
    print("Env type:", type(env))
    # Select the policy implementation from mutually-exclusive flags;
    # --planh > 1 falls through to the terminal-reward sampler.
    if args.argmax:
        policy = ArgmaxQFPolicy(
            qf,
            env,
            sample_size=args.nsamples,
            num_gradient_steps=args.ngrad,
            sample_actions_from_grid=args.grid,
        )
    elif args.mb:
        policy = PseudoModelBasedPolicy(
            qf,
            env,
            sample_size=args.nsamples,
        )
    elif args.partial:
        policy = SamplePolicyPartialOptimizer(
            qf,
            env,
            data['policy'],
            sample_size=args.nsamples,
        )
    elif args.planh == 1:
        policy = SoftOcOneStepRewardPolicy(
            qf,
            env,
            data['policy'],
            constraint_weight=args.weight,
            sample_size=args.nsamples,
            verbose=args.verbose,
            sample_actions_from_grid=args.grid,
        )
    else:
        policy = TerminalRewardSampleOCPolicy(
            qf,
            env,
            horizon=args.planh,
            constraint_weight=args.weight,
            sample_size=args.nsamples,
            verbose=args.verbose,
        )
    discount = 0
    if args.discount is not None:
        print("WARNING: you are overriding the discount factor. Right now "
              "only discount = 0 really makes sense.")
        discount = args.discount
    init_tau = discount
    while True:
        paths = []
        tau = init_tau
        policy.set_tau(tau)
        for _ in range(args.num_rollouts):
            # Each rollout gets a freshly sampled goal shared by env and policy.
            goal = env.sample_goal_for_rollout()
            if args.verbose:
                env.print_goal_state_info(goal)
            env.set_goal(goal)
            policy.set_goal(goal)
            path = rollout(
                env,
                policy,
                max_path_length=args.H,
                animated=not args.hide,
            )
            # Attach the goal to every timestep for downstream diagnostics.
            path['goal_states'] = np.repeat(
                np.expand_dims(goal, 0),
                len(path['observations']),
                0,
            )
            paths.append(path)
            # Count tau down; clamp at 0 or restart from init_tau with --cycle.
            tau -= 1
            if tau < 0:
                if args.cycle:
                    tau = init_tau
                else:
                    tau = 0
            policy.set_tau(tau)
        env.log_diagnostics(paths)
        logger.dump_tabular()
| [
"asap7772@berkeley.edu"
] | asap7772@berkeley.edu |
82332f085a0ce0530c27abb8493eb16799f8861a | 44e8334e1b17fda7f60d9760f59868a9227e2ab0 | /python-tf/tf2/tf2-10-0-mnist.py | 1510bd8a4542793f25cbb4c7648fb41506d3382a | [] | no_license | MysteriousSonOfGod/python-3 | 47c2aa69a84ba78876c74bc6f2e7e6f3093df1e2 | a303a5284c40f3cb96a8082a1f5ed80773b66336 | refs/heads/master | 2023-02-16T18:21:46.153388 | 2021-01-13T10:55:14 | 2021-01-13T10:55:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | # Lab 7 Learning rate and Evaluation
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import images.image
learning_rate = 0.001
training_epochs = 15 # one pass over the full training data = 1 epoch
batch_size = 100 # number of samples processed per step instead of the whole dataset
# e.g. with 1000 samples and batch_size 100, one epoch takes 10 iterations
nb_classes = 10
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test_org) = mnist.load_data()
# The first training image shows raw pixel values in the 0~255 range.
plt.figure()
plt.imshow(x_train[0])
plt.colorbar()
plt.grid(False)
images.image.save_fig("tf2.10.0.mnist_train_images")
plt.show()
# normalizing data
x_train, x_test_normal = x_train / 255.0, x_test / 255.0
# Plot the first 25 training images with their class labels underneath.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(x_train[i], cmap=plt.cm.binary)
    plt.xlabel(y_train[i])
images.image.save_fig("tf2.10.0.mnist_train_images1_25")
plt.show()
| [
"cbaeck1@gmail.com"
] | cbaeck1@gmail.com |
a181287ab47a20cb7149b2e78d496ed95272cafb | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /Reports/Generate Disk Space Report for Endpoints/generate-disk-space-report-for-endpoints.py | a016efdf31e2323d017285a0679d67edcd5a712e | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 15,017 | py | no=xx #Edit the xx parameter as Device Timeout. Eg if you have 500 enrolled endpoints then that xx must "100".
Head_computer=r'CHANGE_ME' # Head computer to send the email
emailto=r'CHANGE_ME' # Email address to send the report
Head_computer=Head_computer.upper()
# Build the shared session KEY: "<timeout><HEAD_COMPUTER><YYYYMMDD>".
# Every endpoint publishes this key alongside its report so the head
# computer can filter out messages from other runs/days.
KI=list(Head_computer)
KI.insert(0,str(no))
import datetime
KI.insert(len(KI),datetime.datetime.now().strftime("%Y%m%d"))
KEY="".join(KI)
import ast
import threading
import time
import os
from subprocess import PIPE, Popen
import ctypes
import shutil
import socket,re
import sys
def Email(fileToSend,To):
    """Send *fileToSend* as a CSV attachment named Diskreport.csv to *To*
    via the Mailjet REST API (Python 2 script)."""
    from mailjet_rest import Client
    import os
    # NOTE(review): hard-coded API credentials checked into source --
    # these should be rotated and loaded from configuration instead.
    api_key='3e70858a7a5c5fbc245a662d5d9aa238' # API KEY of Mail Jet
    api_secret= 'a337abcc84d8fb062f6f1597d966ae6f' # API SECRET KEY of Mail Jet
    mailjet = Client(auth=(api_key, api_secret), version='v3.1')
    import base64
    # Mailjet expects the attachment body base64-encoded.
    with open(fileToSend, 'rb') as fp:
        ki=base64.b64encode(fp.read())
    data = {
        'Messages': [
            {
                "From": {
                    "Email": "c1operations123@gmail.com",
                },
                "To": [
                    {
                        "Email": "%s"%To,
                    }
                ],
                "Subject": "Disk Usage Percentage of all devices ",
                "TextPart": "Dear passenger 1, welcome to Mailjet! May the delivery force be with you!",
                "HTMLPart": """<h3> Hi \n
                Please find the attachment which contains all the device reports \n
                Thank you.</h3>""",
                "Attachments": [
                    {
                        "ContentType": "text/csv",
                        "Filename": "Diskreport.csv",
                        "Base64Content": "%s"%ki
                    }
                ]
            }
        ]
    }
    result = mailjet.send.create(data=data)
    ret=result.status_code
    # HTTP 200 alone is not enough: inspect the JSON body for "success".
    if ret==200:
        out=result.json()
        out=str(out)
        if "success" in out:
            print "Email Sent Successfully"
        else:
            print "Error sending email"
def Download(URL, DownloadTo = None, FileName = None):
    """Fetch *URL* to disk and return the local path, or False on failure.

    FileName defaults to the last URL segment; DownloadTo defaults to the
    %TEMP% directory.  An unverified-SSL download is attempted first, with
    a plain urlopen as the fallback.
    """
    import urllib
    import ssl
    target_name = FileName if FileName else URL.split('/')[-1]
    target_dir = DownloadTo if DownloadTo else os.path.join(os.environ['TEMP'])
    destination = os.path.join(target_dir, target_name)
    with open(destination, 'wb') as handle:
        try:
            unverified = ssl._create_unverified_context()
            handle.write(urllib.urlopen(URL, context=unverified).read())
        except:
            handle.write(urllib.urlopen(URL).read())
    return destination if os.path.isfile(destination) else False
def zip_item(path,final_path): # Creating ZIP file
    """Extract the zip archive at *path* into *final_path*.

    Returns *final_path* so callers can chain on the extraction directory.
    Uses the ZipFile context manager so the archive handle is closed even
    if extraction raises (the original only closed it on success).
    """
    import zipfile
    with zipfile.ZipFile(path, 'r') as zip_ref:
        zip_ref.extractall(final_path)
    return final_path
def Import_pubnub(DEST):
    """Download a bundled pubnub package, unzip it into %TEMP%, copy it into
    the Comodo ITSM site-packages directory *DEST*, and verify it imports.

    Returns DEST so the caller can keep using the site-packages path.
    """
    BDPATH = Download(r'https://drive.google.com/uc?export=download&id=1R1KFmrC0jh6TOdCFePt2SNTbu_ti_CpP', FileName = 'PUBNUB.zip')
    SRC = os.path.join(os.environ['TEMP'])
    path=zip_item(BDPATH,SRC)
    # The archive extracts to %TEMP%\PUBNUB; copy its contents into DEST.
    SRC = os.path.join(os.environ['TEMP'],'PUBNUB')
    from distutils.dir_util import copy_tree
    copy_tree(SRC, DEST)
    # Smoke-test that the copied package is importable.
    import pubnub
    from pubnub.pnconfiguration import PNConfiguration
    from pubnub.pubnub import PubNub
    from pubnub.callbacks import SubscribeCallback
    print "Pubnub is imported"
    return DEST
def computername():
    """Return this endpoint's Windows computer name (COMPUTERNAME env var)."""
    host = os.environ['COMPUTERNAME']
    return host
def ipaddress():
    """Return the IPv4 address this host's own name resolves to."""
    host = socket.gethostname()
    return socket.gethostbyname(host)
vbs=r'''
Sub DpySpaceInfo(ByVal infotype, ByVal drvSpace, ByVal percentage)
textline = Space(12 - Len(infotype)) & infotype & Space(17 - Len(drvSpace)) & drvSpace
'If percentage <> "" Then textline = textline & Space(33 - Len(textline)) & percentage
If percentage <> "" Then textline = textline & Space(11 - Len(percentage)) & percentage
WScript.Echo textline
End Sub
' Function to calculate the used and free space on the disk drive.
Sub GetDriveSpace(ByRef drive)
totalSpace = drive.TotalSize / 1024
freeSpace = drive.AvailableSpace / 1024
percentFree = freeSpace / totalSpace
percentUsed = 1 - percentFree
dpyUsedSpace = FormatNumber(totalSpace - freeSpace, 0, vbTrue, vbFalse, vbTrue) & " KB"
dpyFreeSpace = FormatNumber(freeSpace, 0, vbTrue, vbFalse, vbTrue) & " KB"
dpyTotalSpace = FormatNumber(totalSpace, 0, vbTrue, vbFalse, vbTrue) & " KB"
dpyPercentUsed = FormatPercent(percentUsed, 2, vbTrue, vbFalse, vbTrue)
dpyPercentFree = FormatPercent(percentFree, 2, vbTrue, vbFalse, vbTrue)
WScript.Echo "DRIVE " & drive.DriveLetter & ":" &dpyPercentFree
End Sub
Set oFileSystem = CreateObject("Scripting.FileSystemObject")
Set drivesList = oFileSystem.Drives
' Iterage through all drives ignoring all but fixed drives.
For Each d In drivesList
If d.DriveType = 2 Then GetDriveSpace d
Next
'''
class disable_file_system_redirection:
    """Context manager that disables WOW64 file-system redirection so a
    32-bit Python process can reach the native System32 tools
    (e.g. cscript.exe, taskkill)."""
    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
    def __enter__(self):
        self.old_value = ctypes.c_long()
        self.success = self._disable(ctypes.byref(self.old_value))
    def __exit__(self, type, value, traceback):
        # Only restore redirection if disabling it actually succeeded.
        if self.success:
            self._revert(self.old_value)
def runvbs(vbs):
    """Write *vbs* to %PROGRAMDATA%\\temp\\temprun.vbs, run it with
    cscript.exe (WOW64 redirection disabled), delete the file, and return
    the script's stdout."""
    workdir=os.environ['PROGRAMDATA']+r'\temp'
    if not os.path.isdir(workdir):
        os.mkdir(workdir)
    with open(workdir+r'\temprun.vbs',"w") as f :
        f.write(vbs)
    with disable_file_system_redirection():
        Percentage=os.popen('cscript.exe "'+workdir+r'\temprun.vbs"').read()
    # Clean up the temporary script after capturing its output.
    if os.path.isfile(workdir+r'\temprun.vbs'):
        os.remove(workdir+r'\temprun.vbs')
    return Percentage
def Drive(KEY):
    """Collect per-drive disk usage for this endpoint.

    Builds and returns the flat row list SAM:
    [KEY, computer name, IP, then per fixed drive: name, usage %, total,
    used, free (human readable)], with "\n"+"," separators inserted so the
    head computer can write the rows straight into its CSV.
    """
    SAM=[]
    per=[]
    # Free-space percentages come from the VBScript output ("DRIVE C:xx%").
    percent=runvbs(vbs)
    SAM.append(KEY)
    SAM.append(computername())
    SAM.append(ipaddress())
    freepercent=re.findall('DRIVE (.*)',percent)
    for val in freepercent:
        val1=re.sub(r":(.*)", "", val)
        val=re.sub(r"(.*):", "", val)
        val=re.sub(r"%", "", val)
        val=float(val)
        # Convert free% to used%.
        freepercentage=100-val
        per.append(str(freepercentage))
    # Fixed drives (DriveType=3) enumerated via WMI.
    drive=os.popen('wmic logicaldisk WHERE DriveType=3 get name').read()
    list_of_drives=drive.split()[1:]
    def disk_usage(path):
        # [total, used, free] in bytes via GetDiskFreeSpaceEx.
        _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
                         ctypes.c_ulonglong()
        if sys.version_info >= (3,) or isinstance(path, unicode):
            fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
        else:
            fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
        ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
        if ret == 0:
            raise ctypes.WinError()
        used = total.value - free.value
        return [total.value, used, free.value]
    def bytes2human(n):
        # Format a byte count as e.g. '9.8GB'; returns n unchanged below 1 KB.
        symbols = ('KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
        prefix = {}
        for i, s in enumerate(symbols):
            prefix[s] = 1 << (i+1)*10
        for s in reversed(symbols):
            if n >= prefix[s]:
                value = float(n) / prefix[s]
                return '%.1f%s' % (value, s)
        return n
    k=[disk_usage(i) for i in list_of_drives]
    fnl=[]
    for i in k:
        for j in i:
            SAM.append(bytes2human(j))
    # Interleave drive names and usage percentages into the flat row.
    j=3
    for i in list_of_drives:
        SAM.insert(j,i)
        j=j+4
    j=7
    for i in per:
        SAM.insert(j,i)
        j=j+5
    print SAM
    # Insert CSV line separators between per-drive groups.
    if len(SAM)>=8:
        j=8
        for i in per:
            SAM.insert(j,"\n"+",")
            j=j+6
    else:
        j=8
        SAM.insert(j,"\n")
    return SAM
# Header row for the CSV report assembled on the head computer.
list_head=['Computer_Name', 'IP_Address',"Drive_name","Total_Space","Used_Space","Free_Space","Percentage_of_usage"]
def publish_nonhead():
    """Non-head endpoint path: wait 30s (so the head computer can start
    listening), then publish this machine's Drive(KEY) report to the shared
    PubNub channel."""
    import time
    time.sleep(30)
    from pubnub.pnconfiguration import PNConfiguration
    from pubnub.pubnub import PubNub
    from pubnub.callbacks import SubscribeCallback
    from pubnub.pnconfiguration import PNConfiguration
    from pubnub.pubnub import PubNub
    publish_key1= 'pub-c-7a797a24-388e-411c-b848-9bd170919784'
    subscribe_key1= 'sub-c-b1b31f80-179a-11e8-95aa-1eb18890f15d'
    pnconfig = PNConfiguration()
    pnconfig.subscribe_key = subscribe_key1
    pnconfig.publish_key = publish_key1
    pnconfig.ssl = True
    pubnub = PubNub(pnconfig)
    import time
    from pubnub.exceptions import PubNubException
    try:
        envelope = pubnub.publish().channel("Channel-706fxzjkv").message(Drive(KEY)).sync()
        print("publish timetoken: %d" % envelope.result.timetoken)
    except PubNubException as e:
        print e
def publish(no):
    """Head-computer publish path, run from the timeout thread.

    Sleeps 3*no seconds (the device timeout), publishes the head's own
    Drive(KEY) report, emails the assembled CSV, then kills this process
    with taskkill to stop the endless listener loop.
    """
    import pubnub
    from pubnub.pnconfiguration import PNConfiguration
    from pubnub.pubnub import PubNub
    from pubnub.callbacks import SubscribeCallback
    from pubnub.pnconfiguration import PNConfiguration
    from pubnub.pubnub import PubNub
    publish_key1= 'pub-c-7a797a24-388e-411c-b848-9bd170919784'
    subscribe_key1= 'sub-c-b1b31f80-179a-11e8-95aa-1eb18890f15d'
    pnconfig = PNConfiguration()
    pnconfig.subscribe_key = subscribe_key1
    pnconfig.publish_key = publish_key1
    pnconfig.ssl = True
    pubnub = PubNub(pnconfig)
    import time
    # Give the other endpoints 3 seconds each to report in.
    s=3*no
    time.sleep(s)
    from pubnub.exceptions import PubNubException
    try:
        envelope = pubnub.publish().channel("Channel-706fxzjkv").message(Drive(KEY)).sync()
        print("publish timetoken: %d" % envelope.result.timetoken)
        app_process=os.getpid()
        app_process=str(app_process)
        import subprocess;
        import ctypes
        # Local redefinition shadowing the module-level class of the same name.
        class disable_file_system_redirection:
            _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
            _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
            def __enter__(self):
                self.old_value = ctypes.c_long()
                self.success = self._disable(ctypes.byref(self.old_value))
            def __exit__(self, type, value, traceback):
                if self.success:
                    self._revert(self.old_value)
        time.sleep(5)
        reportfolder=os.path.join(os.environ['ProgramData'],"new.csv")
        Email(reportfolder,emailto)
        print "Your file is in head computer at "+reportfolder
        # Kill our own PID: the listener in long_function() never exits on its own.
        with disable_file_system_redirection():
            process=subprocess.Popen(['taskkill', '/F','/PID',app_process],shell=True,stdout=subprocess.PIPE);
            result=process.communicate()[0]
            print (result)
    except PubNubException as e:
        print e
class LongFunctionInside(object):
    """Head-computer listener: collects device reports from the PubNub
    channel into %ProgramData%\\new.csv while a daemon timer thread waits
    out the device timeout and then publishes/emails via publish()."""
    # Guards `working`; shared across the listener and timer threads.
    lock_state = threading.Lock()
    working = False
    def long_function(self, timeout,no):
        """Start the timeout thread, then block in the subscribe loop,
        appending every message matching the session KEY to the CSV."""
        self.working = True
        timeout_work = threading.Thread(name="thread_name", target=self.work_time, args=(timeout,))
        timeout_work.setDaemon(True)
        timeout_work.start()
        import logging
        import pubnub
        from pubnub.exceptions import PubNubException
        from pubnub.pnconfiguration import PNConfiguration
        from pubnub.pubnub import PubNub, SubscribeListener
        import time
        import os
        pnconfig = PNConfiguration()
        pnconfig.subscribe_key = 'sub-c-b1b31f80-179a-11e8-95aa-1eb18890f15d'
        pnconfig.publish_key = ''
        pubnub = PubNub(pnconfig)
        n=0
        my_listener = SubscribeListener()
        pubnub.subscribe().channels('Channel-706fxzjkv').execute()
        fp=os.path.join(os.environ['ProgramData'],"new.csv")
        # Write the CSV header row first.
        sample=''
        for i in list_head:
            if i == None:
                sample=sample+"None"+","
            else:
                sample=sample+i+","
        with open(fp,'w') as f:
            f.write(sample)
            f.write('\n')
        while True:
            print "Listening..."# endless/long work
            pubnub.add_listener(my_listener)
            result = my_listener.wait_for_message_on('Channel-706fxzjkv')
            pubnub.remove_listener(my_listener)
            result=result.message
            print result[0]
            sample=""
            # Only accept rows published with this run's session KEY.
            if(result[0]==KEY):
                with open(fp,'a+') as f:
                    for i in range(1,len(result)):
                        if result[i] == None:
                            sample=sample+"None"+","
                        else:
                            sample=sample+result[i]+","
                    f.write(sample)
                    f.write('\n')
            if not self.working: # if state is working == true still working
                break
        self.set_state(True)
    def work_time(self, sleep_time):
        """Timer thread: sleep, then publish the head's report and flip state."""
        print sleep_time# thread function that just sleeping specified time,
        time.sleep(sleep_time)
        if self.working:
            # `no` here is the module-level device-timeout constant.
            publish(no)
            self.set_state(False)
    def set_state(self, state): # secured state change
        while True:
            self.lock_state.acquire()
            try:
                self.working = state
                break
            finally:
                self.lock_state.release()
# Pick the ITSM install root (x86 vs native Program Files).
HOMEPATH = r"C:\Program Files (x86)"
if os.path.exists(HOMEPATH):
    HOMEPATH = r"C:\Program Files (x86)"
else:
    HOMEPATH =r"C:\Program Files"
# Remove any report CSV left over from a previous run.
fp=os.path.join(os.environ['ProgramData'],"new.csv")
if os.path.exists(fp):
    try:
        os.remove(fp)
    except:
        pass
DEST= os.path.join(HOMEPATH,r'COMODO\Comodo ITSM\Lib\site-packages')
Folders=os.listdir(DEST)
# Only download the bundled pubnub package if its folders are not already
# present in site-packages (8+ of the expected folders found = installed).
Nodow=0
Del_folders=['certifi', 'certifi-2018.1.18.dist-info','chardet', 'chardet-3.0.4.dist-info', 'Cryptodome', 'pubnub', 'pubnub-4.0.13.dist-info', 'pycryptodomex-3.4.12.dist-info','requests']
for i in Del_folders:
    if i in Folders:
        Nodow=Nodow+1
if Nodow>7:
    c=0
else:
    DEST=Import_pubnub(DEST)
# The head computer runs the collecting listener; everyone else publishes.
computer=os.environ['computername']
import os
if computer==Head_computer :
    lw = LongFunctionInside()
    lw.long_function(0.1,no)
else:
    publish_nonhead()
| [
"noreply@github.com"
] | noreply@github.com |
d7d9397514f924e2e3c51219055782d39055529b | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/scaleform/daapi/view/lobby/lobbyview.py | ccca6333bda22faa53d118768576e781414e63cf | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,690 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/LobbyView.py
import BigWorld
import VOIP
import constants
import CommandMapping
from PlayerEvents import g_playerEvents
from gui import game_control, SystemMessages
import gui
from gui.LobbyContext import g_lobbyContext
from gui.battle_control import g_sessionProvider
from gui.Scaleform.daapi.view.meta.LobbyPageMeta import LobbyPageMeta
from gui.Scaleform.framework.entities.View import View
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
from gui.Scaleform.genConsts.PREBATTLE_ALIASES import PREBATTLE_ALIASES
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.prb_control.dispatcher import g_prbLoader
from gui.shared.ItemsCache import g_itemsCache
from gui.shared.utils.HangarSpace import g_hangarSpace
from gui.shared import EVENT_BUS_SCOPE, events, event_dispatcher as shared_events
from gui.Scaleform.framework import ViewTypes
from gui.Scaleform.Waiting import Waiting
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.shared.utils.functions import getViewName
from helpers import i18n
class LobbyView(LobbyPageMeta):
    """Top-level lobby page of the client GUI.

    Wires the lobby together: registers/unregisters event listeners on
    populate/dispose, shows a waiting spinner while heavy sub-views load,
    and surfaces IGR, elite-vehicle, screenshot and battle-result
    notifications to the player.
    """
    # Sub-view aliases that are slow enough to warrant showing the
    # 'loadPage' waiting spinner while they load (see __subViewTransferStart).
    VIEW_WAITING = (VIEW_ALIAS.LOBBY_HANGAR,
     VIEW_ALIAS.LOBBY_INVENTORY,
     VIEW_ALIAS.LOBBY_SHOP,
     VIEW_ALIAS.LOBBY_PROFILE,
     VIEW_ALIAS.LOBBY_BARRACKS,
     PREBATTLE_ALIASES.TRAINING_LIST_VIEW_PY,
     PREBATTLE_ALIASES.TRAINING_ROOM_VIEW_PY,
     VIEW_ALIAS.LOBBY_CUSTOMIZATION,
     VIEW_ALIAS.LOBBY_RESEARCH,
     VIEW_ALIAS.LOBBY_TECHTREE,
     FORTIFICATION_ALIASES.FORTIFICATIONS_VIEW_ALIAS,
     VIEW_ALIAS.BATTLE_QUEUE,
     VIEW_ALIAS.BATTLE_LOADING)

    class COMPONENTS:
        # Name of the lobby header flash component.
        HEADER = 'lobbyHeader'

    def __init__(self, ctx = None):
        super(LobbyView, self).__init__(ctx)
        # Last known in-game-rental room type; used by __onIgrTypeChanged to
        # detect a transition out of the premium IGR room.
        self.__currIgrType = constants.IGR_TYPE.NONE

    def getSubContainerType(self):
        """Return the container type used for this page's sub-views."""
        return ViewTypes.LOBBY_SUB

    def _populate(self):
        """Subscribe to lobby events and kick off post-battle bookkeeping."""
        View._populate(self)
        self.__currIgrType = gui.game_control.g_instance.igr.getRoomType()
        g_prbLoader.setEnabled(True)
        self.addListener(events.LobbySimpleEvent.SHOW_HELPLAYOUT, self.__showHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.addListener(events.LobbySimpleEvent.CLOSE_HELPLAYOUT, self.__closeHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.addListener(events.GameEvent.SCREEN_SHOT_MADE, self.__handleScreenShotMade, EVENT_BUS_SCOPE.GLOBAL)
        g_playerEvents.onVehicleBecomeElite += self.__onVehicleBecomeElite
        self.app.loaderManager.onViewLoadInit += self.__onViewLoadInit
        self.app.loaderManager.onViewLoaded += self.__onViewLoaded
        self.app.loaderManager.onViewLoadError += self.__onViewLoadError
        game_control.g_instance.igr.onIgrTypeChanged += self.__onIgrTypeChanged
        self.__showBattleResults()
        battlesCount = g_itemsCache.items.getAccountDossier().getTotalStats().getBattlesCount()
        g_lobbyContext.updateBattlesCount(battlesCount)
        self.fireEvent(events.GUICommonEvent(events.GUICommonEvent.LOBBY_VIEW_LOADED))
        # Keep the microphone muted unless the push-to-talk key is already held.
        keyCode = CommandMapping.g_instance.get('CMD_VOICECHAT_MUTE')
        if not BigWorld.isKeyDown(keyCode):
            VOIP.getVOIPManager().setMicMute(True)

    def _dispose(self):
        """Unsubscribe everything registered in _populate (reverse order)."""
        game_control.g_instance.igr.onIgrTypeChanged -= self.__onIgrTypeChanged
        self.app.loaderManager.onViewLoadError -= self.__onViewLoadError
        self.app.loaderManager.onViewLoaded -= self.__onViewLoaded
        self.app.loaderManager.onViewLoadInit -= self.__onViewLoadInit
        g_playerEvents.onVehicleBecomeElite -= self.__onVehicleBecomeElite
        self.removeListener(events.LobbySimpleEvent.SHOW_HELPLAYOUT, self.__showHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.removeListener(events.LobbySimpleEvent.CLOSE_HELPLAYOUT, self.__closeHelpLayout, EVENT_BUS_SCOPE.LOBBY)
        self.removeListener(events.GameEvent.SCREEN_SHOT_MADE, self.__handleScreenShotMade, EVENT_BUS_SCOPE.GLOBAL)
        View._dispose(self)

    def __showHelpLayout(self, _):
        # Forward the SHOW_HELPLAYOUT event to the flash side.
        self.as_showHelpLayoutS()

    def __closeHelpLayout(self, _):
        # Forward the CLOSE_HELPLAYOUT event to the flash side.
        self.as_closeHelpLayoutS()

    def __handleScreenShotMade(self, event):
        """Show a system message with the saved screenshot path, if any."""
        if 'path' not in event.ctx:
            return
        SystemMessages.pushMessage(i18n.makeString('#menu:screenshot/save') % {'path': event.ctx['path']}, SystemMessages.SM_TYPE.Information)

    def __onVehicleBecomeElite(self, vehTypeCompDescr):
        """Open the 'vehicle became elite' window for the given vehicle descriptor."""
        self.fireEvent(events.LoadViewEvent(VIEW_ALIAS.ELITE_WINDOW, getViewName(VIEW_ALIAS.ELITE_WINDOW, vehTypeCompDescr), {'vehTypeCompDescr': vehTypeCompDescr}), EVENT_BUS_SCOPE.LOBBY)

    def moveSpace(self, dx, dy, dz):
        """Relay a mouse move/scroll delta to the 3D hangar space, if loaded."""
        if g_hangarSpace.space:
            g_hangarSpace.space.handleMouseEvent(int(dx), int(dy), int(dz))

    def notifyCursorOver3dScene(self, isOver3dScene):
        """Broadcast whether the cursor is currently over the 3D scene."""
        self.fireEvent(events.LobbySimpleEvent(events.LobbySimpleEvent.NOTIFY_CURSOR_OVER_3DSCENE, ctx={'isOver3dScene': isOver3dScene}))

    def __onViewLoadInit(self, view):
        # A sub-view started loading: show the spinner for slow views.
        if view is not None and view.settings is not None:
            self.__subViewTransferStart(view.settings.alias)
        return

    def __onViewLoaded(self, view):
        # A sub-view finished loading: hide the spinner.
        if view is not None and view.settings is not None:
            self.__subViewTransferStop(view.settings.alias)
        return

    def __onViewLoadError(self, name, msg, item):
        # A sub-view failed to load: make sure the spinner is hidden.
        if item is not None and item.pyEntity is not None:
            self.__subViewTransferStop(item.pyEntity.settings.alias)
        return

    def __onIgrTypeChanged(self, roomType, xpFactor):
        """Notify the player when entering or leaving the premium IGR room."""
        icon = gui.makeHtmlString('html_templates:igr/iconSmall', 'premium')
        if roomType == constants.IGR_TYPE.PREMIUM:
            SystemMessages.pushMessage(i18n.makeString(SYSTEM_MESSAGES.IGR_CUSTOMIZATION_BEGIN, igrIcon=icon), type=SystemMessages.SM_TYPE.Information)
        elif roomType in [constants.IGR_TYPE.BASE, constants.IGR_TYPE.NONE] and self.__currIgrType == constants.IGR_TYPE.PREMIUM:
            SystemMessages.pushMessage(i18n.makeString(SYSTEM_MESSAGES.IGR_CUSTOMIZATION_END, igrIcon=icon), type=SystemMessages.SM_TYPE.Information)
        self.__currIgrType = roomType

    def __subViewTransferStart(self, alias):
        # Show the spinner only for the aliases listed in VIEW_WAITING.
        if alias in self.VIEW_WAITING:
            Waiting.show('loadPage')

    def __subViewTransferStop(self, alias):
        # BATTLE_LOADING keeps its spinner until the battle takes over.
        if alias != VIEW_ALIAS.BATTLE_LOADING and alias in self.VIEW_WAITING:
            Waiting.hide('loadPage')

    def __showBattleResults(self):
        """Pop the results window for the last finished arena, once."""
        battleCtx = g_sessionProvider.getCtx()
        if battleCtx.lastArenaUniqueID:
            shared_events.showMyBattleResults(battleCtx.lastArenaUniqueID)
            # Clear the id so re-populating the lobby does not re-show results.
            battleCtx.lastArenaUniqueID = None
        return
| [
"info@webium.sk"
] | info@webium.sk |
11dfb9beb211a5842f05475135524472e63b0052 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/compute/azure-mgmt-avs/generated_samples/workload_networks_get.py | 60db6d3b5326e38bb0efaea0f5d34f54b45f667d | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,556 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.avs import AVSClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-avs
# USAGE
python workload_networks_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print the 'default' workload network of private cloud 'cloud1'.

    Authenticates with DefaultAzureCredential (reads AZURE_CLIENT_ID,
    AZURE_TENANT_ID and AZURE_CLIENT_SECRET from the environment) and issues
    a single WorkloadNetworks_Get call against resource group 'group1'.
    """
    avs_client = AVSClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )

    workload_network = avs_client.workload_networks.get(
        resource_group_name="group1",
        private_cloud_name="cloud1",
        workload_network_name="default",
    )
    print(workload_network)


# x-ms-original-file: specification/vmware/resource-manager/Microsoft.AVS/stable/2022-05-01/examples/WorkloadNetworks_Get.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
aed27d9f42e5ddf4ac6f352e7d7d2b88f8f3a672 | 4eb3ff3e56043bc20162a59039af37533432feb1 | /项目所用模块.py | 1205da794e0b83ed65e541fe40c0fafae5ead37b | [] | no_license | luofang0212/flask_test | 99787a43ba117b0e5684f811ad9f83442c6e95cb | e9ea8644f7bbae94c0b689b79235913f73da7124 | refs/heads/master | 2023-07-26T00:49:53.681815 | 2021-09-06T16:15:11 | 2021-09-06T16:15:11 | 403,010,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 本项目所用到的模块
''''''
'''
python 自带包
'''
'''
python 第三方库
'''
from flask import Flask
from flask import render_template
from flask import request
import jieba # 分词
from matplotlib import pyplot as plt #绘图,数据可视化
from PIL import Image #图片处理
import numpy as np #矩阵运算
import pymysql # mysql 数据库驱动
from wordcloud import WordCloud #词云
| [
"warm_homel@163.com"
] | warm_homel@163.com |
e562378ac18aecb5fdbdd782a93403acf269a01f | 0ea2b4cc229e92b0af2e1d9ac3b6f9e158ad7083 | /lp/LP_general_checkers.py | e86d6f35fecd43efdcf386b0fa831edba1d99f54 | [] | no_license | anon-neurips-submission/pearl | 80b2f67bea453552516586b8238c40b342ee5189 | 5866e3e0ffe1a4848bb5032f9cf137681a072d32 | refs/heads/master | 2023-05-11T20:01:21.752193 | 2021-06-04T00:01:02 | 2021-06-04T00:01:02 | 373,669,823 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 48,581 | py | import cplex
import numpy as np
import random
import torch
import torch.nn.functional as F
from copy import deepcopy
from scipy.spatial.distance import jensenshannon
DEBUG = True  # module-wide verbosity switch for the LP-builder routines below
# combined formulation: builds constraints for both the value and policy heads
def get_confusing_input_pool(net, orig_env, config_file, debug=True, value=True, policy=True, board_size=8,
value_ceil=0, mu=0.5, device='cuda:0', writer=None,lp_states_in_pool=5): #TODO change to 5
"""
"""
DEVICE = device
net = deepcopy(net)
layers = [(name, param.size()) for name, param in net.named_parameters()]
if debug:
print(layers)
print(len(layers))
print('env_config and net have been imported')
print('break')
#################################################################################### build variable names
X_dictionary_names = {}
for layer in layers:
if debug:
print(layer)
l = list(layer[1])
if len(l) == 1:
X_dictionary_names[("X_{0}".format(layer[0]))] = tuple(layer[1])
else:
X_dictionary_names[("X_{0}".format(layer[0]))] = tuple(layer[1][1:])
#
# add X_input with shape input_shape at the beginning of X_dictionary_names if the net does not have input Layer !!
names_string = list(X_dictionary_names.keys())
if debug:
print('names_string = ', names_string)
print('X_dictionary_names = ', X_dictionary_names)
weight_dims = list(X_dictionary_names.values())
# get params from pytorch
net_params = list(net.parameters())
#shape = net.get_output_shapes(policy=True, value=True, num_actions=256)
#TODO: did some hardcoding here so that we could run on all GPUs on machines
shape = [[8, 8, 4], [7, 7, 32], [7, 7, 32], [6, 6, 64],
[6, 6, 64], 2304, 256, 128, 128, 128, 128, 128,
256, 128, 128, 128, 1]
#TODO: uncomment 2 lines below for more robust
#shape = net.get_output_shapes(policy=True, value=True)
#shape[-5] = 256
print(shape)
# 6 by 6 mazenet should match these shapes.
# shape = ((6, 6, 5), (5, 5, 32), (5, 5, 32), (4, 4, 64), (4, 4, 64),
# (256), (256), (128), (128), (128), (128), (128), (1), (1))
if debug:
print(f'shape = {shape}')
#print(f'weight_dims = {weight_dims}')
################################ BUILD MANUALLY VARIABLE NAMES:
# shape = ((6, 6, 5), (5, 5, 32), (5, 5, 32), (4, 4, 64), (4, 4, 64),
# (1024), (256), (128), (128), (128), (128), (128), (6))
# need the following for both heads :
# shape = ((6, 6, 5), (5, 5, 32), (5, 5, 32), (4, 4, 64), (4, 4, 64),
# (1024), (256), (128), (128), (128), (128), (128), (6),
# (128), (128), (1), (1))
# input into Conv1
X_0 = {(i, j, k): 'X_0(i{0},j{1},k{2})'.format(i, j, k) for (i, j, k) in
build_indicies_dictionary([shape[0][0], shape[0][1], shape[0][2]])}
# conv1 --> relu
X_1 = {(i, j, k): 'X_1(i{0},j{1},k{2})'.format(i, j, k) for (i, j, k) in
build_indicies_dictionary([shape[1][0], shape[1][1], shape[1][2]])}
# relu --> conv2
X_2 = {(i, j, k): 'X_2(i{0},j{1},k{2})'.format(i, j, k) for (i, j, k) in
build_indicies_dictionary([shape[2][0], shape[2][1], shape[2][2]])}
# conv2 --> relu
X_3 = {(i, j, k): 'X_3(i{0},j{1},k{2})'.format(i, j, k) for (i, j, k) in
build_indicies_dictionary([shape[3][0], shape[3][1], shape[3][2]])}
# relu --> Flatten
X_4 = {(i, j, k): 'X_4(i{0},j{1},k{2})'.format(i, j, k) for (i, j, k) in
build_indicies_dictionary([shape[4][0], shape[4][1], shape[4][2]])}
# flatten --> dense
X_5 = {(i): 'X_5(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[5]])}
# dense --> relu
X_6 = {(i): 'X_6(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[6]])}
# relu --> dense
X_7 = {(i): 'X_7(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[7]])}
# dense --> relu
X_8 = {(i): 'X_8(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[8]])}
# relu --> policy x_10
# --> value x_13
X_9 = {(i): 'X_9(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[9]])}
if policy:
# policy head
# dense --> RELU
X_10 = {(i): 'X_10(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[10]])}
X_11 = {(i): 'X_11(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[11]])}
X_12 = {(i): 'X_12(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[12]])}
if value:
# VALUE HEAD
# (from X_9) dense --> relu
X_13 = {(i): 'X_13(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[13]])}
# relu --> Dense
X_14 = {(i): 'X_14(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[14]])}
# dense --> VALUE
X_15 = {(i): 'X_15(i{0})'.format(i) for (i) in build_indicies_dictionary([shape[16]])}
#TODO HARD CODE SHAPE
# BINARY
#################################################################### start cplex:
problem = cplex.Cplex()
############################################## define whether maximize or minimize
problem.objective.set_sense(problem.objective.sense.minimize)
############################################### add variables with bounds (X_input and output of each layer):
################################################################
# this defines BOUNDS and add variables to the cplex problem
problem.variables.add(names=list(X_0.values()), lb=[0.0] * len(X_0), ub=[1.0] * len(X_0))
problem.variables.set_types([(i, problem.variables.type.binary) for i in X_0.values()])
# ub from 1 to 2
problem.variables.add(names=list(X_1.values()), lb=[-2.0] * len(X_1), ub=[2.0] * len(X_1))
problem.variables.add(names=list(X_2.values()), lb=[0.0] * len(X_2), ub=[2.0] * len(X_2))
problem.variables.add(names=list(X_3.values()), lb=[-2.0] * len(X_3), ub=[2.0] * len(X_3))
problem.variables.add(names=list(X_4.values()), lb=[0.0] * len(X_4), ub=[2.0] * len(X_4))
problem.variables.add(names=list(X_5.values()), lb=[0.0] * len(X_5), ub=[2.0] * len(X_5))
problem.variables.add(names=list(X_6.values()), lb=[-2.0] * len(X_6), ub=[2.0] * len(X_6))
problem.variables.add(names=list(X_7.values()), lb=[0.0] * len(X_7), ub=[2.0] * len(X_7))
problem.variables.add(names=list(X_8.values()), lb=[-2.0] * len(X_8), ub=[2.0] * len(X_8))
problem.variables.add(names=list(X_9.values()), lb=[0.0] * len(X_9), ub=[2.0] * len(X_9))
if policy:
problem.variables.add(names=list(X_10.values()), lb=[-2.0] * len(X_10), ub=[2.0] * len(X_10))
problem.variables.add(names=list(X_11.values()), lb=[0.0] * len(X_11), ub=[2.0] * len(X_11))
problem.variables.add(names=list(X_12.values()), lb=[-10.0] * len(X_12), ub=[10.0] * len(X_12))
if value:
problem.variables.add(names=list(X_13.values()), lb=[-5.0] * len(X_13), ub=[5.0] * len(X_13))
problem.variables.add(names=list(X_14.values()), lb=[0.0] * len(X_14), ub=[5.0] * len(X_14))
problem.variables.add(names=list(X_15.values()), lb=[-10.0] * len(X_15), ub=[10.0] * len(X_15))
#problem.variables.add(names=list(X_16.values()), lb=[0.0] * len(X_16), ub=[10.0] * len(X_16))
####################################################################### OBJECTIVES
### all relus
#
#
problem.objective.set_linear(list(zip(list(X_2.values()), [1.0] * len(X_2))))
problem.objective.set_linear(list(zip(list(X_4.values()), [1.0] * len(X_4))))
problem.objective.set_linear(list(zip(list(X_7.values()), [1.0] * len(X_7))))
problem.objective.set_linear(list(zip(list(X_9.values()), [1.0] * len(X_9))))
if policy:
problem.objective.set_linear(list(zip(list(X_11.values()), [1.0] * len(X_11))))
if value:
problem.objective.set_linear(list(zip(list(X_14.values()), [1.0] * len(X_14))))
# minimize linear layer
problem.objective.set_linear(list(zip(list(X_15.values()), [1.0] * len(X_15))))
##################################################### CONSTRAINTS:
#############################################################################################################################
############################################# conv ###################################################
#############################################################################################################################
##################################################### CONSTRAINTS:
#############################################################################################################################
############################################# conv ###################################################
#############################################################################################################################
X_out = X_1 # this is the output of the (conv) layer
X_in = X_0 # this is the input to the conv layer
lay = 0
# size(input) /= size(output) in the case of a conv layer
shape_out = shape[1]
shape_in = shape[0]
# get weights and biases
#
# below needs to be pulled out from the pytorch model (torch here)
# W_conv_arr = model.layers[lay].get_weights()[0]
W_conv_arr = net_params[0]
# W_conv_arr = np.ones(shape=(1,1,5,32))
b_conv_arr = net_params[1]
# get conv filter parameters:
shape_W = W_conv_arr.shape
# get conv filters parameters:
strides = 1
pool_size_W = W_conv_arr.shape[2]
pool_size_H = W_conv_arr.shape[3]
pool_size_D = W_conv_arr.shape[1]
if True:
print(f"{pool_size_W},{pool_size_H},{pool_size_D}")
number_of_filters = W_conv_arr.shape[0]
# for every filter in the conv layer
for nnn in range((number_of_filters)):
# get the nth filter
# we want this to be shape 2, 2, 5
W_nth = W_conv_arr[nnn, :, :, :]
# W_nth = W_conv_arr[:, :, :,nnn]
# print(W_nth)
# print('n = ', nnn, 'W_nth shape = ', W_nth.shape)
W_nth = W_nth.reshape(pool_size_W, pool_size_H, pool_size_D)
# print('n = ', nnn, 'W_nth shape = ', W_nth.shape)
# print("X_in is ", X_in)
# print("X_out is ", X_out)
# for every i,j \in I_out X J_out
for i in range((shape_out[0])):
for j in range((shape_out[1])):
# get the portion of input that will be multiplied
input_i = [(i * (strides), (i * (strides)) + pool_size_W - 1)]
input_j = [(j * (strides), (j * (strides)) + pool_size_H - 1)]
# do the output
lin_expr_vars_lhs = [X_out[(i, j, nnn)]]
lin_expr_vals_lhs = [1.0]
# print("INPUT I ", input_i)
# print("INPUT J ", input_j)
# print("^^^^^^^^^^^^^^^^^^^^^")
# b_conv_arr[nnn]
# logger
# print('output indicies: ',i,j,nnn,'filter number = ',nnn,'sum of weights = ',np.sum(W_nth),' bias: ',b_conv_arr[nnn],' input indicies: ', range(input_i[0][0], input_i[0][1] + 1), range(input_j[0][0], input_j[0][1] + 1))
# loop to do the summation
for iii in range(input_i[0][0], input_i[0][1] + 1):
for jjj in range(input_j[0][0], input_j[0][1] + 1):
for kkk in range(pool_size_D):
# print((iii, jjj, kkk))
if True is False:
if (iii, jjj, kkk) in X_in:
lin_expr_vars_lhs.append(X_in[(iii, jjj, kkk)])
else:
continue
lin_expr_vars_lhs.append(X_in[(iii, jjj, kkk)])
a = round(W_nth[iii - input_i[0][0], jjj - input_j[0][0], kkk].item(), 4)
lin_expr_vals_lhs.append(-a)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(b_conv_arr[nnn].item(), 4)],
names=["(conv_1)_"])
#############################################################################################################################
"""CONSTRAINTS (conv_2) """
# this is for X_3 = conv(X_2)
# we need X_in, X_out, shape_in, shape_out, weights, and biases
X_out = X_3 # this is the output of the (conv) layer
X_in = X_2 # this is the input to the conv layer
lay = 0
# size(input) /= size(output) in the case of a conv layer
shape_out = shape[3]
shape_in = shape[2]
# get weights and biases
#
# below needs to be pulled out from the pytorch model (torch here)
# below needs to be pulled out from the pytorch model (torch here)
# W_conv_arr = model.layers[lay].get_weights()[0]
W_conv_arr = net_params[2]
b_conv_arr = net_params[3]
# W_conv_arr = np.ones(shape=(1,1,32,64))
# b_conv_arr = np.ones(shape=(64,1))
# get conv filter parameters:
shape_W = W_conv_arr.shape
# print(shape_W)
# print(f'X_in is {X_in}')
# get conv filters parameters:
# strides = model.layers[lay].strides[0]
strides = 1
# CHECK THESE POOL SIZES
# WHAT IS THE RELATIONSHIP BETWEEN POOL SIZE AND KERNEL/PADDING
pool_size_W = W_conv_arr.shape[2]
pool_size_H = W_conv_arr.shape[3]
pool_size_D = W_conv_arr.shape[1]
if True:
print(f"{pool_size_W},{pool_size_H},{pool_size_D}")
number_of_filters = W_conv_arr.shape[0]
# for every filter in the conv layer
for nnn in range((number_of_filters)):
# get the nth filter
W_nth = W_conv_arr[nnn, :, :, :]
# print(W_nth.shape)
# print('n = ', nnn, 'W_nth shape = ', W_nth.shape)
W_nth = W_nth.reshape(pool_size_W, pool_size_H, pool_size_D)
# for every i,j \in I_out X J_out
for i in range((shape_out[0])):
for j in range((shape_out[1])):
# get the portion of input that will be multiplied
input_i = [(i * (strides), (i * (strides)) + pool_size_W - 1)]
input_j = [(j * (strides), (j * (strides)) + pool_size_H - 1)]
# print("INPUT I ", input_i)
# print("INPUT J ", input_j)
# print("^^^^^^^^^^^^^^^^^^^^^")
# do the output
lin_expr_vars_lhs = [X_out[(i, j, nnn)]]
lin_expr_vals_lhs = [1.0]
# b_conv_arr[nnn]
# logger
# print('output indicies: ',i,j,nnn,'filter number = ',nnn,'sum of weights = ',np.sum(W_nth),' bias: ',b_conv_arr[nnn],' input indicies: ', range(input_i[0][0], input_i[0][1] + 1), range(input_j[0][0], input_j[0][1] + 1))
# loop to do the summation
for iii in range(input_i[0][0], input_i[0][1] + 1):
for jjj in range(input_j[0][0], input_j[0][1] + 1):
for kkk in range(pool_size_D):
lin_expr_vars_lhs.append(X_in[(iii, jjj, kkk)])
a = round(W_nth[iii - input_i[0][0], jjj - input_j[0][0], kkk].item(), 4)
lin_expr_vals_lhs.append(-a)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(b_conv_arr[nnn].item(), 4)],
names=["(conv_1)_"])
#############################################################################################################################
#############################################RELU ###################################################
#############################################################################################################################
"""CONSTRAINTS (ReLU_1_1) """
# X_2 >= X_1
X_out = X_2 # this is the output of the Relu
X_in = X_1 # this is the input to the Relu
shape_ = shape[1]
for i in range(shape_[0]):
for j in range(shape_[1]):
for k in range(shape_[2]):
lin_expr_vars_lhs = [X_out[(i, j, k)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i, j, k)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs,
val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_1_1)_"])
"""CONSTRAINTS (ReLU_1_2) """
# X_2 >= 0
X_out = X_2 # this is the output of the Relu
X_in = X_1 # this is the input to the Relu
shape_ = shape[1]
for i in range(shape_[0]):
for j in range(shape_[1]):
for k in range(shape_[2]):
lin_expr_vars_lhs = [X_out[(i, j, k)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
# lin_expr_vars_rhs = [X_in[(i, j, k)]]
# lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_1_2)_"])
#############################################################################################################################
"""CONSTRAINTS (ReLU_2_1) """
# X_2 >= X_1
X_out = X_4 # this is the output of the Relu
X_in = X_3 # this is the input to the Relu
shape_ = shape[4]
for i in range(shape_[0]):
for j in range(shape_[1]):
for k in range(shape_[2]):
lin_expr_vars_lhs = [X_out[(i, j, k)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i, j, k)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs,
val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_2_1)_"])
"""CONSTRAINTS (ReLU_2_2) """
# X_2 >= 0
X_out = X_4 # this is the output of the Relu
# X_in = X_1 # this is the input to the Relu
shape_ = shape[4]
for i in range(shape_[0]):
for j in range(shape_[1]):
for k in range(shape_[2]):
lin_expr_vars_lhs = [X_out[(i, j, k)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_2_2)_"])
#############################################################################################################################
"""CONSTRAINTS (ReLU_3_1) """
# X_2 >= X_1
X_out = X_7 # this is the output of the Relu
X_in = X_6 # this is the input to the Relu
shape_ = shape[7]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[
cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs, val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_3_1)_"])
"""CONSTRAINTS (ReLU_3_2) """
# X_2 >= 0
X_out = X_7 # this is the output of the Relu
# X_in = X_1 # this is the input to the Relu
shape_ = shape[7]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
# lin_expr_vars_rhs = [X_in[(i, j, k)]]
# lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_3_2)_"])
#############################################################################################################################
"""CONSTRAINTS (ReLU_4_1) """
# X_2 >= X_1
X_out = X_9 # this is the output of the Relu
X_in = X_8 # this is the input to the Relu
shape_ = shape[9]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[
cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs, val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_4_1)_"])
"""CONSTRAINTS (ReLU_4_2) """
# X_2 >= 0
X_out = X_9 # this is the output of the Relu
# X_in = X_1 # this is the input to the Relu
shape_ = shape[9]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_4_2)_"])
#############################################################################################################################
if policy:
# CONSTRAINTS (ReLU_5_1)
# X_2 >= X_1
X_out = X_11 # this is the output of the Relu
X_in = X_10 # this is the input to the Relu
shape_ = shape[11]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[
cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs, val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_5_1)_"])
# CONSTRAINTS (ReLU_5_2)
# X_2 >= 0
X_out = X_11 # this is the output of the Relu
# X_in = X_1 # this is the input to the Relu
shape_ = shape[11]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_5_2)_"])
#############################################################################################################################
if value:
# CONSTRAINTS (ReLU_6_1)
# X_2 >= X_1
X_out = X_14 # this is the output of the Relu
X_in = X_13 # this is the input to the Relu
shape_ = shape[13]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
lin_expr_vars_rhs = [X_in[(i)]]
lin_expr_vals_rhs = [-1.0] * len(lin_expr_vars_rhs)
problem.linear_constraints.add(
lin_expr=[
cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs, val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_6_1)_"])
# CONSTRAINTS (ReLU_6_2)
# X_2 >= 0
X_out = X_14 # this is the output of the Relu
# X_in = X_1 # this is the input to the Relu
shape_ = shape[13]
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0] * len(lin_expr_vars_lhs)
problem.linear_constraints.add(
lin_expr=[
cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['G'],
rhs=[0.0],
names=["(ReLU_6_2)_"])
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
#################################################### dense here ###########################################################
#############################################################################################################################
"""CONSTRAINTS (den_1)"""
# we need X_in, X_out, shape_in, shape_out, weights, and biases
W_dense_arr = net_params[4]
# (torch here)
# W_dense_arr = np.ones(shape=(6400,256))
# W_dense_arr.reshape((256, 1024))
b_dense_arr = net_params[5]
# (torch here)
# b_dense_arr = np.ones(shape=(256))
# make the biases an array
X_out = X_6 # this is the output of the FC layer
X_in = X_5
shape_ = shape[6] # shape of the output of the FC layer
shape_in = shape[5] # shape of the input of the FC layer
# looping over i (length of output)
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0]
WW = W_dense_arr[i, :]
# this loop is for the dot product (shape of input)
for j in range(shape_in):
lin_expr_vars_lhs.append(X_in[(j)])
a = round(-WW[j].item(), 4)
lin_expr_vals_lhs.append(a)
bb = b_dense_arr[i]
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(bb.item(), 4)],
names=["(den_1)_"])
#############################################################################################################################
"""CONSTRAINTS (den_2)"""
# we need X_in, X_out, shape_in, shape_out, weights, and biases
W_dense_arr = net_params[6]
b_dense_arr = net_params[7] # make the biases an array
# W_dense_arr = net.layers[4].get_weights()[0]
# (torch here)
# W_dense_arr = np.ones(shape=(256,128))
# b_dense_arr = net.layers[4].get_weights()[1]
# (torch here)
# b_dense_arr = np.ones(shape=(128))
X_out = X_8 # this is the output of the FC layer
X_in = X_7
shape_ = shape[8] # shape of the output of the FC layer
shape_in = shape[7] # shape of the input of the FC layer
# looping over i (length of output)
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0]
WW = W_dense_arr[i, :]
# this loop is for the dot product (shape of input)
for j in range(shape_in):
lin_expr_vars_lhs.append(X_in[(j)])
a = round(-WW[j].item(), 4)
lin_expr_vals_lhs.append(a)
bb = b_dense_arr[i]
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(bb.item(), 4)],
names=["(den_2)_"])
#############################################################################################################################
if policy:
"""CONSTRAINTS (den_3)"""
# we need X_in, X_out, shape_in, shape_out, weights, and biases
W_dense_arr = net_params[8]
b_dense_arr = net_params[9]
# (torch here)
# W_dense_arr = np.ones(shape=(128,128))
# b_dense_arr = net.layers[4].get_weights()[1]
# (torch here)
# b_dense_arr = np.ones(shape=(128))
X_out = X_10 # this is the output of the FC layer into POLICY HEAD
X_in = X_9
shape_ = shape[10] # shape of the output of the FC layer
shape_in = shape[9] # shape of the input of the FC layer
# looping over i (length of output)
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0]
WW = W_dense_arr[i, :]
# this loop is for the dot product (shape of input)
for j in range(shape_in):
lin_expr_vars_lhs.append(X_in[(j)])
a = round(-WW[j].item(), 4)
lin_expr_vals_lhs.append(a)
bb = b_dense_arr[i]
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(bb.item(), 4)],
names=["(den_3)_"])
if value:
"""CONSTRAINTS (den_3)"""
# we need X_in, X_out, shape_in, shape_out, weights, and biases
W_dense_arr = net_params[12]
b_dense_arr = net_params[13]
# (torch here)
# W_dense_arr = np.ones(shape=(128,128))
# b_dense_arr = net.layers[4].get_weights()[1]
# (torch here)
# b_dense_arr = np.ones(shape=(128))
X_out = X_13 # this is the output of the FC layer into VALUE HEAD
X_in = X_9
shape_ = shape[13] # shape of the output of the FC layer
shape_in = shape[9] # shape of the input of the FC layer
# looping over i (length of output)
for i in range(shape_):
lin_expr_vars_lhs = [X_out[(i)]]
lin_expr_vals_lhs = [1.0]
WW = W_dense_arr[i, :]
# this loop is for the dot product (shape of input)
for j in range(shape_in):
lin_expr_vars_lhs.append(X_in[(j)])
a = round(-WW[j].item(), 4)
lin_expr_vals_lhs.append(a)
bb = b_dense_arr[i]
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
senses=['E'],
rhs=[round(bb.item(), 4)],
names=["(den_3)_"])
#############################################################################################################################
#############################################################################################################################
#############################################################################################################################
##################################################### flatten ##################################################
#############################################################################################################################
if policy and False:
    # NOTE(review): this whole branch is deliberately dead code
    # ("and False"); kept for reference only — confirm before re-enabling.
    # CONSTRAINTS (den_4)
    # LAST IN POLICY HEAD
    # we need X_in, X_out, shape_in, shape_out, weights, and biases
    # (torch here)
    # W_dense_arr = net.layers[4].get_weights()[0]
    # b_dense_arr = net.layers[4].get_weights()[1] # make the biases an array
    W_dense_arr = net_params[10]
    # (torch here)
    # W_dense_arr = np.ones(shape=(128,128))
    b_dense_arr = net_params[11]
    # (torch here)
    # b_dense_arr = np.ones(shape=(128))
    X_out = X_12 # this is the output of the FC layer
    X_in = X_11
    shape_ = shape[12] # shape of the output of the FC layer
    shape_in = shape[11] # shape of the input of the FC layer
    # looping over i (length of output)
    for i in range(shape_):
        lin_expr_vars_lhs = [X_out[(i)]]
        lin_expr_vals_lhs = [1.0]
        WW = W_dense_arr[i, :]
        # this loop is for the dot product (shape of input)
        for j in range(shape_in):
            lin_expr_vars_lhs.append(X_in[(j)])
            a = round(-WW[j].item(), 4)
            lin_expr_vals_lhs.append(a)
        bb = b_dense_arr[i]
        problem.linear_constraints.add(
            lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
            senses=['E'],
            rhs=[round(bb.item(), 4)],
            names=["(den_4)_"])
"""CONSTRAINTS (Fltt)"""
# X_5 = flatten(X_4)
# Ties each 3-D conv-output variable X_4[(i,j,k)] to one flat variable
# X_5[(l)] via X_4[(i,j,k)] - X_5[(l)] = 0, with l advancing in the same
# i-j-k nesting order the loops use (row-major over the 3-D shape).
X_out = X_5 # this is the output of the Flatten
X_in = X_4 # this is the input to the Flatten
shape_ = shape[5] # shape of the output of the flatten layer
shape_in = shape[4] # shape of the input of the flatten layer
# ini
l = 0
for i in range(shape_in[0]):
    for j in range(shape_in[1]):
        for k in range(shape_in[2]):
            lin_expr_vars_lhs = [X_in[(i, j, k)]]
            lin_expr_vals_lhs = [1.0]
            lin_expr_vars_rhs = [X_out[(l)]]
            lin_expr_vals_rhs = [-1.0]
            l = l + 1
            problem.linear_constraints.add(
                lin_expr=[cplex.SparsePair(lin_expr_vars_lhs + lin_expr_vars_rhs,
                                           val=lin_expr_vals_lhs + lin_expr_vals_rhs)],
                senses=['E'],
                rhs=[0.0],
                names=["(Fltt)_"])
# constraints v and vi
######################################################################################################################
if policy and False:
    # NOTE(review): dead code ("and False") — final policy dense layer plus
    # the "confusion" constraints (v)/(vi), kept for reference only.
    """CONSTRAINTS (FINAL POLICY DENSE)"""
    # we need X_in, X_out, shape_in, shape_out, weights, and biases
    W_dense_arr = net_params[10] # make the weights an array
    b_dense_arr = net_params[11]
    X_out = X_12
    X_in = X_11
    shape_ = shape[12] # shape of the output of the FC layer
    shape_in = shape[11] # shape of the input of the FC layer
    # looping over i (length of output)
    for i in range(shape_):
        lin_expr_vars_lhs = [X_out[(i)]]
        lin_expr_vals_lhs = [1.0]
        WW = W_dense_arr[i, :]
        # this loop is for the dot product (shape of input)
        for j in range(shape_in):
            lin_expr_vars_lhs.append(X_in[(j)])
            a = round(-WW[j].item(), 4)
            lin_expr_vals_lhs.append(a)
        bb = b_dense_arr[i]
        problem.linear_constraints.add(
            lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
            senses=['E'],
            rhs=[round(bb.item(), 4)],
            names=[f"(den_final_value{i}"])
    # CONSTRAINTS (v)
    # Lower-bound each logit's margin over the mean of the other logits:
    #   x_i - mean(x_j, j != i) >= -mu
    # NOTE: number_of_classes is hard-coded to 256 here; `mu` comes from
    # the enclosing scope (not defined in this view).
    # we need X_in, X_out, shape_in, shape_out, weights, and biases, and number of classes
    number_of_classes = 256
    lin_expr_vars_lhs = []
    lin_expr_vals_lhs = []
    X_temp_1 = X_12
    # ADDED FOR v
    # mu = -0.5
    for i in range(number_of_classes):
        lin_expr_vars_lhs = [X_temp_1[(i)]]
        # TODO: hardcode 5/6
        lin_expr_vals_lhs = [(number_of_classes - 1) / number_of_classes]
        # temp_set = np.setdiff1d([1, 2, 3, 4, 5, 6], i)
        for j in range(number_of_classes):
            if j == i:
                continue
            lin_expr_vars_lhs.append(X_temp_1[(j)])
            aa = 1 / number_of_classes
            a = round(aa, 4)
            lin_expr_vals_lhs.append(a)
        problem.linear_constraints.add(
            lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
            senses=['G'],
            rhs=[-mu],
            names=[f"(v)_{i}"])
    #print('CONSTRAINT v - last dense with softmax - is added')
    # CONSTRAINTS (vi)
    # Matching upper bound: x_i + mean(x_j, j != i) <= mu, so together with
    # (v) the logits are squeezed into a band of width 2*mu around the mean.
    # we need X_in, X_out, shape_in, shape_out, weights, and biases, and number of classes
    # torch here
    X_temp_1 = X_12
    # ADDED FOR vi
    # radius
    # DEFINED IN FUNCTION PARAMS
    # mu = 0.5
    for i in range(number_of_classes):
        lin_expr_vars_lhs = [X_temp_1[(i)]]
        lin_expr_vals_lhs = [1.0]
        for j in range(number_of_classes):
            if i == j:
                continue
            lin_expr_vars_lhs.append(X_temp_1[(j)])
            aa = 1 / number_of_classes
            a = round(aa, 4)
            lin_expr_vals_lhs.append(a)
        problem.linear_constraints.add(
            lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, val=lin_expr_vals_lhs)],
            senses=['L'],
            rhs=[mu],
            names=[f"(vi)_{i}"])
    #print('CONSTRAINT vi - last dense with softmax - is added')
print('break')
#############################################################################################################################
#############################################################################################################################
##################################################### constraint BINARY !!!! ##################################################
#############################################################################################################################
#############################################################################################################################
"""CONSTRAINTS (Constraint_ch5 constant zeros"""
"""CONSTRAINT BINARY (men + kings) DOES NOT EXCEED 12 for player0"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
lin_expr_vars_lhs = list()
lin_expr_vals_lhs = list()
for i in range(shape_out[0]):
for j in range(shape_out[1]):
lin_expr_vars_lhs.append(X_in[(i, j, 0)])
lin_expr_vals_lhs.append(1.0)
lin_expr_vars_lhs.append(X_in[(i, j, 1)])
lin_expr_vals_lhs.append(1.0)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['L'],
rhs=[12.0],
names=[f"(Constraint_binary_menAndKings0_{i}_{j}"])
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['G'],
rhs=[1.0],
names=[f"(Constraint_binary_menAndKings_atleastOne_{i}_{j}"])
"""CONSTRAINT BINARY (men + kings) DOES NOT EXCEED 12 for player1"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
lin_expr_vars_lhs = list()
lin_expr_vals_lhs = list()
for i in range(shape_out[0]):
for j in range(shape_out[1]):
lin_expr_vars_lhs.append(X_in[(i, j, 2)])
lin_expr_vals_lhs.append(1.0)
lin_expr_vars_lhs.append(X_in[(i, j, 3)])
lin_expr_vals_lhs.append(1.0)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['L'],
rhs=[12.0],
names=[f"(Constraint_binary_menAndKings1_{i}_{j}"])
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['G'],
rhs=[1.0],
names=[f"(Constraint_binary_menAndKings1_atleastOne_{i}_{j}"])
##########################################################################
"""CONSTRAINT BINARY WHITE SPACES"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
lin_expr_vars_lhs = list()
lin_expr_vals_lhs = list()
for i in range(shape_out[0]):
for j in range(shape_out[1]):
# modified empty space in top left corner; nonempty in bottom left corner
if (i % 2 == 0 and j % 2 == 0) or (i % 2 == 1 and j % 2 == 1):
lin_expr_vars_lhs.append(X_in[(i, j, 0)])
lin_expr_vals_lhs.append(1.0)
lin_expr_vars_lhs.append(X_in[(i, j, 1)])
lin_expr_vals_lhs.append(1.0)
lin_expr_vars_lhs.append(X_in[(i, j, 2)])
lin_expr_vals_lhs.append(1.0)
lin_expr_vars_lhs.append(X_in[(i, j, 3)])
lin_expr_vals_lhs.append(1.0)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['E'],
rhs=[0.0],
names=[f"(Constraint_binary_whiteSpaces"])
"""CONSTRAINT BINARY NO OVERLAP"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
for i in range(shape_out[0]):
for j in range(shape_out[1]):
lin_expr_vars_lhs = list()
lin_expr_vals_lhs = list()
for k in range(shape_out[2]):
lin_expr_vars_lhs.append(X_in[(i, j, k)])
lin_expr_vals_lhs.append(1.0)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['L'],
rhs=[1.0],
names=[f"(Constraint_binary_NoOverlap"])
"""CONSTRAINT BINARY NO men in back row"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
lin_expr_vars_lhs = []
lin_expr_vals_lhs = []
for j in range(8):
lin_expr_vars_lhs.append(X_in[(7, j, 0)])
lin_expr_vals_lhs.append(1)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['E'],
rhs=[0.0],
names=[f"(Constraint_binary_backrow0"])
"""CONSTRAINT BINARY NO men in back row2"""
# All ones to help neural network find board edges in padded convolutions
X_in = X_0
# 8, 8, 4
shape_out = shape[0]
lin_expr_vars_lhs = []
lin_expr_vals_lhs = []
for j in range(8):
lin_expr_vars_lhs.append(X_in[(0, j, 2)])
lin_expr_vals_lhs.append(1)
problem.linear_constraints.add(
lin_expr=[cplex.SparsePair(lin_expr_vars_lhs, lin_expr_vals_lhs)],
senses=['E'],
rhs=[0.0],
names=[f"(Constraint_binary_backrow1"])
#### try to print either i,j,k mode or only certaint contraints, bounds, or only objectives
# problem.write( filename='MNIST_digits_.lp')
'''### this is only used for MIP (Mixed Integer Programming)
problem.parameters.mip.tolerances.integrality.set(1e-4)
# problem.parameters.mip.tolerances.mipgap.set(0.01)
# problem.parameters.mip.tolerances.absmipgap.set(0.01)
problem.parameters.mip.tolerances.mipgap.set(1e-4)
problem.parameters.mip.tolerances.absmipgap.set(1e-4)'''
# Configure the MIP solve: loose 1e-4 gaps/integrality tolerance, bounded
# tree memory, and a solution pool capped at lp_states_in_pool entries
# (lp_states_in_pool comes from the enclosing scope).
problem.parameters.mip.pool.intensity.set(1)
problem.parameters.mip.tolerances.mipgap.set(1e-4)
problem.parameters.mip.tolerances.absmipgap.set(1e-4)
problem.parameters.mip.tolerances.integrality.set(1e-4)
problem.parameters.mip.limits.treememory.set(500)
# should be 5 by default
problem.parameters.mip.limits.populate.set(lp_states_in_pool)
postfix = "combined"
problem.write(filename=f'constraint_check_2{postfix}.lp')
# Re-seed CPLEX randomly each call so populate() can find different pools.
random.seed()
problem.parameters.randomseed.set(random.randint(0, 999999))
# populate_solution_pool() collects multiple feasible solutions instead of
# a single optimum (unlike solve()).
problem.populate_solution_pool()
#problem.solve()
solutionstatus = problem.solution.status[problem.solution.get_status()]
print('LP STATUS: ', solutionstatus)
print("Solution value = ", problem.solution.get_objective_value())
# initialize numpy array of zeros to which we map our confusing output dictionary
# NOTE(review): 20 channels here, but keys only index channel 0..3
# (shape[0] is 8x8x4) and the pool loop below uses shape[0][2] — confirm
# whether 20 is intentional.
confusing_output = np.zeros(shape=(20, board_size, board_size))
# pulling up the generated input image from the LP
temp = {k: problem.solution.get_values(id) for (k, id) in X_0.items()}
print("PRINTING X_0")
# manual reshaping
# X_0 keys are (row, col, channel); the array is filled channel-first
# (channel, row, col), i.e. HWC -> CHW.
for (key, value) in temp.items():
    confusing_output[key[2], key[0], key[1]] = value
    if key[2] == 1:
        pass
        # print(f'key: {key} ... value: {value}')
print(confusing_output)
num_sols_in_pool = problem.solution.pool.get_num()
confusing_input_tensors = []
print(f'NUM SOLUTIONS IN THE SOLUTIONS POOL: {num_sols_in_pool}')
# Convert every pooled solution into a (1, C, H, W) float tensor on DEVICE.
for idx in range(num_sols_in_pool):
    #print(f'SOLUTION{idx}')
    confusing_output = np.zeros(shape=(shape[0][2], shape[0][0], shape[0][1]))
    # pulling up the generated input image from the LP
    temp = {k: problem.solution.pool.get_values(idx, id) for (k, id) in X_0.items()}
    #print("PRINTING X_0")
    # manual reshaping
    for (key, value) in temp.items():
        confusing_output[key[2], key[0], key[1]] = value
        if key[2] == 1:
            pass
            # print(f'key: {key} ... value: {value}')
    confusing_input_tensors.append(torch.from_numpy(confusing_output[None, :, :, :]).float().to(DEVICE))
# Drop exact duplicates, keeping the first occurrence of each tensor.
temp_cit = [v1 for i, v1 in enumerate(confusing_input_tensors) if not any(torch.equal(v1, v2)
                                                                      for v2 in confusing_input_tensors[:i])]
confusing_input_tensors = temp_cit
print('Confusing input Tensors: ')
print('length:', len(confusing_input_tensors))
output_valueS_list = list()
jh_list = list()
# For each unique generated board: load it into the environment, run the
# network, and record the tanh'd value head plus the Jensen-Shannon
# divergence of the softmaxed policy from the uniform 1/256 distribution.
for confusing_input_tensor in confusing_input_tensors:
    this_state = orig_env.env.env.env.load_state_from_tensor(confusing_input_tensor)
    orig_env.render()
    print(confusing_input_tensor)
    logit_values, output_value = net.forward(confusing_input_tensor)
    output_valueS = torch.tanh(output_value)
    ov_temp = output_valueS
    output_valueS = output_valueS.squeeze(-1).tolist()
    pols_soft = F.softmax(logit_values.double(), dim=-1).squeeze(0)
    pols_soft /= pols_soft.sum()
    pols_soft = pols_soft.tolist()
    jh = jensenshannon(pols_soft, [1/256] * 256)
    #print('logit_values = ', logit_values)
    print('output_value = ', output_value)
    # BEFORE SOFTMAX
    # 0.018, 0.0623, -0.0818, -0.049, 0.0078, 0.003
    #print('output_probabilities = ', pols_soft)
    print('output_value = ', output_valueS)
    jh_list.append(jh)
    output_valueS_list.append(ov_temp.squeeze(-1).tolist())
return confusing_input_tensors, jh_list, output_valueS_list
# build variables indicies based on shape
def build_indicies_dictionary(ls):
    """Build an identity index mapping for CPLEX variables of a given shape.

    Given ``ls``, a sequence of dimension sizes, return a dict whose keys
    equal their values: plain ints ``i`` for 1-D shapes, pairs ``(i, j)``
    for 2-D shapes, and triples ``(i, j, k)`` otherwise. Only the first
    three dimensions are ever used, matching how variables are addressed
    in the constraint-building code.
    """
    if len(ls) < 2:
        # 1-D: note the original (i) keys were plain ints, not tuples.
        return {i: i for i in range(ls[0])}
    if len(ls) < 3:
        return {(i, j): (i, j) for i in range(ls[0]) for j in range(ls[1])}
    # 3-D or higher: dimensions beyond the third are deliberately ignored.
    return {(i, j, k): (i, j, k)
            for i in range(ls[0])
            for j in range(ls[1])
            for k in range(ls[2])}
| [
"headbannedband@gmail.com"
] | headbannedband@gmail.com |
60c292a379f999760e27264232ec1253c499b0ff | 9ff4bbd92db36b98df97d52719fbfe5b6119dcd4 | /my_first_github_py.py | ddd4bda86a606039a5e1c5c989b045f4a9f7224c | [] | no_license | UllasChandran/hello_world_ullas_firstrep | f0d28880c990f496282297ba5d6799adb47df45d | 298f5bf367f53b96dd72d15f58aaa3a0dc3f5968 | refs/heads/main | 2023-08-22T08:18:05.814907 | 2021-10-08T07:32:24 | 2021-10-08T07:32:24 | 414,888,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | print("Hello ALL . This is my First Python File in github")
| [
"noreply@github.com"
] | noreply@github.com |
7539f89d65e13d8d08aa52f5ad2cb95edad6e77c | 572dd7f851ff2f6b39fea8f99199c22260f113df | /user/messages/success.py | b4e779b05fd4003a8e96f5153edf170b46c1ee00 | [] | no_license | SEUNAGBEYE/Flighty | f869f3fb1c1c74bddff9102b11a02411f502dc52 | 46247f93e7f9c83441c3f50eaca2f0d3eaeca96f | refs/heads/develop | 2022-12-13T12:17:58.760670 | 2019-07-29T15:51:36 | 2019-07-29T15:51:36 | 165,585,170 | 0 | 0 | null | 2022-12-08T01:36:58 | 2019-01-14T02:52:46 | Python | UTF-8 | Python | false | false | 172 | py | USER_CREATED = 'User successfully created'
LOGIN_SUCCESSFULL = 'User sucessfully logged in'
PROFILE_UPDATED = 'Profile updated'
USER_RETRIEVED = 'User successfully fetched' | [
"agbeyeseun1@gmail.com"
] | agbeyeseun1@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.