blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b020d54ab698fb8ef1164833693ee6ef241f31bd | a8470bd653c6c08f66f1181342414e524ab4b2f0 | /day68.py | 0a3b763f341408d059eb1ef5baad2399ec59e594 | [] | no_license | khush611/algodaily | 321201d97ee5f0cd72c08e8c6e0f22e26dc89569 | 96f7b0d440840f595303d79344678511b45a186a | refs/heads/master | 2020-04-23T11:42:05.487017 | 2019-05-03T18:17:54 | 2019-05-03T18:17:54 | 171,145,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py |
def kadane(arr, start, finish, n):
    """Kadane's max-subarray over arr[0:n], used as a helper for the 2-D search.

    Writes the winning window's bounds into the 1-element lists start[0] and
    finish[0] (out-parameters) and returns the maximum sum.  When every prefix
    goes negative (all-negative input), falls back to the single largest
    element.
    """
    best = -999999999999
    running = 0
    window_start = 0
    finish[0] = -1          # sentinel: stays -1 iff no positive window found
    for idx in range(n):
        running += arr[idx]
        if running < 0:
            # A negative running sum can never help; restart after this index.
            running = 0
            window_start = idx + 1
        elif running > best:
            best = running
            start[0] = window_start
            finish[0] = idx
    if finish[0] != -1:
        return best
    # All elements negative: the answer is the single maximum element.
    best = arr[0]
    start[0] = finish[0] = 0
    for idx in range(1, n):
        if arr[idx] > best:
            best = arr[idx]
            start[0] = finish[0] = idx
    return best
def findMaxSum(M):
    """Print the corners and sum of the maximum-sum submatrix of M.

    M is ROW x COL (read from module globals).  Standard technique: fix a
    (left, right) column pair, collapse those columns into a per-row sum
    vector, and run 1-D kadane() on it — O(COL^2 * ROW) overall.

    Cleanup: removed dead initialisations from the original (placeholder
    `left, right, i = None`, a pre-built `temp` list and a `Sum = 0` that
    were always overwritten before use).
    """
    global ROW, COL
    # Best submatrix found so far.
    maxSum = -999999999999
    finalLeft = finalRight = finalTop = finalBottom = None
    # 1-element lists so kadane() can report its window bounds by mutation.
    start = [0]
    finish = [0]
    for left in range(COL):
        # Per-row sums of the columns in [left, right].
        temp = [0] * ROW
        for right in range(left, COL):
            for i in range(ROW):
                temp[i] += M[i][right]
            # Best vertical strip for this column window.
            Sum = kadane(temp, start, finish, ROW)
            if Sum > maxSum:
                maxSum = Sum
                finalLeft = left
                finalRight = right
                finalTop = start[0]
                finalBottom = finish[0]
    print("(Top, Left)", "(", finalTop,
          finalLeft, ")")
    print("(Bottom, Right)", "(", finalBottom,
          finalRight, ")")
    print("Max sum is:", maxSum)
# Demo driver: classic 4x5 test matrix for the max-sum-submatrix problem.
ROW = 4
COL = 5
M = [[1, 2, -1, -4, -20],
     [-8, -3, 4, 2, 1],
     [3, 8, 10, 1, 3],
     [-4, -1, 1, 7, -6]]
# Expected output: top-left (1, 1), bottom-right (3, 3), max sum 29.
findMaxSum(M)
| [
"khushboobhushan611@gmail.com"
] | khushboobhushan611@gmail.com |
1c1a42ba57643d88c08337c7518606ba8bad7ab5 | 63518747ea358918412dfc2794a8632ed4c9ee25 | /python/xml/update.py | a995e1582fd2b86b7e3c797533d0367985970d0e | [] | no_license | YuChuanchun/SamyuDemos | 83b036d14f1b9dd8c73c671f2e699f156750ab6f | dc59bb649dd23e67c944aa15b28cb2726c2e9308 | refs/heads/master | 2016-09-08T01:54:46.882939 | 2013-09-05T01:43:14 | 2013-09-05T01:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | import xml.etree.ElementTree as ET
# Log prefix used by update() when reporting errors.
TAG = 'update'
# Generated output module and the XML source it is built from.
FILE_PATH_COMMON = 'common.py'
FILE_PATH_CONFIG = 'config.xml'
def update():
    """Regenerate common.py as PUBLIC_*/PRIVATE_* constants from config.xml.

    <public> elements become PUBLIC_<NAME> constants; <private> elements
    contain <item> children which become PRIVATE_<GROUP>_<NAME> constants,
    with boolean items converted to real True/False.

    Fixes: the original only printed a message on open() failure and then
    crashed with NameError when writing — we now return early.  The output
    file is also closed deterministically via a context manager.
    """
    try:
        output = open(FILE_PATH_COMMON, 'w')
    except IOError:
        print(TAG, 'open common.py error')
        return
    with output:
        tree = ET.parse(FILE_PATH_CONFIG)
        root = tree.getroot()
        for child in root.findall('public'):
            # NOTE(review): the 'string' and 'boolean' branches are identical
            # here (both emit a quoted string) — presumably public booleans
            # were meant to be converted like the private ones; confirm intent.
            if child.get('type') == 'string':
                public = "PUBLIC_" + child.get("name").upper() + " = '" + child.text + "'\n"
            elif child.get('type') == 'boolean':
                public = "PUBLIC_" + child.get("name").upper() + " = '" + child.text + "'\n"
            output.write(public)
        for child in root.findall('private'):
            prefix = 'PRIVATE_' + child.get('name').upper() + "_"
            for item in child.findall('item'):
                if item.get('type') == 'string':
                    private = prefix + item.get('name').upper() + " = '" + item.text + "'\n"
                elif item.get('type') == 'boolean':
                    # Case-insensitive 'true' maps to True, anything else False.
                    if item.text.lower() == 'true':
                        private = prefix + item.get('name').upper() + " = True\n"
                    else:
                        private = prefix + item.get('name').upper() + " = False\n"
                output.write(private)
update() | [
"yuchuanchun@gmail.com"
] | yuchuanchun@gmail.com |
5ce0e3c35c45029b71c5e573cd7bce007d4566d0 | 7dd7e4fdb55ec7cd61ec2a9d30271502968d1444 | /gunicorn_config.py | 50547013609862fb4ef732d58265ba5712be901b | [] | no_license | erik-farmer/flask-boiler-plate | 342e5ebc0116a3762dd7a8b07704cff97297ad88 | 52690fc7cf070d0aeffde2e944fed9505f9c8533 | refs/heads/master | 2021-01-11T21:53:41.401816 | 2017-12-07T23:59:53 | 2017-12-07T23:59:53 | 78,870,174 | 0 | 0 | null | 2017-12-07T23:59:54 | 2017-01-13T17:24:25 | Python | UTF-8 | Python | false | false | 118 | py | import multiprocessing
# Gunicorn configuration: listen locally (expects a reverse proxy in front).
bind = "127.0.0.1:8000"
workers = multiprocessing.cpu_count() * 2 + 1  # common "2*cores + 1" heuristic
worker_class = 'gevent'  # cooperative (greenlet) workers for I/O-bound apps
| [
"efarmer@protagonist.io"
] | efarmer@protagonist.io |
e098bbdb9cba474a357454d9309d907c80d12f00 | 6529e119d2ad72942ed361e463f7e8fca2bbf5da | /scripts/filtersnp.py | c794c155dcf3ef6fbc0550be4a1710cb29c6f551 | [] | no_license | sduarrir/codemsc | 085b7e79eaa78c7d5847e5002530490f14d40804 | e8ca503d6def40d8de17cc06a17f1c6d6c521ddb | refs/heads/master | 2022-12-10T23:07:08.763701 | 2020-09-09T00:12:46 | 2020-09-09T00:12:46 | 290,771,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | #from a given file filters in snps in a list, using plink
#example: filtersnp.py file_containing_all_filenames snplist
import sys
import os
#FUNCTIONS
#output file name (action is the _ added)
def outputname(inputf, act):
    """Strip any extension from *inputf* and build '<name>_<act>'.

    Returns (base_name, output_name); both are used later as plink file
    prefixes.

    Fixes: the original only removed the extension when the name contained
    exactly one dot ('a.b.bed' raised ValueError and kept '.bed'), and only
    stripped surrounding whitespace/newlines on that error path (lines read
    from a file keep their trailing newline).  rsplit handles any number of
    dots and stripping is now unconditional.
    """
    name = inputf.strip()
    if '.' in name:
        name = name.rsplit('.', 1)[0]  # drop only the last extension
    outputf = name + '_' + act
    return (name, outputf)
#change id for chr_pos
def renam(inputf, outputf):
    """Copy a tab-separated file, replacing column 2 (the variant ID) with
    a 'chromosome_position' identifier built from columns 1 and 4."""
    with open(inputf, "r") as src, open(outputf, "w") as dst:
        for row in src:
            fields = row.split('\t')
            # New ID: "<chr>_<pos>" (bim layout: chr, id, cm, pos, ...).
            fields[1] = fields[0] + '_' + fields[3]
            dst.write('\t'.join(fields))
#get inputfile
# argv[1]: text file listing one plink fileset name per line
# argv[2]: file with the SNP IDs to keep (passed to plink --extract)
inputf = sys.argv[1]
snplist = sys.argv[2]
#renam
ext = 'filtersnp'
f = open(inputf, 'r')
for file in f:
    #file w/ no extension
    #get files + what is done to them ( no extension
    inpf, outf = outputname(file, ext)
    # NOTE(review): command is built by concatenation and run through a
    # shell via os.system — fine for trusted local filenames, but unsafe
    # for untrusted input; requires plink on PATH.
    command = 'plink --bfile ' + inpf +' --extract '+ snplist+ ' --make-bed --out ' + outf
    #print current file
    current = 'filterinf snps from ' + inpf + ' ...'
    print(current)
    # execute plink
    os.system(command)
f.close()
print("done!")
#outputf=outputname(inputf, 'renam')
#renam id
#renam(inputf, outputf)
| [
"sduarrir@example.com"
] | sduarrir@example.com |
f967c5af25bac400dae4bde6a3438947838cd97e | e35eb92b5ab6547119585004b9eea3cafe948050 | /efsw/storage/errors.py | 84ab6044f4679693c7697a6ed29b48ba498314da | [] | no_license | einsfr/mmkit | 0a084db85b2cf5ba268e692676095d768733f387 | f12bc2f83254a3123e02abdc105816cc04c438b5 | refs/heads/master | 2020-12-31T05:56:19.287611 | 2016-06-10T05:56:58 | 2016-06-10T05:56:58 | 29,473,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | FILE_DOES_NOT_EXIST_IN_STORAGE = 'В хранилище "{0}" отсутствует файл "{1}".'
# User-facing error-message templates for the storage app, formatted with
# str.format positional placeholders.  NOTE(review): messages mix Russian
# and English; left byte-identical because they are runtime strings.
# "File {0} does not belong to storage {1}."
STORAGE_DOES_NOT_CONTAIN_FILE = 'Файл "{0}" не принадлежит хранилищу "{1}".'
# "File {0} already exists in storage {1}."
FILE_ALREADY_EXISTS_IN_STORAGE = 'Файл "{0}" уже существует в хранилище "{1}".'
# "Storage root folder {0} does not exist."
STORAGE_ROOT_NOT_FOUND = 'Корневая папка хранилищ "{0}" не существует.'
STORAGE_ROOT_REWRITE_FORBIDDEN = 'Storage\'s root directory can\'t be rewritten if application is in production ' \
                                 'environment.'
STORAGE_BASE_DIR_REWRITE_FORBIDDEN = 'Storage\'s base directory can\'t be rewritten if application is in production ' \
                                     'environment.'
STORAGE_BASE_DIR_NOT_FOUND = 'Storage\'s "{0}" base directory "{1}" doesn\'t exist.'
| [
"einsfr@users.noreply.github.com"
] | einsfr@users.noreply.github.com |
c218c71173502582b74a6b241a8f7da1b3befe41 | 4c3e2557044884be630d3c6c47c3e446f951c681 | /Contest/ABC020/B.py | 29b372eb99969ecea9bb9383c5018b6e223e5b8f | [] | no_license | monda00/AtCoder | 01bdf89338c22f1792fde7f85728e01d97e5fd34 | abf947f2cdfe87486ad8935ba078918d4809573a | refs/heads/master | 2021-11-10T00:54:01.144582 | 2021-11-07T13:24:03 | 2021-11-07T13:24:03 | 186,128,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | a, b = input().split()
# a and b come from the (truncated) input line above; concatenating the two
# digit strings then converting yields the joined number (e.g. "1","2" -> 12).
ab = int(a + b)
print(ab * 2)
| [
"monda0524@gmail.com"
] | monda0524@gmail.com |
e88cd2a0243ec52448cd76b9a2c30cd42ee9c40a | 519256c90af4dbc891455612e7984ca01462b189 | /test.py | 1c8cc9408b51445ffad18144333b367df0c6c21c | [] | no_license | sajjad-yazdanparast/ui-ai991-python | b15a4e795a24eeb632351e86ea0554ac6628ee37 | 816bd83ebc9c81148d6012f8b89d9c06776bedd8 | refs/heads/master | 2023-02-23T04:26:45.090170 | 2021-01-31T01:27:50 | 2021-01-31T01:27:50 | 315,128,221 | 0 | 0 | null | 2020-11-22T20:42:23 | 2020-11-22T20:42:23 | null | UTF-8 | Python | false | false | 382 | py | a = [1,2,3]
# if a ==[1,2,3] :
# print('mamad')
# pass
# # print(a)
# def eq (mlist) :
# if a == mlist :
# pass
# print(mlist)
# eq(a)
# )
# def tst (a) :
# a.append(4)
# tst(a)
# print(a)
# a = [
# [-1,-2,-3],
# [1,2,3]
# ]
# a[4]
# Scratch experiment with chained comparisons on a tuple.
a = (4,6)
# Chained comparison: true because 0 < 4 < 5.
if 0< a[0] <5 :
    print('avali')
    # False here: 6 is not in (8, 10), so 'dovomi' is not printed.
    if 8<a[1] <10:
        print('dovomi')
# print(1%1 ) | [
"sajjad.yazdanparast.tehrani@gmail.com"
] | sajjad.yazdanparast.tehrani@gmail.com |
b182d112f6cb1b8565fb48e838a02291e2d64987 | 2bcc421ee345b00cf805c543b37d18b5d019dc04 | /adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/azureiot_central_properties.py | 415f9b7095f77f7c046958466f0ecc7f3a5f28bd | [] | no_license | saewoonam/sc-current-source-titano | 5a1ad46889c1b09c168424901fd71cb4eab5c61b | 1c136aa8b61268d9ac0b5a682b30ece70ab87663 | refs/heads/main | 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,963 | py | import random
import time
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
from adafruit_ntp import NTP
# Get wifi details and more from a secrets.py file
try:
    from secrets import secrets
except ImportError:
    # Credentials are deliberately kept out of the repo; fail loudly.
    print("WiFi secrets are kept in secrets.py, please add them there!")
    raise
# ESP32 Setup
# Boards with a built-in ESP32 co-processor expose named pins; boards
# without them raise AttributeError and fall back to generic D13/D11/D12.
try:
    esp32_cs = DigitalInOut(board.ESP_CS)
    esp32_ready = DigitalInOut(board.ESP_BUSY)
    esp32_reset = DigitalInOut(board.ESP_RESET)
except AttributeError:
    esp32_cs = DigitalInOut(board.D13)
    esp32_ready = DigitalInOut(board.D11)
    esp32_reset = DigitalInOut(board.D12)
# SPI bus + driver object for the ESP32 WiFi co-processor.
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2) # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
# Uncomment below for an externally defined RGB LED
# import adafruit_rgbled
# from adafruit_esp32spi import PWMOut
# RED_LED = PWMOut.PWMOut(esp, 26)
# GREEN_LED = PWMOut.PWMOut(esp, 27)
# BLUE_LED = PWMOut.PWMOut(esp, 25)
# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
print("Connecting to WiFi...")
wifi.connect()
print("Connected to WiFi!")
print("Getting the time...")
ntp = NTP(esp)
# Wait for a valid time to be received
while not ntp.valid_time:
time.sleep(5)
ntp.set_time()
print("Time:", str(time.time()))
# To use Azure IoT Central, you will need to create an IoT Central app.
# You can either create a free tier app that will live for 7 days without an Azure subscription,
# Or a standard tier app that will last for ever with an Azure subscription.
# The standard tiers are free for up to 2 devices
#
# If you don't have an Azure subscription:
#
# If you are a student, head to https://aka.ms/FreeStudentAzure and sign up, validating with your
# student email address. This will give you $100 of Azure credit and free tiers of a load of
# service, renewable each year you are a student
#
# If you are not a student, head to https://aka.ms/FreeAz and sign up to get $200 of credit for 30
# days, as well as free tiers of a load of services
#
# Create an Azure IoT Central app by following these instructions: https://aka.ms/CreateIoTCentralApp
# Add a device template with telemetry, properties and commands, as well as a view to visualize the
# telemetry and execute commands, and a form to set properties.
#
# Next create a device using the device template, and select Connect to get the device connection details.
# Add the connection details to your secrets.py file, using the following values:
#
# 'id_scope' - the devices ID scope
# 'device_id' - the devices device id
# 'key' - the devices primary key
#
# The adafruit-circuitpython-azureiot library depends on the following libraries:
#
# From the Adafruit CircuitPython Bundle (https://github.com/adafruit/Adafruit_CircuitPython_Bundle):
# * adafruit-circuitpython-minimqtt
# * adafruit-circuitpython-requests
from adafruit_azureiot import IoTCentralDevice
# Create an IoT Hub device client and connect
# Device identity comes from secrets.py (id_scope / device_id / primary key).
device = IoTCentralDevice(socket, esp, secrets["id_scope"], secrets["device_id"], secrets["key"])
# Subscribe to property changes
# Properties can be updated either in code, or by adding a form to the view
# in the device template, and setting the value on the dashboard for the device
def property_changed(property_name, property_value, version):
    """Callback invoked by the SDK when a device property is updated."""
    print("Property", property_name, "updated to", str(property_value), "version", str(version))
# Subscribe to the property changed event
device.on_property_changed = property_changed
print("Connecting to Azure IoT Central...")
# Connect to IoT Central
device.connect()
print("Connected to Azure IoT Central!")
# Starting at 60 makes the first property update fire immediately.
message_counter = 60
while True:
    try:
        # Send property values every minute
        # You can see the values in the devices dashboard
        if message_counter >= 60:
            device.send_property("Desired_Temperature", random.randint(0, 50))
            message_counter = 0
        else:
            message_counter = message_counter + 1
        # Poll every second for messages from the cloud
        device.loop()
    except (ValueError, RuntimeError) as e:
        print("Connection error, reconnecting\n", str(e))
        # If we lose connectivity, reset the wifi and reconnect
        wifi.reset()
        wifi.connect()
        device.reconnect()
        continue
    # One-second cadence for the polling loop (skipped after a reconnect).
    time.sleep(1)
| [
"nams@nist.gov"
] | nams@nist.gov |
f2ec15ec6b195fffb34cf7280adecd51ca8ee052 | 95d1dd5758076c0a9740d545a6ef2b5e5bb8c120 | /PY/basic/class_inherit.py | 98146eaa6d42f48c981e6d630f45405486b34194 | [] | no_license | icoding2016/study | 639cb0ad2fe80f43b6c93c4415dc6e8a11390c85 | 11618c34156544f26b3b27886b55c771305b2328 | refs/heads/master | 2023-08-31T14:15:42.796754 | 2023-08-31T05:28:38 | 2023-08-31T05:28:38 | 117,061,872 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | #!/usr/bin/python
from __future__ import print_function
class B(object):
    """Base class for the class-vs-instance attribute demo."""
    # Class attribute shared by B and all subclasses until shadowed.
    class_var = None
    def __init__(self):
        print("Class B init..")
        # Instance attribute, set only when this __init__ actually runs.
        self.inst_var = 0
    def func(self):
        """Print both attributes (overridden by subclasses)."""
        print("B::func()")
        print("class_var=%s" % self.class_var)
        print("inst_var=%s" % self.inst_var)
    def show(self):
        """Same output as func(); kept separate so subclasses that override
        func() still expose the base behaviour."""
        print("B::show()")
        print("class_var=%s" % self.class_var)
        print("inst_var=%s" % self.inst_var)
class B1(B):
    """Subclass that does NOT call super().__init__(): B's init is skipped,
    so inst_var comes solely from B1."""
    def __init__(self):
        print("Class B1 init..")
        self.inst_var = 1
    def func(self):
        # Fully replaces B.func (no call to the base implementation).
        print("B1::func()")
class B2(B):
    """Subclass that cooperates with the base via super(), and demonstrates
    the difference between shadowing class_var on the instance and mutating
    it on class B itself."""
    def __init__(self):
        super(B2, self).__init__()
        print("base::__init__ called before Class B2 init..")
        self.inst_var = 2 # initiate the instance's inst_var, not changing the base instance's inst_var
    def func(self):
        print("B2::func(), then explicitly call base.func()")
        super(B2, self).func()
    def changeSelfClassVar(self):
        # Creates an instance attribute that shadows B.class_var for this
        # object only.
        self.class_var = 2 # this add a var to the instance and assign 2, not changing the B::class_var
        print("B2: self.class_var -> %s" % self.class_var)
    def changeClassVar(self):
        # Mutates the shared attribute on class B: visible to every instance
        # that has not shadowed it.
        B.class_var = 22 # this modifies the 'class var' (static)
        print("B2: class_var -> %s" % B.class_var)
if "__main__" in __name__:
print("-"*20)
b = B()
b.func()
print("-"*20)
b1 = B1()
b1.func()
print("-"*20)
b2 = B2()
b2.func()
print("-"*10)
b2.changeSelfClassVar()
b.show() # self.inst_var still None, 'static' B.class_var not changed.
b2.changeClassVar()
b.show()
| [
"icoding2016@gmail.com"
] | icoding2016@gmail.com |
3eb455197535d8594aa8a4424170a19929e20ec6 | 9a75b0d21d52b9490796d977245912821df9f5fc | /Event Manager/manager/models.py | 96b11f419b4a55c471e1e58cac0c3441797a54ae | [] | no_license | vishv843/woc3.0-eventmanager-vishv- | b77fe7a91fe58f505b8b432687bff58018a6f1fd | cb947786f92434b786e5173b6b4986121a28cd79 | refs/heads/master | 2023-02-24T01:33:46.150053 | 2021-01-31T18:24:05 | 2021-01-31T18:24:05 | 326,456,590 | 0 | 0 | null | 2021-01-21T17:28:00 | 2021-01-03T16:57:31 | Python | UTF-8 | Python | false | false | 872 | py | from django.db import models
class event(models.Model):
    """An event with its schedule, registration deadline and organiser login."""
    # NOTE(review): plain IntegerField alongside Django's implicit auto `id`
    # primary key — confirm whether this was meant to be the pk / unique.
    event_ID = models.IntegerField()
    event_name = models.CharField(max_length = 50)
    description = models.TextField()
    from_date = models.DateField()
    from_time = models.TimeField()
    to_date = models.DateField()
    to_time = models.TimeField()
    registration_deadline = models.DateField()
    poster_link = models.TextField()
    # NOTE(review): password stored as a plain CharField — should be hashed.
    password = models.CharField(max_length = 50)
    email_ID = models.EmailField()
class participant(models.Model):
    """A registration record, linked to an event by name (no ForeignKey)."""
    participant_ID = models.IntegerField()
    name = models.CharField(max_length = 50)
    contact = models.CharField(max_length = 15)
    email_ID = models.EmailField()
    # NOTE(review): joins to `event` only via this free-text name field;
    # a ForeignKey would enforce referential integrity — confirm intent.
    event_name = models.CharField(max_length = 50)
    registration_type = models.CharField(max_length = 20)
    # Nullable: presumably only set for group registrations — confirm.
    number_of_participants = models.PositiveIntegerField(null = True)
"201901453@daiict.ac.in"
] | 201901453@daiict.ac.in |
2bb7c7ba3061c50db496fcc55f5566792482e2cd | 65c8a6a7af2ee8cdf3866d012ea814887bd68a26 | /ppro360_automation/Ppro360/CoachingAndTriadCoaching_Pages/RapidFireProcessConfirmation.py | 0a3d8240f845597bb551d6c2ea4dd50383a5257f | [] | no_license | 1282270620/automation_test | 9b3c595c3f7a139ded0a638ae4bcf31e0b7f9686 | 3faf86f0d641089eaf27eba906d22157dd2c1f5d | refs/heads/master | 2020-04-01T06:35:33.873989 | 2018-10-21T03:05:17 | 2018-10-21T03:05:17 | 152,954,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,478 | py | '''
Created on 20171101
@author: lei.tan
'''
from selenium.webdriver.common.by import By
from Tablet_pages import BasePage
class RapidFireProcessConfirmation(BasePage.Action):
    """Selenium page object for the Rapid Fire Process Confirmation form.

    *_loc attributes are ready (By, xpath) locator tuples; *_path attributes
    are xpath templates with a %d slot filled per row/column index.
    find_element / Input_text come from the BasePage.Action base class.
    NOTE(review): 'stataus' method/attribute names are typos kept as-is for
    API compatibility with existing callers.
    """
    def __init__(self):
        self.callRecordingNumber_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div/div[3]/div[2]/div/div/input")
        self.KPIcheckbox_path="//*[@id='container']/div/section/div/form/div[2]/div[1]/div/table/tbody/tr[4]/td[%d]/i"
        self.comments_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/textarea"
        self.comments_title_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/label"
        self.scoreinput_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/div[4]/input"
        self.scoreballstataus_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/div[4]/i"
        self.overallscore_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div[2]/label/div[2]/input")
        self.overallball_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div[2]/label/div[2]/i")
    def click_KPIcheckbox (self, checkboxorderindex):
        """Click the KPI checkbox in table column *checkboxorderindex*."""
        self.KPIcheckbox_loc=(By.XPATH,self.KPIcheckbox_path %checkboxorderindex)
        self.find_element(*self.KPIcheckbox_loc).click()
    def input_callRecordingNumber (self,text):
        """Type *text* into the call-recording-number input."""
        self.find_element(*self.callRecordingNumber_loc).send_keys(text);
    # --- comments textarea (one per form row, selected by lineindex) ---
    def input_comments(self,lineindex,text):
        self.comments_loc=(By.XPATH,self.comments_path %lineindex)
        self.Input_text(text,*self.comments_loc)
    def get_comments(self,lineindex):
        self.comments_loc=(By.XPATH,self.comments_path %lineindex )
        return self.find_element(*self.comments_loc).get_attribute("value")
    def comments_disabled(self,lineindex):
        # Returns the raw 'disabled' attribute (truthy string or None).
        self.comments_loc=(By.XPATH,self.comments_path %lineindex )
        flag=self.find_element(*self.comments_loc).get_attribute("disabled")
        return flag
    def get_commentsBoxtitle(self,lineindex):
        self.comments_title_loc=(By.XPATH,self.comments_title_path %lineindex)
        return self.find_element(*self.comments_title_loc).text
    # --- per-row score input and its status "ball" icon ---
    def input_scoreinput(self,lineindex,text):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex)
        self.Input_text(text,*self.scoreinput_loc)
    def get_scoreinput(self,lineindex):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex )
        return self.find_element(*self.scoreinput_loc).get_attribute("value")
    def scoreinput_disabled(self,lineindex):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex )
        flag=self.find_element(*self.scoreinput_loc).get_attribute("disabled")
        return flag
    def get_scoreballstataus(self,lineindex):
        # Status is exposed via the icon's CSS class string.
        scoreballstataus_loc=(By.XPATH,self.scoreballstataus_path %lineindex )
        scoreballstataus=self.find_element(*scoreballstataus_loc).get_attribute("class")
        return scoreballstataus
    # --- overall score widgets ---
    def get_overallscore(self):
        return self.find_element(*self.overallscore_loc).get_attribute("value")
    def overallscore_disabled(self):
        flag=self.find_element(*self.overallscore_loc).get_attribute("disabled")
        return flag
    def get_overallballstataus(self):
        scoreballstataus=self.find_element(*self.overallball_loc).get_attribute("class")
        return scoreballstataus
| [
"1282270620@qq.com"
] | 1282270620@qq.com |
192c788a6fe5b21b08215be378842606454be960 | b991746b8b0efb2b4e59826c828a49404a6b38fe | /chatapp/utils.py | ba167d0feab1b068e342c7bb0e6c9a9d8368f264 | [] | no_license | fishoe/ChatApp | 181086e0d23bed9ac1ac2873ae31abb803c25c3c | 4e209d7215ffcb9af8fd71ba46a9ff223e4c8cbe | refs/heads/master | 2022-12-15T21:41:52.392009 | 2020-08-29T09:53:40 | 2020-08-29T09:53:40 | 291,245,172 | 0 | 0 | null | 2020-08-29T09:57:49 | 2020-08-29T09:57:49 | null | UTF-8 | Python | false | false | 749 | py | import os
from asgiref.sync import sync_to_async
from tensorflow.keras.preprocessing.sequence import pad_sequences
from .models import User
@sync_to_async
def checkHateWord(name, model, tokenizer, text):
    """Score *text* with the hate-speech model; add a strike to the user
    when the score exceeds 0.60.

    Returns (count_str, score_str) as strings for the chat consumer.
    Cleanup: removed the dead no-op line `model = model if model else None`
    (it never changed `model`; a falsy model still fails at .predict()).
    """
    user = User.objects.get(name = name)
    token_stc = text.split()
    encode_stc = tokenizer.texts_to_sequences([token_stc])
    # Model expects fixed-length sequences of 50 tokens.
    pad_stc = pad_sequences(encode_stc, maxlen = 50)
    score = model.predict(pad_stc)[0][0]
    print(pad_stc, score)
    if float(score) > 0.60:
        user.count += 1
        user.save()
    return str(user.count), str(score)
def checkBlockUser(name):
    """Return True once the user has accumulated more than 4 strikes.

    Fix: the original set user.blocked = True but never called save(), so
    the block flag was lost on the next database read.
    """
    user = User.objects.get(name = name)
    if user.count > 4:
        user.blocked = True
        user.save()  # persist the block, not just the in-memory flag
    return user.blocked
"songys96@naver.com"
] | songys96@naver.com |
1b6cabcb128d8e6b00136c69f47189a012777663 | d67989daa0ae2e53d8bb1e7edcaadee2c61aa5e6 | /code/main.py | 2ab7f098cd453be8308a5eb28982806717def5a3 | [] | no_license | yogeshralhan/Python_Code | 4a5e8d714c0355fbf1d07586b9f89ec849069ebe | 9a4e818dd59f0c99694a99a632f36c946dd4e26d | refs/heads/master | 2021-01-10T01:16:01.407821 | 2016-03-29T06:18:45 | 2016-03-29T06:18:45 | 54,949,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # math Functions
# NOTE: Python 2 syntax (print statements) — this snippet will not run
# under Python 3.
import math
num=5.6
# 6! = 720
print math.factorial(6)
# ceil(5.6) = 6.0 (a float in Python 2's math.ceil)
print math.ceil(num)
# rename math function
from math import pi as py
# Both names refer to the same constant.
print math.pi , py
| [
"yogeshralhan@gmail.com"
] | yogeshralhan@gmail.com |
7bd524537437d1adee982ca9c0168058ca7ea7e0 | 97dfb2d929f72a90d8f3c4c77297aca0c96af45e | /python/plot_location.py | 4d93652d75f97e5db54f311263bf7d0804789990 | [] | no_license | angelaslin/streetstyle-experiment | 59b76041be0883bd050945b04020756c72bf4aea | 7a15e87055ce9f8a164f965b438db1d05342f753 | refs/heads/master | 2021-07-25T07:03:22.710056 | 2017-11-03T22:02:37 | 2017-11-03T22:02:37 | 108,179,509 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | #!/usr/bin/env python
import sqlite3
import simplekml
def fetch_by_city_id(city_id, cursor):
    """Return a cursor over all streetstyle27k rows for the given city.

    Fix: the query was built via string concatenation with repr(), which is
    SQL-injection-prone and fragile for non-integer ids; use a parameterized
    placeholder instead (behaviour is identical for integer ids).
    """
    return cursor.execute(
        "SELECT * FROM streetstyle27k WHERE city_id=?", (city_id,))
def write_kml(kml_name, positions):
    """Write a KML file with one small highlighted-circle placemark for each
    coordinate pair in *positions* (simplekml expects (lon, lat))."""
    document = simplekml.Kml()
    for coord in positions:
        mark = document.newpoint(coords=[coord])
        mark.style.iconstyle.icon.href = "http://maps.google.com/mapfiles/kml/shapes/placemark_circle_highlight.png"
        mark.style.iconstyle.scale = 0.5
    document.save(kml_name)
def main():
    """Dump the (lon, lat) positions of one city's photos to a KML file."""
    db_name = '../data/streetstyle27k.db'
    city_id = 15
    kml_name = '../results/kml_city_id_' + repr(city_id) + '.kml'
    # open sqlite3 database
    conn = sqlite3.connect(db_name)
    cursor = conn.cursor()
    entries = fetch_by_city_id(city_id, cursor)
    positions = []
    for e in entries:
        # Columns 6/5 are presumably longitude/latitude — TODO confirm
        # against the streetstyle27k schema.
        positions.append((e[6],e[5]))
    write_kml(kml_name, positions)
    conn.close()
if __name__ == '__main__':
    main()
| [
"lakshay.narula@utexas.edu"
] | lakshay.narula@utexas.edu |
262e5a8fc1b3277a125ac7ac66fefddc56cae93a | a457e3284fa1f32257969a72c69082dd0179eb73 | /gladweb/config.py | ef8979cda314e9b8cbea6d22467ff25691cdb8b3 | [] | no_license | slow2go/glad-web | 19377a6f17f19a4ebc46bc9c61afc9f709f628b0 | 13f8674c9602d1288b5de9437cf618e835fcac4e | refs/heads/master | 2021-01-24T08:29:43.615111 | 2017-05-22T14:29:30 | 2017-05-22T14:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | # ---
# Default Configuration
# ---
import os
import gladweb.cache
# Repo root: one directory above this config file.
base_path = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..'))
# ---
# Flask
# ---
# This key MUST be changed before you make a site public, as it is used
# to sign the secure cookies used for sessions.
SECRET_KEY = 'ChangeMeOrGetHacked'
# ---
# Glad Web
# ---
# A cache, which will be used to store/retrieve various files.
CACHE = gladweb.cache.FileCache(os.path.join(base_path, 'cache'))
# Path to a folder which will be used to store generation results
TEMP = os.path.join(base_path, 'temp')
# Generate static html files for /generated
# the webserver needs to be configured to serve /generated instead of passing
# requests through to glad-web.
# Note: /generated/icons still needs to be served by glad-web
FREEZE = True
# Optional machine-local overrides: a local_config.py on the path may
# replace any of the defaults above; its absence is expected and ignored.
try:
    from local_config import *
except ImportError:
    pass
| [
"admin@dav1d.de"
] | admin@dav1d.de |
24f8ccc67d87150963c71b87f4fdea9a87f39455 | 9e16c5aca51bfc4503351081e0d6fd639dfc27c0 | /membership_app/views.py | 6a14a76d8c6d62086c2162ee9e41dbb0d72e7549 | [] | no_license | AlexisGfly/exam_repo | 91d717be0449ab3724e9f9b4a76651a62a2fba70 | e219acc02cf14c2a9173f161a74adf1871ca20f4 | refs/heads/main | 2023-06-21T22:25:07.912477 | 2021-07-24T18:40:38 | 2021-07-24T18:40:38 | 389,174,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,456 | py | import re
from django.http import request
from django.shortcuts import redirect, render
from django.contrib import messages
from .models import *
import bcrypt
# Página de inicio
#=======================================================================================
def index(request):
    """Landing page: renders the registration/login form."""
    return render(request, 'index.html')
#=======================================================================================
def create_user(request):
    """Register a new user: validate, bcrypt-hash the password, log in.

    On validation errors every message is flashed and the user is sent home;
    on success the new user's id goes into the session and we redirect to
    the groups page.  Non-POST requests simply redirect home.
    """
    if request.method == "POST":
        # Custom manager method returning a dict of field -> error message.
        errors = User.objects.registration_validator(request.POST)
        if len(errors) > 0:
            for key,value in errors.items():
                messages.error(request, value)
            return redirect('/')
        # Salted bcrypt hash, stored as a utf-8 string.
        hash_pw = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt()).decode()
        new_user = User.objects.create(
            first_name = request.POST['first_name'],
            last_name = request.POST['last_name'],
            email = request.POST['email'],
            password = hash_pw
        )
        # Session marker checked by every login-protected view below.
        request.session['logged_user'] = new_user.id
        return redirect('/user/groups')
    return redirect('/')
#=======================================================================================
def login(request):
    """Log an existing user in by email + bcrypt password check.

    NOTE(review): the failure message is flashed on *any* fall-through,
    including plain GET requests — confirm this URL is only ever POSTed.
    """
    if request.method == 'POST':
        user = User.objects.filter(email = request.POST['email'])
        if user:
            log_user = user[0]
            if bcrypt.checkpw(request.POST['password'].encode(), log_user.password.encode()):
                request.session['logged_user'] = log_user.id
                return redirect('/user/groups')
    messages.error(request,'Email/password are incorrect. Please retry!')
    return redirect('/')
#=======================================================================================
def logout(request):
    """Clear the whole session (drops 'logged_user') and return home."""
    request.session.flush()
    return redirect('/')
#=======================================================================================
def groups(request):
    """Dashboard listing every group with its member count (login required)."""
    if 'logged_user' not in request.session:
        messages.error(request, 'Please register or please log in first')
        return redirect('/')
    groups = Group.objects.all()
    lista_groups = []
    for group in groups:
        # One COUNT-style query per group to size its membership.
        num_members = len(Member.objects.filter(group=group))
        group_complete = {
            'id': group.id,
            'name': group.name,
            'description':group.description,
            'user_create': group.user_create.first_name,
            'members': num_members
        }
        lista_groups.append(group_complete)
    context = {
        'logged_user': User.objects.get(id=request.session['logged_user']),
        'all_groups': lista_groups
    }
    return render(request,'groups.html', context)
def add_group(request):
    """Create a group (creator auto-joins as its first member).

    NOTE(review): assumes a POST with both fields present — POST.get()
    returns None for a missing field, which would make len() raise; confirm
    the form always submits both.
    """
    if 'logged_user' not in request.session:
        messages.error(request, 'Please register or please log in first')
        return redirect('/')
    user_create = User.objects.get(id=request.session['logged_user'])
    name = request.POST.get("name")
    description = request.POST.get("description")
    if len(name) <3:
        messages.error(request, 'Name must be at least 3 characters')
        return redirect('/user/groups')
    if len(description) <3:
        messages.error(request, 'Description must be at least 3 characters')
        return redirect('/user/groups')
    group = Group.objects.create(name=name, description=description, user_create=user_create)
    # The creator is immediately registered as a member of the new group.
    member_joined = Member.objects.create(group_id=group.id,users=user_create.id)
    return redirect('/user/groups')
def edit_group(request, group_id):
    """Group detail page; POST toggles the current user's membership.

    Member.users holds a raw user id (an integer field, not a ForeignKey),
    hence the manual id comparisons and per-member User lookups below.
    """
    if 'logged_user' not in request.session:
        messages.error(request, 'Please register or please log in first')
        return redirect('/')
    user_session = User.objects.get(id=request.session['logged_user'])
    group = Group.objects.get(id=group_id)
    members = Member.objects.filter(group=group)
    isJoined=False
    if request.method == "POST":
        # Toggle semantics: leave if already a member, otherwise join.
        for member in members:
            if user_session.id == member.users:
                member_joined = Member.objects.get(id=member.id)
                member_joined.delete()
                isJoined=True
                break
        if not isJoined:
            member_joined = Member.objects.create(group_id=group_id,users=user_session.id)
        return redirect('/user/groups')
    else:
        # Display 'YOU' when the viewer is the creator, else the full name.
        user_created = 'YOU'
        if user_session != group.user_create:
            user_created =group.user_create.first_name + ' ' + group.user_create.last_name
        list_members =[]
        for member in members:
            user = User.objects.get(id=member.users)
            list_members.append(user.first_name + ' '+ user.last_name)
        is_creator=False
        if user_session == group.user_create:
            is_creator=True
        group_complete = {
            'id':group.id,
            'name': group.name,
            'description':group.description,
            'user_create': user_created,
            'members': list_members,
            'is_creator': is_creator
        }
        return render(request, 'edit.html', group_complete)
def delete_group(request, group_id):
    """Delete a group; only its creator may do so.

    Fixes: previously ANY logged-in user could delete ANY group (no
    authorization check); a missing group id raised an unhandled
    DoesNotExist (HTTP 500); and a leftover debug print(group) was removed.
    """
    if 'logged_user' not in request.session:
        messages.error(request, 'Please register or please log in first')
        return redirect('/')
    try:
        group = Group.objects.get(id=group_id)
    except Group.DoesNotExist:
        return redirect('/user/groups')
    # Authorization: only the creator may delete the group.
    if group.user_create.id != request.session['logged_user']:
        messages.error(request, 'Only the creator can delete this group')
        return redirect('/user/groups')
    group.delete()
    return redirect('/user/groups')
"48730224+AlexisGfly@users.noreply.github.com"
] | 48730224+AlexisGfly@users.noreply.github.com |
6c5d5ada598b2db69393e93d23c815fcdc307669 | 1be6ff5b04d862ac1d428f8d68684cdd9396ea15 | /total-spent-by-customer.py | d4a03377e1b0852ad4af02d689a7f4642bc88422 | [] | no_license | thileite/Course_Repository-Taming-Big-Data-with-Apache-Spark-and-Python---Hands-On- | 18f75355f5e7188d6fa8ac09d04805f74edfb3a2 | b6707bffe84b951c360b772a9791ee834e8e086d | refs/heads/master | 2022-04-23T18:46:51.012728 | 2020-04-23T17:38:53 | 2020-04-23T17:38:53 | 256,584,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | from pyspark import SparkConf, SparkContext
# Fix: the original read `conf = conf = SparkConf()...` — a duplicated
# assignment target (a typo); a single binding is sufficient.
conf = SparkConf().setMaster("local").setAppName("CustomerBill")
sc = SparkContext(conf=conf)
def parseline(text):
fields= text.split(',')
customerId=int(fields[0])
dollars=float(fields[2])
return (customerId, dollars)
lines = sc.textFile("C:/SparkCourse/customer-orders.csv")
rdd = lines.map(parseline)
reduction = rdd.reduceByKey(lambda x, y: x + y)
flipped = reduction.map(lambda x: (x[1], x[0]))
totalByCustomerSorted = flipped.sortByKey()
results = totalByCustomerSorted.collect()
for result in results:
print(result)
| [
"thiago.sp57@hotmail.com"
] | thiago.sp57@hotmail.com |
25eb0d90d6fb21b20d59956b7dfe5d72fd792604 | e34ed1ae4f4674def35b2b079226ac98dcd58ee9 | /dchblog/mainsite/views.py | e999a1066d37b342f459105107aa7671bc7c23fd | [] | no_license | dch2333/dchblog | 315edf0d750e08f37716dc1640c9ff30eb998316 | 2371dcddc4696ae07ff0997dc02a1b3e9aeb6543 | refs/heads/master | 2020-03-22T12:51:06.800707 | 2018-07-07T08:29:21 | 2018-07-07T08:29:21 | 140,066,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Post
from django.template.loader import get_template
from datetime import datetime
# Create your views here
def homepage(request):
if request.session.get('is_login', None):
template = get_template('index.html')
posts = Post.objects.all()
now = datetime.now()
html = template.render(locals())
return HttpResponse(html)
else:
return redirect('/login/')
def showpost(request, slug):
if request.session.get('is_login', None):
template = get_template('post.html')
try:
post = Post.objects.get(slug=slug)
if post != None:
now = datetime.now()
html = template.render(locals())
return HttpResponse(html)
except:
return redirect('/')
else:
return redirect('/login/')
| [
"noreply@github.com"
] | noreply@github.com |
81031a4507a7ac0e0004f8b3d17483130571c910 | 718e808d97e56e4ca065a97e2d533db4b760ac0e | /easy_scoping/widgets/migrations/0006_auto_20180711_2237.py | a28f9e450e4d5be25b169335ccba2659ff0bfeee | [] | no_license | net-prophet/django-easy-scoping | 0baa28234da075444ef49b2ab589fc574b5481d6 | 983ea86d7e5702d8322732c05b4378680d71f479 | refs/heads/master | 2021-07-05T06:46:31.117631 | 2020-07-31T03:12:11 | 2020-07-31T03:12:11 | 138,215,540 | 12 | 1 | null | 2020-07-31T03:12:12 | 2018-06-21T19:54:41 | Python | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.0.6 on 2018-07-11 22:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('widgets', '0005_auto_20180711_1733'),
]
operations = [
migrations.AlterField(
model_name='widget',
name='cost',
field=models.FloatField(blank=True, default=0),
),
]
| [
"wellsroberte@gmail.com"
] | wellsroberte@gmail.com |
673090671963e171e1144b22eb2e739347192faf | 87b0d4587c839250957b17127daac1c73e216d7d | /default/migrations/0001_initial.py | 7477a3be0c4393e9b4b9f9326b352968745f3f0e | [] | no_license | 10927/poll | f447edad701df882cb690aa48b9546169aad61d7 | 5a0f21208338f11ebbf8723d167e4f72eb425e63 | refs/heads/master | 2020-05-07T12:42:20.157683 | 2019-05-01T07:41:15 | 2019-05-01T07:41:15 | 180,516,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # Generated by Django 2.1.3 on 2019-03-27 07:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Option',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='選項標題')),
('count', models.IntegerField(verbose_name='票數')),
('poll_id', models.IntegerField(verbose_name='投票主題')),
],
),
migrations.CreateModel(
name='Poll',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=250, verbose_name='投票主題')),
('date_created', models.DateField(auto_now_add=True, verbose_name='建立日期')),
],
),
]
| [
"i13585904041@gmail.com"
] | i13585904041@gmail.com |
89798dd5fa1297e000b83d9ca339b90c5a77ab26 | 743185f9314fd7eeb4bbee04e8b14aae781caa0b | /Problems/Very odd/main.py | 747a9f3f36ff8963a8f9a42092b3b46904910cf6 | [] | no_license | v4rden/JetBrainsAcademy-Python-RockPaperScissors | c01dd5db31a109c85274cf36a428e750315e75d3 | b5952b69d1ba5999efeb54b4bc04091d038d3c0f | refs/heads/master | 2023-02-25T22:58:35.093887 | 2021-01-31T12:54:56 | 2021-01-31T12:54:56 | 318,513,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | dividend = int(input())
divisor = int(input())
quotient = dividend / divisor
is_odd = quotient % 2 != 0
print(is_odd)
| [
"denys.variah@gmail.com"
] | denys.variah@gmail.com |
3e9fe40f62be60e37282225b84bfdd04a3e613a2 | aea96aa406250c3a2a8f2799e6cbbad256c262c3 | /test_2.py | c7b8058a78d61aaf7e83d31f493fb1c1dff1195d | [] | no_license | xiaochuanjiejie/python_exercise | cb0ffaa4b7c961c8ca9847526c84ee6ba261620c | 710fa85fd2d7a17994081bdc5f8b5ff66b77416e | refs/heads/master | 2021-01-21T16:18:04.640093 | 2017-08-11T10:02:49 | 2017-08-11T10:02:49 | 95,403,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | __author__ = 'chuan'
def changer(a,b):
a = 1
b[0] = 'spam'
return a,b
X = 2
L = [1,2]
a,b = changer(X,L)
print (a,b)
print '....'
X = 2
L = [1,2]
changer(X,L[:])
print X
print L | [
"xiaochuanjiejie@163.com"
] | xiaochuanjiejie@163.com |
f3dc8044a675bb0fbb19c8b606f2f0e3104bdcc5 | 4f0869639ee57f063f2d82c34dbfcda8a9ec12a3 | /filled_UBO_graph.py | e2bf2dfbe72963abd68726e93c943c735acced3c | [
"MIT"
] | permissive | Green-Resilience/GeoLinked_HollyFerguson | be82d82dff163ad904ef924f3ffb821f946f0c60 | aa5e93a602a6b6a1a4c6183b1e117ed166dd21ba | refs/heads/master | 2020-12-03T06:41:47.515975 | 2017-07-25T01:22:54 | 2017-07-25T01:22:54 | 95,718,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,486 | py | #-------------------------------------------------------------------------------
# Name: filled_UBO_graph.py
# Purpose: Fill graph with mapping for given structure
#
# Author: Holly Tina Ferguson hfergus2@nd.edu
#
# Created: 06/10/2015
# Copyright: (c) Holly Tina Ferguson 2015
# Licence: The University of Notre Dame
#-------------------------------------------------------------------------------
# #!/usr/bin/python
import sys
import getopt
import os
import rdflib
from rdflib import Graph
from rdflib import URIRef, BNode, Literal
from rdflib.namespace import RDF
from rdflib import Namespace
import pprint
from lxml import etree
from thickness_to_coordinates import thickness_to_coordinates
class filled_UBO_graph():
# Input parameters
#variable = ""
namespaces = {'gb': "http://www.gbxml.org/schema"}
def fill_graph(self, this_file_type, mapDict, UBOgraphStructure, inputfile):
"""
Fill appropriate mapping dictionary
#http://rdflib.readthedocs.org/en/latest/
#http://rdflib.readthedocs.org/en/latest/intro_to_graphs.html
#http://rdflib.readthedocs.org/en/latest/intro_to_parsing.html
RDF is a graph where the nodes are URI references, Blank Nodes or Literals, in RDFLib represented by
the classes URIRef, BNode, and Literal. URIRefs and BNodes can both be thought of as resources, such
a person, a company, a web-site, etc. A BNode is a node where the exact URI is not known. URIRefs are
also used to represent the properties/predicates in the RDF graph. Literals represent attribute values,
such as a name, a date, a number, etc.
"""
# Existing empty Graph Framework
UBO_frame = UBOgraphStructure
#for stmt in UBO_frame:
# pprint.pprint(stmt)
# First Graph Layer
UBO_New = Graph()
UBO = Namespace("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#")
GeoInstance1 = URIRef("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#GeoInstance1")
ASpatialObject = URIRef("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#ASpatialObject")
hasSpatialCollectionLocationMember = URIRef("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#ASpatialObject:hasSpatialCollectionLocationMember")
SpaceCollectionLocation = URIRef("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#SpaceCollectionLocation")
# Graph.add((s, p, o))
UBO_New.add( (GeoInstance1, RDF.type, ASpatialObject) )
UBO_New.add( (GeoInstance1, hasSpatialCollectionLocationMember, SpaceCollectionLocation) )
#UBO_New.add( (UBO.SpatialObject1, ASpatialObject.hasSpatialCollectionLocationMember, SpaceCollectionLocation) )
# Second Graph Layer
base = "http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#"
# Currently assuming that each model is 1 building..........may be updated later
tree = etree.parse(inputfile)
# Find the corresponding surface to get the correct construction ID...pre-processing
print "pre-process"
SurfaceToMaterialList = self.preProcess(inputfile, mapDict, tree)
#for item in SurfaceToMaterialList:
# print "SurfaceToMaterialList: ", item, SurfaceToMaterialList[item]
flag = 0
proportionDict = dict()
hwtOrderDict = dict()
material_counter = 1
start = mapDict["SpaceCollectionLocation"][0]
find = mapDict["SpaceCollectionLocation-Property"][0]
loc = tree.xpath("/gb:gbXML/gb:" + start + "/gb:" + find, namespaces=self.namespaces)
if not loc:
elevation = None
latitude = None
longitude = None
hasProperty = URIRef(base + "SpaceCollectionLocation:hasProperty")
Property = URIRef(base + "Property")
PropertyA = URIRef(base + "SpaceLocationData")
UBO_New.add( (PropertyA, RDF.type, Property) )
UBO_New.add( (SpaceCollectionLocation, hasProperty, PropertyA) )
hasType = URIRef(base + "Property:hasType")
hasValue = URIRef(base + "Property:hasValue")
for l in loc:
#e = str(mapDict[SpaceCollectionLocation][3])
e = "Elevation"
elevation = float(l.xpath("./gb:" + e, namespaces = self.namespaces)[0].text)
Property1 = URIRef(base + "Property1")
UBO_New.add( (Property1, RDF.type, PropertyA) )
UBO_New.add( (Property1, hasType, Literal(e)) )
UBO_New.add( (Property1, hasValue, Literal(elevation)) )
lat = "Latitude"
latitude = float(l.xpath("./gb:" + lat, namespaces = self.namespaces)[0].text)
Property2 = URIRef(base + "Property2")
UBO_New.add( (Property2, RDF.type, PropertyA) )
UBO_New.add( (Property2, hasType, Literal(lat)) )
UBO_New.add( (Property2, hasValue, Literal(latitude)) )
lon = "Longitude"
longitude = float(l.xpath("./gb:" + lon, namespaces = self.namespaces)[0].text)
Property3 = URIRef(base + "Property3")
UBO_New.add( (Property3, RDF.type, PropertyA) )
UBO_New.add( (Property3, hasType, Literal(lon)) )
UBO_New.add( (Property3, hasValue, Literal(longitude)) )
# Third Graph Layer
base = "http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#"
start = mapDict["SpaceCollectionLocation"][0]
find = mapDict["SpaceCollection"][0]
buildings = tree.xpath("/gb:gbXML/gb:" + start + "/gb:" + find, namespaces=self.namespaces)
buildingsDict = dict()
counter = 1
property_counter = 4
for b in buildings:
# Will add Building1, Building2, etc.
hasSpaceCollectionMember = URIRef(base + "SpaceCollectionLocation:hasSpaceCollectionMember")
SpaceCollection = URIRef(base + "SpaceCollection")
new_b = "SpaceCollection" + str(counter)
new_b = URIRef(base + new_b)
UBO_New.add( (new_b, RDF.type, SpaceCollection) )
UBO_New.add( (SpaceCollectionLocation, hasSpaceCollectionMember, new_b) )
hasProperty = URIRef(base + "SpaceCollection:hasProperty")
Property = URIRef(base + "Property")
PropertyB = URIRef(base + "SpaceMassingData")
UBO_New.add( (PropertyB, RDF.type, Property) )
UBO_New.add( (SpaceCollection, hasProperty, PropertyB) )
hasType = URIRef(base + "Property:hasType")
hasValue = URIRef(base + "Property:hasValue")
a = "Area"
area = float(b.xpath("./gb:" + a, namespaces = self.namespaces)[0].text)
next_property = "Property" + str(property_counter)
next_property = URIRef(base + next_property)
UBO_New.add( (next_property, RDF.type, PropertyB) )
UBO_New.add( (next_property, hasType, Literal(a)) )
UBO_New.add( (next_property, hasValue, Literal(area)) )
b_id = b.get("id")
buildingsDict[new_b] = (b_id)
property_counter += 1
counter += 1
space_counter = 1
spacesDict = dict()
overallUsedSurfaces = list()
surf_counter = 1
for b in buildings:
# Will add respective Spaces to SpaceCollection1, etc.
b_id = b.get("id")
current_RDF_label = "none"
current_b_id = "none"
for item in buildingsDict:
if b_id == buildingsDict[item]:
current_RDF_label = item
current_b_id = buildingsDict[item]
#start = mapDict["SpaceCollectionLocation"][0]
#find = mapDict["SpaceCollection"][0]
spaces = b.xpath("./gb:Space", namespaces=self.namespaces)
for s in spaces:
hasSpaceMember = URIRef(base + "SpaceCollection:hasSpaceMember")
Space = URIRef(base + "Space")
new_s = "#Space" + str(space_counter)
new_s = URIRef(new_s)
SpaceCollection = URIRef(current_RDF_label)
UBO_New.add( (new_s, RDF.type, Space) )
UBO_New.add( (SpaceCollection, hasSpaceMember, new_s) )
hasProperty = URIRef(base + "Space:hasProperty")
Property = URIRef(base + "Property")
PropertyC = URIRef(base + "SpaceData")
UBO_New.add( (PropertyC, RDF.type, Property) )
UBO_New.add( (Space, hasProperty, PropertyC) )
hasType = URIRef(base + "Property:hasType")
hasValue = URIRef(base + "Property:hasValue")
c = "Coordinates"
# This can later be better automated with the mapping dictionary, for now just using known path:
space_coordinate_set = s.xpath("./gb:ShellGeometry/gb:ClosedShell/gb:PolyLoop/gb:CartesianPoint", namespaces=self.namespaces)
scps = list()
for coordinate_list in space_coordinate_set:
cp = list()
cartesian_points = coordinate_list.xpath("./gb:Coordinate", namespaces=self.namespaces)
#print "this should be 3 locations: ", cartesian_points
for point in cartesian_points:
cp.append(float(point.text))
coordinates = tuple(cp)
scps.append(coordinates)
#scps.append(cartesian_point)
#new_space.scps = scps # now returning a list of tuples for non-square walls to get max/min heights
next_property = "Property" + str(property_counter)
next_property = URIRef(base + next_property)
UBO_New.add( (next_property, RDF.type, PropertyC) )
UBO_New.add( (new_s, hasProperty, next_property) )
UBO_New.add( (next_property, hasType, Literal(c)) )
UBO_New.add( (next_property, hasValue, Literal(str(scps))) )
property_counter += 1
s_id = s.get("id") ##---------------------------------------------------------------------------------------------------------------------------
surf_id_list_for_this_space = list()
surfaces = tree.xpath("/gb:gbXML/gb:Campus/gb:Surface", namespaces=self.namespaces)
currentSpaceBoundaries = s.xpath("./gb:SpaceBoundary", namespaces=self.namespaces)
for c in currentSpaceBoundaries:
su_id = c.get("surfaceIdRef")
for surf in surfaces:
surface_ID = surf.get("id")
if surface_ID == su_id:
surf_id_list_for_this_space.append(surf)
# This will ignore shading devices, however, so maybe the pattern needs a
# using hasSpaceBoundaryMember as seen in pattern with dashed lines (pptx)
# So, keep track of the surfaces that ARE used for spaces, and handle others afterwards...
overallUsedSurfaces.append(surf)
#surfaces = tree.xpath("/gb:gbXML/gb:Campus/gb:Surface/gb:PlanarGeometry/gb:PolyLoop/gb:CartesianPoint", namespaces=self.namespaces)
#surfaces = s.xpath("./gb:SpaceBoundary/gb:PlanarGeometry/gb:PolyLoop/gb:CartesianPoint", namespaces=self.namespaces)
#surfaces = s.xpath("./gb:SpaceBoundary", namespaces=self.namespaces)
for surf in surf_id_list_for_this_space:
sID = surf.get("id")
hasSpaceBoundaryMember = URIRef(base + "Space:hasSpaceBoundaryMember")
SpaceBoundary = URIRef(base + "SpaceBoundary")
new_sf = "#SpaceBoundary" + str(surf_counter)
#new_sf = URIRef(base + new_sf)
new_sf = URIRef(new_sf)
Space = URIRef(new_s)
UBO_New.add( (new_sf, RDF.type, SpaceBoundary) )
UBO_New.add( (Space, hasSpaceBoundaryMember, new_sf) )
hasProperty = URIRef(new_sf + ":hasProperty")
PropertyD = URIRef(new_sf + ":SurfaceData2D")
#UBO_New.add( (PropertyD, RDF.type, Property) )
hasType = URIRef(":hasType") #new_sf +
hasValue = URIRef(":hasValue")
c = "2DSurfaceCoordinates"
surfc = list()
surf_coordinate_set = surf.xpath("./gb:PlanarGeometry/gb:PolyLoop/gb:CartesianPoint", namespaces=self.namespaces)
for coordinate_list in surf_coordinate_set:
cp = list()
cartesian_points = coordinate_list.xpath("./gb:Coordinate", namespaces=self.namespaces)
#print "this should be 3 locations: ", cartesian_points
for point in cartesian_points:
cp.append(float(point.text))
coordinates = tuple(cp)
surfc.append(coordinates)
UBO_New.add( (new_sf, hasProperty, PropertyD) )
UBO_New.add( (PropertyD, hasType, Literal(c)) )
UBO_New.add( (PropertyD, hasValue, Literal(str(surfc))) )
# Call to use the thicknesses and calculate coordinates then add triples for those
thickness = tree.xpath("/gb:gbXML/gb:Material", namespaces=self.namespaces)
for item in thickness:
t = item.get("unit")
t = thickness_to_coordinates()
if flag == 0:
# Translate thickness meters into feet for each entry in SurfaceToMaterialList
SurfaceToMaterialList = t.unitTranslate(SurfaceToMaterialList)
# Organize lengths and coordinates proportionally to devise which on is the thickness coordinate
proportionDict, hwtOrderDict = t.organizeThicknessesProportionally(surf, SurfaceToMaterialList, surfc)
memberFlag = 1
UBO_New, surf_counter, property_counter, material_counter = t.materialLayers(UBO_New, surfc, tree, surf, flag, str(sID), surf_counter, property_counter, new_s, proportionDict, hwtOrderDict, memberFlag, this_file_type, new_sf, material_counter)
flag = 1
surf_counter += 1
property_counter += 1
spacesDict[new_s] = (s_id, surf_id_list_for_this_space)
# Will use spacesDict later when adding openings into the mix
space_counter += 1
# Process Extraneous Surfaces or other Boundaries that will still be from this Spatial Location Group
#surfaces = tree.xpath("/gb:gbXML/gb:Campus/gb:Surface", namespaces=self.namespaces)
for s in surfaces:
if s not in overallUsedSurfaces:
sID = s.get("id")
# Add triple that will go from SpatialCollectionLocation--hasSpaceBoundaryMember--SpaceBoundary
SpaceCollectionLocation = URIRef("http://www.semanticweb.org/hfergus2/ontologies/2015/UBO#SpaceCollectionLocation") #?
hasSpaceBoundaryMember = URIRef(base + "SpaceCollectionLocation:hasSpaceBoundaryMember")
SpaceBoundary = URIRef(base + "SpaceBoundary")
# Still using surf_counter here will continue numbering surfaces
new_b = "#SpaceBoundary" + str(surf_counter)
#new_b = URIRef(base + new_b)
new_b = URIRef(new_b)
UBO_New.add( (new_b, RDF.type, SpaceBoundary) )
UBO_New.add( (SpaceCollectionLocation, hasSpaceBoundaryMember, new_b) )
hasProperty = URIRef(new_b + ":hasProperty")
#Property = URIRef(base + "Property")
PropertyD = URIRef(new_b + ":SurfaceData2D")
hasType = URIRef(":hasType") #new_b +
hasValue = URIRef(":hasValue")
c = "2DSurfaceCoordinates"
surfc = list()
surf_coordinate_set = s.xpath("./gb:PlanarGeometry/gb:PolyLoop/gb:CartesianPoint", namespaces=self.namespaces)
for coordinate_list in surf_coordinate_set:
cp = list()
cartesian_points = coordinate_list.xpath("./gb:Coordinate", namespaces=self.namespaces)
#print "this should be 3 locations: ", cartesian_points
for point in cartesian_points:
cp.append(float(point.text))
coordinates = tuple(cp)
surfc.append(coordinates)
UBO_New.add( (new_b, hasProperty, PropertyD) )
UBO_New.add( (PropertyD, hasType, Literal(c)) )
UBO_New.add( (PropertyD, hasValue, Literal(str(surfc))) )
# Call to use the thicknesses and calculate coordinates then add triples for those
t = thickness_to_coordinates()
if flag == 0:
# Translate thickness meters into feet for each entry in SurfaceToMaterialList
SurfaceToMaterialList = t.unitTranslate(SurfaceToMaterialList)
# Organize lengths and coordinates proportionally to devise which on is the thickness coordinate
proportionDict, hwtOrderDict = t.organizeThicknessesProportionally(s, SurfaceToMaterialList, surfc)
memberFlag = 0
UBO_New, surf_counter, property_counter, material_counter = t.materialLayers(UBO_New, surfc, tree, s, flag, str(sID), surf_counter, property_counter, new_b, proportionDict, hwtOrderDict, memberFlag, this_file_type, new_b, material_counter)
flag = 1
surf_counter += 1
property_counter += 1
#print "Does this make sense?"
#print UBO_New.serialize(format='turtle')
return UBO_New, base
def preProcess(self, inputfile, mapDict, tree):
"""
Pre-Process gbXML based on known structure
(add openings later as SpaceBoundaryElement)
spacesDict[new_s] = (s_id, surf_id_list)
"""
surfaceToConstr = dict()
ConstrToMaterial = dict()
#materialDict[surfaceID] = (material ID, thickness, material ID, thickness, etc.)
SurfaceToMaterialList = dict()
surfaces = tree.xpath("/gb:gbXML/gb:Campus/gb:Surface", namespaces=self.namespaces)
constructions = tree.xpath("/gb:gbXML/gb:Construction", namespaces=self.namespaces)
layers = tree.xpath("/gb:gbXML/gb:Layer", namespaces=self.namespaces)
materials = tree.xpath("/gb:gbXML/gb:Material", namespaces=self.namespaces)
# Map a Construction ID to each SurfaceID
for s in surfaces:
surfaceID = s.get("id")
obj_constr = s.get("constructionIdRef")
for c in constructions:
constrID = c.get("id")
if obj_constr == None:
obj_constr = constrID
if constrID == obj_constr:
match = constrID
surfaceToConstr[surfaceID] = match
#for surfaceID in surfaceToConstr:
# print "surfaceToConstr: ", surfaceID, surfaceToConstr[surfaceID]
# Map a Material Name/ID? Set to each Construction ID
for c in constructions:
constrID = c.get("id")
layerSet = c.xpath("./gb:LayerId", namespaces=self.namespaces)
if not layerSet:
layer_id = None
else:
for layer in layerSet:
layer_id = layer.get("layerIdRef")
#print "layerID: ", layer_id
matThicknessSet = list()
for layer in layers:
testLayerID = layer.get("id")
if testLayerID == layer_id:
elements = layer.xpath("./gb:MaterialId", namespaces=self.namespaces)
for element in elements:
material_id_num = element.get("materialIdRef")
for m in materials:
singleMaterial = m.get("id")
if singleMaterial == material_id_num:
thickness = float(m.xpath("./gb:Thickness", namespaces=self.namespaces)[0].text)
#thickness = m.xpath("./gb:Thickness", namespaces=self.namespaces)
#for value in thickness:
#new_material.thickness_unit = value.get("unit")
if not thickness:
thickness = None
#thickness_unit = None
mattuple = (singleMaterial, thickness)
mtuple = tuple(mattuple)
#print "new mTuple: ", mtuple
matThicknessSet.append(mtuple)
# Appended a list of tuples formatted: (material ID, thickness, material ID, thickness, etc.)
ConstrToMaterial[constrID] = matThicknessSet
#for constrID in ConstrToMaterial:
# print "ConstrToMaterial: ", constrID, ConstrToMaterial[constrID]
# Fill SurfaceToMaterialList by matching a Surface ID to a Material Set
for surfaceID in surfaceToConstr:
construction = surfaceToConstr[surfaceID]
tupleMaterialSet = ConstrToMaterial[construction]
SurfaceToMaterialList[surfaceID] = tupleMaterialSet
return SurfaceToMaterialList
def lookup(self, mapDict, inputfile):
"""
SpaceCollectionLocation ('Campus', ['x'])
SpaceCollectionLocation-Property ('Location', ['Location', '[Longitude]', '[Latitude]', '[Elevation]'])
SpaceCollection ('Building', ['x'])
SpaceCollection-Property ('Coordinate', ['ShellGeometry', 'ClosedShell', 'PolyLoop', 'CartesianPoint', '[Coordinate]'])
Space ('Space', ['x'])
Space-Property ('Coordinate', ['PlanarGeometry', 'PolyLoop', 'CartesianPoint', '[Coordinate]'])
SpaceBoundary ('Surface', ['x'])
SpaceBoundary-Property ('Coordinate', ['PlanarGeometry', 'PolyLoop', 'CartesianPoint', '[Coordinate]'])
SpaceBoundaryElement ('Material', ['x'])
SpaceBoundaryElement-Property ('Thickness', ['Construction', 'Layer', '[Thickness]'])
"""
return None
| [
"Holly.T.Ferguson.57@nd.edu"
] | Holly.T.Ferguson.57@nd.edu |
7e8686362d0940c1585ac58643d980ba04aeb25c | e320f6b1061970791c4e8def4dd7722e098e7f27 | /googlemaps/plotter.py | 02b63ba5da169a642d664fddab8206ec89965dd8 | [
"Apache-2.0"
] | permissive | shehla/house-traffic-profiler | fe1acaf156f0b94f9c2e680b941d94dca845d242 | 543324c3a2e5dfc0dcd8c7bb8e46828369e44d57 | refs/heads/master | 2020-05-21T17:46:45.311360 | 2016-09-28T03:06:58 | 2016-09-28T03:06:58 | 63,258,843 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | import matplotlib
matplotlib.use('Agg')
import datetime
from matplotlib.dates import date2num
import numpy as np
import pylab as pl
import stock_common
def scatter_values_with_dates(buy_values, buy_dates, color, marker_type):
dates = stock_common.convert_str_to_datetime(buy_dates)
buy_dates = [date2num(dd) for dd in dates]
pl.scatter(buy_dates, buy_values, marker=marker_type, s=40, c=color, edgecolor=color)
def plot_with_dates(recs, qty_name, date_fmt='%m/%d/%Y', fig=None, line_type='-', is_scatter=False, color='b', size=1, lw=1):
recs = stock_common.sort_list(recs, 'epoch')
if not fig:
fig = pl.figure()
graph = fig.add_subplot(111)
dates_num = [r['epoch'] for r in recs]
if is_scatter:
print('---->', qty_name)
graph.scatter(dates_num, [v[qty_name] for v in recs], linestyle=line_type, linewidth=0, color=color, s=size)
else:
graph.plot(dates_num, [v[qty_name] for v in recs], linestyle=line_type, linewidth=lw, color=color)
##################
x_range = sorted([int(r['epoch']) for r in recs])
num_labels = 8
x_gap = (x_range[-1] - x_range[0]) / num_labels
x_label_epochs = range(x_range[0], x_range[-1], x_gap)
graph.set_xticks(x_label_epochs)
x_label_dates = [datetime.datetime.fromtimestamp(e) for e in x_label_epochs]
xticks_labels = [d.strftime(date_fmt) for d in x_label_dates]
graph.set_xticklabels(xticks_labels)
##################
return fig, graph
def plot_relative_to_start(stock_data, qty_name, fig=None, show_xticks=True, line_type='-', is_scatter=False, color='b', size=1, lw=1):
start_time = stock_data[0]['epoch']
for r in stock_data:
r['epoch'] = r['epoch'] - start_time
if fig == None:
fig = pl.figure(figsize=(12, 6))
graph = fig.add_subplot(111)
plot_numbers_against_dates(stock_data, fig, qty_name, line_type, is_scatter, color, size)
graph.grid(True)
return fig, graph
# stock_data is a list of price recs which has volume, price, date(string) etc
def plot_stock_qty(stock_data, qty_name, fig=None, show_xticks=True, line_type='-', is_scatter=False, color='b', size=1, lw=1):
if len(stock_data[0]['date'].split()) == 2:
is_hourly = True
else:
is_hourly = False
if is_hourly:
epoch_times = [stock_common.get_epoch_time(r['date'], is_hourly=True) for r in stock_data]
else:
epoch_times = [stock_common.get_epoch_time(r['date']) for r in stock_data]
prices = [r[qty_name] for r in stock_data]
if fig == None:
fig = pl.figure(figsize=(12, 6))
graph = fig.add_subplot(111)
#graph.plot(epoch_times, prices)
plot_numbers_against_dates(stock_data, fig, qty_name, line_type, is_scatter, color, size)
if show_xticks:
plot_x_ticks_with_dates(graph, stock_data, False)
graph.grid(True)
return fig, graph
def plot_bar(x_labels, y_vals, fig, c, width=0.35):
graph = fig.add_subplot(111)
ind = np.arange(len(y_vals))
graph.bar(ind+width, y_vals, width=0.35, color=c)
graph.set_xticks(ind+0.35)
graph.set_xticklabels(x_labels)
graph.set_xlabel('Year')
graph.set_ylabel('Annual return (%)')
return graph
def plot_x_ticks_with_dates(graph, current_value, do_all):
if not do_all:
LABEL_DIFF = int(len(current_value) / 3)
else:
LABEL_DIFF = 1
dates_strings = [dd['date'] for dd in current_value[0::LABEL_DIFF]]
#dates = stock_common.convert_str_to_datetime(dates_strings)
#dates_num = [date2num(dd) for dd in dates]
#dates_num = [int(dd['epoch']) for dd in current_value[0::LABEL_DIFF]]
dates_num = [int(dd['epoch']) for dd in current_value]
dates_num = range(min(dates_num), max(dates_num), int((max(dates_num) - min(dates_num)) / 8.0))
print('=======>', dates_num, int(max(dates_num) - min(dates_num) / 8.0))
graph.set_xticks(dates_num)
#dates = stock_common.convert_str_to_datetime(dates_strings)
dates = [datetime.datetime.fromtimestamp(r) for r in dates_num]
graph.set_xticklabels(['/'.join('/'.join(str(r).split()[0].split('-')).split('/')[1:])+' '+str(r).split()[1].split(':')[0]+':00' for r in dates], fontsize=8)
return graph
def plot_numbers_against_numbers(x_vals, y_vals, fig):
graph = fig.add_subplot(111)
graph.plot(x_vals, y_vals)
return graph
# Takes a dict having key/values for amount and date. Plots
# amounts against dates
def plot_numbers_against_dates(current_value, fig, property_name='amount', line_type='-', is_scatter=False, color='b', size=1, lw=1):
graph = fig.add_subplot(111)
#dates = [dd['date'] for dd in current_value]
#dates = stock_common.convert_str_to_datetime(dates)
#dates_num = [date2num(dd) for dd in dates]
dates_num = [r['epoch'] for r in current_value]
if is_scatter:
graph.scatter(dates_num, [v[property_name] for v in current_value], linestyle=line_type, linewidth=2, color=color, s=size)
else:
graph.plot(dates_num, [v[property_name] for v in current_value], linestyle=line_type, linewidth=lw)
return graph
| [
"ubuntu@ip-172-31-14-204.us-west-1.compute.internal"
] | ubuntu@ip-172-31-14-204.us-west-1.compute.internal |
3861ea6d7d2718a347c6dc8c673b025145618266 | 244f21fdb16d07c27cf89ce90a7f9c234f45ca89 | /api/models.py | e80a41c064730b454be20fb0f9d13e2f5764790b | [] | no_license | falkeura/REST-API-Design | 73f235f16a90045682ede2fb1c1dd790f7235797 | 572f0c9811df17293d12b7e54983a413351c171f | refs/heads/master | 2021-01-17T18:21:24.072612 | 2016-06-15T10:31:02 | 2016-06-15T10:31:02 | 61,199,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | from django.db import models
class Artist(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=True, default='')
year = models.IntegerField()
#albums = models.ManyToManyField(to=Album)
def __str__(self):
return self.name
class Track(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=100, blank=True, default='')
duration = models.IntegerField()
artists = models.ManyToManyField(to=Artist)
def __str__(self):
return self.name
class Album(models.Model):
id = models.AutoField(primary_key=True)
artist = models.ForeignKey(to=Artist)
name = models.CharField(max_length=100, blank=True, default='')
year = models.IntegerField()
tracks = models.ManyToManyField(to=Track)
def __str__(self):
return self.name
| [
"falkeura@gmail.com"
] | falkeura@gmail.com |
c145412791f2ebae17c77dff3bb1a42564469b78 | ba91d301f67130b01dac8febd577a255a3cc9877 | /main/migrations/0007_delete_counter.py | cc2192ddc997aea15673a61ea642308b276e6034 | [] | no_license | lars0320/django-deploy-test | 06fa2581eb2e46ccfbae759a54b52049a4b6ee98 | a716204f6784c9e043c416c2f25d516b99d1e16d | refs/heads/master | 2023-02-15T10:04:02.338236 | 2021-01-11T02:17:57 | 2021-01-11T02:17:57 | 305,244,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | # Generated by Django 3.1.2 on 2020-12-07 15:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0006_counter'),
]
operations = [
migrations.DeleteModel(
name='Counter',
),
]
| [
"lars@Larsui-MacBookPro.local"
] | lars@Larsui-MacBookPro.local |
960d2ddd5d6ba0162538f8c11cfd3df4a99ade54 | 3a25ca7b3818df651566390e1c2a1849750edf4d | /python/bin/servicetest.py | 9268ededa727a8984509d8c4f9e56300201b63bf | [] | no_license | rr1mand0/sandbox | 2068107d126ed75be59ff34f13317f48644564d2 | 6ed10c1e867821d14415c6cc2a3a4b7916d127cc | refs/heads/master | 2022-12-24T05:50:10.843272 | 2016-05-05T19:42:31 | 2016-05-05T19:42:31 | 3,511,839 | 0 | 1 | null | 2022-12-19T12:01:22 | 2012-02-22T06:00:23 | C++ | UTF-8 | Python | false | false | 876 | py | import unittest
import re
import sys
from couch import *
import json
class TestThesaurus(unittest.TestCase):
def setUp(self):
self._thesaurus = {
"cabage" : "cabbage",
"cabagge" : "cabbage",
"tom's": "tomatoes",
"toms": "tomatoes",
"tom": "tomatoes",
"tomato": "tomatoes",
"tomatoe": "tomatoes",
"tomatos": "tomatoes",
"veggie" : "veggies"
}
self.thes_dict = Thesaurus("test-thesaurus")
def tearDown(self):
pass
#self.thes_dict.destroy()
def test_add_definition(self):
self.thes_dict.set_thesaurus(self._thesaurus)
self.thes_dict.save()
synonyms = self.thes_dict.get_synonyms('tomatoes')
self.assertNotEqual(synonyms, None)
self.assertTrue(synonyms.__len__(), 6)
self.thes_dict.add_synonym("vegetables", "veggies")
if __name__ == '__main__':
unittest.main()
| [
"raymund.rimando@arcticwolf.com"
] | raymund.rimando@arcticwolf.com |
a4addfee73db8a0a6024bea2da7812a3a61be803 | 68be01bcf1d82e77f8439ca08db98b60df265dd5 | /yt1209/unittest_interval.py | 92ed2e372d7d27b9b57f7d7941eea74f10410814 | [] | no_license | ds-ga-1007/assignment7 | 549d889201d7dbce45614a9b7fd3f72e5d2c67fc | 33c7a3e579c37ce3096099a350a7c8135b302ea4 | refs/heads/master | 2020-12-24T11:32:56.420860 | 2016-12-08T02:07:51 | 2016-12-08T02:07:51 | 73,031,506 | 0 | 76 | null | 2017-01-06T17:44:40 | 2016-11-07T01:31:54 | Python | UTF-8 | Python | false | false | 3,181 | py | '''
Created on Nov 14, 2016
@author: Yovela
'''
import unittest
from interval import interval, InputError, MergedError, mergeIntervals, mergeOverlapping, insert
class Test(unittest.TestCase):
def test_validinterval(self):
""" test for interval function, test the range of integers it represents"""
int1 = interval("[1,4]")
int2 = interval("(2,5]")
int3 = interval("[4,8)")
int4 = interval("(3,9)")
self.assertEqual(int1.represent, [1, 2, 3, 4])
self.assertEqual(int2.represent, [3, 4, 5])
self.assertEqual(int3.represent, [4, 5, 6, 7])
self.assertEqual(int4.represent, [4, 5, 6, 7, 8])
def test_invalidinterval(self):
""" test for invalid interval input"""
with self.assertRaises(InputError):
interval("1234")
with self.assertRaises(InputError):
interval("(2,2)")
with self.assertRaises(InputError):
interval("[4,1)")
def test_merge_success(self):
"""test the mergeInterval function which successfully processed """
int1 = interval("(1,5]")
int2 = interval("(3,5]")
int3 = interval("[4,9]")
int4 = interval("(8,10]")
int5 = interval("[10,18]")
self.assertEqual(str(interval("(1,5]")), str(mergeIntervals(int1, int2)))
self.assertEqual(str(interval("(3,9]")), str(mergeIntervals(int2, int3)))
self.assertEqual(str(interval("[4,10]")), str(mergeIntervals(int3, int4)))
self.assertEqual(str(interval("(8,18]")), str(mergeIntervals(int4, int5)))
def test_merge_fail(self):
"""test the mergeInterval function which can't be merged """
int1 = interval("(1,2]")
int2 = interval("(3,5]")
int3 = interval("[7,9]")
int4 = interval("(10,12]")
int5 = interval("[18,18]")
with self.assertRaises(MergedError):
self.interval = mergeIntervals(int1, int2)
with self.assertRaises(MergedError):
self.interval = mergeIntervals(int2, int3)
with self.assertRaises(MergedError):
self.interval = mergeIntervals(int3, int4)
with self.assertRaises(MergedError):
self.interval = mergeIntervals(int4, int5)
def test_mergeOverlapping(self):
"""test the mergeOverlapping function"""
int1 = interval("[1,5]")
int2 = interval("[2,6)")
int3 = interval("(8,10]")
int4 = interval("[8,18]")
interval_for_merge = [int1, int2, int3, int4]
merged_list = [interval("[1,6)"), interval("[8,18]")]
self.assertEqual(str(mergeOverlapping(interval_for_merge)), str(merged_list))
def test_insert(self):
"""test the insert function"""
int1 = interval('[1,2]')
int2 = interval("(3,5)")
int3 = interval("[6,7)")
int4 = interval("(8,10]")
int5 = interval("[12,16]")
intervals_l = [int1, int2, int3, int4, int5]
newint = interval("[4,9]")
self.assertEqual(str(insert(intervals_l, newint)), str([interval("[1,2]"), interval("(3,10]"), interval("[12,16]")]))
if __name__ == "__main__":
unittest.main() | [
"Yovela@tuyuweideair.home"
] | Yovela@tuyuweideair.home |
b148058c1ad39e9e04a584f58f85df8d48650094 | d5fdece50ddc00f2a5686cd0839716bc0ca55622 | /Program/.pythonstartup.py | 07d95ebb4121a7f68c5f50b4cabdad15bab2cea2 | [] | no_license | uchihanuo/helloworld | 47611945919c0d82b67f0a0c13107e7793c7b0b2 | bf3103fb69e2ffcc4e5a7201a2f5b5086e3d9b6c | refs/heads/main | 2023-06-20T20:04:33.947053 | 2021-07-23T07:50:39 | 2021-07-23T07:50:39 | 388,643,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import readline
import rlcompleter
import atexit
import os
# tab autocomplete
readline.parse_and_bind('tab: complete')
# history file
histfile = os.path.join(os.environ['Home', '.pythonhistory'])
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.worte_history_file, histfile)
del os, histfile, readline, rlcompleter
| [
"jtrckr@163.com"
] | jtrckr@163.com |
e2f599d018fbbe61741e035468b72b0a90cef398 | bc9bf9aa31595bd329cb685210330f54d04bcdc5 | /Python/Camera calibration/Nokia/UNDISTORT.py | 43b7f65e9174601cfee0b63d6128d2f48a767ca1 | [] | no_license | akirilltikhonov/Tikhonov_Nagin | 05c38b31bb43c9cf666537ad793ae6f594dad0d2 | 5ebec61bbb4a12eae3fa43c8d4db5a712dab0811 | refs/heads/master | 2022-09-13T12:40:35.838695 | 2020-06-02T21:23:08 | 2020-06-02T21:23:08 | 219,587,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | import numpy as np
import cv2
import glob
# load matrix intrisinc parametrs and distortional coefficient
mtx = np.load('mtx.npy')
dist = np.load('dist.npy')
#dist = np.zeros((1,5), np.float32)
print(mtx)
print(dist)
#dist [0,0] = -0.2;
print(mtx)
print(dist)
Num = 1
images = glob.glob('photo before and after calibration\*-1.jpg')
for fname in images:
img = cv2.imread(fname)
# Determine windth and height frame, formation new matrix
# of intrisinc parametrs and ROI for crop image
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),1,(w,h))
# undistorted frame
dst = cv2.undistort(img, mtx, dist, None, newcameramtx)
# crop the image
#x,y,w,h = roi
#dst = dst[y:y+h, x:x+w]
cv2.imwrite('photo before and after calibration/({})-2.jpg'.format(Num),dst)
Num = Num + 1
| [
"akirilltikhonov@gmail.com"
] | akirilltikhonov@gmail.com |
010106af979697d7647a4ff57d51f29884b5f48e | c2b4558a27eb913ca17025c9f8b0869e1166320b | /todo/migrations/0001_initial.py | 5aebb1a6834985bd5c00d2aea930b5fdbcd6a3b5 | [] | no_license | grvcisco/todo-app | 10061232cfc6f8353ee70a003472dd56bf08fe26 | ce952c0d63ea16ae09bdce6c0f47f06cd87fce53 | refs/heads/master | 2022-11-14T10:46:08.529447 | 2020-07-08T09:32:05 | 2020-07-08T09:32:05 | 278,043,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Generated by Django 2.2.8 on 2020-07-01 13:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('memo', models.TextField(blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('dateCompleted', models.DateTimeField(null=True)),
('important', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"er.gauravsri@gmail.com"
] | er.gauravsri@gmail.com |
d495f3bc21c8a9c1aaded2b9d73249a00aeee556 | 47bed3c25e1ee571c236b0b44f7824995ae6a33e | /auto_adb.py | eb59fd811dd8bf824c57d661d02c454f335ae34b | [] | no_license | nikki-liyao/autotest | d47d12c9ff3ceb9c75cceb5304f868aa662c5d38 | 1991fdea557b950ac89daa0ffb7aaeb44e3a9004 | refs/heads/master | 2020-09-10T04:01:25.851117 | 2019-11-14T09:06:03 | 2019-11-14T09:06:03 | 221,643,191 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | # -*- coding: utf-8 -*-
import os
import subprocess
import platform
class auto_adb():
def __init__(self):
try:
with open('adb_directory', "r", encoding='utf-8') as f1:
adb_directory = f1.read()#读取 adb_directoty 内容并赋值
adb_path = adb_directory + 'adb.exe'
print(adb_path)
subprocess.Popen([adb_path], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)#创建adb 进程
self.adb_path = adb_path
except OSError:
if platform.system() == 'Windows':#识别操作系统
adb_path = os.path.join('Tools', 'adb.exe')
print(adb_path)
try:
subprocess.Popen(
[adb_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)# stdout=subprocess.PIPE 输出到 一个文件 stderr=subprocess.PIPE 错误信息输出到一个文件
self.adb_path = adb_path
except OSError:
pass
else:
try:
subprocess.Popen(
[adb_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
pass
print('请安装 ADB 及驱动并配置环境变量')
print('具体链接: https://github.com/wangshub/wechat_jump_game/wiki')
exit(1)
def get_screen(self):
process = os.popen(self.adb_path + ' shell wm size')#不明白
output = process.read()
return output
def run(self, raw_command):
command = '{} {}'.format(self.adb_path, raw_command)#不明白
process = os.popen(command)
output = process.read()
return output
def test_device(self):
print('检查设备是否连接...')
command_list = [self.adb_path, 'devices']
process = subprocess.Popen(command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.communicate()#发送和读取process进程数据
if output[0].decode('utf8') == 'List of devices attached\n\n':
print('未找到设备')
print('adb 输出:')
for each in output:
print(each.decode('utf8'))
exit(1)
print('设备已连接')
print('adb 输出:')
for each in output:
print(each.decode('utf8'))
def test_density(self):
process = os.popen(self.adb_path + ' shell wm density')
output = process.read()
return output
def test_device_detail(self):
process = os.popen(self.adb_path + ' shell getprop ro.product.device')
output = process.read()
return output
def test_device_os(self):
process = os.popen(self.adb_path + ' shell getprop ro.build.version.release')
output = process.read()
return output
def adb_path(self):
return self.adb_path
| [
"18339810975@163.com"
] | 18339810975@163.com |
36c6f52b8183c92be1e1e822824a234b1519eb8f | ec6c5a8df01673132137e1a1179f85bb4179ff78 | /Majority.py | 9dbb512188fc5df228af121611cebbeaf8acca53 | [] | no_license | ShlomoZa/algorithms | 1b59fc76f745195fd3fd21757e93ce15a74f4165 | 85485bc9c66046f8feac840270126e6ab013e2e6 | refs/heads/master | 2020-05-27T09:33:54.699281 | 2019-05-25T11:04:51 | 2019-05-25T11:04:51 | 188,567,960 | 0 | 0 | null | 2019-05-25T13:11:58 | 2019-05-25T13:11:58 | null | UTF-8 | Python | false | false | 870 | py | def majority_element(seq, default=None):
"""Find which element in *seq* sequence is in the majority.
Return *default* if no such element exists.
Use Moore's linear time constant space majority vote algorithm
"""
candidate = default
count = 0
for e in seq:
if count != 0:
count += 1 if candidate == e else -1
else: # count == 0
candidate = e
count = 1
# check the majority
return candidate if seq.count(candidate) > len(seq) // 2 else default
lst = [34,15,34,34,34,34,15,15,34,34,22,15,15,15,15,34,15,34,15,15,34,15,34,15,34,22,22,15,34,15,34,15,34,15,34,22,34,22,34,34,34,34,34,22,15,34,34,34,15,34,15,15,22,34,15,15,34,34,34,22,34,15,15,34,34,34,15,22,22,22,15,34,34,22,34,34,22,34,15,22,34,34,15,22,34,34,34,34,22,22,15,34,34,22,34,34,34,22,34,22]
print(majority_element(lst)) | [
"maorlolz1@gmail.com"
] | maorlolz1@gmail.com |
03e6be1c5937d65a391365bc08609bd0edec78a5 | 2579563d2571e52819e502454c1ccffba160855d | /LeibnitzRule_p1.py | dfe686eeb7a577587cb65df89f8389653f6cf3db | [] | no_license | emonhossainraihan/YouTube-videos-1 | ebe96504f59fec3dcd99ffcb746d0efc767886e8 | 071228a7b0bade53089e4d6ea8e63ccc3d7ef750 | refs/heads/master | 2023-04-14T01:12:05.714504 | 2021-04-14T13:40:53 | 2021-04-14T13:40:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,670 | py | import numpy as np
from manimlib.imports import*
class IntroAndIntegrals(GraphScene):
CONFIG = {
# "camera_config":{
# "background_color": "#060100"
# },
"x_min": -1,
"x_max": 3,
"x_axis_width": 8,
"y_min": -1,
"y_max": 3,
"y_axis_height": 5,
"x_tick_frequency": 0.5,
"y_tick_frequency": 0.5,
"axes_color": "#cf0e4e",
"graph_origin": LEFT+DOWN,
"default_graph_colors": [TEAL_E, GREEN, YELLOW],
"default_derivative_color": GREEN,
"default_input_color": YELLOW,
"stroke_width": 5,
"num_rects" : 200,
"number_line_kwargs": {
"include_numbers": True,
"include_tip": True,
},
"func": lambda x: 2*x**2 - x**3 + 1,
"rieman_rects_kwargs": {
"x_min": 0,
"x_max": 2,
"stroke_width": 0.1,
"stroke_color": "#aa9f4b",
"fill_opacity": 0.8,
"start_color": "#d40b37", # "#d40b37", "#d4b90b"
"end_color": "#d4b90b",
}
}
def construct(self):
self.board = ImageMobject("stripes.jpg").set_width(FRAME_WIDTH)
self.add(self.board)
self.introduce_context()
self.recollect_integral()
def introduce_context(self):
context = TextMobject("Leibnitz integral rule over constant limits").set_width(
FRAME_WIDTH-1).to_edge(UP, 1)
context.set_color("#d9aa26")
underline = Line(context.get_left(), context.get_right()).next_to(context, DOWN)\
.set_style(stroke_width=2, stroke_color="#a7f542")
formula = TexMobject(
"\\frac{\mathrm{d}}{\mathrm{d} t}\int_{a}^{b}f(x,t)dx = \int_{a}^{b}\\frac{\partial }{\partial t}f(x,t)dx"
).next_to(underline, 3.5*DOWN).set_width(FRAME_WIDTH-2).set_color(MAROON) # "#aa9f4b"
self.add(context)
self.wait(4.5)
self.play(ShowCreation(underline), run_time=2)
self.wait()
self.play(DrawBorderThenFill(formula),run_time=2, rate_func=linear)
self.wait(4)
self.play(*[FadeOut(mob) for mob in self.mobjects if mob is not self.board],lag_ratio=0.15)
def recollect_integral(self, **graph_kwargs):
self.setup_axes() # Use self.setup_axes(animate=True) and comment the line 83
curve = self.get_graph(self.func)
graph = VGroup(self.axes, curve).to_edge(LEFT)
fx = TexMobject("f(x) = 2x^{2} - x^{3} + 1").set_color(YELLOW_D).to_edge(UP,buff=1).shift(1.5*LEFT)
self.wait(1.5)
self.play(Write(fx), lag_ratio=0.3, run_time=4)
self.wait()
self.show_axes(run_time=0.5,lag_ratio=0) # This won't work for you, comment this line.
self.play(ShowCreation(curve), run_time=1)
self.wait()
self.sweep_through_area(curve)
self.wait()
# divide into segments, raise the first set of rectangles from base
eq_spaced_lines = VGroup()
dx = ValueTracker(0.5)
dx_val = DecimalNumber(dx.get_value(),include_sign=False,num_decimal_places=2).next_to(self.underlying_area,3*DOWN)
rieman_rects = self.get_riemann_rectangles(curve, dx=dx.get_value(), **self.rieman_rects_kwargs)
delta_x = TexMobject("\\triangle x=").next_to(dx_val,LEFT)
delta_x_brace = Brace(rieman_rects[0],DOWN,buff=SMALL_BUFF)
for i in np.arange(0,2.5,dx.get_value()):
line = Line(self.x_axis.number_to_point(i),self.input_to_graph_point(i,curve))
eq_spaced_lines.add(line)
self.play(Write(eq_spaced_lines),run_time=3)
self.wait()
self.play(Write(delta_x_brace),Write(delta_x),Write(dx_val))
self.wait()
self.play(*[GrowFromEdge(rect,line.get_start()) for rect,line in zip(rieman_rects,eq_spaced_lines)],
*[FadeOut(line) for line in eq_spaced_lines],lag_ratio=0.15,run_time=2)
self.wait()
height = Brace(rieman_rects[0], LEFT)
height.add(TexMobject("f(x)").next_to(height,LEFT))
self.play(Write(height))
self.wait(4)
# sum of areas of rects approximates the area under curve
approx_equals = TexMobject("\\approx ").move_to(self.area_equals[0][4].get_center())
equals_copy = self.area_equals[0][4].copy()
sigma = TexMobject("\sum f(x)", "\\triangle x").move_to(self.area_equals[0][-1].get_center()+RIGHT)
self.play(FocusOn(self.area_equals[0][-1]))
self.play(
ReplacementTransform(self.area_equals[0][-1],sigma),
ReplacementTransform(self.area_equals[0][4], approx_equals),
run_time=2)
self.wait(4)
# delta x can be factored out from sigma
self.play(
sigma[1].shift,1.6*LEFT,
sigma[0].shift,RIGHT,
run_time=4 , rate_func=there_and_back_with_pause
)
self.wait(3)
# update rieman rects as delta x approaches 0
rieman_rects.add_updater(lambda r:r.become(self.get_riemann_rectangles(curve,dx=dx.get_value(),**self.rieman_rects_kwargs)))
dx_val.add_updater(lambda x:x.set_value(dx.get_value()))
delta_x_brace.add_updater(lambda b: b.become(Brace(rieman_rects[0], DOWN, buff=SMALL_BUFF)))
self.add(rieman_rects,dx_val,delta_x_brace)
self.play(dx.set_value,0.01,run_time=10,rate_func=linear)
self.wait(6)
# change in notation as dx-->0
rect_approx = SurroundingRectangle(approx_equals,buff=SMALL_BUFF)
rect_sigma = SurroundingRectangle(sigma[0][0],buff=SMALL_BUFF)
rect_deltax = SurroundingRectangle(sigma[1],buff=MED_SMALL_BUFF)
integrand = TexMobject("\\int_{0}^{2}").move_to(sigma.get_center())
surr_rects = [rect_deltax, rect_sigma, rect_approx]
transform_from = [sigma[1][-2], sigma[0][0], approx_equals]
transform_to = [TexMobject("d").next_to(sigma[1][-1],LEFT,buff=0.05),
integrand.shift(0.8*LEFT),
equals_copy]
for i in range(3):
self.play(ShowCreation(surr_rects[i]))
self.remove(surr_rects[i])
self.play(Transform(transform_from[i], transform_to[i]))
self.wait()
self.wait(6)
def sweep_through_area(self, graph):
sweeper = Triangle().scale(0.15).move_to(self.x_axis.number_to_point(0))
sweeper.shift((sweeper.get_top()[1]-sweeper.get_center()[1])*DOWN)
l_pivot = sweeper.copy()
r_pivot = sweeper.copy().shift(2*self.space_unit_to_x*RIGHT)
t = ValueTracker(0)
area = self.get_area(graph, t_min=0, t_max=t.get_value())
self.play(ShowCreation(l_pivot), ShowCreation(r_pivot), ShowCreation(area))
self.wait(2)
area.add_updater(
lambda area:area.become(self.get_area(graph,t_min=0,t_max=t.get_value()))
)
self.add(area)
self.area_equals = TextMobject("Area = ?").scale(1.5).next_to(graph,RIGHT).shift(5*UP+2*LEFT)
self.area_equals[0][:4].set_color_by_gradient(BLUE, GREEN)
self.play(sweeper.move_to,r_pivot,
t.set_value,2,
Write(self.area_equals),
run_time=3,rate_func=smooth)
self.wait()
self.underlying_area = area
class TheDerivative(GraphScene):
CONFIG = {
"x_min": -1,
"x_max": 3,
"x_axis_width": 8,
"y_min": -1,
"y_max": 3,
"y_axis_height": 5,
"x_tick_frequency": 0.5,
"y_tick_frequency": 0.5,
"axes_color": "#cf0e4e",
"graph_origin": LEFT+DOWN,
"default_graph_colors": [TEAL_E, GREEN, YELLOW],
"default_derivative_color": GREEN,
"default_input_color": YELLOW,
"stroke_width": 5,
"num_rects": 200,
"number_line_kwargs": {
"include_numbers": True,
"include_tip": True,
},
"func": lambda x: 2*x**2 - x**3 + 1,
"rieman_rects_kwargs": {
"x_min": 0,
"x_max": 2,
"stroke_width": 0.1,
"stroke_color": "#aa9f4b",
"fill_opacity": 0.8,
"start_color": "#d40b37", # "#d40b37", "#d4b90b"
"end_color": "#d4b90b",
}
}
def construct(self):
self.board = ImageMobject("stripes.jpg").set_width(FRAME_WIDTH)
self.add(self.board)
self.recollect_derivs()
def recollect_derivs(self):
curve = self.get_graph(self.func)
graph = VGroup(self.axes, curve).to_edge(LEFT).shift(2*LEFT)
fx = TexMobject("f(x) = 2x^{2} - x^{3} + 1").set_color(YELLOW_D).next_to(self.y_axis.number_to_point(3),buff=.5)
text1 = TextMobject("How sensitive the function is,")
text1[0][3:12].set_color(YELLOW_D)
text2 = TextMobject("to tiny changes in input ?")
context = VGroup(text1,text2).arrange_submobjects(DOWN, aligned_edge = LEFT).scale(.8).to_corner(UR)
context.add_background_rectangle(stroke_color=YELLOW_D, stroke_width=1.5, stroke_opacity=1,opacity=0,buff=0.2)
self.add(graph,fx)
self.wait(4)
self.play(Write(context),run_time=3)
self.wait()
x = 0.5
dx_tracker = ValueTracker(1)
dx = DecimalNumber(dx_tracker.get_value(), include_sign=False,num_decimal_places=2)
point1 = Dot(self.input_to_graph_point(x,curve)).scale(.75)
point2 = Dot(self.input_to_graph_point(x+dx.get_value(), curve)).scale(.75)
ref_line_kwargs = {"stroke_color": "#8b7c74", "stroke_width": 1.5}
dx_line = Line(point1.get_center(), point1.get_center()+RIGHT*dx.get_value()*self.space_unit_to_x,**ref_line_kwargs)
dy_line = Line(dx_line.get_end(), point2.get_center(),**ref_line_kwargs)
secant_line = Line(point1.get_center(), point2.get_center(), color = GREEN, stroke_width=2).scale(1.75)
sct_line_width = secant_line.get_length()
p = TexMobject("P").next_to(point1,UP+LEFT,buff=0.1).scale(0.8)
p_coords = TexMobject("P : ( a, f(a) )").scale(0.8).next_to(self.input_to_graph_point(2,curve))
v_l1 = Line(point1.get_center(), self.x_axis.number_to_point(x), **ref_line_kwargs)
v_l2 = Line(point2.get_center(), self.x_axis.number_to_point(x+dx.get_value()), **ref_line_kwargs)
dx_brace = Brace(Line(v_l1.get_end(),v_l2.get_end()))
dx_text = TexMobject("\\triangle x = ")
delta_x = VGroup(dx_text, dx).arrange_submobjects().next_to(dx_brace, DOWN)
delta_x.add(dx_brace)
dy_brace = Brace(dy_line,RIGHT)
dy_text = TexMobject("\\triangle y")
delta_y = VGroup(dy_brace, dy_text).arrange_submobjects().next_to(dy_line,RIGHT)
self.play(GrowFromCenter(point1),Write(p),Write(p_coords), run_time=4)
# self.wait(2)
self.play(ShowCreation(dx_line))
# self.wait()
self.play(ShowCreation(v_l1),ShowCreation(v_l2),lag_ratio=0)
self.play(Write(delta_x))
self.wait()
self.play(Write(delta_y),GrowFromCenter(point2))
self.wait(4)
slope = TexMobject("\\frac{\\triangle y}{\\triangle x} ","= \\frac{f(a+\\triangle x) - f(x)}{\\triangle x}")
slope.scale(.8).to_edge(RIGHT).shift(1.5*UP+2.5*LEFT)
eval_slope = TexMobject("= 4x-3x^{2}+\\triangle x .(2-3x) - (\\triangle x )^{2}").scale(.8).next_to(slope[1][0],2*DOWN,aligned_edge=LEFT)
self.play(Write(slope[0]),ShowCreation(secant_line))
self.wait()
self.play(Write(slope[1]))
self.wait()
self.play(Write(eval_slope))
self.wait()
self.play(
FadeOut(slope[1]),
eval_slope.next_to,slope[0]
)
self.wait()
delta_y.fade(darkness=1)
# updaters
dx.add_updater(lambda x:x.set_value(dx_tracker.get_value()))
point2.add_updater(lambda p: p.move_to(self.input_to_graph_point(x+dx_tracker.get_value(), curve)))
dx_line.add_updater(
lambda l: l.put_start_and_end_on(point1.get_center(),
point1.get_center()+RIGHT*dx_tracker.get_value()*self.space_unit_to_x))
dy_line.add_updater(lambda l:l.put_start_and_end_on(dx_line.get_end(), point2.get_center()))
secant_line.add_updater(lambda l: l.become(Line(point1.get_center(),point2.get_center(), color=GREEN, stroke_width=2).set_width(sct_line_width)))
v_l2.add_updater(lambda l: l.put_start_and_end_on(point2.get_center(), self.x_axis.number_to_point(x+dx.get_value())))
dx_brace.add_updater(lambda b:b.become(Brace(Line(v_l1.get_end(),v_l2.get_end()))))
self.add(point2, dx_line, dy_line, secant_line,dx_brace,v_l2,dx)
self.play(dx_tracker.set_value,0.01, run_time=6)
self.wait()
zeros_brace = Brace(eval_slope[0][8:], DOWN)
zeros_brace_text = TextMobject("Approach zero").next_to(zeros_brace,DOWN).scale(0.8)
approach_zero = VGroup(zeros_brace,zeros_brace_text)
self.play(ShowCreationThenFadeOut(SurroundingRectangle(eval_slope)))
self.wait()
self.play(
CircleIndicate(eval_slope[0][8:10]),
CircleIndicate(eval_slope[0][18:]),
Write(approach_zero),
run_time=2
)
self.play(
ApplyMethod(eval_slope[0][7:].fade,darkness=1),
ApplyMethod(approach_zero.fade,darkness=1)
)
self.wait(8)
self.play(
FadeOut(approach_zero),
FadeOut(eval_slope[0][7:]),
Transform(slope[0], TexMobject("\\frac{dy}{dx}").move_to(slope[0].get_center()).scale(0.8)),
run_time=2
)
self.wait(2)
deriv = TexMobject(" = {f}'(x)").scale(.8).next_to(eval_slope[0][5])
deriv[0][1:].set_color(YELLOW_D)
self.play(Write(deriv),run_time=2)
self.wait(2)
approx_y = TexMobject("dy =", "{f}'(x)",".dx").scale(.8).shift(2*RIGHT)
approx_y[1].set_color(YELLOW_D)
self.play(Write(approx_y[1]))
self.wait(2)
self.play(Write(approx_y[2]))
self.wait(2)
self.play(Write(approx_y[0]))
self.wait(10)
class WhatAreWeLookingFor(GraphScene):
CONFIG = {
"x_min": -0.5,
"x_max": 4,
"x_axis_width": 7,
"y_min": -1,
"y_max": 4,
"y_axis_height": 6,
"x_tick_frequency": 0.5,
"y_tick_frequency": 0.5,
"y_axis_label": "$f(x,t)$",
"axes_color": LIGHT_GREY, # b9464f #a95660 #b6497b "#d8d776"
"graph_origin": LEFT+DOWN,
"default_graph_colors": [YELLOW_D],
"default_derivative_color": GREEN,
"default_input_color": YELLOW,
"stroke_width": 5,
"num_rects": 100,
"number_line_kwargs": {
"include_numbers": True,
"include_tip": True,
},
"rieman_rects_kwargs": {
"x_min": 0,
"x_max": 2,
"stroke_width": 0.5,
"stroke_color": "#aa9f4b",
"fill_opacity": 0.8,
"start_color": "#d40b37", # "#d40b37", "#d4b90b"
"end_color": "#d4b90b",
},
"diff_area_kwargs" : {"stroke_width": 0.1,"stroke_color": "#aa9f4b","fill_opacity": 0.8},
"diff_area_cols": ["#d40b37", "#d4b90b"],
}
    def construct(self):
        """Main derivation: for f(x,t) = 2x^2 - x^3 + (x+1)t/2, animate how
        the area under the curve over [a, b] changes with t, approximate the
        change-in-area strip with rectangles of height (df/dt) dt and width
        dx, let both deltas shrink, and arrive at
        dA/dt = integral_a^b (df/dt) dx.

        Relies on ``self.get_change_in_area`` (defined below) to build the
        strip of difference rectangles between the curve at t=1 and at a
        given t.
        """
        self.board = ImageMobject("stripes.jpg").set_width(FRAME_WIDTH)
        self.add(self.board)
        self.setup_axes()
        # t_tracker drives the time parameter of f(x,t) throughout the scene.
        t_tracker = ValueTracker(1)
        func = lambda x: 2*x**2 - x**3 + 0.5*t_tracker.get_value()*(x+1)
        curve = self.get_graph(func, x_min=-0.5, x_max=2.5)
        graph = VGroup(self.axes, curve).to_edge(RIGHT,buff=0.1).shift(DOWN)
        # Frozen snapshot of the curve at t=1, kept as a reference outline.
        curve_copy = curve.copy()
        formula = TexMobject(
            "\\frac{\mathrm{d}}{\mathrm{d} t}\int_{a}^{b}f(x,t)dx = \int_{a}^{b}\\frac{\partial }{\partial t}f(x,t)dx"
        ).set_color("#d8d776").scale(.85).to_edge(LEFT).shift(1.5*UP)
        function = TexMobject("f(x,t) = 2x^{2} - x^{3} + \\frac{(x+1)}{2} \\ t").next_to(formula[0][0],1.5*UP,aligned_edge=LEFT).scale(.85)
        function.set_color(YELLOW_D)
        self.add(formula)
        surr_rect_kwargs = {"stroke_width" : 1, "stroke_color" : YELLOW_D, "buff":SMALL_BUFF }
        # Highlight the f(x,t) part of the formula; braces label the integral
        # ("area") and the full left-hand side ("rate of change of area").
        fxt_surr_rect = SurroundingRectangle(formula[0][7:13], **surr_rect_kwargs)
        integral_brace = Brace(formula[0][4:15],color="#d8d776")
        integral_brace.add(TextMobject("area",color="#d8d776").scale(0.8).next_to(integral_brace,DOWN))
        deriv_brace = Brace(formula[0][0:15],color="#d8d776").shift(DOWN)
        deriv_brace.add(TextMobject("rate of change of area", color="#d8d776").scale(.8).next_to(
            deriv_brace, DOWN,aligned_edge=formula.get_edge_center(LEFT)))
        self.wait(4)
        self.play(FadeOut(formula[0][15:]))
        self.wait(2)
        self.play(ShowCreation(fxt_surr_rect))
        self.wait(2)
        self.play(Write(function),run_time=.5)
        self.wait(2)
        self.play(Write(graph))
        self.wait()
        self.add(curve_copy)
        # A separate number line in the corner visualizes the t parameter;
        # the sweeper triangle marks the current value of t.
        t_num_line_kwargs = {"color": LIGHT_GREY,"x_min": 0,"x_max": 4,"unit_size": 1,"tick_frequency": 0.5,}
        t_num_line = NumberLine(**t_num_line_kwargs).to_corner(UR)
        t_num_line.add(TexMobject("t").set_color(YELLOW_D).next_to(t_num_line,buff=.1))
        sweeper = Triangle(color=YELLOW_D).scale(0.15).move_to(t_num_line.number_to_point(1))
        sweeper.rotate(PI,about_point=sweeper.get_center())
        sweeper.shift((sweeper.get_top()[1]-sweeper.get_center()[1])*UP)
        self.add(t_num_line,sweeper)
        # Rebuild the curve from t_tracker every frame so it deforms with t.
        curve.add_updater(lambda c: c.become(
            self.get_graph(lambda x: 2*x**2 - x**3 + 0.5*t_tracker.get_value()*(1+x), x_min=-0.5, x_max=2.5)))
        self.add(curve)
        # Sweep t from 1 to 3 and back to show the curve's dependence on t.
        self.play(t_tracker.set_value,3,
            sweeper.shift,2*RIGHT*t_num_line.unit_size,
            run_time=4,rate_func=there_and_back)
        self.remove(fxt_surr_rect,curve_copy)
        self.wait()
        # the integral
        area = self.get_area(curve,0.5,2)
        lower_bound = TexMobject("a").scale(.75).next_to(self.x_axis.number_to_point(0.5),LEFT+UP,buff=.1)
        upper_bound = TexMobject("b").scale(.75).next_to(self.x_axis.number_to_point(2),RIGHT+UP,buff=.1)
        bounds = VGroup(lower_bound,upper_bound)
        self.play(Write(integral_brace[0]))
        self.wait()
        self.play(Write(integral_brace[1]),ShowCreation(area),Write(bounds))
        self.wait(3)
        self.play(ShowCreation(deriv_brace[0]))
        self.wait(3)
        self.play(Write(deriv_brace[1]))
        # Both curve and shaded area now track t; sweep t twice to show the
        # area itself changing with t.
        curve.add_updater(lambda c: c.become(
            self.get_graph(lambda x: 2*x**2 - x**3 + 0.5*t_tracker.get_value()*(1+x), x_min=-0.5, x_max=2.5)))
        area.add_updater(lambda a: a.become(self.get_area(curve,0.5,2)))
        self.add(curve,area)
        for i in range(2):
            self.play(
                sweeper.shift,2*RIGHT*t_num_line.unit_size,
                t_tracker.set_value,3,
                run_time=4, rate_func=there_and_back
            )
        self.wait()
        self.play(
            *[FadeOut(mob) for mob in [integral_brace, deriv_brace, function]],
            formula[0][:15].shift,1.5*UP
        )
        self.wait()
        fxt = TexMobject("f(x,t)").scale(.8).next_to(
            self.input_to_graph_point(2.4, curve_copy)).set_color("#ff6500")
        self.play(Write(fxt))
        area.clear_updaters()
        # The strip between f(x,t) and f(x,t+dt): the change in area.
        diff_area = self.get_change_in_area(area,1)
        self.add(diff_area)
        dt_tracker = ValueTracker(1)
        delta_t = TexMobject("\\triangle t =").scale(.8).next_to(t_num_line,DOWN)
        dt = DecimalNumber(dt_tracker.get_value(),include_sign=False,num_decimal_places=2).scale(.8).next_to(delta_t)
        self.add(sweeper.copy(),curve_copy)
        curve.add_updater(lambda c: c.become(
            self.get_graph(lambda x: 2*x**2 - x**3 + 0.5*t_tracker.get_value()*(1+x), x_min=-0.5, x_max=2.5)))
        diff_area.add_updater(lambda a: a.become(
            self.get_change_in_area(area, t_tracker.get_value())))
        self.add(curve,diff_area)
        # Step t forward by dt, leaving the old curve/sweeper behind as ghosts.
        self.play(
            t_tracker.set_value,t_tracker.get_value()+dt_tracker.get_value(),
            sweeper.shift, dt_tracker.get_value()*RIGHT*t_num_line.unit_size,
            run_time=1)
        fx_delta_t = TexMobject(
            "f(x,t+\\triangle t)").scale(.8).next_to(self.input_to_graph_point(2.4, curve)).set_color("#ff6500")
        self.play(Write(delta_t), Write(dt))
        self.play(Write(fx_delta_t))
        self.wait()
        delta_a = TexMobject("\\triangle A").set_color_by_gradient(*self.diff_area_cols).scale(.8)
        delta_a.to_edge(LEFT).shift(1.5*UP)
        self.add(delta_a)
        self.play(TransformFromCopy(diff_area, delta_a))
        self.wait()
        # approximating areas with riemann rects
        dx_tracker = ValueTracker(0.25)
        dx = DecimalNumber(dx_tracker.get_value(),include_sign=False,num_decimal_places=2).scale(.8)
        curve2 = self.get_graph(lambda x: 2*x**2 - x**3 + 0.5*(x+1), x_min=-0.5, x_max=2.5).move_to(curve_copy)
        approx_area = self.get_riemann_rectangles(curve2,x_min=0.5,x_max=2,dx=.25,stroke_width= 0.7,fill_opacity= 0.8)
        dx_brace = Brace(approx_area[0])
        dx_brace.add(TexMobject("\\triangle x =").scale(.8).next_to(dx_brace,DOWN))
        dx.next_to(dx_brace[1])
        delta_x = VGroup(dx_brace,dx)
        approx_diff_area = self.get_change_in_area(approx_area,t_tracker.get_value())
        # had to create curve2 same as curve_copy ; self.get_riemann_rectangles(curve_copy) was giving unexpected results
        self.play(FadeOut(area),FadeOut(diff_area),lag_ratio=0)
        self.wait()
        self.play(*[GrowFromEdge(mob,mob.get_bottom()) for mob in approx_area],Write(delta_x),run_time=4)
        self.wait(3)
        self.play(*[GrowFromEdge(mob,mob.get_vertices()[1]) for mob in approx_diff_area],run_time=3)
        self.wait(4)
        # change in area of a sample rect
        sample_rect = approx_diff_area[2]
        sample_rect_pos = sample_rect.get_center()
        self.play(sample_rect.next_to,delta_a,6*DOWN)
        self.wait()
        # Label its height (delta h) and width (delta x).
        delta_h = Brace(sample_rect,RIGHT)
        delta_h.add(TexMobject("\\triangle h").next_to(delta_h,buff=.2).scale(.8))
        delta_x = Brace(sample_rect)
        delta_x.add(TexMobject("\\triangle x").scale(.8).next_to(delta_x,DOWN,buff=.2))
        sample_rect_dims = VGroup(delta_h,delta_x)
        self.play(Write(sample_rect_dims))
        self.wait()
        # delta h is approximately (df/dt) * delta t.
        approx_delta_h = TexMobject("\\approx \\frac{\partial f(x,t)}{\partial t} \ \\triangle t").scale(.8).next_to(delta_h)
        self.play(Write(approx_delta_h),run_time=5)
        self.wait(2)
        area_of_sample_rect = TexMobject("area").next_to(sample_rect,3*RIGHT+2*DOWN).set_color(sample_rect.get_color()).scale(.8)
        area_of_sample_rect.add(approx_delta_h.copy().next_to(area_of_sample_rect))
        area_of_sample_rect.add(delta_x[-1].copy().next_to(area_of_sample_rect))
        self.play(Write(area_of_sample_rect),run_time=2)
        self.wait(2)
        self.play(ApplyWave(approx_diff_area,amplitude=0.5),lag_ratio=0.15,run_time=2)
        self.wait()
        # Sum over all strips: delta A ~ sum (df/dt) dt dx.
        sigma = TexMobject("\\approx \sum \\frac{\partial f(x,t)}{\partial t}\\triangle t \ \\triangle x").scale(.8).next_to(delta_a)
        # NOTE(review): this FadeOut is constructed but never passed to
        # self.play, so it has no visible effect — probably intended
        # self.play(FadeOut(area_of_sample_rect[0])). Confirm before changing.
        FadeOut(area_of_sample_rect[0])
        self.play(Write(sigma),run_time=2)
        self.wait(5)
        # ratio delta a over delta t
        underline_da = Line(delta_a.get_left()+0.4*DOWN,delta_a.get_right()+0.4*DOWN,stroke_width=1.5)
        self.play(
            sigma[0][12:14].shift,2.1*LEFT,
            sigma[0][1:12].shift,0.5*RIGHT
        )
        self.wait(2)
        # Move delta t under delta A to form the ratio (delta A / delta t).
        self.play(
            ShowCreation(underline_da),
            ApplyMethod(sigma[0][12:14].next_to,underline_da,DOWN+0.3*RIGHT,buff=.1), # delta t
            ApplyMethod(sigma[0][1:12].shift,0.3*LEFT+0.3*DOWN), # sigma f
            ApplyMethod(sigma[0][0].shift,0.1*RIGHT+0.3*DOWN), # approx
            ApplyMethod(sigma[0][14:].shift, 0.3*LEFT+0.3*DOWN) # deltax
        )
        self.wait(4)
        # Return the sample rectangle to its slot in the strip.
        self.play(
            FadeOut(delta_h[0]), FadeOut(delta_x), FadeOut(area_of_sample_rect),
            sample_rect.move_to,sample_rect_pos,
            lag_ratio=0
        )
        self.wait(3)
        # let the deltas approach zero
        # dx approaches 0
        approx_area.add_updater(
            lambda a: a.become(self.get_riemann_rectangles(curve2,x_min=0.5,x_max=2,dx=dx_tracker.get_value(),stroke_width= 0.1,fill_opacity= 0.8))
        )
        approx_diff_area.add_updater(
            lambda a: a.become(self.get_change_in_area(approx_area, t_tracker.get_value())
        ))
        dx.add_updater(lambda x: x.set_value(dx_tracker.get_value()))
        dx_brace[0].add_updater(lambda b:b.become(Brace(approx_area[0])))
        self.add(approx_diff_area,approx_area,dx,dx_brace)
        self.play(dx_tracker.set_value,0.01,run_time=6)
        self.wait(3)
        #dt approaches 0
        curve.clear_updaters()
        approx_diff_area.add_updater(
            lambda a: a.become(self.get_change_in_area(approx_area,1 + dt_tracker.get_value())
        ))
        dt.add_updater(lambda t: t.set_value(dt_tracker.get_value()))
        curve.add_updater(
            lambda c: c.become(self.get_graph(lambda x: 2*x**2 - x**3 + 0.5*(x+1)*(1+dt_tracker.get_value())))
        )
        self.add(approx_diff_area,dt,curve)
        self.play(
            ApplyMethod(sweeper.shift,LEFT*t_num_line.unit_size*0.9),
            dt_tracker.set_value,0.1,
            run_time=2
        )
        self.wait()
        # In the limit, the "approx" in delta h becomes equality.
        dh_surr_rect = SurroundingRectangle(VGroup(delta_h[1],approx_delta_h))
        self.add(dh_surr_rect)
        self.play(Transform(approx_delta_h[0][0],TexMobject("=").scale(.8).move_to(approx_delta_h[0][0])),run_time=2)
        self.wait(2)
        # self.remove()
        # self.wait()
        self.remove(dh_surr_rect,approx_delta_h,delta_h[1])
        self.wait()
        deriv_surr_rect = SurroundingRectangle(VGroup(delta_a,sigma),stroke_color=YELLOW_D,stroke_width=1.5)
        self.add(deriv_surr_rect)
        self.wait()
        # Arrow + limits annotation leading to the final boxed identity.
        arrow = Vector(color=YELLOW_D).rotate(3*PI/2,about_point=ORIGIN).next_to(sigma,DOWN)
        approach_zero = VGroup(
            TexMobject("\\triangle x\\rightarrow 0"),
            TexMobject("\\triangle t\\rightarrow 0")
        ).scale(.8).arrange_submobjects(DOWN).next_to(arrow,buff=.2)
        arrow.add(approach_zero)
        self.play(Write(arrow))
        self.wait(2)
        final_eq = TexMobject("\\frac{\mathrm{d} A}{\mathrm{d} t}"," =\int_{a}^{b}\\frac{\partial f(x,t)}{\partial t} \ dx").scale(.85).next_to(arrow,DOWN)
        final_eq.set_color("#d8d776")
        self.play(Write(final_eq),run_time=2,lag_ratio=.15)
        self.wait(3)
        # Echo the right-hand side back up next to the original formula.
        self.play(
            final_eq[1].copy().next_to, formula[0][14]
        )
        self.wait(5)
def get_change_in_area(self, area, t_val):
    """Build rectangles visualising how each Riemann bar in `area` changes
    when the time parameter moves from t = 1 to t = t_val.

    area  : VGroup of Riemann rectangles sampled over x in [0.5, 2).
    t_val : new value of the time parameter t.
    Returns a VGroup of difference rectangles stacked on top of the bars in
    `area`, coloured with the gradient in self.diff_area_cols.
    """
    # Hard-coded surface being integrated: f(x, t) = 2x^2 - x^3 + 0.5*t*(x+1).
    func = lambda x,t: 2*x**2 - x**3 + 0.5*t*(x+1)
    width = area[0].get_width()
    diff_area = VGroup()
    # Step through x so that each sample lines up with one existing bar
    # (width is in scene units; dividing by unit_size converts to graph units).
    for index,x in enumerate(np.arange(0.5,2,width/self.x_axis.unit_size)):
        # Height of the change f(x, t_val) - f(x, 1), converted to scene units.
        height = (func(x,t_val) - func(x,1)) * self.y_axis.unit_size
        rect = Rectangle(width=width, height=height,**self.diff_area_kwargs)
        rect.next_to(area[index],UP,buff=0)  # stack directly on the base bar
        diff_area.add(rect)
    diff_area.set_submobject_colors_by_gradient(*self.diff_area_cols)
    return diff_area
class Thanks(Scene):
    """Closing-credits scene: writes a thank-you line and a like/subscribe
    prompt over a striped background image."""

    def construct(self):
        # Full-width background image behind all text.
        self.add(ImageMobject("stripes.jpg").set_width(FRAME_WIDTH))
        text_kwargs = {"fill_color": "#a7f542"}
        thanks = TextMobject("Thanks for watching !").set_style(**text_kwargs)
        text1 = TextMobject("If you find the videos on this channel to be compelling,")
        text2 = TextMobject("consider to", "like, share and subscribe.")
        subscribe = VGroup(text1, text2).arrange_submobjects(
            DOWN, aligned_edge=LEFT).set_style(**text_kwargs)
        # NOTE(review): arrange_submobjects normally takes a unit direction;
        # 3*DOWN presumably adds extra vertical spacing here -- confirm.
        VGroup(thanks,subscribe).arrange_submobjects(3*DOWN,aligned_edge=LEFT)
        self.wait(2)
        self.play(Write(thanks),lag_ratio=.15,run_time=2)
        self.wait()
        self.play(Write(subscribe),lag_ratio=.15, run_time=4)
        self.wait(2)
| [
"abhijithmuthyala211325@gmail.com"
] | abhijithmuthyala211325@gmail.com |
c2ea836a58ec6f9d02d3d631bdecf55d3db16ccf | 88307f29f2930213819b2a21ac328ee52e5d8d65 | /tests/benchmark.py | 52a684fb04421ac6481101c185ec87ab22b3704e | [
"BSD-3-Clause"
] | permissive | ChristopherBradley/cogent3 | 7dc6524d66687402d2bd48c07ca68b41133e9f00 | 4b4c0fbc77f50aebd74ecf44a6d1777b2e2c0fbb | refs/heads/master | 2023-02-27T00:58:29.796585 | 2020-11-09T04:13:51 | 2020-11-09T04:13:51 | 219,615,537 | 0 | 0 | BSD-3-Clause | 2023-02-21T20:03:32 | 2019-11-04T23:22:58 | Python | UTF-8 | Python | false | false | 5,641 | py | #!/usr/bin/env python
import sys # ,hotshot
from cogent3 import load_aligned_seqs, load_tree
from cogent3.evolve.substitution_model import (
TimeReversibleCodon,
TimeReversibleDinucleotide,
TimeReversibleNucleotide,
)
from cogent3.maths import optimisers
from cogent3.util import parallel
__author__ = "Peter Maxwell and Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
ALIGNMENT = load_aligned_seqs(filename="data/brca1.fasta")
TREE = load_tree(filename="data/murphy.tree")
def subtree(size):
    """Pick the first `size` taxon names from the BRCA1 alignment and the
    matching subtree of the Murphy tree.

    Returns (names, tree).
    """
    selected = ALIGNMENT.names[:size]
    assert len(selected) == size
    return selected, TREE.get_sub_tree(selected)
def brca_test(subMod, names, tree, length, par_rules, **kw):
    """Build a likelihood function for `subMod` over the named taxa.

    subMod    : a cogent3 substitution model instance.
    names     : taxon names kept from the BRCA1 alignment.
    tree      : subtree for those taxa.
                NOTE(review): this argument is immediately shadowed by the
                recomputed subtree below, so the value passed in is unused.
    length    : number of alignment columns kept (gap columns removed first).
    par_rules : list of kwarg dicts forwarded to set_param_rule().
    kw        : extra kwargs for make_likelihood_function().
    Returns (par_controller, aln): the likelihood function and the alignment.
    """
    # names = ALIGNMENT.names[:taxa]
    # assert len(names) == taxa
    tree = TREE.get_sub_tree(names)  # .balanced()
    aln = ALIGNMENT.take_seqs(names).omit_gap_pos()[:length]
    assert len(aln) == length, (len(aln), length)
    # the_tree_analysis = LikelihoodFunction(treeobj = tree, submodelobj = subMod, alignobj = aln)
    par_controller = subMod.make_likelihood_function(tree, **kw)
    for par_rule in par_rules:
        par_controller.set_param_rule(**par_rule)
    # lf = par_controller.make_calculator(aln)
    return (par_controller, aln)
def measure_evals_per_sec(pc, aln):
    """Attach `aln` to likelihood function `pc` and time it: likelihood
    evaluations per second of CPU time (wall=False) over a 2-second budget."""
    pc.set_alignment(aln)
    return pc.measure_evals_per_second(time_limit=2.0, wall=False)
def makePC(modelClass, parameterisation, length, taxa, tree, opt_mprobs, **kw):
    """Construct a substitution model and its likelihood function.

    modelClass       : model name as a string -- "Nucleotide", "Dinucleotide",
                       "Codon", or the full TimeReversible* class name.
    parameterisation : None for no extra predicate; otherwise a bool giving
                       is_independent for the toy 'silly' predicate rule.
    length, taxa, tree : forwarded to brca_test().
    opt_mprobs       : whether motif probabilities are optimised.
    Returns (par_controller, aln) from brca_test().
    """
    # Fixed: the old `modelClass = eval(modelClass)` looked the string up in
    # the module namespace, where only the TimeReversible* classes exist, so
    # the short names used by benchmarks() ("Nucleotide", ...) raised
    # NameError.  An explicit mapping is also safer than eval on a string.
    model_classes = {
        "Nucleotide": TimeReversibleNucleotide,
        "Dinucleotide": TimeReversibleDinucleotide,
        "Codon": TimeReversibleCodon,
        "TimeReversibleNucleotide": TimeReversibleNucleotide,
        "TimeReversibleDinucleotide": TimeReversibleDinucleotide,
        "TimeReversibleCodon": TimeReversibleCodon,
    }
    modelClass = model_classes[modelClass]
    if parameterisation is not None:
        # One toy predicate so global/local parameterisations can be compared.
        predicates = {"silly": silly_predicate}
        par_rules = [{"par_name": "silly", "is_independent": parameterisation}]
    else:
        predicates = {}
        par_rules = []
    subMod = modelClass(
        equal_motif_probs=True,
        optimise_motif_probs=opt_mprobs,
        predicates=predicates,
        recode_gaps=True,
        mprob_model="conditional",
    )
    (pc, aln) = brca_test(subMod, taxa, tree, length, par_rules, **kw)
    return (pc, aln)
def quiet(f, *args, **kw):
    """Call f(*args, **kw) with stdout suppressed; return f's result.

    stdout is always restored, even if `f` raises.
    """
    import io
    import sys

    saved = sys.stdout
    sys.stdout = io.StringIO()
    try:
        return f(*args, **kw)
    finally:
        sys.stdout = saved
def evals_per_sec(*args):
    """Benchmark one configuration: build the likelihood function via
    makePC(*args) and return its evaluations/second as a whole-number
    string."""
    pc, aln = makePC(*args)
    return str(int(measure_evals_per_sec(pc, aln)))
class CompareImplementations(object):
    """Callable benchmark that runs the same configuration under two
    implementations (selected via `switch(0)` / `switch(1)`) and reports the
    speed ratio as a short string: "+R" when the second is R times faster,
    "-R" when slower, and "" when the ratio rounds to 1.0."""

    def __init__(self, switch):
        # switch: callable taking 0 or 1 to select the implementation.
        self.switch = switch

    def __call__(self, *args):
        # Time implementation 0 (construction chatter is suppressed).
        self.switch(0)
        (pc, aln) = quiet(makePC, *args)
        speed1 = measure_evals_per_sec(pc, aln)
        # Time implementation 1 on a freshly built likelihood function.
        self.switch(1)
        (pc, aln) = quiet(makePC, *args)
        speed2 = measure_evals_per_sec(pc, aln)
        if speed1 < speed2:
            speed = "+%2.1f" % (speed2 / speed1)
        else:
            speed = "-%2.1f" % (speed1 / speed2)
        # A ratio that rounds to 1.0 is reported as "no difference".
        if speed in ["+1.0", "-1.0"]:
            speed = ""
        return speed
def benchmarks(test):
    """Print a grid of benchmark results.

    test : callable(alphabet, is_local, seq_length, taxa, tree, opt_motifs)
           returning a short result string (e.g. evals_per_sec or a
           CompareImplementations instance).

    One table is printed per (optimise_motifs, parameterisation) combination;
    columns are alphabet x sequence length, rows are tree sizes.
    """
    alphabets = ["Nucleotide", "Dinucleotide", "Codon"]
    sequence_lengths = [18, 2004]
    treesizes = [5, 20]
    for (optimise_motifs, parameterisation) in [
        (False, "global"),
        (False, "local"),
        (True, "global"),
    ]:
        # Table title; the bool indexes into the two-element label list.
        print(parameterisation, ["", "opt motifs"][optimise_motifs])
        print(" " * 14, end=" ")
        # Column-group width: one 5-char cell per sequence length plus padding.
        wcol = 5 * len(sequence_lengths) + 2
        for alphabet in alphabets:
            print(str(alphabet).ljust(wcol), end=" ")
        print()
        print("%-15s" % "", end=" ")  # "length"
        for alphabet in alphabets:
            for sequence_length in sequence_lengths:
                print("%4s" % sequence_length, end=" ")
            print(" ", end=" ")
        print()
        # Horizontal rule separating the header from the data rows.
        print(
            " " * 12
            + (
                " | ".join(
                    [""]
                    + ["-" * (len(sequence_lengths) * 5) for alphabet in alphabets]
                    + [""]
                )
            )
        )
        for treesize in treesizes:
            print(("%4s taxa | " % treesize), end=" ")
            (taxa, tree) = subtree(treesize)
            for alphabet in alphabets:
                for sequence_length in sequence_lengths:
                    speed = test(
                        alphabet,
                        parameterisation == "local",
                        sequence_length,
                        taxa,
                        tree,
                        optimise_motifs,
                    )
                    print("%4s" % speed, end=" ")
                print("| ", end=" ")
            print()
        print()
        print()
def silly_predicate(a, b):
    """Toy motif predicate used only for benchmarking: True when either
    motif string contains strictly more 'A' characters than 'T'
    characters."""
    return any(s.count("A") > s.count("T") for s in (a, b))
# def asym_predicate((a,b)):
# print a, b, 'a' in a
# return 'a' in a
# mA = Codon()
# mA.setPredicates({'asym': asym_predicate})
def exponentiator_switch(switch):
    """Toggle cogent3's substitution_calculation module between its old and
    new exponentiator implementations (switch: falsy = old, truthy = new).
    Used as the `switch` callable for CompareImplementations."""
    import cogent3.evolve.substitution_calculation
    cogent3.evolve.substitution_calculation.use_new = switch
# Pick the benchmark mode from the command line: "relative" compares the two
# exponentiator implementations; otherwise report absolute evaluations/sec.
if "relative" in sys.argv:
    test = CompareImplementations(exponentiator_switch)
else:
    test = evals_per_sec
parallel.inefficiency_forgiven = True
# NOTE(review): get_rank() > 0 selects the *non*-root MPI processes; here
# those run quietly while rank 0 prints -- confirm this is intended rather
# than the reverse.
if parallel.get_rank() > 0:
    # benchmarks(test)
    quiet(benchmarks, test)
else:
    try:
        benchmarks(test)
    except KeyboardInterrupt:
        print(" OK")
| [
"Gavin.Huttley@anu.edu.au"
] | Gavin.Huttley@anu.edu.au |
c2e1a28b2ecb5eb62a6a9e4203bbb3bc3be76d22 | 6f4c82ae94fa3a27c7dae7a11c8b4ed27d6b604c | /mysite/settings.py | 29ee906269aca3225eddc72e294410c0e99d198f | [] | no_license | mtky3/my-first-blog | 6d8dfa158ef84cc47ad1f1d3b96998d5dfaffa97 | aacecd1e4c3f85316e53e6a3a6797afff03717bf | refs/heads/master | 2021-06-17T21:59:15.353130 | 2019-09-22T14:08:08 | 2019-09-22T14:08:08 | 200,529,000 | 0 | 0 | null | 2021-06-10T21:49:57 | 2019-08-04T18:41:03 | JavaScript | UTF-8 | Python | false | false | 4,508 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's%n7aamw866s)c!+7lys(y55l)6meztq0(1edfu2u4e8!gv6dq'
# NOTE(review): this key is committed to source control and must be treated
# as compromised -- rotate it and load it from the environment before any
# production deployment.

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com','.c9users.io']
# Application definition
INSTALLED_APPS = [
'accounts.apps.AccountsConfig',
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'import_export',
'cms',
'bootstrap4', # django-bootstrap4
'social_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# https://qiita.com/sikkim/items/bb9ee5ef747660f84774
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'OPTIONS': {
# 'read_default_file': './my.cnf',
# 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
# }
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGIN_REDIRECT_URL = '/'
AUTHENTICATION_BACKENDS = (
'social_core.backends.open_id.OpenIdAuth', # for Google authentication
'social_core.backends.google.GoogleOpenId', # for Google authentication
'social_core.backends.google.GoogleOAuth2', # for Google authentication
'social_core.backends.github.GithubOAuth2', # for Github authentication
'social_core.backends.facebook.FacebookOAuth2', # for Facebook authentication
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '192845778622-1g5lhmuj6e040iju37bh7377qeplde21.apps.googleusercontent.com'  # Paste Client Key
# NOTE(review): OAuth client secret committed to source control -- revoke or
# rotate it and supply it via an environment variable instead.
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'rHGou6cavdCku7R9EauBSNP4'  # Paste Secret Key
"mk944@hotmail.com"
] | mk944@hotmail.com |
1fbd2cbf84bbc7502932e333d4bef73a29abfbed | febf5d7fde1f2908f784c93e095030407b32f301 | /astar.py | cfe2384eeebf4e191454a6f39e49409cdfb7bc80 | [
"MIT"
] | permissive | Shuhei-YOSHIDA/heuristic_optimization | dcbe7a9203e1c8d071fc2c32cddd02266d44388e | a77a67e92149356ba4c669c84d7afe1154c0ab72 | refs/heads/master | 2020-03-19T20:41:01.723536 | 2018-06-23T13:06:50 | 2018-06-23T13:06:50 | 136,911,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,199 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A-star algorithm
"""
import numpy as np
class Node(object):
    """Graph vertex: `pose` carries the node's payload (here an [x, y, z]
    grid position) and `index` is its position in Graph.nodes."""

    def __init__(self, pose, index):
        self.pose = pose  # information of node
        self.index = index
class Edge(object):
    """Directed, weighted edge between two node indices of a Graph."""

    def __init__(self, from_index, to_index, cost):
        self.from_i = from_index
        self.to_i = to_index
        self.cost = cost
class Graph(object):
    """Adjacency container: `nodes` is an index-addressable list pre-sized
    with None placeholders, `edges` a flat list of Edge objects."""

    def __init__(self, size):
        self.nodes = [None] * size
        self.edges = []
def adjacentIndices(index, size):
    """Flat indices of the 26 grid neighbours of `index` in a size**3 grid,
    enumerated in (dz, dy, dx) order with the centre offset skipped.

    Depends on your problem.

    NOTE(review): no boundary handling -- results can wrap across a row or
    plane, or fall outside [0, size**3); callers must validate.
    """
    return [
        index + dx + dy * size + dz * (size ** 2)
        for dz in range(-1, 2)
        for dy in range(-1, 2)
        for dx in range(-1, 2)
        if not (dx == dy == dz == 0)
    ]
def adjacentCost():
    """Euclidean step length for each of the 26 neighbour offsets, in the
    same (dz, dy, dx) order produced by adjacentIndices().

    Depends on your problem.
    """
    return [
        np.linalg.norm([dx, dy, dz])
        for dz in range(-1, 2)
        for dy in range(-1, 2)
        for dx in range(-1, 2)
        if not (dx == dy == dz == 0)
    ]
def makeGraph():
    """Build a size**3 regular 3D grid graph with 26-connected neighbours.

    Depends on your problem. (It is difficult to prepare node/edge arrays
    for a large 3D space, e.g. a 1 mm grid over 1 m x 1 m x 1 m.)
    """
    index = 0
    size = 50  # careful for the number of node
    g = Graph(size**3)
    for z in range(size):
        for y in range(size):
            for x in range(size):
                n = Node([x, y, z], index)
                g.nodes[index] = n
                ad_i = adjacentIndices(index, size)
                ad_c = adjacentCost()
                for j, a_i in enumerate(ad_i):
                    if a_i < 0:  # out of index
                        continue
                    # NOTE(review): only negative indices are rejected here;
                    # neighbours past size**3 - 1 and offsets that wrap
                    # across a row/plane boundary still produce edges.
                    e = Edge(index, a_i, ad_c[j])
                    g.edges.append(e)
                index += 1
    return g
def Astar(graph, init, end):
if __name__ == '__main__':
    # Smoke test: building the full 50**3 grid is slow and memory-hungry but
    # exercises graph construction end to end.
    makeGraph()
    print(adjacentIndices(13, 3))  # test
| [
"s-yoshida@frontier.hokudai.ac.jp"
] | s-yoshida@frontier.hokudai.ac.jp |
73a1f841632a9f0870b9f634bf909faf5101b1ad | f7ea41fc9d1b23fa5da8cfa52d6767df8d914128 | /tests/parser/cwrjson/encoder/test_json.py | 2e16e8037aaf468ff0dc43df28ba66bb0143d55a | [
"MIT"
] | permissive | gitter-badger/CWR-DataApi | f0d903558e688f10aba996b624f5f860c2d12f3c | ba2011bccd2a5046d9d200c2377daf003cf4b9bc | refs/heads/master | 2020-12-11T07:59:13.882298 | 2015-06-01T10:16:21 | 2015-06-01T10:16:21 | 37,245,502 | 0 | 0 | null | 2015-06-11T07:16:04 | 2015-06-11T07:16:04 | null | UTF-8 | Python | false | false | 13,565 | py | # -*- coding: utf-8 -*-
import unittest
import datetime
import json
from cwr.parser.encoder.cwrjson import JSONEncoder
from cwr.file import FileTag, CWRFile
from cwr.group import GroupHeader, GroupTrailer, Group
from cwr.work import WorkRecord
from cwr.agreement import AgreementRecord
from cwr.transmission import TransmissionTrailer, TransmissionHeader, \
Transmission
"""
Group from dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestFileJSONEncoding(unittest.TestCase):
def setUp(self):
    # Fresh encoder per test; JSONEncoder is the unit under test.
    self._encoder = JSONEncoder()
def test_file_agreement(self):
tag = self._get_file_tag()
transmission = self._get_transmission_agreement()
data = CWRFile(tag, transmission)
encoded = self._encoder.encode(data)
expected = json.loads(
'{"transmission": {"header": {"creation_date_time": "2003-02-16", "sender_name": "SENDER", "sender_id": "ABC334", "sender_type": "SO", "record_type": "HDR", "edi_standard": "01.10", "transmission_date": "2003-02-17", "character_set": "ASCII"}, "groups": [{"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15}, "transactions": [[{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}], [{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "AGR"}}, {"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15}, "transactions": [[{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": 
true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}], [{"sales_manufacture_clause": "M", "date_of_signature": "2003-02-17", "prior_royalty_start_date": "2003-02-19", "advance_given": true, "retention_end_date": "2003-02-18", "international_standard_code": "DFG135", "prior_royalty_status": "D", "agreement_end_date": "2003-02-16", "record_type": "AGR", "shares_change": true, "post_term_collection_status": "D", "agreement_type": "OS", "submitter_agreement_n": "AB12", "society_assigned_agreement_n": "DF35", "record_sequence_n": 15, "agreement_start_date": "2003-02-15", "transaction_sequence_n": 3, "post_term_collection_end_date": "2003-02-20", "number_of_works": 12}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "AGR"}}], "trailer": {"record_type": "TRL", "group_count": 155, "record_count": 568, "transaction_count": 245}}, "tag": {"sequence_n": 123, "receiver": "RCV", "sender": "SND", "version": 2.1, "year": 2015}}')
self.assertEqual(expected, json.loads(encoded))
def test_file_work_with_nones(self):
tag = self._get_file_tag()
transmission = self._get_transmission_work()
data = CWRFile(tag, transmission)
encoded = self._encoder.encode(data)
expected = json.loads(
'{"transmission": {"header": {"creation_date_time": "2003-02-16", "sender_name": "SENDER", "sender_id": "ABC334", "sender_type": "SO", "record_type": "HDR", "edi_standard": "01.10", "transmission_date": "2003-02-17", "character_set": "ASCII"}, "groups": [{"group_trailer": {"record_count": 20, "record_type": "GRT", "group_id": 3, "transaction_count": 15}, "transactions": [[{"opus_number": "OP35", "recorded_indicator": "Y", "contact_id": "123CONTACT", "record_sequence_n": 15, "music_arrangement": "ORI", "language_code": "ES", "duration": "01:12:00", "contact_name": "THE CONTACT", "composite_type": "MED", "lyric_adaptation": "MOD", "title": "TITLE", "transaction_sequence_n": 3, "excerpt_type": "MOV", "submitter_work_n": "ABC123", "priority_flag": "Y", "copyright_number": "ABDF146", "text_music_relationship": "MTX", "work_type": "BL", "grand_rights_indicator": true, "date_publication_printed_edition": "2003-02-16", "musical_work_distribution_category": "SER", "catalogue_number": "GGH97", "composite_component_count": 5, "exceptional_clause": "Y", "record_type": "NWR", "iswc": null, "version_type": "ORI", "copyright_date": "2003-02-17"}]], "group_header": {"record_type": "GRH", "version_number": "02.10", "group_id": 3, "batch_request_id": 15, "transaction_type": "NWR"}}], "trailer": {"record_type": "TRL", "group_count": 155, "record_count": 568, "transaction_count": 245}}, "tag": {"sequence_n": 123, "receiver": "RCV", "sender": "SND", "version": 2.1, "year": 2015}}')
self.assertEqual(expected, json.loads(encoded))
def _get_file_tag(self):
    """Fixture: a minimal FileTag (the CWR file-naming metadata)."""
    return FileTag(year=2015,
                   sequence_n=123,
                   sender='SND',
                   receiver='RCV',
                   version=2.1)
def _get_transmission_agreement(self):
header = TransmissionHeader(record_type='HDR',
sender_id='ABC334',
sender_name='SENDER',
sender_type='SO',
creation_date_time=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
transmission_date=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
edi_standard='01.10',
character_set='ASCII')
trailer = TransmissionTrailer(record_type='TRL',
group_count=155,
transaction_count=245,
record_count=568)
groups = [self._get_group_agreement(), self._get_group_agreement()]
return Transmission(header, trailer, groups)
def _get_transmission_work(self):
header = TransmissionHeader(record_type='HDR',
sender_id='ABC334',
sender_name='SENDER',
sender_type='SO',
creation_date_time=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
transmission_date=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
edi_standard='01.10',
character_set='ASCII')
trailer = TransmissionTrailer(record_type='TRL',
group_count=155,
transaction_count=245,
record_count=568)
groups = [self._get_group_work()]
return Transmission(header, trailer, groups)
def _get_group_agreement(self):
header = GroupHeader(record_type='GRH',
group_id=3,
transaction_type='AGR',
version_number='02.10',
batch_request_id=15)
trailer = GroupTrailer(record_type='GRT',
group_id=3,
transaction_count=15,
record_count=20)
transactions = [self._get_transaction_agreement(),
self._get_transaction_agreement()]
return Group(header, trailer, transactions)
def _get_group_work(self):
header = GroupHeader(record_type='GRH',
group_id=3,
transaction_type='NWR',
version_number='02.10',
batch_request_id=15)
trailer = GroupTrailer(record_type='GRT',
group_id=3,
transaction_count=15,
record_count=20)
transactions = [self._get_transaction_work()]
return Group(header, trailer, transactions)
def _get_transaction_agreement(self):
    # A CWR transaction is modelled as a list of records; here a single AGR.
    return [self._get_agreement()]
def _get_transaction_work(self):
    # A CWR transaction is modelled as a list of records; here a single NWR.
    return [self._get_work()]
def _get_agreement(self):
return AgreementRecord(record_type='AGR',
transaction_sequence_n=3,
record_sequence_n=15,
submitter_agreement_n='AB12',
agreement_type='OS',
agreement_start_date=datetime.datetime.strptime(
'20030215', '%Y%m%d').date(),
number_of_works=12,
prior_royalty_status='D',
post_term_collection_status='D',
international_standard_code='DFG135',
society_assigned_agreement_n='DF35',
sales_manufacture_clause='M',
agreement_end_date=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
date_of_signature=datetime.datetime.strptime(
'20030217', '%Y%m%d').date(),
retention_end_date=datetime.datetime.strptime(
'20030218', '%Y%m%d').date(),
prior_royalty_start_date=datetime.datetime.strptime(
'20030219', '%Y%m%d').date(),
post_term_collection_end_date=datetime.datetime.strptime(
'20030220', '%Y%m%d').date(),
shares_change=True,
advance_given=True)
def _get_work(self):
return WorkRecord(record_type='NWR',
transaction_sequence_n=3,
record_sequence_n=15,
submitter_work_n='ABC123',
title='TITLE',
version_type='ORI',
musical_work_distribution_category='SER',
date_publication_printed_edition=datetime.datetime.strptime(
'20030216', '%Y%m%d').date(),
text_music_relationship='MTX',
language_code='ES',
copyright_number='ABDF146',
copyright_date=datetime.datetime.strptime('20030217',
'%Y%m%d').date(),
music_arrangement='ORI',
lyric_adaptation='MOD',
excerpt_type='MOV',
composite_type='MED',
composite_component_count=5,
iswc=None,
work_type='BL',
duration=datetime.datetime.strptime('011200',
'%H%M%S').time(),
catalogue_number='GGH97',
opus_number='OP35',
contact_id='123CONTACT',
contact_name='THE CONTACT',
recorded_indicator='Y',
priority_flag='Y',
exceptional_clause='Y',
grand_rights_indicator=True)
class TestFileJSONEncodingInvalid(unittest.TestCase):
    """Encoding invalid inputs: the encoder expects a CWRFile-like object,
    so None and plain strings fail with AttributeError during attribute
    traversal."""

    def setUp(self):
        self._encoder = JSONEncoder()

    def test_none(self):
        # None exposes none of the CWR attributes the encoder reads.
        self.assertRaises(AttributeError, self._encoder.encode, None)

    def test_string(self):
        # Arbitrary strings are rejected the same way.
        self.assertRaises(AttributeError, self._encoder.encode, 'abc')
| [
"programming@wandrell.com"
] | programming@wandrell.com |
d79a0c1c48fca91acad393a1c55d50743108bb09 | da6ca4b7a46c38f4ab7b0d6cf089fa243ad8febe | /namesgen/namesgen_model.py | 64f7269b22250d9dee23aac5eb2596e3ed3adabd | [] | no_license | Vorotori/namesgen | 942b01ac04a687efd2125e9f030262666138d281 | 5b021605b60486feee628980fb11881949cc9dc7 | refs/heads/master | 2023-03-30T09:39:59.561554 | 2021-03-31T10:33:38 | 2021-03-31T10:33:38 | 347,580,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # The training code for the model
# Accepts X and Y created with create_xy() from 'utils'
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, GRU, Dropout
# Building actual model
def compile_lstm_gru(max_char, chars):
    """Build and compile the character-level name-generator network.

    max_char : sequence length (time steps) of the one-hot input.
    chars    : vocabulary; the one-hot width is len(chars).
    Returns a compiled Keras model (categorical cross-entropy, Adam).
    """
    vocab_size = len(chars)
    layers = [
        LSTM(256, input_shape=(max_char, vocab_size), recurrent_dropout=0.2,
             return_sequences=True, activation='tanh'),
        GRU(128, recurrent_dropout=0.2, return_sequences=True),
        Dropout(0.4),
        Dense(vocab_size, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
| [
"noize9999@gmail.com"
] | noize9999@gmail.com |
7d66868e5b3183db472a19c57b08b0dcffe29de5 | c6baee790696725b2e2775f0f9fadf2cd92fc940 | /as11.py | e182ea69185df2c9ccaf6edd410d91fb7007d502 | [] | no_license | afreedfayaz18/Assignment_2 | 2a8566ab0cacacacfb4f785f645c141003c46db3 | e2bb014d0aabc7d5219999a401386081b9960250 | refs/heads/main | 2023-03-10T21:28:00.889879 | 2021-02-26T10:11:27 | 2021-02-26T10:11:27 | 340,627,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | x=int(input("Enter the element :"))
d1={}
for i in range(x):
keys=input("Enter the keys :")
values=input("Enter the values :")
d1[keys]=values
for values in d1:
x.append(values)
print(d1) | [
"noreply@github.com"
] | noreply@github.com |
d15660fc48951b9344177ed79786e63aa102cc9c | 10ea830c0a3bbf3a233d44c17267e32c1889f989 | /adventofcode/year2020/day4/tests/test_solution.py | 3b855c70e6f09c0dcf9a118de8045ee24d18d018 | [] | no_license | oeuftete/advent-of-code | ad344f3978340894efac2754acfa28e6414a0a3a | 88d1f335e5eb3dcc86949637212e7e463b15786d | refs/heads/main | 2023-08-17T19:14:15.971173 | 2023-08-09T00:38:58 | 2023-08-09T00:38:58 | 159,965,687 | 0 | 0 | null | 2023-08-09T00:38:59 | 2018-12-01T16:44:43 | Python | UTF-8 | Python | false | false | 2,637 | py | import pytest
from adventofcode.year2020.day4.solution import Passport, PassportBatch
@pytest.fixture(name="batch")
def fixture_batch():
return """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
@pytest.fixture(name="invalid_batch")
def fixture_invalid_batch():
return """
eyr:1972 cid:100
hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926
iyr:2019
hcl:#602927 eyr:1967 hgt:170cm
ecl:grn pid:012533040 byr:1946
hcl:dab227 iyr:2012
ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277
hgt:59cm ecl:zzz
eyr:2038 hcl:74454a iyr:2023
pid:3556412378 byr:2007
"""
@pytest.fixture(name="valid_batch")
def fixture_valid_batch():
return """
pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980
hcl:#623a2f
eyr:2029 ecl:blu cid:129 byr:1989
iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm
hcl:#888785
hgt:164cm byr:2001 iyr:2015 cid:88
pid:545766238 ecl:hzl
eyr:2022
iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719
"""
def test_passport():
    """Constructing a Passport from unrecognised fields is rejected."""
    with pytest.raises(ValueError):
        Passport({"foo": "bar"})
@pytest.mark.parametrize(
    "field,value,is_valid",
    [
        ("byr", "2002", True),
        ("byr", "2003", False),
        ("hgt", "60in", True),
        ("hgt", "150cm", True),
        ("hgt", "190cm", True),
        ("hgt", "190in", False),
        ("hgt", "190", False),
        ("hcl", "#123abc", True),
        ("hcl", "#123abz", False),
        ("hcl", "123abc", False),
        ("ecl", "brn", True),
        ("ecl", "wat", False),
        ("pid", "000000001", True),
        ("pid", "0123456789", False),
    ],
)
def test_strict_rules(field, value, is_valid):
    """Per-field strict validators accept/reject each sample value."""
    assert Passport.strict_validators()[field](value) == is_valid
def test_passport_batch(batch):
    """The sample batch parses into 4 passports; 1 is fully populated and
    2 are valid when the 'cid' field may be missing."""
    pb = PassportBatch(batch)
    assert len(pb.passports) == 4
    assert pb.passports[0].ecl == "gry"
    assert pb.passports[2].ecl == "brn"
    assert len(pb.valid_passports()) == 1
    assert len(pb.valid_passports(allow_missing_fields=["cid"])) == 2
def test_strict_invalid_batch(invalid_batch):
    """All four passports in the known-invalid batch fail strict
    validation."""
    pb = PassportBatch(invalid_batch)
    assert len(pb.passports) == 4
    assert len(pb.valid_passports(allow_missing_fields=["cid"], strict=True)) == 0
def test_strict_valid_batch(valid_batch):
    """All four passports in the known-valid batch pass strict validation."""
    pb = PassportBatch(valid_batch)
    assert len(pb.passports) == 4
    assert len(pb.valid_passports(allow_missing_fields=["cid"], strict=True)) == 4
| [
"oeuftete@gmail.com"
] | oeuftete@gmail.com |
36ec77440bac05f178aa0e1d39d88a1aebcc7fbd | 791fc562419b62f752e7efb73d2e9b7ba6d6634f | /workouts/bis_and_tris.py | c9400655b0f2f086ce017487a6288174732a90cb | [] | no_license | mathewmoon/py-hiit | 6c36168cfe6e9aabd00bfbfc24aaa0e42fec0ea5 | 1389e328dabb2d2c4da1c6a3565b0494f000b757 | refs/heads/master | 2023-04-09T16:57:55.944109 | 2021-04-20T13:27:43 | 2021-04-20T13:27:43 | 326,480,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | #!/usr/bin/env python3.8
from pyhiit import speak, bis_and_tris
bis_and_tris()
| [
"mmoon@quinovas.com"
] | mmoon@quinovas.com |
e643151df5617988380ac3140741ac8d1ed15fef | 2e6b40104dfb666d807edd649acb0175dc19b7ab | /Test.py | 7a01c2d2c84c0ffc68046c1e20d4b72ea1191793 | [
"MIT"
] | permissive | CzakoZoltan08/AutomaticAI | 08140dff82b816245fb596c13a1856c95ad1806c | 02af79afbafabd6f4784982b2faded5d54928545 | refs/heads/master | 2020-08-25T08:33:08.989074 | 2020-04-15T05:21:19 | 2020-04-15T05:21:19 | 216,989,022 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 11:30:29 2019
@author: czzo
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from AutomaticAI import ParticleSwarmOptimization as pso_algorithm
#--- MAIN ---------------------------------------------------------------------+
def main():
    """Search for a good classifier on the sklearn digits dataset using PSO.

    Loads the digits dataset, holds out a test split, then lets the
    particle-swarm optimizer pick the model/hyperparameters that maximize
    accuracy on that split, and prints the best result found.
    """
    digits = datasets.load_digits()
    features, labels = digits.data, digits.target

    # Random train/test split (sklearn default 75/25).
    train_x, test_x, train_y, test_y = train_test_split(features, labels)

    swarm = pso_algorithm.PSO(particle_count=5, distance_between_initial_particles=0.7, evaluation_metric=accuracy_score)

    best_metric, best_model = swarm.fit(X_train=train_x,
                                        X_test=test_x,
                                        Y_train=train_y,
                                        Y_test=test_y,
                                        maxiter=30,
                                        verbose=True,
                                        max_distance=0.05)

    print("BEST")
    print(best_metric)
    print(best_model)
if __name__ == "__main__":
main() | [
"czzo@erni.ro"
] | czzo@erni.ro |
fdbe14bbaeb683b646af9998bae1f016dd06a05f | c3fadbeabfc2289bfe0e481fca3f97839d05713c | /frappe/contacts/doctype/summer/test_summer.py | ab661acb973bdc597d0a46fda55e8e5be1c50f17 | [
"MIT"
] | permissive | ahmadRagheb/frappe-face | c9c277a744fce8ea62d4eaa14467fa1036b39f1f | ace1675d7dec53f4fbbd476c4a5c375e48d0de09 | refs/heads/master | 2021-05-06T11:51:19.399783 | 2017-12-29T15:44:25 | 2017-12-29T15:44:25 | 114,284,125 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestSummer(unittest.TestCase):
	# Placeholder test case for the Summer doctype; no tests implemented yet.
	pass
| [
"ahmedragheb75@gmail.com"
] | ahmedragheb75@gmail.com |
ec771792c99b81d60de883ad609af2084995cd10 | a5c4ea16042a8078e360c32636c00e3163ac99a8 | /Pytorch_Tutorial/08_transfer_learning/custompytorch/utils/helpers.py | 29f3ab0c8ddc604babb84d5d48f924c624f60e47 | [] | no_license | lykhahaha/Mine | 3b74571b116f72ee17721038ca4c58796610cedd | 1439e7b161a7cd612b0d6fa4403b4c8c61648060 | refs/heads/master | 2020-07-15T05:16:13.808047 | 2019-06-01T07:30:01 | 2019-06-01T07:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | import torch
import time
import copy
import matplotlib.pyplot as plt
import numpy as np
def train_model(model, dataloaders, dataset_sizes, criterion, optimizer, scheduler=None, num_epochs=25):
    """
    Train `model` for `num_epochs`, tracking per-epoch train/val loss and
    accuracy, and return the model loaded with the weights that achieved
    the best validation accuracy.

    Arguments:
        model: nn.Module to train (moved batches to GPU if available)
        dataloaders: {'train': torch.utils.data.DataLoader, 'val': torch.utils.data.DataLoader}
        dataset_sizes: {'train': number of train samples, 'val': number of val samples}
        criterion: loss function called as criterion(outputs, labels)
        optimizer: optimizer over model.parameters()
        scheduler: optional LR scheduler, stepped once per epoch
        num_epochs: number of epochs to run
    """
    device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'

    # Snapshot of the best-performing weights seen so far.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_val_acc = 0.

    for e in range(num_epochs):
        start = time.time()
        # Running per-epoch totals; converted to averages after each phase.
        statistics = {
            'train': {
                'loss': 0.,
                'acc': 0.
            },
            'val': {
                'loss':0.,
                'acc': 0.
            }
        }
        for phase in ['train', 'val']:
            if phase == 'train':
                if scheduler:
                    scheduler.step()
                model.train() # set model to training mode
            else:
                model.eval() # set model to evaluate mode

            # loop over dataloader
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)

                # Zero out parameter gradients
                optimizer.zero_grad()

                # Forward pass, track history in train phase
                with torch.set_grad_enabled(phase=='train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, dim=1) # torch.max return 2 tensors: first is max value, second is argmax value
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # loss.item() is the batch mean, so scale back up by the
                # batch size before accumulating.
                statistics[phase]['loss'] += loss.item() * inputs.size(0)
                statistics[phase]['acc'] += (preds == labels.data).sum().item()

            # Convert running totals into per-sample averages.
            statistics[phase] = {key: statistics[phase][key]/dataset_sizes[phase] for key in statistics[phase].keys()}

        time_elapsed = time.time() - start
        print(f"[INFO]Epoch {e+1}/{num_epochs} - {time_elapsed:.2f}s - Loss: {statistics['train']['loss']:.5f}, Accuracy: {statistics['train']['acc']:.5f}, Validation loss: {statistics['val']['loss']:.5f}, Validation accuracy: {statistics['val']['acc']:.5f}")

        # Keep a copy of the weights whenever validation accuracy improves.
        if best_val_acc < statistics['val']['acc']:
            best_val_acc = statistics['val']['acc']
            best_model_wts = copy.deepcopy(model.state_dict())

    # load best weights
    model.load_state_dict(best_model_wts)
    return model
def imshow(inp, title=None):
    """Display a CHW image tensor, undoing the ImageNet normalization.

    Arguments:
        inp: torch tensor of shape (C, H, W), normalized with the usual
             ImageNet channel mean/std
        title: optional plot title
    """
    # Tensor (C, H, W) -> ndarray (H, W, C) for matplotlib.
    image = inp.permute(1, 2, 0).numpy()
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    # De-normalize and clamp into the displayable [0, 1] range.
    image = np.clip(channel_std * image + channel_mean, 0, 1)
    plt.imshow(image)
    if title:
        plt.title(title)
def visualize_model(model, dataloaders, class_names, file_names=None, num_images=6):
    """
    Plot the model's predictions for the first `num_images` validation
    images, either saving the figure (when `file_names` is given) or
    showing it interactively.

    Arguments:
        model: trained nn.Module; left in train() mode on return
        dataloaders: dict with a 'val' DataLoader of (inputs, labels)
        class_names: index -> label name, e.g. ['ant', 'bee']
        file_names: optional path to save the figure instead of showing it
        num_images: number of images to plot (laid out num_images//2 x 2)
    """
    device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
    model.eval()
    fig = plt.figure()
    image_num = 0

    def _emit():
        # Save or display whatever has been plotted so far.
        if file_names:
            fig.savefig(file_names)
            plt.close(fig)
        else:
            # Bug fix: the original called plt.imshow(fig), which expects
            # image data, not a Figure, and raised a TypeError.
            plt.show()

    with torch.no_grad():
        for inputs, labels in dataloaders['val']:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, dim=1)
            for j in range(inputs.size(0)):
                image_num += 1
                # Bug fix: count plotted images across batches (the original
                # compared the in-batch index j against num_images, so with
                # batches smaller than num_images it never stopped and
                # overflowed the subplot grid).
                if image_num > num_images:
                    _emit()
                    model.train()
                    return
                ax = plt.subplot(num_images//2, 2, image_num)
                ax.axis('off')
                ax.set_title(f'Predicted: {class_names[preds[j]]}')
                imshow(inputs.cpu().data[j])
    # Validation set had fewer than num_images images: still emit the figure.
    _emit()
    model.train()
| [
"ITITIU15033@student.hcmiu.edu.vn"
] | ITITIU15033@student.hcmiu.edu.vn |
cf7c690ca2a5fe2029451939356e872d65621585 | edce7228da66444715ba38ceb84637ca78ac5d89 | /transition/State.py | b9d5b4f9d654d627996dfbfdb20cee84cdf6308a | [] | no_license | askintution/Tb_DepParserMF_ARC | 36132059d4de0348fceab4b6cf27bc50c8c18cc0 | 896ac42282300417a976ac7a9ddf6d7de3795069 | refs/heads/master | 2021-10-08T02:38:30.106677 | 2018-12-06T12:54:58 | 2018-12-06T12:54:58 | 292,239,329 | 1 | 0 | null | 2020-09-02T09:31:24 | 2020-09-02T09:31:23 | null | UTF-8 | Python | false | false | 9,310 | py | from transition.Action import *
from transition.Instance import *
from transition.AtomFeat import *
from data.Dependency import *
import torch
from torch.autograd import Variable
import numpy as np
# Capacity of the fixed-size per-state arrays (stack, heads, relations).
max_length = 512
class State:
    """One configuration of a transition-based dependency parser.

    Holds the stack, the next-buffer index, and the partially built arcs in
    fixed-size arrays of length `max_length`.  Transition methods (shift,
    arc_left, arc_right, arc_label, pop_root) write their result into a
    caller-supplied `next_state` instead of mutating `self`, so search can
    branch.  Array sentinels: -3 = never written, -2 = frontier marker set
    by done_mark(); _have_parent uses -1 untouched / 0 pending / 1 attached.
    """
    def __init__(self):
        self._stack = [-3] * max_length
        self._stack_size = 0
        # _rel[i] / _head[i]: dependency label id and head index of word i.
        self._rel = [-3] * max_length
        self._head = [-3] * max_length
        self._have_parent = [-1] * max_length
        self._next_index = 0
        self._word_size = 0
        self._is_start = True
        self._is_gold = True
        self._inst = None
        self._atom_feat = AtomFeat()
        self._pre_action = Action(CODE.NO_ACTION, -1)

    # Bind a sentence to this state and record its length.
    def ready(self, sentence, vocab):
        self._inst = Instance(sentence, vocab)
        self._word_size = len(self._inst.words)

    # Reset counters for a fresh parse; arrays are re-marked, not wiped.
    def clear(self):
        self._next_index = 0
        self._stack_size = 0
        self._word_size = 0
        self._is_gold = True
        self._is_start = True
        self._pre_action = Action(CODE.NO_ACTION, -1)
        self.done_mark()

    # Write the -2 frontier sentinel just past the used region of each array.
    def done_mark(self):
        self._stack[self._stack_size] = -2
        self._head[self._next_index] = -2
        self._rel[self._next_index] = -2
        self._have_parent[self._next_index] = -2

    # SHIFT is legal while the buffer is non-empty.
    def allow_shift(self):
        if self._next_index < self._word_size:
            return True
        else:
            return False

    # Arc actions need at least two items on the stack.
    def allow_arc_left(self):
        if self._stack_size > 1:
            return True
        else:
            return False

    def allow_arc_right(self):
        if self._stack_size > 1:
            return True
        else:
            return False

    # POP_ROOT finishes the parse: one stack item left, buffer exhausted.
    def allow_pop_root(self):
        if self._stack_size == 1 and self._next_index == self._word_size:
            return True
        else:
            return False

    # ARC_LABEL must immediately follow an ARC_LEFT / ARC_RIGHT.
    def allow_arc_label(self):
        if self._pre_action.is_arc_left() or self._pre_action.is_arc_right():
            return True
        else:
            return False

    # Push the next buffer word onto next_state's stack.
    def shift(self, next_state):
        assert self._next_index < self._word_size
        next_state._next_index = self._next_index + 1
        next_state._stack_size = self._stack_size + 1
        self.copy_state(next_state)
        next_state._stack[next_state._stack_size - 1] = self._next_index
        next_state._have_parent[self._next_index] = 0
        next_state.done_mark()
        next_state._pre_action.set(CODE.SHIFT, -1)

    # Record the arc direction only; the actual attachment happens in the
    # following ARC_LABEL action.
    def arc_left(self, next_state):
        assert self._stack_size > 1
        next_state._next_index = self._next_index
        next_state._stack_size = self._stack_size
        self.copy_state(next_state)
        next_state.done_mark()
        next_state._pre_action.set(CODE.ARC_LEFT, -1)

    def arc_right(self, next_state):
        assert self._stack_size > 1
        next_state._next_index = self._next_index
        next_state._stack_size = self._stack_size
        self.copy_state(next_state)
        next_state.done_mark()
        next_state._pre_action.set(CODE.ARC_RIGHT, -1)

    # Attach the dependent chosen by the preceding arc action with label
    # `dep`, popping it from the stack.
    def arc_label(self, next_state, dep):
        assert self._stack_size > 1
        next_state._next_index = self._next_index
        next_state._stack_size = self._stack_size - 1
        self.copy_state(next_state)
        top0 = self._stack[self._stack_size - 1]
        top1 = self._stack[self._stack_size - 2]
        if (self._pre_action.is_arc_left()):
            # top1 <- top0: top0 stays on the stack, top1 gets head top0.
            next_state._stack[next_state._stack_size - 1] = top0
            next_state._head[top1] = top0
            next_state._have_parent[top1] = 1
            next_state._rel[top1] = dep
        else:
            # top1 -> top0: top0 is popped and gets head top1.
            next_state._head[top0] = top1
            next_state._have_parent[top0] = 1
            next_state._rel[top0] = dep
        next_state.done_mark()
        next_state._pre_action.set(CODE.ARC_LABEL, dep)

    # Pop the last word as the sentence root (head index -1).
    def pop_root(self, next_state, dep):
        assert self._stack_size == 1 and self._next_index == self._word_size
        next_state._next_index = self._word_size
        next_state._stack_size = 0
        self.copy_state(next_state)
        top0 = self._stack[self._stack_size - 1]
        next_state._head[top0] = -1
        next_state._have_parent[top0] = 1
        next_state._rel[top0] = dep
        next_state.done_mark()
        next_state._pre_action.set(CODE.POP_ROOT, dep)

    # Dispatch `action` to the matching transition, writing into next_state.
    def move(self, next_state, action):
        next_state._is_start = False
        next_state._is_gold = False
        if action.is_shift():
            self.shift(next_state)
        elif action.is_arc_left():
            self.arc_left(next_state)
        elif action.is_arc_right():
            self.arc_right(next_state)
        elif action.is_arc_label():
            self.arc_label(next_state, action.label)
        elif action.is_finish():
            self.pop_root(next_state, action.label)
        else:
            print(" error state ")

    # Boolean mask (numpy) of actions legal in this state; True = allowed.
    # After an arc action only ARC_LABEL variants are allowed, hence the
    # early return.
    def get_candidate_actions(self, vocab):
        mask = np.array([False]*vocab.ac_size)
        if self.allow_arc_label():
            mask = mask | vocab.mask_arc_label
            return ~mask
        if self.allow_arc_left():
            mask = mask | vocab.mask_arc_left
        if self.allow_arc_right():
            mask = mask | vocab.mask_arc_right
        if self.is_end():
            mask = mask | vocab.mask_no_action
        if self.allow_shift():
            mask = mask | vocab.mask_shift
        if self.allow_pop_root():
            mask = mask | vocab.mask_pop_root
        return ~mask

    # Copy only the used prefixes of the arrays into next_state; the caller
    # must have set next_state's sizes first.
    def copy_state(self, next_state):
        next_state._inst = self._inst
        next_state._word_size = self._word_size
        next_state._stack[0:self._stack_size] = (self._stack[0:self._stack_size])
        next_state._rel[0:self._next_index] = (self._rel[0:self._next_index])
        next_state._head[0:self._next_index] = (self._head[0:self._next_index])
        next_state._have_parent[0:self._next_index] = (self._have_parent[0:self._next_index])

    def is_end(self):
        if self._pre_action.is_finish():
            return True
        else:
            return False

    # Oracle: derive the correct next action from the gold heads/relations
    # stored in self._inst.
    def get_gold_action(self, vocab):
        gold_action = Action(CODE.NO_ACTION, -1)
        if self._stack_size == 0:
            gold_action.set(CODE.SHIFT, -1)
        elif self._stack_size == 1:
            if self._next_index == self._word_size:
                gold_action.set(CODE.POP_ROOT, vocab.ROOT)
            else:
                gold_action.set(CODE.SHIFT, -1)
        elif self._pre_action.is_arc_left() or self._pre_action.is_arc_right():# arc label
            assert self._stack_size > 1
            top0 = self._stack[self._stack_size - 1]
            top1 = self._stack[self._stack_size - 2]
            if self._pre_action.is_arc_left():
                gold_action.set(CODE.ARC_LABEL, vocab._rel2id[self._inst.rels[top1]])
            elif self._pre_action.is_arc_right():
                gold_action.set(CODE.ARC_LABEL, vocab._rel2id[self._inst.rels[top0]])
        elif self._stack_size > 1: # arc
            top0 = self._stack[self._stack_size - 1]
            top1 = self._stack[self._stack_size - 2]
            assert top0 < self._word_size and top1 < self._word_size
            if top0 == self._inst.heads[top1]: # top1 <- top0
                gold_action.set(CODE.ARC_LEFT, -1)
            elif top1 == self._inst.heads[top0]: # top1 -> top0,
                # if top0 have right child, shift.
                have_right_child = False
                for idx in range(self._next_index, self._word_size):
                    if self._inst.heads[idx] == top0:
                        have_right_child = True
                        break
                if have_right_child:
                    gold_action.set(CODE.SHIFT, -1)
                else:
                    gold_action.set(CODE.ARC_RIGHT, -1)
            else: # can not arc
                gold_action.set(CODE.SHIFT, -1)
        return gold_action

    # Convert the finished parse into Dependency records (1-based indices,
    # with a synthetic ROOT row at position 0).
    def get_result(self, vocab):
        result = []
        result.append(Dependency(0, vocab._root_form, vocab._root, 0, vocab._root))
        for idx in range(0, self._word_size):
            assert self._have_parent[idx] == 1
            relation = vocab.id2rel(self._rel[idx])
            head = self._head[idx]
            word = self._inst.words[idx]
            tag = self._inst.tags[idx]
            result.append(Dependency(idx + 1, word, tag, head + 1, relation))
        return result

    # Fill the atomic feature slots (stack top 3, next buffer word, arc
    # flag); out-of-range slots default to word_size as a padding index.
    def prepare_index(self):
        if self._stack_size > 0:
            self._atom_feat.s0 = self._stack[self._stack_size - 1]
        else:
            self._atom_feat.s0 = self._word_size
        if self._stack_size > 1:
            self._atom_feat.s1 = self._stack[self._stack_size - 2]
        else:
            self._atom_feat.s1 = self._word_size
        if self._stack_size > 2:
            self._atom_feat.s2 = self._stack[self._stack_size - 3]
        else:
            self._atom_feat.s2 = self._word_size
        if self._next_index >= 0 and self._next_index < self._word_size:
            self._atom_feat.q0 = self._next_index
        else:
            self._atom_feat.q0 = self._word_size
        if self._pre_action.is_arc_left() or self._pre_action.is_arc_right():
            self._atom_feat.arc = True
        else:
            self._atom_feat.arc = False
        return self._atom_feat.index()
| [
"yunan.hlju@gmail.com"
] | yunan.hlju@gmail.com |
4f023ab7751bbceb04b9cac13d16333603cd0d0b | b0c99781527a7eb856f3238cc8f0c75adcda56b7 | /configuration.py | 9f2d8d0a99b6b841a965fbbff119e9f05fa4e6b2 | [] | no_license | TheQueasle/GMpi | dcf5a5878c87a910b7ab35f292016801dc28b25c | 05fd259db1e321c2061d1cf0efd98c8fca48f2fd | refs/heads/master | 2020-04-10T01:54:30.197184 | 2018-11-29T19:18:29 | 2018-11-29T19:18:29 | 160,729,012 | 0 | 0 | null | 2018-12-06T20:36:38 | 2018-12-06T20:36:38 | null | UTF-8 | Python | false | false | 473 | py | #!/usr/bin/env python3
import os
from sys import exit
from GMPi_Pack import BuildConfig
# Generate a template config.txt via GMPi_Pack.BuildConfig, refusing to
# clobber an existing file (the user must delete it first).
print("Making configuration file: config.txt")
if os.path.exists('config.txt'):
    print("\nError: Configuration file already exists.")
    print("       Please remove it before generating a new one.\n\n")
    exit(-1)
BuildConfig()
print("Done.\n\n")
print("Please open config.txt and enter the appropriate information")
print("for all entries with <REPLACE> as the current value.\n\n")
| [
"paul.blischak@gmail.com"
] | paul.blischak@gmail.com |
41bd11bb5664129a74675694664f8bf656e63cb7 | 19a4b375a3f232ed7ddddd56745f63d1949c4d78 | /train.py | 914108df1d11ec3c5b44d92b147972567397dfd3 | [] | no_license | AotY/ask39-cm | 949131b963b986ab2314088198e96fec5997574d | b586a2221edf72a5666cd1cb83bfb91dae5496e5 | refs/heads/master | 2020-04-12T09:14:54.055829 | 2019-01-12T08:27:37 | 2019-01-12T08:27:37 | 162,396,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,376 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2018 LeonTao
#
import os
import sys
import time
import math
import argparse
import torch
import torch.nn.functional as F
from tqdm import tqdm
from modules.optim import ScheduledOptimizer
from modules.early_stopping import EarlyStopping
from vocab import Vocab
from vocab import PAD_ID
from cm_model import CMModel
from dataset import load_data, build_dataloader
from misc.utils import generate_texts, save_generated_texts
# ---- Command-line arguments, data, model, and optimizer setup ----
# Parse argument for language to train
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, help='')
parser.add_argument('--data_dir', type=str, help='')
parser.add_argument('--vocab_path', type=str, help='')
parser.add_argument('--vocab_size', type=int, help='')
parser.add_argument('--embedding_size', type=int)
parser.add_argument('--hidden_size', type=int)
parser.add_argument('--bidirectional', action='store_true')
parser.add_argument('--enc_num_layers', type=int)
parser.add_argument('--dec_num_layers', type=int)
parser.add_argument('--dropout', type=float)
parser.add_argument('--teacher_forcing_ratio', type=float, default=0.5)
parser.add_argument('--share_embedding', action='store_true')
parser.add_argument('--tied', action='store_true')
parser.add_argument('--max_grad_norm', type=float, default=5.0)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--min_len', type=int, default=5)
parser.add_argument('--q_max_len', type=int, default=60)
parser.add_argument('--r_max_len', type=int, default=55)
parser.add_argument('--beam_size', type=int, default=10)
parser.add_argument('--batch_size', type=int, help='')
parser.add_argument('--valid_split', type=float, default=0.08)
parser.add_argument('--test_split', type=int, default=5)
parser.add_argument('--epochs', type=int, default=20)
parser.add_argument('--start_epoch', type=int, default=1)
parser.add_argument('--lr_patience', type=int,
                    help='Number of epochs with no improvement after which learning rate will be reduced')
parser.add_argument('--es_patience', type=int, help='early stopping patience.')
parser.add_argument('--device', type=str, help='cpu or cuda')
parser.add_argument('--save_model', type=str, help='save path')
parser.add_argument('--save_mode', type=str,
                    choices=['all', 'best'], default='best')
parser.add_argument('--checkpoint', type=str, help='checkpoint path')
parser.add_argument('--smoothing', action='store_true')
parser.add_argument('--log', type=str, help='save log.')
parser.add_argument('--seed', type=str, help='random seed')
parser.add_argument('--mode', type=str, help='train, eval, infer')
args = parser.parse_args()

print(' '.join(sys.argv))

# NOTE(review): --seed is declared type=str but passed to manual_seed,
# which expects an integer seed — confirm the str is accepted/cast.
torch.random.manual_seed(args.seed)
device = torch.device(args.device)
print('device: {}'.format(device))

# load vocab
vocab = Vocab()
vocab.load(args.vocab_path)
args.vocab_size = int(vocab.size)
print('vocab size: ', args.vocab_size)

# load data
datas = load_data(args, vocab)

# dataset, data_load
train_data, valid_data, test_data = build_dataloader(args, datas)

# model
model = CMModel(
    args,
    device
).to(device)

print(model)

# optimizer
# optimizer = optim.Adam(model.parameters(), lr=args.lr)
optim = torch.optim.Adam(
    model.parameters(),
    args.lr,
    betas=(0.9, 0.98),
    eps=1e-09
)

# scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=2, gamma=0.5)
# Reduce LR by 10x after lr_patience epochs without validation improvement.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optim,
    mode='min',
    factor=0.1,
    patience=args.lr_patience
)

# Wraps optimizer + scheduler + gradient clipping in one object.
optimizer = ScheduledOptimizer(
    optim,
    scheduler,
    args.max_grad_norm
)

# early stopping
early_stopping = EarlyStopping(
    type='min',
    min_delta=0.001,
    patience=args.es_patience
)
# train epochs
def train_epochs():
    ''' Start training: run train + validation once per epoch, log metrics,
    checkpoint per --save_mode, and exit early when early stopping fires.
    Uses the module-level args/model/optimizer/early_stopping. '''
    log_train_file = None
    log_valid_file = None

    if args.log:
        log_train_file = os.path.join(args.log, 'train.log')
        log_valid_file = os.path.join(args.log, 'valid.log')

        print('[Info] Training performance will be written to file: {} and {}'.format(
            log_train_file, log_valid_file))

        # Write CSV headers (truncates any previous logs).
        with open(log_train_file, 'w') as log_tf, \
                open(log_valid_file, 'w') as log_vf:
            log_tf.write('epoch,loss,ppl,accuracy\n')
            log_vf.write('epoch,loss,ppl,accuracy\n')

    valid_accus = []
    for epoch in range(args.start_epoch, args.epochs + 1):
        print('[ Epoch', epoch, ']')

        start = time.time()
        train_loss, train_accu = train(epoch)
        # ppl is exp(loss); loss is clamped at 100 to avoid overflow.
        print(' (Training) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '
              'elapse: {elapse:3.3f} min'.format(
                  ppl=math.exp(min(train_loss, 100)),
                  accu=100*train_accu,
                  elapse=(time.time()-start)/60)
              )

        start = time.time()
        valid_loss, valid_accu = eval(epoch)
        print(' (Validation) ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '
              'elapse: {elapse:3.3f} min'.format(
                  ppl=math.exp(min(valid_loss, 100)),
                  accu=100*valid_accu,
                  elapse=(time.time()-start)/60)
              )

        valid_accus += [valid_accu]

        # is early_stopping
        is_stop = early_stopping.step(valid_loss)

        checkpoint = {
            'model': model.state_dict(),
            'settings': args,
            'epoch': epoch,
            'optimizer': optimizer.optimizer.state_dict(),
            # 'early_stopping': early_stopping,
            'valid_loss': valid_loss,
            'valid_accu': valid_accu
        }

        if args.save_model:
            if args.save_mode == 'all':
                # Keep every epoch, named by its validation accuracy.
                model_name = os.path.join(
                    args.save_model,
                    'accu_{accu:3.3f}.pth'.format(accu=100*valid_accu)
                )
                torch.save(checkpoint, model_name)
            elif args.save_mode == 'best':
                # Overwrite best.pth only when this epoch ties/beats the best.
                model_name = os.path.join(args.save_model, 'best.pth')
                if valid_accu >= max(valid_accus):
                    torch.save(checkpoint, model_name)
                    print(' - [Info] The checkpoint file has been updated.')

        if log_train_file and log_valid_file:
            with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
                log_tf.write('{epoch}, {loss: 8.5f}, {ppl: 8.5f}, {accu:3.3f}\n'.format(
                    epoch=epoch,
                    loss=train_loss,
                    ppl=math.exp(min(train_loss, 100)),
                    accu=100*train_accu)
                )
                log_vf.write('{epoch}, {loss: 8.5f}, {ppl: 8.5f}, {accu:3.3f}\n'.format(
                    epoch=epoch,
                    loss=valid_loss,
                    ppl=math.exp(min(valid_loss, 100)),
                    accu=100*valid_accu)
                )

        if is_stop:
            print('Early Stopping.\n')
            sys.exit(0)
# train
def train(epoch):
    ''' Epoch operation in training phase: one pass over train_data,
    returning (average loss per non-PAD token, token accuracy). '''
    model.train()

    total_loss = 0
    n_word_total = 0
    n_word_correct = 0

    for batch in tqdm(
            train_data, mininterval=2,
            desc=' (Training: %d) ' % epoch, leave=False):
        # prepare data
        enc_inputs, dec_inputs, enc_lengths, dec_lengths = map(
            lambda x: x.to(device), batch)

        # Shift decoder sequence by one: inputs drop the last token,
        # targets drop the first (teacher forcing).
        # [batch_size, max_len]
        dec_targets = dec_inputs[1:, :]
        dec_inputs = dec_inputs[:-1, :]

        # print('enc_inputs: ', enc_inputs.shape)
        # print(enc_inputs)
        # print(enc_lengths)
        # print('dec_inputs: ', dec_inputs.shape)
        # print('dec_targets: ', dec_targets.shape)

        # forward
        optimizer.zero_grad()

        dec_outputs = model(
            enc_inputs,
            enc_lengths,
            dec_inputs,
            dec_lengths
        )

        # backward
        loss, n_correct = cal_performance(
            dec_outputs,
            dec_targets,
            smoothing=args.smoothing
        )

        loss.backward()

        # update parameters
        optimizer.step()

        # note keeping: totals are over non-PAD tokens only.
        total_loss += loss.item()

        non_pad_mask = dec_targets.ne(PAD_ID)
        n_word = non_pad_mask.sum().item()
        n_word_total += n_word
        n_word_correct += n_correct

    loss_per_word = total_loss/n_word_total
    accuracy = n_word_correct/n_word_total
    return loss_per_word, accuracy
def eval(epoch):
    ''' Epoch operation in evaluation phase: one gradient-free pass over
    valid_data, returning (average loss per non-PAD token, token accuracy).
    NOTE: shadows the builtin eval() at module scope. '''
    model.eval()

    total_loss = 0
    n_word_total = 0
    n_word_correct = 0

    with torch.no_grad():
        for batch in tqdm(
                valid_data, mininterval=2,
                desc=' (Validation: %d) ' % epoch, leave=False):

            enc_inputs, dec_inputs, enc_lengths, dec_lengths = map(
                lambda x: x.to(device), batch)

            # Teacher-forcing shift (same as train()).
            dec_targets = dec_inputs[1:, :]
            dec_inputs = dec_inputs[:-1, :]

            dec_outputs = model(
                enc_inputs,
                enc_lengths,
                dec_inputs,
                dec_lengths
            )

            # backward
            # No smoothing at validation time, so losses are comparable.
            loss, n_correct = cal_performance(
                dec_outputs,
                dec_targets,
                smoothing=False
            )

            # note keeping
            total_loss += loss.item()

            non_pad_mask = dec_targets.ne(PAD_ID)
            n_word = non_pad_mask.sum().item()
            n_word_total += n_word
            n_word_correct += n_correct

    loss_per_word = total_loss/n_word_total
    accuracy = n_word_correct/n_word_total
    return loss_per_word, accuracy
def infer(epoch):
    ''' Epoch operation in infer phase: decode test_data with greedy and
    beam search and save the generated texts to
    <data_dir>/generated/<epoch>.txt.  Returns nothing; the loss/word
    counters below are initialized but never used here. '''
    model.eval()

    total_loss = 0
    n_word_total = 0
    n_word_correct = 0

    with torch.no_grad():
        for batch in tqdm(
                test_data, mininterval=2,
                desc=' (INFER: %d) ' % epoch, leave=False):

            enc_inputs, dec_inputs, enc_lengths, dec_lengths = map(
                lambda x: x.to(device), batch)

            dec_targets = dec_inputs[1:, :]
            dec_inputs = dec_inputs[:-1, :]

            greedy_outputs, beam_outputs, beam_length = model.decode(
                enc_inputs,
                enc_lengths,
            )

            # Decode source, reference, and generated sequences to text.
            # [batch_size, max_len]
            enc_texts = generate_texts(
                vocab, args.batch_size, enc_inputs.transpose(0, 1), decode_type='greedy')

            # [batch_size, max_len]
            dec_texts = generate_texts(
                vocab, args.batch_size, dec_targets.transpose(0, 1), decode_type='greedy')

            # [batch_size, max_len]
            greedy_texts = generate_texts(
                vocab, args.batch_size, greedy_outputs, decode_type='greedy')

            # [batch_size, topk, max_len]
            beam_texts = generate_texts(
                vocab, args.batch_size, beam_outputs, decode_type='beam_search')

            save_path = os.path.join(args.data_dir, 'generated/%d.txt' % epoch)
            save_generated_texts(epoch, enc_texts, dec_texts,
                                 greedy_texts, beam_texts, save_path)
def cal_performance(pred, gold, smoothing=False):
    '''Compute the loss and the number of correctly predicted non-PAD tokens.

    pred: [max_len * batch_size, vocab_size] logits
    gold: [max_len, batch_size] target token ids
    Returns (loss tensor from cal_loss, int count of correct tokens).
    '''
    loss = cal_loss(pred, gold, smoothing)

    # Predicted token id per position (argmax over the vocabulary).
    _, predicted_ids = pred.max(dim=1)
    flat_gold = gold.contiguous().view(-1)

    # Count matches only where the target is not padding.
    valid_mask = flat_gold.ne(PAD_ID)
    hits = predicted_ids.eq(flat_gold) & valid_mask
    n_correct = hits.sum().item()

    return loss, n_correct
def cal_loss(pred, gold, smoothing):
    '''Summed cross-entropy over non-PAD targets, optionally label-smoothed.

    pred: [max_len * batch_size, vocab_size] logits
    gold: targets, flattened internally to [max_len * batch_size]
    '''
    targets = gold.contiguous().view(-1)

    if not smoothing:
        return F.cross_entropy(
            pred, targets, ignore_index=PAD_ID, reduction='sum')

    eps = 0.1
    num_classes = pred.size(1)
    # Smooth the one-hot target distribution: (1 - eps) on the gold class,
    # eps spread evenly over the remaining classes.
    smoothed = torch.zeros_like(pred).scatter(1, targets.view(-1, 1), 1)
    smoothed = smoothed * (1 - eps) + (1 - smoothed) * eps / (num_classes - 1)

    log_probs = F.log_softmax(pred, dim=1)
    per_token = -(smoothed * log_probs).sum(dim=1)
    keep = targets.ne(PAD_ID)
    return per_token.masked_select(keep).sum()  # summed; caller averages
if __name__ == '__main__':
    # Preserve the CLI mode: loading a checkpoint replaces `args` with the
    # settings saved inside it, so the requested mode is restored afterwards.
    mode = args.mode
    if args.checkpoint:
        print('load checkpoint...')
        checkpoint = torch.load(args.checkpoint)

        model.load_state_dict(checkpoint['model'])
        optimizer.optimizer.load_state_dict(checkpoint['optimizer'])
        # early_stopping = checkpoint['early_stopping']
        args = checkpoint['settings']
        epoch = checkpoint['epoch']
        args.start_epoch = epoch + 1
        valid_loss = checkpoint['valid_loss']
        valid_accu = checkpoint['valid_accu']
        print(
            ' - (checkpoint) epoch: {epoch: d} ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %'.
            format(
                epoch=epoch,
                ppl=math.exp(min(valid_loss, 100)),
                accu=100*valid_accu,
            )
        )

    args.mode = mode
    # NOTE(review): 'eval'/'infer' use `epoch`, which is only defined when
    # --checkpoint was given — confirm those modes always pass a checkpoint.
    if args.mode == 'train':
        train_epochs()
    elif args.mode == 'eval':
        eval(epoch)
    elif args.mode == 'infer':
        infer(epoch)
| [
"694731929@qq.com"
] | 694731929@qq.com |
051729d9725330cfb6c440e3355ae48aa9ac620f | 93f49d55cf1bc9c605d946c9c6505f8cfcdc4fb1 | /builtin/set1.py | 9ea1707fc27cc8afa54bb325bc47cce34649c93d | [] | no_license | dada99/python-learn | 662bce5600fcedc85b467ff28333edae4e09eaeb | b26a4bf628f39ec05e071637cff254ba528cb267 | refs/heads/master | 2023-01-31T14:56:36.848062 | 2023-01-10T07:53:46 | 2023-01-10T07:53:46 | 192,673,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | set1 = {'a','b'}
#set1.add('a') # change to {'b', 'a'}
#print(set1)
# Adding 'b' again is a no-op: sets keep only unique elements.
set1.add('b')
print(set1)
set1.add('c')
print(set1)
# pop() removes and returns an arbitrary element (order not guaranteed).
print(set1.pop())
print(set1.pop()) | [
"da.liu@ericsson.com"
] | da.liu@ericsson.com |
21039bc4571a8b7dbabfdf6bc482e5b5269767eb | 3c2b461339b93ea6a63ac6a37373ac1d99ecc5d1 | /ssmusic/wsgi.py | 61cdae16480b8b0c9a0e29c0c19890278c3c403c | [] | no_license | kuliye/ssmusic-django | 93dc3bba5e2e7431887a3365ed4a6cb49e2d0e01 | c162c9cb4642c571f4a48c46f6df6eec0920ab0c | refs/heads/master | 2020-03-19T01:18:08.727029 | 2018-06-03T03:42:44 | 2018-06-03T03:42:44 | 135,535,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for ssmusic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the
# WSGI callable that servers (gunicorn/uwsgi) will import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ssmusic.settings")
application = get_wsgi_application()
| [
"1440894632@qq.com"
] | 1440894632@qq.com |
1ac63a541e9a8dce7c61fe484cbb580d7979038e | 0737f5a9e19cc14692c8bf99dc349ae856a20b0c | /replay_buffer.py | 8f793775a62d5ab3e9a96878a951fc17eebfd190 | [] | no_license | takuseno/unreal | 37fd0c0b7613182f1abb5d55b5d0f8564acf25c2 | 864cfbc1edf56510c69ef3809ae0adc6cb129017 | refs/heads/master | 2020-03-22T19:55:25.246229 | 2018-08-07T09:03:52 | 2018-08-07T09:03:52 | 140,560,691 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,188 | py | from collections import deque
from random import sample, randrange, random
import uuid
class ReplayBuffer:
    """Experience replay buffer for UNREAL-style auxiliary tasks.

    Stores per-step transitions keyed by a UUID (insertion-ordered in both
    `self.ids` and the dicts), plus side indexes of rewarding vs
    non-rewarding states (for reward prediction) and of episode-terminal
    ids (so sampled sequences never cross episode boundaries).
    """
    def __init__(self, capacity=2e3):
        # Max number of stored transitions (float default works with `>`).
        self.capacity = capacity
        # Insertion-ordered transition ids; ids[0] is the oldest.
        self.ids = []
        self.transitions = {}
        self.rewarding_states = {}
        self.non_rewarding_states = {}
        self.episode_terminal_ids = []

    # ((s_t-2, s_t-1, s_t), a_t-1, r_t, a_t, r_t+1, s_t+1, t_t+1)
    def add(self, obs_t, action_tm1, reward_t, action_t, reward_tp1, obs_tp1, terminal):
        # create unique id
        id = uuid.uuid4()
        self.ids.append(id)

        # remove oldest transision
        if len(self.transitions.keys()) > self.capacity:
            self.remove(self.ids[0])

        # for value function replay and others
        # Only the latest frame of the stacked observation is kept here.
        transition = dict(
            obs_t=obs_t[-1],
            action_tm1=action_tm1,
            reward_t=reward_t,
            action_t=action_t,
            reward_tp1=reward_tp1,
            obs_tp1=obs_tp1
        )
        self.transitions[id] = transition

        # for reward prediction
        reward_prediction_dict = dict(obs_t=obs_t, reward_tp1=reward_tp1)
        if reward_tp1 == 0.0:
            self.non_rewarding_states[id] = reward_prediction_dict
        else:
            self.rewarding_states[id] = reward_prediction_dict

        # add episode terminal id
        if terminal:
            self.episode_terminal_ids.append(id)

    # Drop a transition (and its index entries) if present; no-op otherwise.
    def remove(self, id):
        if id in self.ids:
            self.ids.remove(id)
            self.transitions.pop(id)
            if id in self.episode_terminal_ids:
                self.episode_terminal_ids.remove(id)
            if id in self.rewarding_states:
                self.rewarding_states.pop(id)
            if id in self.non_rewarding_states:
                self.non_rewarding_states.pop(id)

    # Sample one state for reward prediction; returns (stacked obs, class)
    # where class is 0 = zero reward, 1 = positive, 2 = negative.
    # Rewarding states are oversampled to ~50%.
    # NOTE(review): raises if the chosen pool is empty (e.g. no
    # non-rewarding states yet) — confirm callers wait for warm-up.
    def sample_rp(self):
        prob = random()
        if prob > 0.5 and len(self.rewarding_states.values()) != 0:
            transition = sample(list(self.rewarding_states.values()), 1)[0]
        else:
            transition = sample(list(self.non_rewarding_states.values()), 1)[0]
        reward = transition['reward_tp1']
        if reward == 0.0:
            reward_class = 0
        elif reward > 0.0:
            reward_class = 1
        else:
            reward_class = 2
        return transition['obs_t'], reward_class

    # Sample up to n consecutive transitions from within a single episode.
    # Returns (transitions, is_terminal) where is_terminal tells whether
    # the sampled window ends exactly at an episode boundary.
    def sample_sequence(self, n):
        if len(self.episode_terminal_ids) > 0:
            # get terminal index
            episode_index = randrange(len(self.episode_terminal_ids))
            id = self.episode_terminal_ids[episode_index]
            end_index = self.ids.index(id)
            # get start index
            if episode_index == 0:
                start_index = 0
            else:
                prev_id = self.episode_terminal_ids[episode_index - 1]
                start_index = self.ids.index(prev_id) + 1
        else:
            # no episode ends yet
            end_index = len(self.ids) - 1
            start_index = 0
        # get trajectory
        length = end_index - start_index + 1
        if length > n:
            sample_start_index = randrange(length - n + 1) + start_index
            sample_end_index = sample_start_index + n - 1
        else:
            sample_start_index = start_index
            sample_end_index = end_index
        # Relies on dict insertion order matching self.ids (Python 3.7+).
        transitions = list(self.transitions.values())
        sampled_transitions = transitions[sample_start_index:sample_end_index+1]
        is_terminal = self.ids[sample_end_index] in self.episode_terminal_ids
        return sampled_transitions, is_terminal

    # Sample a sequence for value replay, returning parallel lists of
    # observations, previous actions, and rewards (with the final step's
    # successor appended), plus the terminal flag.
    def sample_vr(self, n):
        transitions, is_terminal = self.sample_sequence(n)
        # format results
        obs_t = []
        actions_tm1 = []
        rewards_t = []
        for transition in transitions:
            obs_t.append(transition['obs_t'])
            actions_tm1.append(transition['action_tm1'])
            rewards_t.append(transition['reward_t'])
        obs_t.append(transitions[-1]['obs_tp1'])
        actions_tm1.append(transitions[-1]['action_t'])
        rewards_t.append(transitions[-1]['reward_tp1'])
        return obs_t, actions_tm1, rewards_t, is_terminal
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
900ccf7e6638ac6465f9b33828d8fc1a49fef58d | 2617bfec230858814b32795c6a47249c54a15cac | /cupy_alias/math/window.py | 54960b88fb3887e5f8929a8b21fa80008b2da55a | [
"MIT",
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fixstars/clpy | a06a1281887470d8faee3ec204b56fbef2496fab | 693485f85397cc110fa45803c36c30c24c297df0 | refs/heads/clpy | 2021-06-10T04:00:30.974447 | 2021-02-28T06:01:26 | 2021-02-28T06:01:26 | 136,439,592 | 154 | 20 | NOASSERTION | 2021-04-07T02:41:03 | 2018-06-07T07:33:04 | Python | UTF-8 | Python | false | false | 39 | py | from clpy.math.window import * # NOQA
| [
"tomoharu.kitawaki@fixstars.com"
] | tomoharu.kitawaki@fixstars.com |
81181e6ac9198d94144b3665cc1e602a1c778b7e | e46f17948d9293e431db54a63d1dda4d7fbf6dac | /289. Game of Life/main.py | 6e2a6a82530d7835a67ff63c3dc500d1dceef6ee | [] | permissive | Competitive-Programmers-Community/LeetCode | 37878e3c7b6b18210e9db5951b04baf691238e0c | 841fdee805b1a626e9f1cd0e12398d25054638af | refs/heads/master | 2022-02-21T06:53:08.949304 | 2019-10-11T11:13:53 | 2019-10-11T11:13:53 | 178,438,819 | 0 | 0 | MIT | 2019-04-01T06:45:20 | 2019-03-29T16:17:12 | Python | UTF-8 | Python | false | false | 1,602 | py | class Solution:
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
r=len(board)
c=len(board[0])
matrix=[[0 for j in range(c)] for i in range(r)]
for i in range(r):
for j in range(c):
count=0
if i-1>=0 and board[i-1][j]==1:
count+=1
if i-1>=0 and j-1>=0 and board[i-1][j-1]==1:
count+=1
if i-1>=0 and j+1<c and board[i-1][j+1]==1:
count+=1
if i+1<r and board[i+1][j]==1:
count+=1
if j-1>=0 and board[i][j-1]==1:
count+=1
if j+1<c and board[i][j+1]==1:
count+=1
if i+1<r and j-1>=0 and board[i+1][j-1]==1:
count+=1
if i+1<r and j+1<c and board[i+1][j+1]==1:
count+=1
if board[i][j]==1 and count<2:
matrix[i][j]=0
elif board[i][j]==1 and (count==2 or count==3):
matrix[i][j]=1
elif board[i][j]==1 and count>3:
matrix[i][j]=0
elif board[i][j]==0 and count==3:
matrix[i][j]=1
else:
matrix[i][j]=0
for i in range(r):
for j in range(c):
board[i][j]=matrix[i][j]
| [
"noreply@github.com"
] | noreply@github.com |
613316fc65699e0a97e8a758cf9c0d43e73580ae | c660f412b852b5fdc8572fd378e31e76e6587b27 | /Semana11/asd.py | c4d07a6a6f67bcab2c4023bca78415f43d4384b3 | [] | no_license | Pablolog42/2020-CC1002 | bb604ae08ebdb49f35c23fbbed424ef6c3961adf | 9923e50ba660e3464463bc5ea3fd36b89e1f11f1 | refs/heads/master | 2023-02-10T10:03:41.613467 | 2021-01-02T04:39:44 | 2021-01-02T04:39:44 | 326,109,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | cola = []
# Enqueue three sample strings; list.append adds to the tail in O(1), so the
# list behaves like a FIFO queue ("cola" is Spanish for "queue").
cola.append("miau")
cola.append("guau")
cola.append("rawr")
print(cola) | [
"pablolog42@gmail.com"
] | pablolog42@gmail.com |
64d65da33eabe7090b0bb2fed71a97ee84a451fe | bab42fa4c574d47f57a6bad221c285676397ecdc | /Week1/Day1.py | 323bd553eee3576b1d6a035c0bc0da2d2a436e52 | [] | no_license | neighborpil/PY_WebCrawlingStudy | 7647f85f4610b98ed838fdff1d08d3983ff9b519 | 146f75e2bdb176c920194fdf9ce88b3e76b1ec4a | refs/heads/master | 2020-04-11T05:48:51.389458 | 2018-12-13T09:39:47 | 2018-12-13T09:39:47 | 157,983,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import re
# Filter the lines of test.txt four ways, printing matches and a separator
# after each pass:
#   1) lines containing '좋아요'            (re.search)
#   2) the same, via str.find               (equivalent result)
#   3) lines *starting* with '좋아요'        (anchored regex)
#   4) the same, via str.startswith         (equivalent result)
# BUG FIX: the original opened four separate file handles (hand..hand4) and
# never closed any of them; "with" closes each handle deterministically.
with open('test.txt') as hand:
    for line in hand:
        line = line.rstrip()
        if re.search('좋아요', line):
            print(line)
print('-------------------')
with open('test.txt') as hand:
    for line in hand:
        line = line.rstrip()
        if line.find('좋아요') >= 0:
            print(line)
print('-------------------')
with open('test.txt') as hand:
    for line in hand:
        line = line.rstrip()
        if re.search('^좋아요', line):
            print(line)
print('-------------------')
with open('test.txt') as hand:
    for line in hand:
        line = line.rstrip()
        if line.startswith('좋아요'):
            print(line)
"feelongpark"
] | feelongpark |
f8f599dc9bd468a345c92c450edabbc81fc1a329 | bc8aa86439962d8bd937728d22815bd389b4bbb8 | /Tutfinder/settings.py | 720332df54cd72040cacc2b04df59e3a84f28410 | [] | no_license | NASAKZ07/tutfinder1 | bd64b3ce7bd8cfe37f5d9a2cff4820ed4d2fa0fb | dce64afa2cbc1e3cb987d9f5fbf763b90d95d820 | refs/heads/master | 2021-06-26T01:42:20.250205 | 2019-12-06T16:35:22 | 2019-12-06T16:35:22 | 226,367,996 | 0 | 0 | null | 2021-06-10T22:22:08 | 2019-12-06T16:25:08 | Python | UTF-8 | Python | false | false | 2,942 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'whkz$%q#ew6(9p1^hj6$+cjo*928ibori^1_)4i#i7_jv9+&o0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    "crispy_forms",
    'main.apps.MainConfig',
    'users.apps.UsersConfig'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Tutfinder.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'Tutfinder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# django-crispy-forms renders forms with Bootstrap 4 markup.
CRISPY_TEMPLATE_PACK="bootstrap4"
SITE_ID = 1
# After a successful login, send users to the site root.
LOGIN_REDIRECT_URL = '/'
| [
"abdnurs2001@gmail.com"
] | abdnurs2001@gmail.com |
6d5af405040977f56788eda81171f196eb8faaa6 | 254908bf16c8991a9440c04fb03056ad383d1888 | /rj_gameplay/rj_gameplay/tactic/prep_move.py | 2ee0a91bb12a9780c88b929ea7337b463bc6db4e | [
"Apache-2.0"
] | permissive | RoboJackets/robocup-software | d5fdad02f17c9943791142e59a515f38086a4332 | fb46ea51942dc001ff73d0570528ab1cbb3d5065 | refs/heads/ros2 | 2023-08-26T22:43:21.163196 | 2023-07-02T20:54:37 | 2023-07-02T20:54:37 | 15,662,105 | 217 | 209 | Apache-2.0 | 2023-09-13T01:25:23 | 2014-01-06T01:09:14 | C++ | UTF-8 | Python | false | false | 1,492 | py | from typing import List, Tuple
import numpy as np
import stp
from rj_msgs.msg import RobotIntent
from rj_gameplay.role import dumb_move
class PrepMove(stp.tactic.Tactic):
    """Seeks to a single point, passed in on init.

    Requests a single DumbMove role (assigned to the robot closest to the
    target point) and re-targets relative to the live ball position on
    every tick.
    """

    def __init__(self, world_state: stp.rc.WorldState):
        super().__init__(world_state)
        # Placeholder target; tick() overwrites it from the ball position.
        self._target_pt = np.array([0.0, 0.0])
        self._role_requests.append(
            (stp.role.cost.PickClosestToPoint(self._target_pt), dumb_move.DumbMove)
        )

    def tick(
        self,
        world_state: stp.rc.WorldState,
    ) -> List[Tuple[int, RobotIntent]]:
        # returns list of (robot_id, robot_intent)
        # assumes all roles requested are filled, because tactic is one unit
        # Target is the ball position offset by [0, 0.5] in y (numpy
        # broadcasts the plain list) -- presumably metres; confirm units.
        self._target_pt = world_state.ball.pos[0:2] - [0, 0.5]
        # Lazily instantiate the concrete role once robots are assigned.
        if (
            len(self.assigned_roles) != len(self._role_requests)
            and self.assigned_robots
        ):
            self.init_roles(world_state)
        return [(role.robot.id, role.tick(world_state)) for role in self.assigned_roles]

    def is_done(
        self,
        world_state: stp.rc.WorldState,
    ) -> bool:
        # Preparation never self-terminates; a higher-level play ends it.
        return False

    def init_roles(
        self,
        world_state: stp.rc.WorldState,
    ):
        # Bind the first assigned robot to the (single) requested role.
        robot = self.assigned_robots[0]
        role = self._role_requests[0][1]
        if role is dumb_move.DumbMove:
            self.assigned_roles.append(
                role(robot, self._target_pt, world_state.ball.pos)
            )
| [
"noah.bjyu@gmail.com"
] | noah.bjyu@gmail.com |
c5dfe8988d7e51e6781716c164a15a4945e1a750 | 2932571ebb9001edfb2b9a5b06ae4414b2fca910 | /blog_api/admin.py | 13161921319dcff5e14e317ab6c15fdef0631bc1 | [] | no_license | Astha-24/blog | 24738f3205ca3b07f94ccdf0a5698daaa01a1869 | 66b26a628c8645f9ef5c8ad1cb10f33d4b57dab2 | refs/heads/master | 2022-05-03T07:41:34.064826 | 2019-09-16T03:36:11 | 2019-09-16T03:36:11 | 208,393,138 | 1 | 1 | null | 2022-04-22T22:21:10 | 2019-09-14T05:21:01 | Python | UTF-8 | Python | false | false | 345 | py | from django.contrib import admin
from blog_api import models
# Register your models here.
# Register the blog models with the Django admin; Story gets a customised
# change-list via StoryAdmin below.
admin.site.register(models.UserProfile)
admin.site.register(models.Category)
class StoryAdmin(admin.ModelAdmin):
    # Columns shown on the Story change-list page.
    list_display = ('title','author','category','verified')
admin.site.register(models.Story,StoryAdmin)
admin.site.register(models.Comment)
| [
"gangwalastha78@gmail.com"
] | gangwalastha78@gmail.com |
54a8bdb070155635c618a0678553e1b098b4735a | da418b0fa32fdb69533124d96a1d44c104256cae | /todo/admin.py | 706ab4ace5f18b4cae658eb0ee8b9169f0410bfe | [] | no_license | yannickberthoud/IntranetBenu | 830565a56e2ad6a93118d73c2a36dcdc73c402bc | 676e2c468f2947ffe8f834a3ebbd39972db89385 | refs/heads/master | 2020-04-10T16:10:06.145905 | 2018-12-10T07:39:36 | 2018-12-10T07:39:36 | 161,135,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from django.contrib import admin
from .models import Todo
class TodoAdmin(admin.ModelAdmin):
    """Admin configuration for Todo items: assignment fields first, then task details."""
    fieldsets = (
        ('Assignation', {'fields': ['department', 'assigned_to']}),
        ('Task', {'fields': ['priority', 'title', 'description', 'due_date', 'status']})
    )
    list_display = ('department', 'assigned_to', 'title', 'due_date', 'status')
    # NOTE(review): Django's ModelAdmin option is spelled "search_fields"
    # (plural), so this attribute is silently ignored and no search box
    # appears.  Before renaming, confirm the lookups: relations need
    # "department__<field>"-style paths and date fields may not be searchable.
    search_field = ('department', 'title', 'due_date')
    list_filter = ('department','status')
admin.site.register(Todo, TodoAdmin)
| [
"y.berthoud@benu.ch"
] | y.berthoud@benu.ch |
f97563f2b9ee97e00d1d2d6e5485a10a011113c6 | 23185f4b793d324d4902f9a5f0e8ac22f47e7e0e | /fixture/db.py | a008e05ee3d953b9aa2113b822667f95009e5eba | [
"Apache-2.0"
] | permissive | DennisSmirnov/python_tests | 739fb7beec3f0958781fbf8acd81795557ba3c58 | b862e631b6cd27d0658e382c0e08ddd94c5f7269 | refs/heads/master | 2022-11-29T13:15:37.217945 | 2020-08-13T15:48:14 | 2020-08-13T15:48:14 | 238,788,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | import pymysql.cursors
from model.group import Group
from model.contact import Contact
class DbFixture:
    """Read-only access to the address-book MySQL database for test fixtures."""

    def __init__(self, host, name, user, password):
        """Open an autocommitting connection with the supplied credentials."""
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        # NOTE: attribute name deliberately keeps the original spelling
        # ("connction") since other code may reference it.
        self.connction = pymysql.connect(host=host, database=name, user=user,
                                         password=password, autocommit=True)

    def get_group_list(self):
        """Return every row of group_list as a Group model."""
        cursor = self.connction.cursor()
        try:
            cursor.execute(
                "select group_id, group_name, group_header, group_footer from group_list")
            groups = [Group(id=str(gid), name=gname, header=gheader, footer=gfooter)
                      for (gid, gname, gheader, gfooter) in cursor]
        finally:
            cursor.close()
        return groups

    def get_contact_list(self):
        """Return every non-deleted addressbook row as a Contact model."""
        cursor = self.connction.cursor()
        try:
            cursor.execute(
                "select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00'")
            contacts = [Contact(id=str(cid), firstname=first, lastname=last)
                        for (cid, first, last) in cursor]
        finally:
            cursor.close()
        return contacts
def destroy(self):
self.connction.close() | [
"arb.smirnov@gmail.com"
] | arb.smirnov@gmail.com |
741870643f2712c065d1a59619d2b892df5df9ef | 8a91367e0d6bf0fd105c913995a7b0fad2a0f686 | /assignment/a program to calculate the length of a string.py | 6663c5204124d083ce4bbdb0232cc9b87af4895c | [] | no_license | santosdave/assignment | 8280fbf0df60ad3c6103cc5b35e3885c8d531b6c | 4a0a6c1931acfae94ab071ff3fbafbba9e5aae2b | refs/heads/master | 2020-08-07T10:52:57.385500 | 2019-10-07T15:34:08 | 2019-10-07T15:34:08 | 213,420,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def string_length(str1):
    # Count the characters one at a time instead of using len(), as the
    # exercise requires; every character (including spaces) adds one.
    count = 0
    for char in str1:
        count +=1
    return count
# 'This assignment' has 15 characters, the space included.
print(string_length('This assignment'))
| [
"noreply@github.com"
] | noreply@github.com |
75e43a9b60553738eed5eeeaf7dee3384d088840 | 1a700479e5aac652a1260bdd3f0247130b7075cc | /snippets/urls.py | 021a1e35914c59f4de65948fd7a0aa48b4aaec4b | [] | no_license | Chi1211/tutorial | aca18f36b75a651df87ac5efa035a12bb271ff2b | b717b73d8ce58c61fcec1290423e1483685529e4 | refs/heads/master | 2023-02-20T21:36:00.504040 | 2021-01-22T02:04:22 | 2021-01-22T02:04:22 | 331,801,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
from django.urls import path
from . import views
# Routes for the snippets app: the app root shows the list view, and
# /detail/<id> shows a single snippet (the integer is passed as "id").
urlpatterns = [
    path('', views.Snippet_list, name=''),
    path('detail/<int:id>', views.Snippet_detail, name='detail'),
]
| [
"bichchi1211@gmail.com"
] | bichchi1211@gmail.com |
65faef840d04bdeb7155de6e7e23e1f6a184626b | 4bf067cd4fa1cee2891c54b02fafcaca28e8227a | /random.py | 5dffcfd8d8ead06c246861f051676c2e77a62e0b | [] | no_license | c1c51/Python | e19dd8e0d90ec5015c87cd9f113c79aea7b25111 | 6dfa8ffa94d92e8741217ae09f265680e4e44951 | refs/heads/master | 2020-03-09T19:06:33.997743 | 2018-04-10T14:49:01 | 2018-04-10T14:49:01 | 128,949,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | import Queue
import threading
import urllib2
# called by each thread: fetch *url* and push the raw response body onto the
# shared queue (Python 2 modules: Queue / urllib2).
def get_url(q, url):
    q.put(urllib2.urlopen(url).read())
theurls = ["http://google.com", "http://yahoo.com"]
q = Queue.Queue()
# Start one daemon thread per URL; daemons do not block interpreter exit.
for u in theurls:
    t = threading.Thread(target=get_url, args = (q,u))
    t.daemon = True
    t.start()
# NOTE(review): only ONE result is consumed even though two fetches were
# started; whichever response arrives second is discarded at exit.
s = q.get()
print (s)
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
efb51893323885d6670eb8d40a90a1bae3d186f0 | 4e1390db718bffd402db9365080b7d7b2e95f069 | /extra.py | d6f17fe239c7ed35c8479bd462e46c01f92dfd7d | [] | no_license | Mishagunkin/lab3 | 351a93f22e5510ff5af7c70699a0226215c02df6 | 0ed61910a0f2c74fa1e3b7a765ce7dea8bfdd879 | refs/heads/master | 2021-08-22T22:34:38.593242 | 2017-12-01T13:26:11 | 2017-12-01T13:26:11 | 112,732,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import random
import matplotlib.pyplot as plt
# NOTE(review): "year" is reassigned below; this first empty set is dead code.
year = set()
years = []
# Draw 5001 pseudo-random integers in [0, 100], both ends inclusive.
for i in range(5001):
    years.append(random.randint(0,100))
year = set(years)
# Manual frequency count: d maps value -> number of occurrences.
d = {}
for i in years:
    if i not in d.keys():
        d[i] = 1;
    else:
        d[i] += 1;
# Parallel value/count lists (built but not used by the plotting below).
x = []
y = []
for i in d.keys():
    x.append(i);
    y.append(d[i]);
# Histogram of the raw samples; plt.show() blocks until the window closes.
plt.hist(years)
plt.show()
print(d) | [
"misha36gunkin@mail.ru"
] | misha36gunkin@mail.ru |
fd6abb816229c9531f83907b7121353cf5458b38 | 7c63114e668d8e4810a885977f0eccb0fbb4af70 | /bourse/context_processors.py | 501a14f336e20529268ed10c492f4e71fab6bf90 | [] | no_license | WoopyOnOff/woop-bourse | 1a91385fba0cd56ad234afdfd7b61a09031dceea | d86ac0c8428b5894ec501234e1de0dfeca047025 | refs/heads/master | 2023-02-24T11:57:30.657934 | 2023-02-13T18:53:14 | 2023-02-13T18:53:14 | 250,991,947 | 5 | 1 | null | 2023-02-13T18:53:04 | 2020-03-29T09:05:01 | Python | UTF-8 | Python | false | false | 373 | py | from django.conf import settings
from .models import Event
def site_params(request):
    # Context processor: expose the site-wide branding settings (title,
    # signature, currency) to every template rendered with a RequestContext.
    return {'SITE_TITLE_SETTING': settings.SITE_TITLE, 'SITE_SIGNATURE_SETTING': settings.SITE_SIGNATURE, 'SITE_CURRENCY_SETTING': settings.CURRENCY }
def active_event(request):
active_event = Event.objects.filter(status__in=[1,2,3])
return {'active_event':active_event} | [
"25619661+clmntpllr@users.noreply.github.com"
] | 25619661+clmntpllr@users.noreply.github.com |
e924599e2d1e9dd63c005c8de0ccb182cb38f851 | 9681b187a4bd3b9aad06c45a8877698b8723a889 | /python_task.py | 1dde3590b666e155e9083b0eaabd7d4f76cee6da | [] | no_license | AlexandreSabino/poc-scdf-python | 36970303786e3908f77dec552c7cf06e2853979d | 3b5c9967f26302b676b2ae66b61731d6e26ce16e | refs/heads/master | 2020-07-31T17:09:43.700441 | 2019-09-24T22:38:32 | 2019-09-24T22:38:32 | 210,686,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | import sys
import time
from util.task_status import TaskStatus
from util.task_args import get_task_id, get_db_url, get_task_name, get_cmd_arg
try:
    # Connect to SCDF's database.
    status = TaskStatus(get_task_id(), get_db_url())
    # Set task's status to RUNNING.
    status.running()
    # Do something.
    print('Start task do biscoito:{}, id:{}'.format(get_task_name(), get_task_id()))
    print('Wait for 10 seconds ... :) ')
    sys.stdout.flush()
    time.sleep(10)
    # An 'error.message' command-line argument forces the failure path
    # (used to exercise the FAILED status handling below).
    if get_cmd_arg('error.message') is not None:
        raise Exception(get_cmd_arg('error.message'))
    print('message: ' + str(get_cmd_arg('message')))
    print(str(get_cmd_arg('password')))
    print("Goodbye!")
    # Set task's status to COMPLETED.
    status.completed()
except Exception as exp:
    # Set task's status to FAILED.
    # NOTE(review): if TaskStatus(...) itself raised, "status" is unbound
    # here and the handler dies with a NameError -- consider guarding.
status.failed(1, 'Task failed: {}'.format(exp)) | [
"alexandre.sabino.avsd@gmail.com"
] | alexandre.sabino.avsd@gmail.com |
3fce743cbdf9d48cb346a2ba6e778f03e34393e2 | a0c3149b77984fb85d2aadb2992bd1998e2a8b23 | /media.py | 4daef12a6f661b0e355e7efeb9c00ebdad3f93cc | [] | no_license | shockwave92/Udacity_website-project | 4d26b752b0ae57a8a3e9995ca0acd4b64352dca4 | 907d8fd5d6e04f8a90ac562448fba4d2f0b93d0b | refs/heads/master | 2021-01-19T12:00:25.333574 | 2017-04-30T19:47:57 | 2017-04-30T19:47:57 | 88,013,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import webbrowser
class Movie():
    """Container for the metadata of a single movie.

    Stores the title, storyline, poster-art URL, YouTube trailer URL and
    release date exactly as supplied by the caller.
    """

    # Allowed MPAA ratings, in ascending order of restriction.
    VALID_RATINGS = ['G', 'PG', 'PG-13', 'R']

    def __init__(self, movie_title, movie_storyline, poster_image, youtube_trailer, release_date):
        """Remember the supplied metadata on the new instance."""
        self.title = movie_title
        self.trailer_url = youtube_trailer
        self.storyline = movie_storyline
        self.release = release_date
        self.poster_image_url = poster_image
| [
"adadam2002@gmail.com"
] | adadam2002@gmail.com |
a39b5d0de5a23822bee08ee7d2ed333be2eb5179 | b069c092ed99cd12c5cbf91a8165887a0846ab99 | /Project1/Naive-Bayes+Classification (1).py | b06c327861fdc134c0eaf3a4726b2f0f7885934b | [] | no_license | sashaena/NLP-18-sashaenaofori | 8603de2e119fc26a8be06923a67a20da11e2bedb | e71329535ddef80ecd611895df2c17ea3fd9b354 | refs/heads/master | 2020-04-03T11:43:45.677780 | 2018-11-26T18:09:49 | 2018-11-26T18:09:49 | 155,230,097 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,046 | py |
# coding: utf-8
# In[1]:
import re
import math
import random
# In[2]:
def processFiles(fname):
    """Read labelled sentiment files and bucket tokenised reviews by class.

    fname: iterable of paths to tab-separated files with one
    "review<TAB>label" pair per line (label '0' = negative, otherwise
    positive).

    Returns {0: [token list per negative review],
             1: [token list per positive review]}.
    """
    dict_corpus = {0: [], 1: []}
    for path in fname:
        with open(path, "r") as opened_file:
            for line in opened_file:
                parts = line.strip('\n').split('\t')
                # A review is negative only with an explicit '0' label;
                # anything else (including a missing label) goes to class 1,
                # matching the original branching.  The original if/else
                # bodies were identical apart from the dict key, so they
                # are consolidated here.
                label = 0 if len(parts) > 1 and int(parts[1]) == 0 else 1
                # Lower-case and strip punctuation before tokenising; the
                # original ''.join(parts[0]) was a no-op and is dropped.
                text = parts[0].lower()
                text = re.sub(r'[,.:!+&$?;""()''/|]', '', text)
                dict_corpus[label].append(text.split())
    return dict_corpus
# globally accessed
# Training corpora: one labelled reviews file per source (Amazon cells,
# IMDb, Yelp); the combined corpus is built once at import time.
name= ["amazon_cells_labelled.txt", "imdb_labelled.txt", "yelp_labelled.txt"]
dict_corpus=processFiles(name)
# In[3]:
# This function calculates the log_prior of the two classes positive(1) and negative(0)
def calculate_logprior(dict_corpus):
    """Print corpus statistics and return (n_positive, n_negative, log-priors).

    The log-prior of each class is log(class document count / total
    document count), keyed 0 (negative) and 1 (positive).
    """
    n_pos = len(dict_corpus[1])
    n_neg = len(dict_corpus[0])
    total = n_pos + n_neg
    log_prior = {0: math.log(n_neg / total), 1: math.log(n_pos / total)}
    print(n_pos, n_neg, total)
    print(log_prior)
    return n_pos, n_neg, log_prior
positive_class,negative_class,log_prior=calculate_logprior(dict_corpus)
# In[4]:
def calculate_loglikelihood(dict_corpus):
    """Count per-class word frequencies and Laplace-smoothed denominators.

    Returns (wordCountPositive, wordCountNegative, denominator, vocab)
    where denominator[c] is sum over the vocabulary of (count_c(w) + 1),
    i.e. the add-one-smoothed normaliser for class c, and vocab is the
    set of distinct words seen in either class.
    """
    wordCountPositive = {}
    wordCountNegative = {}
    # Occurrence counts for the negative (0) then positive (1) reviews.
    for review in dict_corpus[0]:
        for word in review:
            wordCountNegative[word] = wordCountNegative.get(word, 0) + 1
    for review in dict_corpus[1]:
        for word in review:
            wordCountPositive[word] = wordCountPositive.get(word, 0) + 1
    print(len(wordCountNegative))
    # Set union replaces the original list concatenation + set() round trip.
    vocab = set(wordCountPositive) | set(wordCountNegative)
    print(len(vocab))
    # Direct sums replace the original accumulation loops, which also
    # redundantly re-assigned the dict entry on every iteration.
    denominator = {
        1: sum(wordCountPositive.get(word, 0) + 1 for word in vocab),
        0: sum(wordCountNegative.get(word, 0) + 1 for word in vocab),
    }
    return wordCountPositive, wordCountNegative, denominator, vocab
wordCountPositive, wordCountNegative, denominator, vocab = calculate_loglikelihood(dict_corpus)
# In[5]:
# This function predicts the class of a sentence
def predictsentence(test_sentence):
    """Classify a whitespace-split sentence as positive (1) or negative (0)
    using the module-level word counts, denominators and log-priors."""
    # Renamed from "sum", which shadowed the builtin.
    scores = {0: 0, 1: 0}
    for word in test_sentence.split():
        # Laplace-smoothed log-likelihood of the word under each class.
        scores[1] += math.log((wordCountPositive.get(word, 0) + 1) / denominator[1])
        scores[0] += math.log((wordCountNegative.get(word, 0) + 1) / denominator[0])
    # Add the class priors after the word sums, as the original did, so the
    # floating-point accumulation order (and thus the result) is unchanged.
    scores[0] += log_prior[0]
    scores[1] += log_prior[1]
    # Ties go to the negative class, exactly as before.
    return 1 if scores[1] > scores[0] else 0
predictsentence("bad")
# In[6]:
# This function predicts the class of a document.
# It utilises the function predictSentence above to predict individual sentences in a text file
def predictDocKnownLabels(testdoc, results):
    """Classify every labelled review in *testdoc* ("review<TAB>label" per
    line), write one predicted label per line to *results*, and return
    (computed, knownLabel) -- the predicted and true label lists."""
    computed = []
    knownLabel = []
    with open(testdoc, "r") as openedTestdoc, open(results, "w", newline="") as openedresultdoc:
        for line in openedTestdoc:
            # Lower-case and strip punctuation (the original ''.join(line)
            # was a no-op and is dropped); digits survive, so the label does.
            cleaned = line.lower()
            cleaned = re.sub(r'[,.:!+<>&$?;""()''/|]', '', cleaned)
            parts = cleaned.strip('\n').split('\t')
            knownLabel.append(int(parts[1]))
            label = predictsentence(parts[0])
            computed.append(label)
            openedresultdoc.write(str(label) + "\n")
    return computed, knownLabel
# BUG FIX: the original unpacked this call as "knownLabel, computed = ...",
# the opposite of the (computed, knownLabel) return order; the swap was
# masked only because accuracy() compares the two lists symmetrically.
computed, knownLabel = predictDocKnownLabels("yelp_labelled.txt", "results.txt")
# In[7]:
# This function calculates for the accuracy of the predictions
# This builds up on the function predictUnknown
def accuracy(knownLabel, computed):
    """Return the percentage of positions where the two label lists agree,
    rounded to two decimal places."""
    matches = sum(
        1 for idx in range(len(knownLabel)) if knownLabel[idx] == computed[idx]
    )
    return round(matches / len(knownLabel) * 100, 2)
accuracy(knownLabel, computed)
# In[8]:
# This function predicts the class of a document with unknown labels.
# It utilises the function predictSentence above to predict individual sentences in a text file
def predictDocUnknownLabels(testdoc, results):
    """Classify every (unlabelled) line of *testdoc* and write one predicted
    label per line to *results*; nothing is returned."""
    with open (testdoc, "r") as openedTestdoc,open (results, "w", newline = "") as openedresultdoc:
        for line in openedTestdoc:
            # ''.join of a string is a no-op; this just lower-cases the line.
            lineSplitFormat = ''.join(line).lower()
            # print(lineSplitFormat)
            # Strip punctuation the same way the training corpus was cleaned.
            lineSplitFormat = re.sub(r'[,.:!+<>&$?;""()''/|]', '', lineSplitFormat)
            # call the function predictSentence and pass in only the reviews
            label = predictsentence(lineSplitFormat)
            # print(label)
            # write to the results file the predicted labels
            openedresultdoc.write(str(label) + "\n")
# To test this Naive Bayes classifier, relapce testdoc.txt with intended test file
predictDocUnknownLabels("testdoc.txt", "results.txt")
# In[ ]:
# In[ ]:
| [
"sasha_ena.ofori@outlook.com"
] | sasha_ena.ofori@outlook.com |
517d684780c3347d815863369480a78b0ca47097 | 021f3512b248ead3887b7464a4f32fd157853f98 | /wikipedia-solutions/solution_1.py | e5b1ee1e38f740d048534314133b02e0129cef0c | [
"MIT"
] | permissive | jtmorgan/ds4ux | c1953dca84bfa108392bd25ffe2cb1fb21d0a261 | 14c4ece59b367fe7c8db09a126161693b9a640b3 | refs/heads/master | 2020-12-24T21:01:04.186781 | 2017-10-04T01:03:11 | 2017-10-04T01:03:11 | 59,071,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | """
1. Save the revision metadata printed in wikipedia1-2.py to a file called "wikipedia_revisions.tsv".
"""
import requests
# raw string:
# ?action=query&prop=revisions&titles=Python_(programming_language)&rvlimit=500&rvprop=timestamp|user&format=json')
# parameter version which makes a little more sense
parameters = {'action' : 'query',
              'prop' : 'revisions',
              'titles' : 'Python (programming language)',
              'rvlimit' : 500,
              'rvprop' : "timestamp|user",
              'format' : 'json',
              'continue' : ''}
# BUG FIX: the output file is now managed by "with", so it is closed even if
# a request raises part-way through the pagination loop (the original only
# closed it on the success path).
with open("wikipedia_revisions.tsv", 'w') as output_file:
    # Keep requesting until the API stops returning a 'continue' token.
    while True:
        wp_call = requests.get('https://en.wikipedia.org/w/api.php', params=parameters)
        response = wp_call.json()
        # items() avoids re-looking up each page dict three times.
        for page_id, page in response["query"]["pages"].items():
            page_title = page["title"]
            for rev in page["revisions"]:
                print(page_title + "\t" + rev["user"] + "\t" + rev["timestamp"], file=output_file)
        if 'continue' in response:
            parameters.update(response['continue'])
        else:
            break
| [
"jonnymorgan.esq@gmail.com"
] | jonnymorgan.esq@gmail.com |
7eb2b122df26f69dc11333a5818a93efdc8ae711 | c72f2a8f1b7c2e3f7f4b8f5f1721443094d51d41 | /sdk/python/bouncerapi/models/login_to_bouncer_api_request.py | 79a8053d932bbc580c8902c89df017b9b6dec37c | [
"MIT",
"Apache-2.0"
] | permissive | nmfta-repo/nmfta-bouncer | e2da24ef5983a9901eaeeeebb5b72012ddd9602f | a178244dbf0b8a165aabc02a5d1ba05006f9ec22 | refs/heads/master | 2022-12-09T18:06:57.921644 | 2020-07-17T20:23:29 | 2020-07-17T20:23:29 | 131,425,777 | 1 | 2 | Apache-2.0 | 2022-12-08T05:06:35 | 2018-04-28T16:26:35 | Java | UTF-8 | Python | false | false | 1,847 | py | # -*- coding: utf-8 -*-
"""
bouncerapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class LoginToBouncerAPIRequest(object):
    """Implementation of the 'Login to Bouncer API request' model.

    Holds the credentials and grant type sent with a Bouncer API login.

    Attributes:
        username (string): account name.
        password (string): account password.
        grant_type (string): must be `password`
    """

    # Maps Model property names to API property names.
    _names = {
        "username": 'username',
        "password": 'password',
        "grant_type": 'grant_type'
    }

    def __init__(self,
                 username=None,
                 password=None,
                 grant_type=None):
        """Constructor for the LoginToBouncerAPIRequest class."""
        self.username = username
        self.password = password
        self.grant_type = grant_type

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dictionary.

        Returns None when *dictionary* is None; keys absent from the
        dictionary leave the corresponding attribute as None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('username'),
                   dictionary.get('password'),
                   dictionary.get('grant_type'))
| [
"krishnaswin@hotmail.com"
] | krishnaswin@hotmail.com |
5e277ac73b8593875d3614ad8691df57cb8aa2fb | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/toontown/minigame/DistributedTagTreasure.pyc.py | 48111c1e609a7eb84bfa86c60461bd082ade4002 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # 2013.08.22 22:21:41 Pacific Daylight Time
# Embedded file name: toontown.minigame.DistributedTagTreasure
from toontown.safezone import DistributedTreasure
class DistributedTagTreasure(DistributedTreasure.DistributedTreasure):
    """Client-side treasure (an ice-cream prop) for the Tag minigame."""
    __module__ = __name__

    def __init__(self, cr):
        DistributedTreasure.DistributedTreasure.__init__(self, cr)
        self.modelPath = 'phase_4/models/props/icecream'
        self.grabSoundPath = 'phase_4/audio/sfx/SZ_DD_treasure.mp3'
        # Hide the treasure when the minigame leaves the stage.
        self.accept('minigameOffstage', self.handleMinigameOffstage)

    def handleEnterSphere(self, collEntry):
        # Only avatars that are not "it" may request to grab the treasure.
        if not base.localAvatar.isIt:
            self.d_requestGrab()
        return None

    def handleMinigameOffstage(self):
        # Reparenting to "hidden" removes the node from the visible scene.
        self.nodePath.reparentTo(hidden)
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\minigame\DistributedTagTreasure.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:21:41 Pacific Daylight Time
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
20011cc0e92d5d34d0cd86caa3ea2e7add6217df | 4d54787803ddbf1bc9b94d177386f682996c74fa | /DenominatorChecker.py | 6f1b7e2fcf24dcdcdc9f92e577ae0d1af41e2f29 | [] | no_license | miarobin/Level4Project | 0d1ef43c6449be3a9f6a1e9100e6e4ec0513dc8f | 33c1c61d544e1e4c2b8c6c66e7d924c52b135699 | refs/heads/master | 2022-06-06T03:48:28.806914 | 2020-04-29T16:31:01 | 2020-04-29T16:31:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,644 | py | ##RAMBO Momentum Generator
import numpy as np
import matrix2py
from tqdm import tqdm
import sys
import matplotlib.pyplot as pyplot
def minkowski_product(p1, p2):
    """Minkowski inner product of two 4-vectors, metric signature (+,-,-,-).

    Works elementwise when the components are arrays; np.sum collapses the
    result to a scalar.
    """
    metric_terms = p1[0]*p2[0] - p1[1]*p2[1] - p1[2]*p2[2] - p1[3]*p2[3]
    return np.sum(metric_terms)
def dot_product(v1, v2):
    """Euclidean dot product: sum of elementwise products along axis 0."""
    elementwise = np.multiply(v1, v2)
    return np.sum(elementwise, axis=0)
def rambo(n = 5):
    #Random phase space generator RAMBO.
    # Produces n massless four-momenta as rows of an (n, 4) array; the
    # construction below boosts/rescales them into a common rest frame
    # (standard RAMBO -- callers rescale by the collision energy).
    rho_1, rho_2, rho_3, rho_4 = np.random.rand(4, n)
    # Isotropic directions: cos(theta) uniform in [-1, 1), phi in [0, 2*pi).
    c = 2*rho_1 - 1
    phi = 2*np.pi*rho_2
    # Energies drawn as -log of the product of two uniforms.
    q_0 = - np.log(np.multiply(rho_3,rho_4))
    q_1 = q_0*np.sqrt(1-c**2)*np.cos(phi)
    q_2 = q_0*np.sqrt(1-c**2)*np.sin(phi)
    q_3 = q_0*c
    q = np.array([q_0, q_1, q_2, q_3])
    # Total four-momentum of the sample and its invariant mass.
    Q = np.sum(q, axis=1)
    M = np.sqrt(minkowski_product(Q, Q))
    # Boost parameters that bring Q to rest, plus the 1/M rescaling factor.
    b = - Q[1:]/M
    x = 1/M
    gamma = np.sqrt(1 + dot_product(b,b))
    a = 1/(1+gamma)
    # Lorentz-boost and rescale every q into the output momenta p.
    p_0 = x*(gamma*q_0 + dot_product(q[1:],b[:,None]))
    p_123 = x*np.add(q[1:], np.outer(b, q[0] + a*dot_product(q[1:],b[:,None])))
    # Transpose to shape (n, 4): one row per outgoing particle.
    p = np.transpose(np.array([p_0, p_123[0], p_123[1], p_123[2]]))
    return p
def sing_event(CM, n):
    #Generate one full set of momenta and matrix element
    # Incoming beams: back-to-back along z, each carrying half of CM.
    p_a = np.array([CM, 0, 0, CM])/2
    p_b = np.array([CM, 0, 0, -CM])/2
    mom = rambo(n)*CM #Output momenta
    # matrix2py expects components along rows and particles along columns,
    # hence the transpose (presumably a MadGraph-generated module -- confirm).
    me = matrix2py.get_value(np.transpose(np.concatenate(([p_a, p_b], mom))),alphas,nhel) #Matrix element calculation
    return (me, mom)
##NPY mandel creation (mom still structured)
def mandel_creation(combs_str, mom):
    """Return an array of Mandelstam-style invariants, one per combination.

    Each entry of *combs_str* is a comma-separated list of 1-based particle
    indices; the corresponding rows of *mom* are summed and squared with
    the Minkowski metric.
    """
    invariants = []
    for comb in combs_str:
        indices = [int(token) - 1 for token in comb.split(',')]
        total_p = np.sum(np.array([mom[idx] for idx in indices]), axis=0)
        invariants.append(minkowski_product(total_p, total_p))
    return np.array(invariants)
##Initital variables:
CM = 1000 #Center of mass energy
n_jet = 3 #Number of jets
# Load the process parameters once at import time.
matrix2py.initialisemodel('../../Cards/param_card.dat')
# Strong-coupling value passed to every matrix-element evaluation.
alphas = 0.13
nhel = -1 # means sum over all helicity
# 1-based particle-index combinations used to build the invariants.
mandel_str = ['1,3','1,4','2,3','2,4','1,2,3','1,2,4','1,3,4','2,3,4']
def genDataNPY(n_processes):
    """Scatter-plot the running maximum of |ME| times the product of the
    invariants over *n_processes* random events (non-record slots stay 0)."""
    me_max = np.zeros(n_processes)
    current_max = 0
    for i in tqdm(range(n_processes)):
        me, mom = sing_event(CM, n_jet)
        # NOTE(review): "reduce" is a builtin only on Python 2; Python 3
        # needs "from functools import reduce".
        mandel_vars = reduce(np.multiply, mandel_creation(mandel_str, mom))
        me = np.multiply(me, mandel_vars)
        # Record only new maxima; every other entry of me_max stays zero.
        if me > current_max:
            me_max[i] = me
            current_max = me
    pyplot.scatter(range(n_processes), me_max)
genDataNPY(int(sys.argv[1])) ##Enter number of datapoints when calling code (ie python GenDataLO.py 100000) | [
"flxj44@local"
] | flxj44@local |
e06919dd2a9bbd247c1840da35f544f13f1c92bb | 36a7c6c092799d9550233be9c735964768f34f09 | /EVSCapp/permissions.py | e6f4ad1effbded5fa92093c5e13393f9be30bfdf | [] | no_license | AmirIdris/EVSCProject | eea215f8480fdcee54cc2cce0a675621c8c487bb | ed994c240924e6c30626b7e8a8020480c8112c4e | refs/heads/master | 2023-07-28T00:12:56.857669 | 2021-09-07T21:10:58 | 2021-09-07T21:10:58 | 393,363,817 | 0 | 1 | null | 2021-09-07T21:10:58 | 2021-08-06T12:01:30 | CSS | UTF-8 | Python | false | false | 271 | py | from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user == request.user | [
"you@example.com"
] | you@example.com |
a855dfcdb9d19fe7cf5b4dba0238a6e3b1d49525 | 3989cdaba804fc0ca3a3adf811ccc201b9e404ca | /server/platform_event_queue_processor.py | d6970cca5cb6719aa6a8c75719296634bfa62a07 | [] | no_license | KevinJMcGrath/GammaSFDCPlatformEventHandler | 462a259f1cc99ddead17387507311060a0a2509e | 1cb9a54912a21593bd8f711055927f42319f50a7 | refs/heads/master | 2023-03-31T04:48:20.088803 | 2021-03-31T11:18:25 | 2021-03-31T11:18:25 | 289,269,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | import asyncio
import logging
import security
from models import tenant
from tenant import processor
async def process_events(async_queue: asyncio.Queue):
while True:
tenant_event: tenant.TenantEvent = await async_queue.get()
logging.debug('Event item retreived from queue.')
if security.platform_auth.check_platform_event_authorized(tenant_event):
if tenant_event.type == 'create':
processor.create_tenant(tenant_event=tenant_event)
elif tenant_event.type == 'status':
processor.status_check(tenant_event)
elif tenant_event.type == 'delete':
processor.delete_tenant(tenant_event)
elif tenant_event.type == 'system_check':
processor.send_proof_of_life()
elif tenant_event.type == 'list_pending':
processor.event_type_not_implemented(tenant_event)
else:
processor.reject_event(tenant_event, reason="invalid_event_type")
else:
processor.reject_event(tenant_event, reason="invalid_event_auth")
async_queue.task_done()
logging.debug('Event item fully processed.') | [
"kevinmcgr@gmail.com"
] | kevinmcgr@gmail.com |
77aac97c9c3d75a6f3b31b05cbf3118ddc7f1971 | 896d2984e41cc29c8d5140bb3c9b47e82dbc21e6 | /venv/Lib/site-packages/PySide2/examples/widgets/richtext/syntaxhighlighter/syntaxhighlighter_rc.py | 01b1633e7e5f9f3a2b7ae02588c32077897cfab1 | [] | no_license | PakasitKetudom/Movies | 5f07b4f1fb0a5217bfe025157f58b4747f5c2943 | 57bb2b79990acec6d843a75d7e9dd157b599c287 | refs/heads/main | 2023-01-21T14:12:09.281643 | 2020-11-26T14:01:03 | 2020-11-26T14:01:03 | 313,512,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Thu Jan 31 14:42:32 2019
# by: The Resource Compiler for PySide2 (Qt v5.12.1)
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore
qt_resource_data = b"\
\x00\x00\x06\xca\
T\
EMPLATE = app\x0d\x0aL\
ANGUAGE = C++\x0d\x0aT\
ARGET = \
assistant\x0d\x0a\x0d\x0aCON\
FIG += qt\
warn_on\x0d\x0aQT \
+= xml n\
etwork\x0d\x0a\x0d\x0aPROJEC\
TNAME = A\
ssistant\x0d\x0aDESTDI\
R = .\
./../bin\x0d\x0a\x0d\x0aFORM\
S += finddialog.\
ui \x5c\x0d\x0a he\
lpdialog.ui \x5c\x0d\x0a \
mainwindo\
w.ui \x5c\x0d\x0a \
settingsdialog.u\
i \x5c\x0d\x0a tab\
bedbrowser.ui \x5c\x0d\
\x0a topicch\
ooser.ui\x0d\x0a\x0d\x0aSOUR\
CES += main.cpp \
\x5c\x0d\x0a helpw\
indow.cpp \x5c\x0d\x0a \
topicchoose\
r.cpp \x5c\x0d\x0a \
docuparser.cpp \
\x5c\x0d\x0a setti\
ngsdialog.cpp \x5c\x0d\
\x0a index.c\
pp \x5c\x0d\x0a pr\
ofile.cpp \x5c\x0d\x0a \
config.cpp \
\x5c\x0d\x0a findd\
ialog.cpp \x5c\x0d\x0a \
helpdialog.\
cpp \x5c\x0d\x0a m\
ainwindow.cpp \x5c\x0d\
\x0a tabbedb\
rowser.cpp\x0d\x0a\x0d\x0aHE\
ADERS += \
helpwindow.h \x5c\x0d\x0a\
topiccho\
oser.h \x5c\x0d\x0a \
docuparser.h \x5c\
\x0d\x0a settin\
gsdialog.h \x5c\x0d\x0a \
index.h \x5c\x0d\
\x0a profile\
.h \x5c\x0d\x0a fi\
nddialog.h \x5c\x0d\x0a \
helpdialog\
.h \x5c\x0d\x0a ma\
inwindow.h \x5c\x0d\x0a \
tabbedbrow\
ser.h \x5c\x0d\x0a \
config.h\x0d\x0a\x0d\x0aRES\
OURCES += assist\
ant.qrc\x0d\x0a\x0d\x0aDEFIN\
ES += QT_KEYWORD\
S\x0d\x0a#DEFINES += \
QT_PALMTOPCENTER\
_DOCS\x0d\x0a!network:\
DEFINES +\
= QT_INTERNAL_NE\
TWORK\x0d\x0aelse:QT +\
= network\x0d\x0a!xml:\
DEFINES \
+= QT_IN\
TERNAL_XML\x0d\x0aelse\
:QT += xml\x0d\x0aincl\
ude( ../../src/q\
t_professional.p\
ri )\x0d\x0a\x0d\x0awin32 {\x0d\
\x0a LIBS += -ls\
hell32\x0d\x0a RC_F\
ILE = assistant.\
rc\x0d\x0a}\x0d\x0a\x0d\x0amacos {\
\x0d\x0a ICON = ass\
istant.icns\x0d\x0a \
TARGET = assist\
ant\x0d\x0a# QMAKE_\
INFO_PLIST = Inf\
o_mac.plist\x0d\x0a}\x0d\x0a\
\x0d\x0a#target.path =\
$$[QT_INSTALL_B\
INS]\x0d\x0a#INSTALLS \
+= target\x0d\x0a\x0d\x0a#as\
sistanttranslati\
ons.files = *.qm\
\x0d\x0a#assistanttran\
slations.path = \
$$[QT_INSTALL_TR\
ANSLATIONS]\x0d\x0a#IN\
STALLS += assist\
anttranslations\x0d\
\x0a\x0d\x0aTRANSLATIONS \
= assista\
nt_de.ts \x5c\x0d\x0a \
as\
sistant_fr.ts\x0d\x0a\x0d\
\x0a\x0d\x0aunix:!contain\
s(QT_CONFIG, zli\
b):LIBS += -lz\x0d\x0a\
\x0d\x0a\x0d\x0atarget.path=\
$$[QT_INSTALL_BI\
NS]\x0d\x0aINSTALLS +=\
target\x0d\x0a\
"
qt_resource_name = b"\
\x00\x08\
\x0e\x84\x7fC\
\x00e\
\x00x\x00a\x00m\x00p\x00l\x00e\x00s\
\x00\x07\
\x0c\xe8G\xe5\
\x00e\
\x00x\x00a\x00m\x00p\x00l\x00e\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x16\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"ke.pakasit@gmail.com"
] | ke.pakasit@gmail.com |
3968eaae944487dd8ca192951ae43c82a5f073ba | 0f79c5ca76bd971f4fe88219f26a955fcfe8da70 | /docs/conf.py | c7a22ffdc4435235b4a8b4412c150d9a3b8a990b | [] | no_license | aragilar/magnetic | b05b9c5bae124484dc07559de4378bccda1db115 | fe2d112ba32b1607fda3a562c539dc03563b9acc | refs/heads/master | 2023-08-31T18:12:21.262853 | 2017-10-15T05:19:33 | 2017-10-15T05:19:33 | 59,745,925 | 0 | 0 | null | 2023-09-07T22:43:28 | 2016-05-26T11:44:43 | Python | UTF-8 | Python | false | false | 8,325 | py | # -*- coding: utf-8 -*-
#
# magnetic documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:53:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from magnetic import __version__ as mag_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'magnetic'
copyright = u'2015, James Tocknell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mag_version
# The full version, including alpha/beta/rc tags.
release = mag_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'magneticdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'magnetic.tex', u'magnetic Documentation',
u'James Tocknell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'magnetic', u'magnetic Documentation',
[u'James Tocknell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'magnetic', u'magnetic Documentation',
u'James Tocknell', 'magnetic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"aragilar@gmail.com"
] | aragilar@gmail.com |
23207e111a4e7e07a5d636cb6326493693f5b3c4 | 532ca0c5361b54970bc435232e2a6d079c49aecd | /03_Conditionals and Control Flow/01_Conditionals and Control Flow/04_How the Tables Have Turned.py | 761dda428d2ddf39b1b6f433d981dd3583040f68 | [] | no_license | haveano/codeacademy-python_v1 | dc5484e8df73b9a15ffce835dde625b6454c8302 | 10e6fb2974e1c47f380bb6a33c50b171ecfbf50f | refs/heads/master | 2021-01-11T16:45:57.337493 | 2017-05-30T10:04:08 | 2017-05-30T10:04:08 | 79,660,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | """
How the Tables Have Turned
Comparisons result in either True or False, which are booleans as we learned before in this exercise.
# Make me true!
bool_one = 3 < 5
Let's switch it up: we'll give the boolean, and you'll write the expression, just like the example above.
Instructions
For each boolean value in the editor, write an expression that evaluates to that value.
Remember, comparators are: ==, !=, >, >=, <, and <=.
Use at least three different ones!
Don't just use True and False! That's cheating!
"""
# Create comparative statements as appropriate on the lines below!
# Make me true!
bool_one = 3 < 5 # We already did this one for you!
# Make me false!
bool_two = 13 != 14-1
# Make me true!
bool_three = 13 !=14-2
# Make me false!
bool_four = 13 >= 14
# Make me true!
bool_five = 13 <= 13
| [
"noreply@github.com"
] | noreply@github.com |
2a98e4473d1b4c0397d714840ce2654e08bf6b3f | 42f5eaf16bfd7076cb5a598cf2f239faa575f28b | /05-grpc-google-cloud-speech/python/google/ads/googleads/v2/enums/spending_limit_type_pb2.py | 516d2b12e5019a54a44c7e996fd5de1e731b7684 | [] | no_license | jiriklepl/IMW-2019 | ab0e1c791a794ccf8a6a8d8d4e732c29acee134c | 921c85d3c8132114ad90db8deb52eb5ddc06c720 | refs/heads/master | 2020-08-28T13:29:15.087785 | 2019-12-15T17:12:24 | 2019-12-15T17:12:24 | 217,711,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,608 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v2/enums/spending_limit_type.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v2/enums/spending_limit_type.proto',
package='google.ads.googleads.v2.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v2.enumsB\026SpendingLimitTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V2.Enums\312\002\035Google\\Ads\\GoogleAds\\V2\\Enums\352\002!Google::Ads::GoogleAds::V2::Enums',
serialized_pb=b'\n7google/ads/googleads/v2/enums/spending_limit_type.proto\x12\x1dgoogle.ads.googleads.v2.enums\x1a\x1cgoogle/api/annotations.proto\"X\n\x15SpendingLimitTypeEnum\"?\n\x11SpendingLimitType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0c\n\x08INFINITE\x10\x02\x42\xeb\x01\n!com.google.ads.googleads.v2.enumsB\x16SpendingLimitTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v2/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V2.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V2\\Enums\xea\x02!Google::Ads::GoogleAds::V2::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SPENDINGLIMITTYPEENUM_SPENDINGLIMITTYPE = _descriptor.EnumDescriptor(
name='SpendingLimitType',
full_name='google.ads.googleads.v2.enums.SpendingLimitTypeEnum.SpendingLimitType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INFINITE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=145,
serialized_end=208,
)
_sym_db.RegisterEnumDescriptor(_SPENDINGLIMITTYPEENUM_SPENDINGLIMITTYPE)
_SPENDINGLIMITTYPEENUM = _descriptor.Descriptor(
name='SpendingLimitTypeEnum',
full_name='google.ads.googleads.v2.enums.SpendingLimitTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_SPENDINGLIMITTYPEENUM_SPENDINGLIMITTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=120,
serialized_end=208,
)
_SPENDINGLIMITTYPEENUM_SPENDINGLIMITTYPE.containing_type = _SPENDINGLIMITTYPEENUM
DESCRIPTOR.message_types_by_name['SpendingLimitTypeEnum'] = _SPENDINGLIMITTYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SpendingLimitTypeEnum = _reflection.GeneratedProtocolMessageType('SpendingLimitTypeEnum', (_message.Message,), {
'DESCRIPTOR' : _SPENDINGLIMITTYPEENUM,
'__module__' : 'google.ads.googleads.v2.enums.spending_limit_type_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.enums.SpendingLimitTypeEnum)
})
_sym_db.RegisterMessage(SpendingLimitTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"jiriklepl@seznam.cz"
] | jiriklepl@seznam.cz |
9d351c3474bbe68f18537e4cbdff5cf9e3bfcd13 | 6b74279c196a34e3d27d825fa028a42555a5bd36 | /recursion/subset_problem.py | 2b7f627206d68f3a8e6b6a80a4a895914f1cab79 | [] | no_license | SANDIPAN22/DSA | f489857cd60609ea22a86d4a0de4158ab5006d83 | 706a46926df36a97eda35ac06db0dd402fbac8b4 | refs/heads/master | 2023-07-30T16:10:36.943888 | 2021-10-02T19:32:01 | 2021-10-02T19:32:01 | 401,262,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | def subsetProb(ip,op):
if len(ip)==0:
print(f"'{op}'")
return
else:
op1=op
op2=op
op2+=ip[0]
ip=ip[1:]
subsetProb(ip,op1)
subsetProb(ip,op2)
subsetProb("abc",'') | [
"chak.sandipan22@gmail.com"
] | chak.sandipan22@gmail.com |
713c20a1f3c049d5e9c62bdd9651d102db125cd3 | b074beeb9c38ff755ef82104f90e6e945fc3e770 | /Utils/utils.py | 3a657ac474c538bb1041c14e9cdddaec2f71bdc9 | [] | no_license | pymmrd/SparkstreamingApp_python | 0118b6598ff9abbfcbcf63fbeb18eb076fb81131 | 434666b0c3a150cbac46b15ded0f656d3519b897 | refs/heads/master | 2020-03-28T15:20:52.008607 | 2017-03-27T00:52:58 | 2017-03-27T00:52:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | import hashlib
def get_md5(s):
md5= hashlib.md5()
md5.update(s)
return md5.hexdigest()
def is_chinese(s):
s=s.decode("utf-8")
if s>=u"\u4e00" and s<=u"\u9fa6":
return True
else:return False | [
"webber"
] | webber |
ea36278471afa827a5ed3742f81a477d2f653c21 | 3fcdfbb73118f4bde9a1e0ed466974ab636332db | /my_django_forum/wsgi.py | c7fa3a8840f0cd75f32207ab72f8d8d314d79444 | [] | no_license | vnitikesh/My-Django-Forum-System | 54e579e54893c9637e351f6d29dfa3db82cdd106 | ea9804d3819946ea08b5b82bb43fe0bc27f331f5 | refs/heads/master | 2022-04-01T07:14:18.734263 | 2020-01-14T08:18:26 | 2020-01-14T08:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for my_django_forum project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_django_forum.settings')
application = get_wsgi_application()
| [
"natylaza89@gmail.com"
] | natylaza89@gmail.com |
44a2d15829c9b06e9ade3372be90a5a605ada09f | 9159c96171694b5ba0ea5b28caec10f3f0207c75 | /Core_code/GA-working-copy/master/.svn/text-base/txmultimastersimple.py.svn-base | 749a6f380ab6a9d5b047a1e9a732241bacac4b10 | [
"MIT"
] | permissive | robhuva/Thesis | 8ba00703a6e8aa47d0248f86bc96a1918bd3a6a3 | 4e91e248dc69b3a99fe242b41fd0addf386275aa | refs/heads/master | 2021-01-25T03:19:44.209283 | 2015-06-17T05:41:38 | 2015-06-17T05:41:38 | 16,933,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,419 | #
#
# Copyright (C) University of Melbourne 2012
#
#
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#
#
import numpy
import time
import logging
import copy
from os import path
from tools import mureilbuilder, mureilexception, mureiloutput, mureiltypes, globalconfig
from tools import mureilbase, configurablebase
from generator import txmultigeneratorbase
# Module-level logger named after this module, so log output can be
# filtered hierarchically by the application's logging configuration.
logger = logging.getLogger(__name__)
class TxMultiMasterSimple(mureilbase.MasterInterface, configurablebase.ConfigurableMultiBase):
def get_full_config(self):
if not self.is_configured:
return None
# Will return configs collected from all objects, assembled into full_config.
full_conf = {}
full_conf['Master'] = self.config
full_conf[self.config['data']] = self.data.get_config()
full_conf[self.config['algorithm']] = self.algorithm.get_config()
full_conf[self.config['global']] = self.global_config
for gen_type in self.dispatch_order:
full_conf[self.config[gen_type]] = self.gen_list[gen_type].get_config()
return full_conf
def set_config(self, full_config, extra_data):
# Master explicitly does not copy in the global variables. It is too confusing
# to combine those with flags, defaults and values defined in the config files.
self.load_initial_config(full_config['Master'])
# Get the global variables
mureilbuilder.check_section_exists(full_config, self.config['global'])
if 'model' not in full_config[self.config['global']]:
full_config[self.config['global']]['model'] = 'tools.globalconfig.GlobalBase'
self.global_calc = mureilbuilder.create_instance(full_config, None, self.config['global'],
mureilbase.ConfigurableInterface)
self.global_config = self.global_calc.get_config()
# Now check the dispatch_order, to get a list of the generators
for gen in self.config['dispatch_order']:
self.config_spec += [(gen, None, None)]
self.update_from_config_spec()
self.check_config()
self.dispatch_order = self.config['dispatch_order']
# Set up the data class and get the data, and compute the global parameters
self.data = mureilbuilder.create_instance(full_config, self.global_config, self.config['data'],
mureilbase.DataSinglePassInterface)
self.global_calc.update_config({'data_ts_length': self.data.get_ts_length()})
self.global_calc.post_data_global_calcs()
self.global_config = self.global_calc.get_config()
# Instantiate the transmission model
if self.config['transmission'] in full_config:
self.transmission = mureilbuilder.create_instance(full_config, self.global_config,
self.config['transmission'], configurablebase.ConfigurableMultiBase,
self.config['run_periods'])
mureilbuilder.supply_single_pass_data(self.transmission,
self.data, self.config['transmission'])
else:
self.transmission = None
# Instantiate the generator objects, set their data, determine their param requirements
param_count = 0
self.gen_list = {}
self.gen_params = {}
start_values_min = numpy.array([[]]).reshape((len(self.config['run_periods']), 0))
start_values_max = numpy.array([[]]).reshape((len(self.config['run_periods']), 0))
for i in range(len(self.dispatch_order)):
gen_type = self.dispatch_order[i]
# Build the generator instances
gen = mureilbuilder.create_instance(full_config, self.global_config,
self.config[gen_type], txmultigeneratorbase.TxMultiGeneratorBase,
self.config['run_periods'])
self.gen_list[gen_type] = gen
# Supply data as requested by the generator
mureilbuilder.supply_single_pass_data(gen, self.data, gen_type)
# Determine how many parameters this generator requires and
# allocate the slots in the params list
params_req = gen.get_param_count()
if (params_req == 0):
self.gen_params[gen_type] = (0, 0)
else:
self.gen_params[gen_type] = (param_count,
param_count + params_req)
run_period_len = len(self.config['run_periods'])
(starts_min, starts_max) = gen.get_param_starts()
starts_min = numpy.array(starts_min)
starts_max = numpy.array(starts_max)
if starts_min.size == 0:
start_values_min = numpy.hstack((start_values_min, (
(numpy.ones((run_period_len, params_req)) *
self.global_config['min_param_val']).tolist())))
else:
start_values_min = numpy.hstack((start_values_min, starts_min))
if starts_max.size == 0:
start_values_max = numpy.hstack((start_values_max, (
(numpy.ones((run_period_len, params_req)) *
self.global_config['max_param_val']).tolist())))
else:
start_values_max = numpy.hstack((start_values_max, starts_max))
param_count += params_req
start_values_min = start_values_min.reshape(run_period_len * param_count)
start_values_max = start_values_max.reshape(run_period_len * param_count)
self.param_count = param_count
# Check that run_periods increases by time_period_yrs
self.run_periods = self.config['run_periods']
if len(self.run_periods) > 1:
run_period_diffs = numpy.diff(self.run_periods)
if (not (min(run_period_diffs) == self.global_config['time_period_yrs']) or
not (max(run_period_diffs) == self.global_config['time_period_yrs'])):
raise mureilexception.ConfigException('run_periods must be separated by time_period_yrs', {})
self.period_count = len(self.run_periods)
self.total_param_count = param_count * self.period_count
# Check if 'extra_data' has been provided, as a full gene to start at.
# extra_data needs to be a dict with entry 'start_gene' that is a list
# of integer values the same length as param_count.
if extra_data is not None:
if 'start_gene' in extra_data:
if not (len(start_values_min) == self.total_param_count):
msg = ('extra_data of start_gene passed to txmultimastersimple. ' +
'Length expected = {:d}, found = {:d}'.format(self.total_param_count,
len(start_values_min)))
raise mureilexception.ConfigException(msg, {})
start_values_min = extra_data['start_gene']
start_values_max = extra_data['start_gene']
# Instantiate the genetic algorithm
mureilbuilder.check_section_exists(full_config, self.config['algorithm'])
algorithm_config = full_config[self.config['algorithm']]
algorithm_config['min_len'] = algorithm_config['max_len'] = self.total_param_count
algorithm_config['start_values_min'] = start_values_min
algorithm_config['start_values_max'] = start_values_max
algorithm_config['gene_test_callback'] = self.gene_test
self.algorithm = mureilbuilder.create_instance(full_config, self.global_config,
self.config['algorithm'], mureilbase.ConfigurableInterface)
self.is_configured = True
def get_config_spec(self):
"""Return a list of tuples of format (name, conversion function, default),
e.g. ('capex', float, 2.0). Put None if no conversion required, or if no
default value, e.g. ('name', None, None)
Configuration:
algorithm: The name of the configuration file section specifying the algorithm class to use and
its configuration parameters. Defaults to 'Algorithm'.
data: The name of the configuration file section specifying the data class to use and its
configuration parameters. Defaults to 'Data'.
transmission: The name of the configuration file section specifying the transmission model class
to use and its configuration parameters. Defaults to 'Transmission', and if the 'Transmission'
section is not provided, no transmission model will be used.
global: The name of the configuration file section specifying the global configuration parameters.
Defaults to 'Global'.
dispatch_order: a list of strings specifying the names of the generator models to dispatch, in order,
to meet the demand. All of these models then require a parameter defining the configuration file
section where they are configured. e.g. dispatch_order: solar wind gas. This requires additional
parameters, for example solar: Solar, wind: Wind and gas: Instant_Gas to be defined, and corresponding
sections Solar, Wind and Instant_Gas to configure those models.
run_periods: A list of integers specifying the years defining each period in the multi-period
simulation. Defaults to 2010. e.g. run_periods: 2010 2020 2030 2040 2050
iterations: The number of iterations of the algorithm to execute. Defaults to 100.
output_file: The filename to write the final output data to. Defaults to 'mureil.pkl'.
output_frequency: Defaults to 500. After the first iteration and every output_frequency after
that, report on the simulation status.
do_plots: Defaults to False. If True, output plots every output_frequency and at the end
of the run.
"""
return [
('algorithm', None, 'Algorithm'),
('data', None, 'Data'),
('transmission', None, 'Transmission'),
('global', None, 'Global'),
('iterations', int, 100),
('output_file', None, 'mureil.pkl'),
('dispatch_order', mureilbuilder.make_string_list, None),
('do_plots', mureilbuilder.string_to_bool, False),
('output_frequency', int, 500),
('run_periods', mureilbuilder.make_int_list, [2010])
]
def run(self, extra_data=None):
start_time = time.time()
logger.critical('Run started at %s', time.ctime())
if (not self.is_configured):
msg = 'run requested, but txmultimastersimple is not configured'
logger.critical(msg)
raise mureilexception.ConfigException(msg, {})
try:
self.algorithm.prepare_run()
for i in range(self.config['iterations']):
self.algorithm.do_iteration()
if ((self.config['output_frequency'] > 0) and
((i % self.config['output_frequency']) == 0)):
logger.info('Interim results at iteration %d', i)
self.output_results(iteration=i)
except mureilexception.AlgorithmException:
# Insert here something special to do if debugging
# such an exception is required.
# self.finalise will be called by the caller
raise
logger.critical('Run time: %.2f seconds', (time.time() - start_time))
results = self.output_results(iteration=self.config['iterations'], final=True)
return results
def output_results(self, final=False, iteration=0):
(best_params, opt_data) = self.algorithm.get_final()
if len(best_params) > 0:
# Protect against an exception before there are any params
results = self.evaluate_results(best_params)
logger.info('======================================================')
logger.info('Total cost ($M): {:.2f}, including carbon (MT): {:.2f}, terminal value ($M): {:.2f}'.format(
results['totals']['cost'], results['totals']['carbon'] * 1e-6, results['totals']['terminal_value']))
logger.info('======================================================')
ts_demand = {}
# Now iterate across the periods, and then across the generators
for period in self.run_periods:
period_results = results['periods'][period]
logger.info('------------------------------------------------------')
logger.info('PERIOD ' + str(period) + ':')
logger.info('------------------------------------------------------')
logger.info('Period cost ($M): {:.2f}, carbon (MT): {:.2f}'.format(
period_results['totals']['cost'],
period_results['totals']['carbon'] * 1e-6))
if 'demand' in self.dispatch_order:
ts_demand[period] = period_results['generators']['demand']['other']['ts_demand']
else:
ts_demand[period] = self.data.get_timeseries('ts_demand')
period_results['totals']['demand'] = (numpy.sum(ts_demand[period]) *
self.global_config['time_scale_up_mult'] * self.global_config['timestep_hrs'])
logger.info('Period total demand (GWh): {:.2f}'.format(
period_results['totals']['demand'] / 1000))
for gen_type, value in period_results['generators'].iteritems():
gen_string = value['desc_string']
gen_cost = value['cost']
gen_supply = value['total_supply_period']
logger.info(gen_type + ' ($M {:.2f}, GWh {:.2f}) : '.format(
gen_cost, gen_supply / 1000) + gen_string)
logger.info('======================================================')
pickle_dict = {}
pickle_dict['opt_data'] = opt_data
pickle_dict['best_params'] = best_params
full_conf = self.get_full_config()
mureiloutput.clean_config_for_pickle(full_conf)
pickle_dict['config'] = full_conf
pickle_dict['best_results'] = results
pickle_dict['ts_demand'] = ts_demand
if self.config['do_plots']:
for period in self.run_periods:
plot_data = {}
for gen_type, value in results['periods'][period]['generators'].iteritems():
plot_data[gen_type] = value['aggregate_supply']
this_final = final and (period == self.config['run_periods'][-1])
mureiloutput.plot_timeseries(plot_data,
ts_demand[period], this_final, plot_title=(
str(period) + ' at iteration ' + str(iteration)))
output_file = self.config['output_file']
mureiloutput.pickle_out(pickle_dict, output_file)
else:
results = None
return results
def finalise(self):
self.algorithm.finalise()
def calc_cost(self, gene, full_results=False):
"""Calculate the total system cost for this gene. This function is called
by the algorithm from a callback. The algorithm may set up multi-processing
and so this calc_cost function (and all functions it calls) must be
thread-safe.
This means that the function must not modify any of the
internal data of the objects.
"""
temp = numpy.array(gene)
params_set = temp.reshape(self.period_count, self.param_count)
gen_state_handles = {}
for gen_type in self.dispatch_order:
gen_state_handles[gen_type] = (
self.gen_list[gen_type].get_startup_state_handle())
if self.transmission is not None:
tx_state_handle = self.transmission.get_startup_state_handle()
cost = 0
if full_results:
results = {'totals': {}, 'periods': {}, 'terminal': {}}
total_carbon = 0.0
for i in range(len(self.run_periods)):
period = self.run_periods[i]
params = params_set[i]
if full_results:
period_carbon = 0.0
results['periods'][period] = period_results = {'generators': {}, 'totals': {}}
results['terminal'] = {'totals': {}, 'generators': {}}
# supply_request is the running total, modified here
if 'demand' in self.dispatch_order:
supply_request = numpy.zeros(self.data.get_ts_length(), dtype=float)
else:
supply_request = numpy.array(self.data.get_timeseries('ts_demand'), dtype=float)
period_cost = 0
period_sites = []
for gen_type in self.dispatch_order:
gen = self.gen_list[gen_type]
gen_ptr = self.gen_params[gen_type]
if full_results:
(this_sites, this_cost, this_supply,
period_results['generators'][gen_type]) = gen.calculate_time_period_simple(
gen_state_handles[gen_type], period, params[gen_ptr[0]:gen_ptr[1]],
supply_request, full_results=True)
period_carbon += numpy.sum(period_results['generators'][gen_type]['carbon_emissions_period'])
else:
(this_sites, this_cost, this_supply) = gen.calculate_time_period_simple(
gen_state_handles[gen_type], period, params[gen_ptr[0]:gen_ptr[1]],
supply_request)
period_sites += this_sites
period_cost += this_cost
supply_request -= this_supply
if self.transmission is not None:
tx_cost = self.transmission.calculate_cost(tx_state_handle, period, period_sites)
period_cost += tx_cost
## and store tx_cost somewhere useful in period_results
if full_results:
period_results['totals']['cost'] = period_cost
period_results['totals']['carbon'] = period_carbon
total_carbon += period_carbon
cost += period_cost
# calculate the terminal value at the end of the last period
total_terminal_value = 0.0
final_period = self.run_periods[-1]
for gen_type in self.dispatch_order:
gen = self.gen_list[gen_type]
terminal_value, site_terminal_value = gen.get_terminal_value(final_period,
gen_state_handles[gen_type])
if full_results:
results['terminal']['generators'][gen_type] = {'total_value': terminal_value,
'site_value': site_terminal_value}
total_terminal_value += terminal_value
cost -= total_terminal_value
if full_results:
results['totals']['cost'] = cost
results['totals']['carbon'] = total_carbon
results['totals']['terminal_value'] = total_terminal_value
return cost, results
else:
return cost
def evaluate_results(self, params):
"""Collect a dict that includes all the calculated results from a
run with params.
Inputs:
params: list of numbers, typically the best output from a run.
Outputs:
results: a dict of gen_type: gen_results
where gen_results is the output from calculate_time_period_simple in
txmultigenerator.py (or subclass), with full_results = True.
"""
cost, results = self.calc_cost(params, full_results=True)
return results
def gene_test(self, gene):
"""input: list
output: float
takes the gene.values, tests it and returns the genes score
"""
score = -1 * self.calc_cost(gene)
return score
| [
"r.huva@student.unimelb.edu.au"
] | r.huva@student.unimelb.edu.au | |
7b8bdba27ed199995dad3b31e6e65b3a7e52b40d | 5e5fb109a0a2422e2a6fdb7ad504d3f0303f8e7b | /src/models/game.py | 89ea04cc42fe350f09204cc13a72de227e0657e7 | [] | no_license | podgib/brownlow | 3df1d6e10c37dc9064d85b3e96d9b2156d304ebc | 25cb53ea6f72f9fc24809edf0970887c8da0eb2e | refs/heads/master | 2021-01-10T06:20:22.651452 | 2017-02-07T11:13:37 | 2017-02-07T11:13:37 | 44,313,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | from google.appengine.ext import ndb
from player import Player
class Team:
MEN = 1
WOMEN = 2
TEST = -1
@staticmethod
def getAll():
return ["Men", "Women", "Test"]
@staticmethod
def getString(team):
if team == Team.MEN:
return "Men"
elif team == Team.WOMEN:
return "Women"
elif team == Team.TEST:
return "Test"
else:
return None
@staticmethod
def getTeam(team_string):
if team_string.lower().strip() == "men":
return Team.MEN
elif team_string.lower().strip() == "women":
return Team.WOMEN
elif team_string.lower().strip() == "test":
return Team.TEST
else:
return None
class Game(ndb.Model):
opponent = ndb.StringProperty(required=True)
date = ndb.DateProperty(required=True, auto_now_add=True)
venue = ndb.StringProperty(required=True)
team = ndb.IntegerProperty(required=True)
players = ndb.KeyProperty(kind=Player, repeated=True)
weight = ndb.FloatProperty(required=True, default=1.0)
class GameResults(ndb.Model):
game = ndb.KeyProperty(kind=Game, required=True)
three = ndb.KeyProperty(kind=Player)
two = ndb.KeyProperty(kind=Player)
one = ndb.KeyProperty(kind=Player)
voters = ndb.IntegerProperty(default=0)
| [
"gmp@robots.ox.ac.uk"
] | gmp@robots.ox.ac.uk |
a5c63d3ad05aa62aec4433768fefe25c628a59af | 1fbe15a468ea6ba1634e6d928dbdb23b4e133684 | /mysite/mysite/settings.py | 2bb92e7c56b7b64ec4fc4537cf9fd5cc04bc5a40 | [] | no_license | dangminhnguyen/djangorep | 068180697feda3352e5bf9349d5124e920ee718b | f209b660aa60005eb68e6bbad73855f15cf1e10c | refs/heads/master | 2020-12-02T04:44:21.685296 | 2019-12-30T13:43:10 | 2019-12-30T13:43:10 | 230,890,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6pfim0o9b#yg=_6d2*_#s*%l@17febpr=$89bcq@!2s$%0)i&u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"minknguyen.bk@gmail.com"
] | minknguyen.bk@gmail.com |
fa24099fb4c61a922ec7e32ecb388a6cac3cd988 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /309.py | da9a1c3ac6e5917c4c8a202bed12b01b6642673d | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # ref: https://leetcode.com/discuss/71391/easiest-java-solution-with
# -explanations
class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) < 2:
return 0
b0 = -prices[0] # max profit for buying at 0
b1 = b0 # max profit for buying at 1
s0, s1, s2 = 0, 0, 0 # max profit for buying at i, i - 1, i - 2
for i in xrange(1, len(prices)):
b0 = max(b1, s2 - prices[i])
s0 = max(s1, b1 + prices[i])
b1 = b0
s2 = s1
s1 = s0
return s0
if __name__ == '__main__':
sol = Solution()
print sol.maxProfit([1, 2, 3, 0, 2])
| [
"b93902098@ntu.edu.tw"
] | b93902098@ntu.edu.tw |
c0a64564ab4b36beaea6b8237e0cdcdbeb264a48 | 2f5c1c74a05fe08942b103cdb3aa2ad27bb1e1ee | /throwingdice.py | b2f77877b76c9a0cbeaf35f5921c2e1ebe092bdc | [] | no_license | Simranjeet96/Mainly-python-along-with-some-machine-learning | ac304fa306b9eab01bab06003215d1bdeeec64f9 | 0d642d8229fc5994f956efd080f371b8775452b8 | refs/heads/master | 2020-03-17T06:23:57.140271 | 2018-05-15T05:52:43 | 2018-05-15T05:52:43 | 133,353,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | import numpy as np
sum=[]
for i in range(2000):
a=np.random.uniform(low=1,high=7,size=1).astype(int)
b=np.random.uniform(low=1,high=7,size=1).astype(int)
sum.append(int(a+b))
sol=[]
import random
while(len(sum)!=0):
a=sum[0]
count=1
if(len(sum)!=1):
for i in range(1,len(sum)):
if(sum[i]==a):
count=count+1
for i in range(count) :
sum.remove(a)
sol.append((a,count))
import matplotlib.pyplot as plt
a=[i[0] for i in sol]
b=[i[1] for i in sol]
plt.bar(a,b,color='red')
plt.xticks(a)
plt.show()
| [
"simranjeetdua@gmail.com"
] | simranjeetdua@gmail.com |
8d6f043737bcc3296da79d096ccd11d7b254412b | 192d00c224b12db87dccd8a28da66cb942a28c67 | /analytics.py | 83157d24c6c1c542c1b4edc1ccb838992e328b7c | [] | no_license | estib-vega/stats | 68f7c554de6f786823d7419da2b7c92cc2895487 | 3f873f8f2baa370f03a791a482dbd71499ca3c1d | refs/heads/master | 2020-05-30T05:21:11.280859 | 2019-07-16T16:16:53 | 2019-07-16T16:16:53 | 189,557,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,831 | py | def get_followers_delta(file_1, file_2):
followers_1 = []
followers_2 = []
new_followers = []
lost_followers = []
with open(file_1) as first:
for username in first:
followers_1.append(username.rstrip())
with open(file_2) as second:
for username in second:
followers_2.append(username.rstrip())
for username in followers_2:
if username not in followers_1:
new_followers.append(username.rstrip())
for username in followers_1:
if username not in followers_2:
lost_followers.append(username.rstrip())
return new_followers, lost_followers
def get_like_data(file):
like_data = []
with open(file) as f:
for line in f:
line_list = line.split(',')
username, post_link, datetime = line_list
like_data.append([username.rstrip(), post_link.rstrip(), datetime.rstrip()])
return like_data
def get_follower_data():
import os
followers_logs = os.listdir("followers")
followers_logs.sort()
likes_logs = os.listdir("likes")
n = len(followers_logs)
i = 0
complete_new_followers = []
complete_lost_followers = []
while i <= (n - 2):
file_1 = "followers/{}".format(followers_logs[i])
file_2 = "followers/{}".format(followers_logs[i + 1])
new_followers, lost_followers = get_followers_delta(file_1, file_2)
complete_new_followers += new_followers
complete_lost_followers += lost_followers
# print file_1
# print "new followers:", len(new_followers), "lost followers:", len(lost_followers)
# net_gain = (len(new_followers) - len(lost_followers))
# print "net gain:", net_gain
i += 1
total_new_followers = len(set(complete_new_followers))
total_lost_followers = len(set(complete_lost_followers))
# print "total new followers:", total_new_followers, "total lost followers:", total_lost_followers
total_new_staying_followers = [item for item in set(complete_new_followers) if item not in set(complete_lost_followers)]
# print "staying:", len(total_new_staying_followers)
n = len(likes_logs)
i = 0
complete_like_data = []
liked_follower = set()
staying_liked_follower = set()
for liked_file in likes_logs:
file = "likes/{}".format(liked_file)
like_data = get_like_data(file)
for single_like in like_data:
username = single_like[0]
if username in set(complete_new_followers):
liked_follower.add(username)
if username in total_new_staying_followers:
staying_liked_follower.add(username)
complete_like_data.append(like_data)
return liked_follower, staying_liked_follower, complete_like_data, complete_new_followers
if __name__ == "__main__":
import os
followers_logs = os.listdir("followers")
followers_logs.sort()
likes_logs = os.listdir("likes")
n = len(followers_logs)
i = 0
complete_new_followers = []
complete_lost_followers = []
while i <= (n - 2):
file_1 = "followers/{}".format(followers_logs[i])
file_2 = "followers/{}".format(followers_logs[i + 1])
new_followers, lost_followers = get_followers_delta(file_1, file_2)
complete_new_followers += new_followers
complete_lost_followers += lost_followers
print file_1
print "new followers:", len(new_followers), "lost followers:", len(lost_followers)
net_gain = (len(new_followers) - len(lost_followers))
print "net gain:", net_gain
i += 1
total_new_followers = len(set(complete_new_followers))
total_lost_followers = len(set(complete_lost_followers))
print "total new followers:", total_new_followers, "total lost followers:", total_lost_followers
total_new_staying_followers = [item for item in set(complete_new_followers) if item not in set(complete_lost_followers)]
print "staying:", len(total_new_staying_followers)
n = len(likes_logs)
i = 0
# complete_like_data = []
liked_follower = set()
staying_liked_follower = set()
for liked_file in likes_logs:
file = "likes/{}".format(liked_file)
like_data = get_like_data(file)
for single_like in like_data:
username = single_like[0]
if username in set(complete_new_followers):
liked_follower.add(username)
if username in total_new_staying_followers:
staying_liked_follower.add(username)
# complete_like_data.append(like_data)
print "liked followers:", len(liked_follower)
print "liked staying followers:", len(staying_liked_follower)
# print liked_follower
print "---"
# print staying_liked_follower
| [
"stron@me.com"
] | stron@me.com |
3926d3e015c07d2af53a19cc161b89f51c294770 | efb1f3cc2419b223179c57c2662bc05449a630fb | /04-data-preprocessing/01.missing-data.py | 52a9adae433afaea8f34558c5333f5cbeb7ea12b | [] | no_license | francoisbeaussier/python-machine-learning | 821f76b50359427eac3cf12982c82c1a629eeb80 | 20b186bd6b7a4f801a98a60f6ecbe3bc971eb3d3 | refs/heads/master | 2022-12-01T21:00:06.476036 | 2020-08-18T13:04:04 | 2020-08-18T13:04:04 | 282,351,485 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | import pandas as pd
from io import StringIO
csv_data = \
'''A,B,C,D
1,2,3,4
5,6,,8
10,11,12,'''
df = pd.read_csv(StringIO(csv_data))
print("\nData:")
print(df)
# Find null values
print('\nCounting null values:'),
print(df.isnull().sum())
# Drop rows with null values
print('\nDrop rows with null values:')
print(df.dropna(axis=0))
# Drop columns with null values
print('\nDrop columns with null values:')
print(df.dropna(axis=1))
# dropna can also be used to remove only rows and column that are all NaN
print('\nDrop rows that are only filled with null values:')
print(df.dropna(how='all'))
# dropna can use a threshold
print('\nDrop rows that have fewer than 4 values:')
print(df.dropna(thresh=4))
# dropna can target specifc columns
print('\nDrop rows that have nulls in column C:')
print(df.dropna(subset=['C']))
# While convenient, simply removing data with null values is often not a good idea
# because we probably also lose other valuable information
# Replace nulls by the mean value (or median or most_frequent)
from sklearn.impute import SimpleImputer
import numpy as np
imr = SimpleImputer(missing_values=np.nan, strategy='mean')
imr = imr.fit(df.values)
imputed_data = imr.transform(df.values)
print('\nReplaced by mean:')
print(imputed_data)
# Pandas has a shortcut method:
print('\Pandas fillna by mean:')
print(df.fillna(df.mean()))
| [
"francois@beaussier.net"
] | francois@beaussier.net |
5befbb260c9b7b8ba459e5e42945153549916fbe | b17448d7eb36796700594794d79aeaf8438a81a2 | /test.py | 2af94baf00b7aaf4627e112ee25582d8ff5835e7 | [] | no_license | whsasf/WuKong | f422a5547531ffadb13061ccbb504389b2cb07c6 | 9aabae849671c5b3eb1178d80586d74fcd11d6f7 | refs/heads/master | 2020-03-16T06:33:55.204243 | 2018-06-18T10:15:06 | 2018-06-18T10:15:06 | 129,723,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | #!/usr/bin/python3
import basic_class
basic_class.mylogger_summary.summary('zfxfdsfdsf') | [
"whsasf@126.com"
] | whsasf@126.com |
ebf5b1b39f49ea2b12c7672c547502a823486e89 | 61a7b953cddc52e9fe4dbef61d18911b56f7f7e2 | /clubShop/mainapps/show/migrations/0003_merge_20181027_1155.py | 904336d1b2eb4875e6926a3c9d0d7e70fdfb5096 | [] | no_license | Liukuan-group/our_project | 60ac7e730eceec66b362c9b95745ffbf93891635 | 305145d42c388ddbaa34dcab8723f431b3a0c3b5 | refs/heads/master | 2020-04-02T08:21:45.038251 | 2018-11-02T02:32:50 | 2018-11-02T02:32:50 | 154,241,756 | 0 | 0 | null | 2018-10-26T08:40:03 | 2018-10-23T01:27:49 | Python | UTF-8 | Python | false | false | 330 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-27 03:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('show', '0002_auto_20181027_0913'),
('show', '0002_auto_20181025_2207'),
]
operations = [
]
| [
"451552848@QQ.com"
] | 451552848@QQ.com |
ee5a72ea10783d9681c1871f70438a04edbea51c | 80c4942f7b88c411eabc57198d51836fee93e810 | /course-outline/coursegrades/migrations/0001_initial.py | 2144ae09d7503ab6574e2e84adefa59a9a16bce7 | [] | no_license | tongxu95/ENSF607_Web_Project | 4a90b0f37aa328fbe23824d68ebc03f44492b4df | 80c4ee6c2bea4eadee4a15d67680fb2c51a3f6ab | refs/heads/master | 2023-02-22T23:35:37.347731 | 2021-01-28T17:00:04 | 2021-01-28T17:00:04 | 330,017,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # Generated by Django 3.1.4 on 2021-01-14 04:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CourseGrade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('courseNum', models.CharField(max_length=10)),
('courseComponent', models.CharField(max_length=50,blank=True)),
('courseOutcomes', models.CharField(max_length=50,blank=True)),
('courseWeight', models.IntegerField()),
],
),
]
| [
"64809520+Karenzhang7717@users.noreply.github.com"
] | 64809520+Karenzhang7717@users.noreply.github.com |
deae8be14b09b5d32cea6354010a9f8252983f5b | f7f4b653367cfa10cd0a79601198327fa790db47 | /Bigfish/core/_account_manager.py | cbb615f2f507952736046b44b708f32589a1cbc0 | [] | no_license | tiw-xh138/Bigfish | 9b49af52792ec3888ce77a84404fa1303660631e | 935112ea6023273dbfed497adb44097c8076d38c | refs/heads/master | 2023-04-23T07:33:42.601856 | 2021-04-12T02:33:31 | 2021-04-12T02:33:31 | 365,224,991 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 20:46:00 2015
@author: BurdenBear
"""
from Bigfish.utils.base import Currency
###################################################################
class AccountManager:
"""交易账户对象"""
def __init__(self, capital_base=100000, name="", currency=Currency("USD"), leverage=1):
self.__capital_base = capital_base
self.__name = name
self.__currency = currency
self.__leverage = leverage
self.initialize()
def initialize(self):
self.__capital_net = self.__capital_base
self.__capital_cash = self.__capital_base
self.__records = []
def set_capital_base(self, capital_base):
if isinstance(capital_base, int) and capital_base > 0:
self.__capital_base = capital_base
self.initialize
else:
raise(ValueError("不合法的base值%s"%capital_base))
def is_margin_enough(self, price):
"""判断账户保证金是否足够"""
return(self.__capital_cash * self.__leverage >= price)
def update_deal(self, deal):
if not deal.profit: return
self.__capital_cash += deal.profit
self.__records.append({'x':deal.time+deal.time_msc/(10**6),'y':float('%.2f'%((self.__capital_cash/self.__capital_base-1)*100))})
def get_profit_records(self):
return(self.__records)
| [
"facan346999e@126.com"
] | facan346999e@126.com |
83b6f1af607b87521cbd10d7b6bd40024907f007 | 18375af374e91e721fb16e5415bc4fc7540e5ced | /tahweela_app/urls.py | ef893285cbb6f76b48645130f27e0c3b8c5d361f | [] | no_license | youssefelmasry/tahweela_app_demo | 64d802df33ad6361a714a3119b3380b9afe98e4e | ee55b23e601f5e6580e9f051f6da89acab37d3a1 | refs/heads/master | 2023-02-25T23:05:08.853738 | 2021-01-29T17:55:33 | 2021-01-29T17:55:33 | 334,214,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.urls import path
from tahweela_app.views import TahweelaBalanceView, TahweelaTransactionView
urlpatterns = [
path("get/balance/", TahweelaBalanceView.as_view()),
path("transfer/money/", TahweelaTransactionView.as_view()),
]
| [
"yusufelmasry9@gmail.com"
] | yusufelmasry9@gmail.com |
468fdc36ae7001294a1493c1070b5c443b66e893 | bc97d423d19756fbf33affd4ed98d4628d8878b3 | /my_project/itproger/main/urls.py | 49b1548ce7162c1a6ed6511e5d6aa41220dc9528 | [] | no_license | David-Hakobyan1/MY_Django | 40d63232805679bb5416d12a4ebba94fcb097959 | fdcd61a76d131ca47a203bc291212494c3587637 | refs/heads/main | 2023-06-19T15:58:42.315023 | 2021-07-18T09:55:28 | 2021-07-18T09:55:28 | 381,956,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('create',views.create,name='create'),
path('read',views.read,name='read'),
path('update',views.update,name='update'),
path('delete',views.delete,name='delete'),
]
| [
"my@mail.ru"
] | my@mail.ru |
ef46b1abf8c643a364d8dd8117513de66eb439b3 | 99d721afe033411169081c5c3248109ea8a1ec37 | /steamtail/migrations/0013_user_apps_last_checked_on.py | e214abe5c192e9cf5f0fd27e2f6f12dcf75ce820 | [] | no_license | redodo/steamtail | edbb2655865683bdf1d990c18babb08efca4bd68 | 8964910b687ef21329556a0b446edd18aee96292 | refs/heads/master | 2020-05-04T01:08:30.590701 | 2019-04-17T15:21:12 | 2019-04-17T15:21:12 | 178,898,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.2 on 2019-04-09 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('steamtail', '0012_auto_20190409_0920'),
]
operations = [
migrations.AddField(
model_name='user',
name='apps_last_checked_on',
field=models.DateTimeField(null=True, verbose_name='apps last checked on'),
),
]
| [
"me@redodo.io"
] | me@redodo.io |
e641d5f70ea054881a3559c7ab1ce480da183407 | 958aa4c2dc3287b30a5f9a65ac528ea9726fec58 | /LessonPlan5.py | 84bfde2c05c232b526744934d43980d4d1faa0ad | [] | no_license | domonic/CS490_DomonicNeal | f875603d41d9cb466892c61f94afb0b88bbd9395 | 8a7e088b4a6e53def93ef2827b2d327eef86fc3c | refs/heads/master | 2020-05-31T10:58:53.972539 | 2019-08-23T20:41:56 | 2019-08-23T20:41:56 | 190,248,841 | 0 | 0 | null | 2019-07-23T05:05:06 | 2019-06-04T17:28:49 | Python | UTF-8 | Python | false | false | 3,648 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
'''Delete all the anomaly data for the GarageArea field (for the same data set in the use case: House Prices)
.* for this task you need to plot GaurageArea field and SalePrice in scatter plot, then check which numbers are
anomalies'''
'''Read the data from the csv file '''
data_set = pd.read_csv('./lessonplan5.csv')
'''Set the x and y (independent and dependent values for the data set)'''
data_set_y = data_set.SalePrice.values.reshape(-1, 1)
data_set_x = data_set.GarageArea.values.reshape(-1, 1)
'''Linear Regression Model'''
data_linear = LinearRegression().fit(data_set_x, data_set_y)
'''Prediction Value'''
y_predictor = data_linear.predict(data_set_x)
'''Display and plot linear regression data'''
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.title('Linear Regression w/ Anomalies')
plt.scatter(data_set_x, data_set_y)
plt.plot(data_set_x, y_predictor, color='orange')
plt.show()
'''Delete Anomalies '''
data_linear_nonoutliers = data_set[(np.abs(stats.zscore(data_set.GarageArea)) < 3)]
data_linear_nonoutliers = data_linear_nonoutliers[(data_linear_nonoutliers.GarageArea != 0)]
'''Set the x and y (independent and dependent values for the data set)'''
data_linear_x = data_linear_nonoutliers.GarageArea
data_linear_y = data_linear_nonoutliers.SalePrice
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.title('Linear Regression w/o Anomalies')
plt.scatter(data_linear_x, data_linear_y)
plt.plot(data_set_x, y_predictor, color='yellow')
plt.show()
'''Create Multiple Regression for the “wine quality” dataset. In this data set “quality” is the target label.
Evaluate the model using RMSE and R2 score. **You need to delete the null values in the data set
**You need to find the top 3 most correlated features to the target label(quality)'''
'''Read the data from the csv file'''
wine_quality = pd.read_csv('winequality-red.csv')
'''Number of features in the wine quality file'''
features = wine_quality.select_dtypes(include=[np.number])
'''Correlation'''
correlation = features.corr()
'''Output to screen correlation'''
print(correlation)
wine_quality_x = wine_quality.drop('quality', axis=1)
wine_quality_y = wine_quality.quality
x_train, x_test, y_train, y_test = train_test_split(wine_quality_x, wine_quality_y, random_state=42, test_size=.29)
'''Linear Regression & Model'''
wine_linear = LinearRegression()
wine_model = wine_linear.fit(x_train, y_train)
'''Performance Evaluation'''
print("R^2 is: \n", wine_model.score(x_test, y_test))
predictions = wine_model.predict(x_test)
print('RMSE is: \n', mean_squared_error(y_test, predictions))
print()
'''Linear Regression & Model Based on 3 highest correlated features'''
new_wine_quality_x = wine_quality[['sulphates', 'alcohol', 'volatile acidity']]
new_wine_quality_y = wine_quality.quality
x_train, x_test, y_train, y_test = train_test_split(wine_quality_x, wine_quality_y, random_state=38, test_size=.32)
'''Linear Regression & Model Based on 3 highest correlated features'''
new_wine_linear = LinearRegression()
new_wine_model = new_wine_linear.fit(x_train, y_train)
'''Performance Evaluation Based on 3 highest correlated features'''
print("R^2 is: \n", new_wine_model.score(x_test, y_test))
predictions = new_wine_model.predict(x_test)
print('RMSE is: \n', mean_squared_error(y_test, predictions))
| [
"domonicneal3@yahoo.com"
] | domonicneal3@yahoo.com |
aa74a3662f0f1785ebb00737c3714a2532e3bdc1 | c81361c366313b77eaea31253e2a2ffcd2cb39fd | /clever_pso.py | da7a43cfe6bb22fbbb10c443452f17270bdb6c5f | [] | no_license | m9i/loadbalancing | af133b1b8dab4580c7db554b81878f00cdf93460 | 57c7ee1afc370497bf362b96a41472d96a3c6b74 | refs/heads/master | 2020-04-14T17:01:54.849952 | 2019-01-22T20:30:50 | 2019-01-22T20:30:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | def objective_function(v):
return sum(map(lambda x: x ** 2, v))
def random_vector(min_max):
    """Draw a point uniformly at random inside an axis-aligned box.

    min_max: sequence of (lo, hi) pairs, one per dimension.
    Returns a list with one uniform sample in [lo, hi] per dimension.
    """
    from random import random
    return [lo + (hi - lo) * random() for lo, hi in min_max]
def create_particle(search_space, vel_space):
    """Build one PSO particle with a random start.

    The personal best ('b_position'/'b_cost') is initialised to the
    starting point itself; velocity is drawn from vel_space.
    """
    position = random_vector(search_space)
    cost = objective_function(position)
    return {
        'position': position,
        'cost': cost,
        'b_position': list(position),
        'b_cost': cost,
        'velocity': random_vector(vel_space),
    }
def get_global_best(population, current_best=None):
    """Return the swarm-wide best solution seen so far.

    NOTE: sorts `population` in place by cost (side effect kept from the
    original implementation). When the current leader beats or ties
    `current_best`, a detached {'position', 'cost'} snapshot is returned;
    otherwise `current_best` is returned unchanged.
    """
    population.sort(key=lambda p: p['cost'])
    leader = population[0]
    if current_best is None or leader['cost'] <= current_best['cost']:
        current_best = {
            'position': list(leader['position']),
            'cost': leader['cost'],
        }
    return current_best
def update_velocity(particle, global_best, max_v, c1, c2, omega):
    """Recompute the particle's velocity in place and clamp it to ±max_v.

    Standard PSO rule: inertia (omega) plus stochastic pulls toward the
    particle's personal best (weight c1) and the swarm's global best
    (weight c2).
    """
    import random
    for i in range(len(particle['velocity'])):
        inertia = particle['velocity'][i] * omega
        cognitive = c1 * random.random() * (particle['b_position'][i] - particle['position'][i])
        social = c2 * random.random() * (global_best['position'][i] - particle['position'][i])
        # Clamp to the velocity cap in both directions.
        particle['velocity'][i] = max(-max_v, min(max_v, inertia + cognitive + social))
def update_position(part, bounds):
    """Advance the particle by its velocity, reflecting off the search bounds.

    part:   particle dict with 'position' and 'velocity' lists (mutated).
    bounds: per-dimension [lo, hi] limits of the search space.

    On hitting a wall the position is mirrored back inside the box and the
    velocity component is reversed.
    """
    pos = part['position']
    vel = part['velocity']
    for i in range(len(pos)):
        pos[i] += vel[i]
        lo, hi = bounds[i]
        if pos[i] > hi:
            # Reflect the overshoot back below the upper wall (2*hi - pos).
            pos[i] = hi - abs(pos[i] - hi)
            vel[i] *= -1.0
        elif pos[i] < lo:
            # BUG FIX: reflect *into* the box (lo + overshoot, i.e. 2*lo - pos).
            # The original used `lo - abs(pos - lo)`, which algebraically
            # simplifies to pos itself, leaving the particle below the bound.
            pos[i] = lo + abs(pos[i] - lo)
            vel[i] *= -1.0
def update_best_position(particle):
    """Adopt the current position as the personal best when it is at least
    as good (cost <=) as the previous best. Mutates the particle in place."""
    if particle['cost'] <= particle['b_cost']:
        particle['b_cost'] = particle['cost']
        particle['b_position'] = list(particle['position'])
def search(max_gens, search_space, vel_space, pop_size, max_vel, c1, c2, omega):
    """Run particle-swarm optimisation and return the best solution found.

    Initialises `pop_size` random particles, then for `max_gens`
    generations updates each particle's velocity, position, cost and
    personal best, tracking the global best throughout. Prints per-
    generation fitness as it goes.
    """
    swarm = [create_particle(search_space, vel_space) for _ in range(pop_size)]
    best = get_global_best(swarm)
    for gen in range(max_gens):
        for p in swarm:
            update_velocity(p, best, max_vel, c1, c2, omega)
            update_position(p, search_space)
            p['cost'] = objective_function(p['position'])
            update_best_position(p)
        best = get_global_best(swarm, best)
        print(" > gen %d, fitness=%s" % (gen + 1, best['cost']))
    return best
def main():
    """Demo entry point: minimise the 2-D sphere function with PSO."""
    # Problem configuration: search box per dimension.
    dims = 2
    box = [[-5, 5]] * dims
    # Algorithm configuration.
    vel_box = [[-1, 1]] * dims
    generations = 100
    swarm_size = 50
    velocity_cap = 100.0
    cog, soc = 2.0, 2.0
    inertia = 0.5
    # Execute the algorithm and report the winner.
    best = search(generations, box, vel_box, swarm_size, velocity_cap, cog, soc, inertia)
    print('Done. Best Solution: c=%s, v=%s' % (best['cost'], str(best['position'])))
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"mahsa.gol89@gmail.com"
] | mahsa.gol89@gmail.com |
f20fee67909e69c0f73592ec8e958a199212941e | 7bc1c08d5074b0a38328df08b2471caea005b88d | /project/base/card.py | eff9b1a46e3c410e4ba43a7e3eb0c8b61e1630ff | [] | no_license | antsticky/qBridgeLib | 97bed3ff8b7f4425e95e913673e9c0ba1f5c5b39 | 1ff66fea28356dafb8af6d4a0761f3676bc192ab | refs/heads/main | 2023-08-28T13:59:16.151090 | 2021-11-01T07:44:19 | 2021-11-01T07:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,445 | py | class CardSuit:
def __init__(self, name):
    """Create a suit from its full lowercase name ("spade", "heart", "diamond", "club")."""
    # name is the full lowercase suit name; short_name/value derive from it.
    self.name = name
@staticmethod
def suits():
return [CardSuit.create_by_short_name(short_name) for short_name in ["C", "D", "H", "S"]]
@staticmethod
def suits_reverse():
return [CardSuit.create_by_short_name(short_name) for short_name in ["S", "H", "D", "C"]]
@classmethod
def create_by_short_name(cls, short_name):
if short_name.upper() == "S":
return cls("spade")
elif short_name.upper() == "H":
return cls("heart")
elif short_name.upper() == "D":
return cls("diamond")
elif short_name.upper() == "C":
return cls("club")
else:
raise KeyError("Short name cannot be found")
@property
def value(self):
if self.name == "spade":
return 4
elif self.name == "heart":
return 3
elif self.name == "diamond":
return 2
elif self.name == "club":
return 1
else:
raise KeyError("Unknown suit")
def __hash__(self):
    # XOR of name and rank hashes keeps hash consistent with __eq__
    # (which compares name and value), so suits work in sets/dict keys.
    return hash(self.name) ^ hash(self.value)
def __eq__(self, other):
return all([self.name == other.name, self.value == other.value])
def __lt__(self, other):
if not isinstance(other, CardSuit):
raise NotImplementedError("other is not a CardSuit")
return self.value < other.value
def __gt__(self, other):
if not isinstance(other, CardSuit):
raise NotImplementedError("other is not a CardSuit")
return self.value > other.value
def __str__(self):
    # Render as the single-letter short code, e.g. "S" for spade.
    return self.short_name
def __format__(self, format_spec=None):
if format_spec in [None, "", "s"]:
return self.__str__()
else:
return self.name
@property
def short_name(self):
    # One-letter code: first letter of the name, upper-cased ("spade" -> "S").
    return self.name[0].upper()
class CardValue:
    """A card's face value with a numeric rank for comparisons.

    Display names are "2".."9", "T", "J", "Q", "K", "A"; ranks run from
    1 ("2") up to 13 ("A") so values sort in the usual card order.
    """

    def __init__(self, name, rank):
        # display_name is the single-character face ("2".."9", "T", "J", ...).
        self.display_name = name
        # rank is the 1..13 comparison key.
        self.rank = rank

    @staticmethod
    def values():
        """Return all thirteen CardValue instances, lowest to highest."""
        return [CardValue.create_by_display_name(display_name) for display_name in CardValue.display_names()]

    @staticmethod
    def display_names():
        """Display names in ascending rank order."""
        return [str(i + 2) for i in range(8)] + ["T", "J", "Q", "K", "A"]

    @classmethod
    def create_by_name(cls, display_name):
        """Build a CardValue from its display name via the ordered name list.

        BUG FIX: rank is now the display-name index + 1, matching the 1..13
        ranks produced by create_by_display_name. Previously "2" got rank 0
        here but rank 1 there, so the two constructors produced unequal
        objects for the same card.
        """
        return cls(display_name, CardValue.display_names().index(display_name) + 1)

    @classmethod
    def create_by_display_name(cls, display_name):
        """Build a CardValue from its display name; raises KeyError if unknown."""
        if display_name in [str(i + 2) for i in range(8)]:
            # "2".."9" -> ranks 1..8.
            return cls(display_name, int(display_name) - 1)
        elif display_name == "T":
            return cls(display_name, 9)
        elif display_name == "J":
            return cls(display_name, 10)
        elif display_name == "Q":
            return cls(display_name, 11)
        elif display_name == "K":
            return cls(display_name, 12)
        elif display_name == "A":
            return cls(display_name, 13)
        raise KeyError("Card cannot be found")

    def __eq__(self, other):
        return all([self.display_name == other.display_name, self.rank == other.rank])

    def __hash__(self):
        # Added: defining __eq__ without __hash__ made instances unhashable.
        # Mirrors CardSuit's hash style and stays consistent with __eq__.
        return hash(self.display_name) ^ hash(self.rank)

    def __gt__(self, other):
        if not isinstance(other, CardValue):
            raise NotImplementedError("other is not a CardValue")
        return self.rank > other.rank

    def __lt__(self, other):
        if not isinstance(other, CardValue):
            raise NotImplementedError("other is not a CardValue")
        return self.rank < other.rank
class Card:
    """A playing card: a suit plus a value, with optional table state."""

    def __init__(self, suit, value, visible=True, played=False):
        self.suit = suit        # ordering/equality delegate to this object
        self.value = value      # tie-breaker when suits compare equal
        self.visible = visible  # face-up at the table?
        self.played = played    # already played this deal?

    def __eq__(self, other):
        """Cards are equal when suit and value both match (state ignored)."""
        # Both comparisons are evaluated eagerly, mirroring the original
        # all([...]) form.
        same_suit = self.suit == other.suit
        same_value = self.value == other.value
        return same_suit and same_value

    def __gt__(self, other):
        """Order by suit first, then by value; only defined between Cards."""
        if not isinstance(other, Card):
            raise NotImplementedError("other is not a Card")
        if self.suit > other.suit:
            return True
        if self.suit < other.suit:
            return False
        return self.value > other.value

    def __lt__(self, other):
        """Order by suit first, then by value; only defined between Cards."""
        if not isinstance(other, Card):
            raise NotImplementedError("other is not a Card")
        if self.suit < other.suit:
            return True
        if self.suit > other.suit:
            return False
        return self.value < other.value
"antsticky@gmail.com"
] | antsticky@gmail.com |
d4323a8fa1e1648c6105fb1c105c9320a7657887 | 90d3b9467dcc6763865cad90a04a247cafcf5862 | /shopee/child_app/transport/urls.py | 2623f3b186fcd57a552bb35e8ab754ee8ca7fb7d | [] | no_license | vandat9xhn/django_1 | 0fa51515549eab04c27bdfeaf9e43650fe44dc70 | 6669e172d6b5a2a729dd31ea43d6c08f76b6e19c | refs/heads/master | 2023-06-23T19:46:26.558871 | 2021-07-26T15:11:12 | 2021-07-26T15:11:12 | 375,704,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.urls import path
#
from . import views
#
# Route table for the transport child app; "transport-l/" serves the
# list endpoint backed by views.TransportViewL.
urlpatterns = [
    path('transport-l/', views.TransportViewL.as_view()),
]
| [
"vandat9xiloveyou@gmail.com"
] | vandat9xiloveyou@gmail.com |
d8397ec81aa5d71848edb5ef5d9d75123cbc5430 | 5c6f5a8dd71df620710bc02c03507691e85597e7 | /todo_app/microservice_requests.py | 2d2586f29eca649197429f79722996765215eea0 | [] | no_license | gitit4321/todo_app | 5a1b3aec6879f0fb919b8f7c3240b961ab6aa011 | 4cafbddb3d564ed08b2d19af56fc689b9118b609 | refs/heads/main | 2023-07-19T07:56:00.480973 | 2021-08-28T00:10:38 | 2021-08-28T00:10:38 | 388,287,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | import requests
import pyperclip as pc
from validators import url as valid_url
from validators import between
def get_shortened_url(url):
    """POST the URL to the local shortener service and return the short URL.

    Returns the literal string 'not_valid' when `url` fails validation.
    NOTE(review): assumes the shortener is running on localhost:4000 and
    responds with a 'shortenedUrl' field — confirm against the service.
    """
    if not valid_url(url):
        return 'not_valid'
    payload = {'original_url': url}
    res = requests.post('http://localhost:4000/shorten', json=payload)
    return res.json()['shortenedUrl']
def get_map_info(origin_zip, destination_zip):
    """Query the distance service and describe the drive between two zips.

    Returns the literal string 'not_valid' when either zip falls outside
    1000..99999. NOTE(review): int() will raise on non-numeric input —
    confirm callers pre-validate.
    """
    zips_ok = (between(int(origin_zip), min=1000, max=99999)
               and between(int(destination_zip), min=1000, max=99999))
    if not zips_ok:
        return 'not_valid'
    payload = {'Origin': origin_zip,
               'Destination': destination_zip}
    res = requests.get('https://marsican.app/maps/distance', params=payload)
    response = res.json()
    element = response['rows'][0]['elements'][0]
    print(element)  # kept: original prints the raw element to stdout
    time_to_dest = element['duration']['text']
    dist_to_dest = element['distance']['text']
    # NOTE(review): [:-11] presumably strips a trailing country/zip suffix
    # from the formatted address — confirm against the API's address format.
    origin_city = response['origin_addresses'][0][:-11]
    dest_city = response['destination_addresses'][0][:-11]
    return (f'It will take roughly {time_to_dest} to drive the {dist_to_dest} '
            f'from {origin_city} to {dest_city}.')
def get_translation(query):
    """Placeholder translation endpoint.

    Ignores `query` and returns a canned response; the single-key dummy
    dict in the original collapses to a constant.
    """
    return "Your translated response"
| [
"gitit4321@gmail.com"
] | gitit4321@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.