blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea211ceda69d5684aebe4589deb3ba099130fa13 | d085a640fff4a44b65dc62590ed89253fa221263 | /money_tracker.py | 6d312c9644629ea68d0028da81e3d9f89701207a | [
"MIT"
] | permissive | paulburnz314/coffee_machine | 1a1c12a7e4ad6dea71b827f70d36aa7d5189c3c6 | 36bba10d009b9f009398ccee6a05220b84e8b0b8 | refs/heads/main | 2023-03-02T00:20:06.633171 | 2021-02-12T19:25:42 | 2021-02-12T19:25:42 | 337,550,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | class MoneyTracker:
CURRENCY = "$" # label for formatting currency
COIN_VALUES = {
"quarters": 0.25,
"dimes": 0.10,
"nickles": 0.05,
"pennies": 0.01
}
    def __init__(self):
        """Initialize running totals for the machine."""
        self.profit = 0  # money kept from completed purchases
        self.money_received = 0  # coins inserted for the purchase in progress
def report(self):
"""Prints the current profit. I added the :.2f formatting"""
print(f"Money: {self.CURRENCY}{self.profit:.2f}")
def process_coins(self):
"""Returns the total calculated from coins inserted."""
print("Please insert coins.")
for coin in self.COIN_VALUES:
try:
number_of_coins = int(input(f"How many {coin}?: "))
except ValueError:
number_of_coins = 0
self.money_received += number_of_coins * self.COIN_VALUES[coin]
return self.money_received
def make_payment(self, cost):
"""Returns True when payment is accepted, or False if insufficient."""
self.process_coins()
if self.money_received >= cost:
change = round(self.money_received - cost, 2)
print(f"Here is {self.CURRENCY}{change:.2f} in change.")
self.profit += cost
self.money_received = 0
return True
else:
print("Sorry that's not enough money. Money refunded.")
self.money_received = 0
return False
| [
"noreply@github.com"
] | paulburnz314.noreply@github.com |
6fd67b6b693b6c301e3654e90e09871256f29eb2 | 7b09d131ba09c3ef5c7658eeea9075e0b4a7ec5a | /updateBlynk.py | 5d9c051f86d7db79d4f3dad8d5b0e5c7ba8a9dfd | [] | no_license | deepcore2/SDL_Pi_SkyWeather | 38afe43466fbf078629966504e010c5fe2fafb5e | 492d1df40b49a2896280cc1ddfb64e98d38e2045 | refs/heads/master | 2022-02-20T13:41:59.418699 | 2019-09-30T01:30:03 | 2019-09-30T01:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,691 | py |
# provides routine to update SGS Blynk Display
import time
import requests
import json
import util
import state
import traceback
# Check for user imports
try:
import conflocal as config
except ImportError:
import config
DEBUGBLYNK = False  # set True to print verbose request/response tracing
def stopFlash():
    """Reset the Blynk flash button (virtual pin V30) back to 0."""
    requests.get(config.BLYNK_URL + config.BLYNK_AUTH + '/update/V30?value=0')
def blynkInit():
    """Initialize the Blynk dashboard on startup.

    Resets the button pins (V5, V6, V30), lights both LEDs (V42, V43), and
    restores the English/metric preference persisted in EnglishMetric.txt,
    creating that file with "0" (English) if it is missing or unreadable.
    Returns 0 on error, None on success.
    """
    # initalize button states
    try:
        if (DEBUGBLYNK):
            print "Entering blynkInit:"
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V5?value=0')
        # V6 mirrors the current OLED on/off state
        if (state.runOLED == True):
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V6?value=1')
        else:
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V6?value=0')
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V30?value=0')
        # initialize LEDs
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V42?value=255')
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V43?value=255')
        # read english Metric in from file
        try:
            f = open("/home/pi/SDL_Pi_SkyWeather/state/EnglishMetric.txt", "r")
            value = int(f.read())
            f.close()
        except Exception as e:
            # File missing/corrupt: default to English (0) and recreate it.
            value = 0
            #print "initial state - no EnglishMetric.txt value=", value
            f1 = open("/home/pi/SDL_Pi_SkyWeather/state/EnglishMetric.txt", "w")
            f1.write("0")
            f1.close()
        state.EnglishMetric = value
        if (DEBUGBLYNK):
            print "state.EnglishMetric = ", value
        # Reflect the restored preference on the units button (V8).
        if (state.EnglishMetric == 0):
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V8?value=0')
        else:
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V8?value=1')
        if (DEBUGBLYNK):
            print "Exiting blynkInit:"
    except Exception as e:
        print "exception in blynkInit"
        print (e)
        return 0
def blynkResetButton(buttonNumber):
    """Reset a Blynk virtual-pin button (e.g. "V5") back to 0.

    Returns 0 on error, None on success.
    """
    try:
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/'+buttonNumber+'?value=0')
    except Exception as e:
        print "exception in blynkResetButton"
        print (e)
        return 0
def blynkEventUpdate(Event):
    """Write an event string to Blynk virtual pin V31.

    Returns 1 on success, 0 on any exception.
    """
    try:
        put_header={"Content-Type": "application/json"}
        val = Event
        put_body = json.dumps([val])
        if (DEBUGBLYNK):
            print "blynkEventUpdate:",val
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V31', data=put_body, headers=put_header)
        if (DEBUGBLYNK):
            print "blynkEventUpdate:POST:r.status_code:",r.status_code
        return 1
    except Exception as e:
        print "exception in blynkEventUpdate"
        print (e)
        return 0
def blynkStatusTerminalUpdate(entry):
    """Append a timestamped line to the status terminal widget (V32).

    Returns 0 on exception, None on success.
    """
    try:
        put_header={"Content-Type": "application/json"}
        entry = time.strftime("%Y-%m-%d %H:%M:%S")+": "+entry+"\n"
        put_body = json.dumps([entry])
        if (DEBUGBLYNK):
            print "blynkStateUpdate:Pre:put_body:",put_body
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V32', data=put_body, headers=put_header)
        if (DEBUGBLYNK):
            print "blynkStateUpdate:POST:r.status_code:",r.status_code
    except Exception as e:
        # NOTE(review): message says "blynkTerminalUpdate" — copy/paste from a sibling; confirm intended
        print "exception in blynkTerminalUpdate"
        print (e)
        return 0
def blynkSolarTerminalUpdate(entry):
    """Append a timestamped line to the solar terminal widget (V33).

    Returns 0 on exception, None on success.
    """
    try:
        put_header={"Content-Type": "application/json"}
        entry = time.strftime("%Y-%m-%d %H:%M:%S")+": "+entry+"\n"
        put_body = json.dumps([entry])
        if (DEBUGBLYNK):
            print "blynkStateUpdate:Pre:put_body:",put_body
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V33', data=put_body, headers=put_header)
        if (DEBUGBLYNK):
            print "blynkStateUpdate:POST:r.status_code:",r.status_code
    except Exception as e:
        print "exception in blynkTerminalUpdate"
        print (e)
        return 0
def blynkUpdateImage():
    """Point the Blynk image widget (V70) at the SkyWeather logo URL.

    Returns 0 on exception, None on success.
    """
    #Blynk.setProperty(V1, "urls", "https://image1.jpg", "https://image2.jpg");
    try:
        if (DEBUGBLYNK):
            print "blynkUpdateImage:started"
        # The triple-quoted block below is dead code kept from earlier
        # experiments with per-index picture URLs; it is never executed.
        """
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?value=2') # Picture URL
        if (DEBUGBLYNK):
            print "blynkUpdateImage:OTHER:r.status_code:",r.status_code
        #r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?urls=http://www.switchdoc.com/2.jpg') # Picture URL
        #r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?urls=http://www.switchdoc.com/skycamera.jpg,http://www.switchdoc.com/2.jpg') # Picture URL
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?value=1;url=http://www.switchdoc.com/skycamera.jpg')
        if (DEBUGBLYNK):
            print "blynkUpdateImage:OTHER:r.status_code:",r.status_code
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?value=2;url=http://www.switchdoc.com/2.jpg') # Picture URL
        if (DEBUGBLYNK):
            print "blynkUpdateImage:OTHER:r.status_code:",r.status_code
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?value=2') # Picture URL
        if (DEBUGBLYNK):
            print "blynkUpdateImage:OTHER:r.status_code:",r.status_code
        """
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V70?urls=http://www.switchdoc.com/SkyWeatherNoAlpha.png') # Picture URL
    except Exception as e:
        print "exception in blynkUpdateImage"
        print (e)
        return 0
def blynkStateUpdate():
    """Push the full current sensor state to the Blynk dashboard.

    Writes temperatures, humidity, wind, rain, sunlight, barometric pressure
    and solar/battery/load power readings to their virtual pins, then colors
    the trend (V42) and lightning (V43) LEDs.  Returns 1 on success, 0 on
    any exception.
    """
    try:
        blynkUpdateImage()
        put_header={"Content-Type": "application/json"}
        # set last sample time
        put_header={"Content-Type": "application/json"}
        val = time.strftime("%Y-%m-%d %H:%M:%S")
        put_body = json.dumps([val])
        if (DEBUGBLYNK):
            print "blynkEventUpdate:",val
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V44', data=put_body, headers=put_header)
        if (DEBUGBLYNK):
            print "blynkEventUpdate:POST:r.status_code:",r.status_code
        # do the graphs
        val = state.Outdoor_AirQuality_Sensor_Value
        put_body = json.dumps([val])
        if (DEBUGBLYNK):
            print "blynkStateUpdate:Pre:put_body:",put_body
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V7', data=put_body, headers=put_header)
        if (DEBUGBLYNK):
            print "blynkStateUpdate:POST:r.status_code:",r.status_code
        # outside temperature: formatted label (V0) and raw value for graphing (V10)
        val = util.returnTemperatureCF(state.currentOutsideTemperature)
        tval = "{0:0.1f} ".format(val) + util.returnTemperatureCFUnit()
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V0', data=put_body, headers=put_header)
        val = util.returnTemperatureCF(state.currentOutsideTemperature)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V10', data=put_body, headers=put_header)
        val = state.currentOutsideHumidity
        put_body = json.dumps(["{0:0.1f}%".format(val)])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V1', data=put_body, headers=put_header)
        val = state.currentOutsideHumidity
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V11', data=put_body, headers=put_header)
        # inside temperature / humidity (V21, V120, V13, V121)
        val = util.returnTemperatureCF(state.currentInsideTemperature)
        tval = "{0:0.1f} ".format(val) + util.returnTemperatureCFUnit()
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V21', data=put_body, headers=put_header)
        val = util.returnTemperatureCF(state.currentInsideTemperature)
        tval = "{0:0.1f}".format(val)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V120', data=put_body, headers=put_header)
        val = state.currentInsideHumidity
        put_body = json.dumps(["{0:0.1f}%".format(val)])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V13', data=put_body, headers=put_header)
        val = state.currentInsideHumidity
        put_body = json.dumps(["{0:0.1f}".format(val)])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V121', data=put_body, headers=put_header)
        # fan state as 0/1 (V122)
        if (state.fanState == False):
            val = 0
        else:
            val = 1
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V122', data=put_body, headers=put_header)
        #wind
        val = util.returnWindSpeed(state.ScurrentWindSpeed)
        tval = "{0:0.1f}".format(val) + util.returnWindSpeedUnit()
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V9', data=put_body, headers=put_header)
        #now humiidyt
        #val = util.returnWindSpeed(state.ScurrentWindSpeed)
        val = state.currentOutsideHumidity
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V19', data=put_body, headers=put_header)
        # outdoor Air Quality
        val = state.Outdoor_AirQuality_Sensor_Value
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V20', data=put_body, headers=put_header)
        #wind direction
        val = "{0:0.0f}/".format(state.ScurrentWindDirection) + util.returnWindDirection(state.ScurrentWindDirection)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V2', data=put_body, headers=put_header)
        #rain
        val = "{0:0.2f}".format(state.currentTotalRain)
        if (state.EnglishMetric == 1):
            tval = "{0:0.2f}mm".format(state.currentTotalRain)
        else:
            tval = "{0:0.2f}in".format(state.currentTotalRain / 25.4)
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V3', data=put_body, headers=put_header)
        #Sunlight
        val = "{0:0.0f}".format(state.currentSunlightVisible)
        #print ("Sunlight Val = ", state.currentSunlightVisible)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V4', data=put_body, headers=put_header)
        #Sunlight
        # NOTE(review): this writes sunlight to V60, but V60 is written again
        # below with battery power — one of the two is likely the wrong pin.
        val = "{0:0.0f}".format(state.currentSunlightVisible)
        #print ("Sunlight Val = ", state.currentSunlightVisible)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V60', data=put_body, headers=put_header)
        #barometric Pressure
        if (state.EnglishMetric == 1):
            tval = "{0:0.2f}hPa".format(state.currentSeaLevel)
        else:
            tval = "{0:0.2f}in".format((state.currentSeaLevel * 0.2953)/10.0)
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V40', data=put_body, headers=put_header)
        #barometric Pressure graph
        if (state.EnglishMetric == 1):
            tval = "{0:0.2f}".format(state.currentSeaLevel)
        else:
            tval = "{0:0.2f}".format((state.currentSeaLevel * 0.2953)/10.0)
        put_body = json.dumps([tval])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V41', data=put_body, headers=put_header)
        #solar data
        val = "{0:0.2f}".format(state.solarVoltage)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V50', data=put_body, headers=put_header)
        val = "{0:0.1f}".format(state.solarCurrent)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V51', data=put_body, headers=put_header)
        val = "{0:0.2f}".format(state.batteryVoltage)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V52', data=put_body, headers=put_header)
        val = "{0:0.1f}".format(state.batteryCurrent)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V53', data=put_body, headers=put_header)
        val = "{0:0.2f}".format(state.loadVoltage)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V54', data=put_body, headers=put_header)
        val = "{0:0.1f}".format(state.loadCurrent)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V55', data=put_body, headers=put_header)
        val = "{0:0.1f}W".format(state.batteryPower)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V60', data=put_body, headers=put_header)
        val = "{0:0.1f}W".format(state.solarPower)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V61', data=put_body, headers=put_header)
        val = "{0:0.1f}W".format(state.loadPower)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V62', data=put_body, headers=put_header)
        val = "{0:0.1f}".format(state.batteryCharge)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V56', data=put_body, headers=put_header)
        val = "{0:0.1f}".format(state.batteryCharge)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V127', data=put_body, headers=put_header)
        # inside-minus-outside temperature delta (V128)
        delta = util.returnTemperatureCF(state.currentInsideTemperature)- util.returnTemperatureCF(state.currentOutsideTemperature)
        val = "{0:0.1f}".format(delta)
        put_body = json.dumps([val])
        r = requests.put(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V128', data=put_body, headers=put_header)
        # LEDs
        if (state.barometricTrend): #True is up, False is down
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V42?color=%2300FF00') # Green
            if (DEBUGBLYNK):
                print "blynkAlarmUpdate:OTHER:r.status_code:",r.status_code
        else:
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V42?color=%23FF0000') # red
        # NOTE(review): this compares a timestamp against time.clock() (CPU
        # time, not wall time) plus 1800 — it looks like the intent was
        # "lightning within the last 30 minutes" (> time.time() - 1800); as
        # written the condition is almost always true.  Confirm before fixing.
        if (state.currentAs3935LastLightningTimeStamp < time.clock() + 1800): #True is lightning, False is none
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V43?color=%2300FF00') # Green
            if (DEBUGBLYNK):
                print "blynkAlarmUpdate:OTHER:r.status_code:",r.status_code
        else:
            r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/update/V43?color=%23FF0000') # red
        return 1
    except Exception as e:
        print "exception in blynkStateUpdate"
        print(traceback.format_exc())
        print (e)
        return 0
def blynkStatusUpdate():
    """Poll the Blynk app buttons and apply requested changes to local state.

    Reads V8 (English/metric units, persisted to EnglishMetric.txt),
    V5 (rainbow on/off), V6 (OLED on/off) and V30 (flash strip), updating
    `state`/`config` accordingly.  Returns 1 on success, 0 on exception.
    """
    if (DEBUGBLYNK):
        print "blynkStatusUpdate Entry"
    try:
        put_header={"Content-Type": "application/json"}
        # look for English or Metric
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/get/V8') # read button state
        if (DEBUGBLYNK):
            print "blynkStatusUpdate:POSTEM:r.status_code:",r.status_code
            print "blynkStatusUpdate:POSTEM:r.text:",r.text
        if (r.text == '["1"]'):
            if (state.EnglishMetric == 0):
                # Switch to metric and persist the choice.
                state.EnglishMetric = 1
                if (DEBUGBLYNK):
                    print "blynkStatusUpdate:POSTBRC:state.EnglishMetric set to Metric"
                blynkStatusTerminalUpdate("Set to Metric Units ")
                f = open("/home/pi/SDL_Pi_SkyWeather/state/EnglishMetric.txt", "w")
                f.write("1")
                f.close()
        else:
            if (state.EnglishMetric == 1):
                # Switch to English and persist the choice.
                state.EnglishMetric = 0
                f = open("/home/pi/SDL_Pi_SkyWeather/state/EnglishMetric.txt", "w")
                f.write("0")
                f.close()
                if (DEBUGBLYNK):
                    print "blynkStatusUpdate:POSTBRC:state.EnglishMetric set to English"
                blynkStatusTerminalUpdate("Set to English Units ")
        # look for rainbow button change
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/get/V5') # read button state
        if (DEBUGBLYNK):
            print "blynkStatusUpdate:POSTBR:r.status_code:",r.status_code
            print "blynkStatusUpdate:POSTBR:r.text:",r.text
        if (r.text == '["1"]'):
            state.runRainbow = True
            blynkStatusTerminalUpdate("Turning Rainbow On ")
            if (DEBUGBLYNK):
                print "blynkStatusUpdate:POSTBRC:state.runRainbow set to True"
        else:
            if(state.runRainbow == True):
                blynkStatusTerminalUpdate("Turning Rainbow Off ")
                state.runRainbow = False
            if (DEBUGBLYNK):
                print "blynkStatusUpdate:POSTBRC:state.runRainbow set to False"
        # turn OLED ON and OFF
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/get/V6') # read button state
        #if (DEBUGBLYNK):
        if (r.text == '["1"]'):
            if (state.runOLED == False):
                state.runOLED = True
                blynkStatusTerminalUpdate("Turning OLED On ")
                if (DEBUGBLYNK):
                    print "blynkStatusUpdate:POSTBRO:state.runOLED set to True"
                # Only drive the hardware if an OLED was detected at startup.
                if (config.OLED_Originally_Present == True):
                    config.OLED_Present = True
                    util.turnOLEDOn()
        else:
            if (state.runOLED == True):
                blynkStatusTerminalUpdate("Turning OLED Off ")
                state.runOLED = False
                if (DEBUGBLYNK):
                    print "blynkStatusUpdate:POSTBRO:state.runOLED set to False"
                if (config.OLED_Originally_Present == True):
                    config.OLED_Present = False
                    util.turnOLEDOff()
        # look for Flash Strip Command
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/get/V30') # read button state
        if (DEBUGBLYNK):
            print "blynkStatusUpdate:POSTBF:r.status_code:",r.status_code
            print "blynkStatusUpdate:POSTBF:r.text:",r.text
        if (r.text == '["1"]'):
            state.flashStrip = True
            if (DEBUGBLYNK):
                print "blynkStatusUpdate:POSTBRF:state.flashStrip set to True"
        else:
            state.flashStrip = False
            if (DEBUGBLYNK):
                print "blynkStatusUpdate:POSTBRF:state.flashStrip set to False"
        return 1
    except Exception as e:
        print "exception in blynkStatusUpdate"
        print (e)
        return 0
def blynkSGSAppOnline():
    """Ask the Blynk server whether the mobile app is currently connected.

    Returns the raw response text on success, or "" on any exception.
    """
    try:
        r = requests.get(config.BLYNK_URL+config.BLYNK_AUTH+'/isAppConnected')
        if (DEBUGBLYNK):
            print "blynkSGSAppOnline:POSTCHECK:r.text:",r.text
        return r.text
    except Exception as e:
        print "exception in blynkApponline"
        print (e)
        return ""
| [
"jshovic@switchdoc.com"
] | jshovic@switchdoc.com |
5c69eb46b0cf53e9b0b75d460c795a7447e8db1e | 52bb5f79a099040e8f90f221e809713a53903ff4 | /vaccine/login/app.py | 1b67a38b5eb264d072774cee9fe433e898e10d28 | [
"Apache-2.0"
] | permissive | biallenchanuow/CSCI927 | 7df43a6d0023d46cd65fb9e095c9d3ab0a7b1da6 | de5b8e153f182bc1550cbca00d481426ce7f898c | refs/heads/main | 2023-08-16T23:21:56.647146 | 2021-10-12T11:17:55 | 2021-10-12T11:17:55 | 407,023,589 | 0 | 0 | Apache-2.0 | 2021-09-16T04:48:23 | 2021-09-16T04:48:22 | null | UTF-8 | Python | false | false | 1,420 | py | from flask import Flask, g
from flask.sessions import SecureCookieSessionInterface
from flask_migrate import Migrate
from flask_login import LoginManager
import models
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from route import user_blueprint
# Application factory-free setup: configure Flask, the SQLite database,
# blueprints, login management, migrations, and the admin UI.
app = Flask(__name__)
# SECURITY NOTE(review): secret key is hard-coded and committed to source;
# load it from an environment variable before deploying.
app.config['SECRET_KEY'] = 'W3C20kNNhQvS3AvVhbT_JA'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./database/user.db'
models.init_app(app)
app.register_blueprint(user_blueprint)
login_manager = LoginManager(app)
migrate = Migrate(app, models.db)
admin = Admin(app)
# Expose the User table in the admin interface.
admin.add_view(ModelView(models.User, models.db.session))
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login session loader: return the User for a stored session id."""
    return models.User.query.filter_by(id=user_id).first()
@login_manager.request_loader
def load_user_from_request(request):
    """Authenticate a request by the API key in its Authorization header.

    Returns the matching User, or None when the header is absent or the
    key is unknown.
    """
    auth_header = request.headers.get('Authorization')
    if not auth_header:
        return None
    token = auth_header.replace('Basic ', '', 1)
    return models.User.query.filter_by(api_key=token).first()
class CustomSessionInterface(SecureCookieSessionInterface):
    """Session interface that skips cookie emission for header-based logins."""

    def save_session(self, *args, **kwargs):
        # API calls authenticated via the Authorization header must not be
        # handed a session cookie.
        if not g.get('login_via_header'):
            return super().save_session(*args, **kwargs)
if __name__ == '__main__':
    # Run the development server, reachable on all interfaces, port 5001.
    app.run(host='0.0.0.0', port=5001)
| [
"sl438@uowmail.edu.au"
] | sl438@uowmail.edu.au |
fe2fe1f8b9c7ba7702b38d6238da956f25174af4 | e47d7272dea81513e26f6b9ac16ae7f2c3467a18 | /trainer.py | de2ac5660d0550bccb6aabbc542ccc6081d96f45 | [] | no_license | nitesh237/face-detect | 100313d61066a90526d270885881fc0275f28aff | 79601911d8d2d006f90b445e8b2d2eb0d2e3f750 | refs/heads/master | 2020-04-06T07:18:57.494395 | 2018-11-12T19:50:17 | 2018-11-12T19:50:17 | 157,267,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | import os
import cv2
import numpy as np
from PIL import Image
# LBPH face recognizer (legacy OpenCV 2.x API — newer builds use
# cv2.face.LBPHFaceRecognizer_create; confirm installed OpenCV version).
recognizer = cv2.createLBPHFaceRecognizer();
# Directory containing the captured training images.
path = 'dataset';
def getImageID(path):
    """Load grayscale training faces and labels from the `path` directory.

    Returns (IDs, faces): a numpy array of integer labels parsed from each
    file name, and the list of corresponding grayscale image arrays.
    Filenames are assumed to start with "<id>." — TODO confirm dataset layout.
    """
    imagePath = [os.path.join(path,f) for f in os.listdir(path)]
    faces = []
    IDs = []
    # NOTE(review): the [1:] slice skips the first directory entry —
    # presumably a non-image file; verify against the actual dataset.
    for image in imagePath[1:]:
        face = Image.open(image).convert('L');  # 'L' = 8-bit grayscale
        faceNp = np.array(face,'uint8');
        ID = int(os.path.split(image)[-1].split('.')[0]);
        print(ID)
        faces.append(faceNp)
        IDs.append(ID)
        # Briefly display each face while loading.
        cv2.imshow("trainer", faceNp)
        cv2.waitKey(10)
    return np.array(IDs),faces
# Load the dataset, train the recognizer, and persist the trained model.
IDs, faces = getImageID(path)
recognizer.train(faces,IDs)
recognizer.save('Recognizer/trainingData.yml')
cv2.destroyAllWindows();
| [
"n.guptasbp@gmail.com"
] | n.guptasbp@gmail.com |
85a7b7165a40d4f9c47466f77f8df23dc5ba4db1 | f116239b0102c5f22641b70305a0e1e09982f585 | /tasks/xlive_bag_send_task.py | 5f981362ba35092c5b233d19ed0a5709b89e5c77 | [
"LicenseRef-scancode-sata"
] | permissive | wenjianX/BiliExper | 3075efbd1a2ddebd4b87fe5a5c06aa7a524c04bb | b27f5ae2c845741a2513d1498954287bf256cb54 | refs/heads/master | 2023-05-22T09:38:57.585862 | 2021-06-10T07:22:01 | 2021-06-10T07:22:01 | 379,563,848 | 1 | 0 | NOASSERTION | 2021-06-23T10:24:24 | 2021-06-23T10:24:23 | null | UTF-8 | Python | false | false | 1,751 | py | from BiliClient import asyncbili
from .push_message_task import webhook
import logging, time
async def xlive_bag_send_task(biliapi: asyncbili,
task_config: dict
) -> None:
room_id = task_config.get("room_id", 0)
if room_id == 0:
try:
room_id = (await biliapi.xliveGetRecommendList())["data"]["list"][6]["roomid"]
except Exception as e:
logging.warning(f'{biliapi.name}: 获取直播间异常,原因为{str(e)},跳过送出直播间礼物,建议手动指定直播间')
webhook.addMsg('msg_simple', f'{biliapi.name}:直播送出礼物失败\n')
return
expire = task_config.get("expire", 172800)
now_time = int(time.time())
try:
uid = (await biliapi.xliveGetRoomInfo(room_id))["data"]["room_info"]["uid"]
bagList = (await biliapi.xliveGiftBagList())["data"]["list"]
ishave = False
for x in bagList:
if x["expire_at"] - now_time < expire and x["expire_at"] - now_time > 0: #礼物到期时间小于2天
ishave = True
ret = await biliapi.xliveBagSend(room_id, uid, x["bag_id"], x["gift_id"], x["gift_num"])
if ret["code"] == 0:
logging.info(f'{biliapi.name}: {ret["data"]["send_tips"]} {ret["data"]["gift_name"]} 数量{ret["data"]["gift_num"]}')
if not ishave:
logging.info(f'{biliapi.name}: 没有{expire}s内过期的直播礼物,跳过赠送')
except Exception as e:
logging.warning(f'{biliapi.name}: 直播送出即将过期礼物异常,原因为{str(e)}')
webhook.addMsg('msg_simple', f'{biliapi.name}:直播送出礼物失败\n') | [
"2271150345@qq.com"
] | 2271150345@qq.com |
148163317eee7bc9e82fada3cd8c5026ec7e85dc | 3811bd8e323158adabbb4d44a5ca9108f23e49d1 | /project_app/models.py | 2191cc6895e6b694ee77b70c0333e788f11dd0f3 | [] | no_license | ranakumtee/django_project | 7a51b809c981bc72d28da2437524dfea98784b06 | b26c2c6eea3d944599e04825a71abeb0916e7956 | refs/heads/main | 2023-03-18T20:56:15.777648 | 2021-03-10T20:04:49 | 2021-03-10T20:04:49 | 346,653,906 | 0 | 0 | null | 2021-03-11T09:52:33 | 2021-03-11T09:52:33 | null | UTF-8 | Python | false | false | 3,474 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Tables(models.Model):
    """Generic name/description record."""
    name = models.TextField()
    desc = models.TextField()
class Personal(models.Model):
    """Personnel profile: login identity, contact info, and shop address."""
    username = models.CharField(max_length=50)
    rank = models.CharField(max_length=50)
    email = models.EmailField()
    fullname = models.TextField()
    identification = models.CharField(max_length=30)
    phone = models.TextField()
    shop_name = models.CharField(max_length=50)
    address_id = models.CharField(max_length=50)
    address_t = models.CharField(max_length=50)
    address_a = models.CharField(max_length=50)
    address_city = models.CharField(max_length=50)
    address_post = models.CharField(max_length=50)
    address_desc = models.TextField()
class Manufacturer(models.Model):
    """A manufacturer (factory) with address and contact details."""
    fact_name = models.CharField(max_length=50)
    fact_id = models.CharField(max_length=20)
    fact_t = models.TextField()
    fact_a = models.TextField()
    fact_city = models.TextField()
    fact_post = models.CharField(max_length=20)
    fact_email = models.EmailField()
    fact_phone = models.CharField(max_length=20)
    fact_desc = models.TextField()
class Product(models.Model):
    """A sellable product: pricing, stock balance, image and manufacturer."""
    product_code = models.CharField(max_length=20)
    product_name = models.CharField(max_length=50)
    product_type = models.CharField(max_length=50)
    product_size = models.CharField(max_length=10)
    product_send_time = models.IntegerField()
    product_cost = models.IntegerField()
    product_selling = models.IntegerField()
    product_balance = models.IntegerField(default=0)
    product_image = models.FileField()
    product_desc = models.TextField()
    # NOTE(review): "prodect" is misspelled; renaming requires a migration.
    prodect_status = models.CharField(max_length=50)
    product_fact_name = models.CharField(max_length=50)
class History_input(models.Model):
    """Audit row for a stock intake: product, quantity, date and user."""
    history_product_code = models.CharField(max_length=20)
    history_balance = models.IntegerField()
    history_total = models.IntegerField()
    history_date = models.DateField()
    history_user = models.CharField(max_length=50)
class Product_output(models.Model):
    """Record of a product quantity shipped out on a given date."""
    product_code = models.CharField(max_length=20)
    product_quantity = models.PositiveIntegerField()
    date_output = models.DateField()
class Shelf(models.Model):
    """A warehouse shelf location (three code segments) and its capacity."""
    code1_4 = models.CharField(max_length=4)
    code5_6 = models.CharField(max_length=2)
    code7_9 = models.CharField(max_length=3)
    # Combined location code; optional in forms (blank=True).
    code = models.TextField(max_length=50, blank=True)
    value = models.IntegerField()
    valueremain = models.IntegerField(default=0)
class preorder(models.Model):
    """A pre-order line recorded by an employee on a given date."""
    product_code = models.CharField(max_length=20)
    balance = models.IntegerField()
    employee = models.CharField(max_length=50)
    date = models.DateField()
class Basket(models.Model):
    """A product/quantity line in an employee's working basket."""
    product_code = models.CharField(max_length=20)
    qty = models.IntegerField()
    employee = models.CharField(max_length=50)
class store_stock(models.Model) :
    """Quantity of a product held at a given store."""
    store_id = models.CharField(max_length=20)
    product_code = models.CharField(max_length=20)
    qty = models.IntegerField()
class Store(models.Model):
    """A retail store with address and contact details."""
    store_name = models.CharField(max_length=50)
    store_id = models.CharField(max_length=20)
    store_t = models.TextField()
    store_a = models.TextField()
    store_city = models.TextField()
    store_post = models.CharField(max_length=20)
    store_email = models.EmailField()
    store_phone = models.CharField(max_length=20)
    store_desc = models.TextField()
"yanapat.pi@live.ku.th"
] | yanapat.pi@live.ku.th |
ed3962679f3569de0efc57197373f7139220afbe | be0edc20433a6ad3bf4b8f448f1c457437de4c52 | /huxley/core/admin/delegate.py | 6f7e07e1c80d5f269090bfe38f1d8dd13775523a | [
"BSD-3-Clause"
] | permissive | ethanlee16/huxley | eca8c3c1d4ea543a5875c28d4cb5c81dc4e4eddb | 5d601e952c711e9b6703170c78fb23fcc2734ead | refs/heads/master | 2021-01-15T09:20:25.310737 | 2014-12-03T14:51:33 | 2014-12-03T14:51:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import csv
from django.conf.urls import patterns, url
from django.contrib import admin
from django.http import HttpResponse
from huxley.core.models import Delegate
class DelegateAdmin(admin.ModelAdmin):
    """Django admin for Delegate with a CSV roster export view."""

    def roster(self, request):
        '''Return a CSV file representing the entire roster of registered
        delegates, including their committee, country, and school.'''
        roster = HttpResponse(content_type='text/csv')
        roster['Content-Disposition'] = 'attachment; filename="roster.csv"'
        writer = csv.writer(roster)
        # Order by school so each delegation's rows are grouped together.
        ordering = 'assignment__school__name'
        for delegate in Delegate.objects.all().order_by(ordering):
            writer.writerow([
                delegate,
                delegate.committee,
                delegate.country,
                delegate.school
            ])
        return roster

    def get_urls(self):
        """Add the custom roster-export URL to the default admin URLs."""
        urls = super(DelegateAdmin, self).get_urls()
        # NOTE(review): patterns() was removed in Django 1.10 — fine for the
        # Django version this project pins, but it blocks upgrades.
        urls += patterns('',
            url(
                r'roster',
                self.admin_site.admin_view(self.roster),
                name='core_delegate_roster',
            ),
        )
        return urls
| [
"k.mehta@berkeley.edu"
] | k.mehta@berkeley.edu |
47fc843974746e62f87a004cad569868da268f34 | 0a3815afcf4597da1f79771c16e6214059cfa5fe | /app.py | a6140e3b6d15483d4b25bd5955b5ba5bb1ffa459 | [] | no_license | attapalace/Twitter-scraper-with-flask | 5127b74c67f0a70ec60ba625b228fe4e01ac9d2b | ad3ad89da61f8d677522ae86f857245d178af8c5 | refs/heads/main | 2023-07-08T07:17:12.492235 | 2021-08-14T17:59:43 | 2021-08-14T17:59:43 | 377,746,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | from datetime import date
from flask import Flask ,render_template,request
import os
import snscrape.modules.twitter as sntwitter
import pandas as pd
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
today = date.today()
#print('if you want to search for either of multiple terms write as (cats OR dogs), if you want to search for exact phrase write "cats and dogs"')
#search_term = input('Enter your search term: ')
#from_date = input('Enter starting date as 2020-06-01 foramt :') or '2021-06-01'
#until_date = input('Enter end date as 2021-06-01 foramt') or today
# Creating list to append tweet data to
tweets_list2 = []
search_term = request.form.get('search',False)
from_date = request.form.get('from',False) or '2021-06-11'
until_date = request.form.get('until',False) or str(today)
max_results= request.form.get('max',False) or '100'
if search_term:
pd.set_option("display.colheader_justify","left")
# Using TwitterSearchScraper to scrape data and append tweets to list
for i,tweet in enumerate(sntwitter.TwitterSearchScraper(f'{search_term} since:{from_date} until:{until_date}').get_items()):
if i > int(max_results):
break
tweets_list2.append([tweet.date, tweet.id, tweet.content, tweet.user.username])
df = pd.DataFrame(tweets_list2, columns=['Datetime', 'Tweet Id', 'Text', 'Username'])
return render_template('index.html', tables=[df.to_html(classes='data')]
, titles=df.columns.values)
else:
return render_template('index.html')
if __name__ == '__main__':
    # Honor the PORT environment variable (for hosted deployment); default 8080.
    port = int(os.getenv("PORT", 8080))
    app.run(host='0.0.0.0', port=port)
| [
"noreply@github.com"
] | attapalace.noreply@github.com |
6b2338034fb221816ccae9a9a27cb20c6014f947 | 145791316db71595fcad65a2a3b79f963ea6890d | /test_sum.py | 02126a9ad9bbc9e709051abb693d9bd8dfcfe939 | [] | no_license | bagage/vscode-python-pytest | 0fe4098904ead3ca55fe1060a89385aed96b8bd6 | e5bfa3b623a721ea946d91c7adc744cebdef735e | refs/heads/master | 2023-05-28T09:31:00.719063 | 2021-06-14T08:58:47 | 2021-06-14T08:58:47 | 376,761,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34 | py | def test_sum():
assert 1 == 2
| [
"gautier@getluko.com"
] | gautier@getluko.com |
c88fd9183f2e1a1a45fa0d2ebe786192943ae6e1 | 18c96c4f45177a3fbd58ee838431559b0d2456d9 | /OMNIBNK/settings.py | 78a232949b9e0b3df86aac9773f70f4b3f1e277e | [] | no_license | oscardb22/OMNIBNK | 26a6e8bcb93b7a862d842464cf9f7d2ed3ea8aa4 | cc895101e295235cb80dbb2f06e6b94572c08601 | refs/heads/master | 2020-06-19T06:00:46.477931 | 2019-07-12T21:20:00 | 2019-07-12T21:20:00 | 196,589,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py | from django.urls import reverse_lazy
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '^$fwni&4(-0ioqy%89ds173t!+p%we8=kc1*t-zq(n#$6l!pua'
DEBUG = True
ALLOWED_HOSTS = [*'']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# MODULOS
'modulos.users',
'modulos.auditor',
# PLUGINS
'crispy_forms',
'django.contrib.humanize',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'OMNIBNK.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.static',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.tz',
],
},
},
]
WSGI_APPLICATION = 'OMNIBNK.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'es-CO'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'users.Users'
MEDIA_URL = '/media/'
LOGIN_URL = reverse_lazy('users:login')
LOGOUT_URL = reverse_lazy('users:salir')
CRISPY_TEMPLATE_PACK = 'bootstrap3'
if DEBUG:
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"), ]
else:
STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 10
}
| [
"oscar.dominguez@credyty.com"
] | oscar.dominguez@credyty.com |
a77f23a1cf62fc058473b1776d247aafdd9e8e08 | 65d953f13b143ac1dbdb3054eede0daa4b1b5437 | /코테/카카오연습용/accounts/LoginViews.py | dba7e983a82311d0f85ea4caee8714a006dfc583 | [] | no_license | jigglypop/machine_deep_learning_study | 86784b51767185a743d31cb08ac1f20f86285014 | 53924dadaa52f4e63c37753502a530443eac658b | refs/heads/master | 2023-02-05T06:21:59.299659 | 2020-12-20T17:33:27 | 2020-12-20T17:33:27 | 222,834,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,642 | py | from django.shortcuts import render
from .serializers import PostSerializer, RegisterUserSerializer, UserSerializer, LoginUserSerializer, LargeResultsSetPagination
from rest_framework import viewsets, permissions, generics, status
from rest_framework.response import Response
from knox.models import AuthToken
from rest_framework import viewsets
from .serializers import PostSerializer,PostLikeSerializer, ProfileSerializer
from .serializers import MiniProfileSerializer,MiniListSerializer,ListProfileSerializer
from .serializers import NoticeSerializer, EventSerializer,EventJoinSerializer
from .serializers import RecommentSerializer, CommentSerializer
from .serializers import FollowerSerializer, FollowingSerializer
from .serializers import ProfileEmailSerializer
from .serializers import MainListSerializer
from .serializers import PostSearchSerializer,ProfileSearchSerializer
from .models import Post, Profile
from .models import Notice, Event
from .models import Comment, Recomment
from datetime import datetime
from django.http import JsonResponse,HttpResponse
from django.core import serializers
from pprint import pprint
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404
import json
import requests
import base64
from django.core.files.base import ContentFile
from django.conf import settings
import os
from datetime import datetime
from django.http import HttpResponse
# 이미지 업로드용
import boto3
import uuid
from PIL import Image
from io import BytesIO
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterUserSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginUserSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class UserAPI(generics.RetrieveAPIView):
permission_classes = [permissions.IsAuthenticated, ]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
| [
"ydh2244@gmail.com"
] | ydh2244@gmail.com |
7d9b4cbc72ddc8c65d96a80888246cb468ee8603 | d183f9719250d85d9aaf231799c25d648a9f204b | /Problem Set 3/Mark Component.py | f6495fbfad4a21b1889e3caa195737eb9c6e0097 | [] | no_license | MijanurMollah/Udacity-Intro-to-Algorithms | 099e0f20c6078b3772bb2c8cfd2ad787557d363b | 937445148073db99c94ff362005e91e5ff6fb2d5 | refs/heads/master | 2020-09-01T16:25:55.098105 | 2019-11-03T01:58:11 | 2019-11-03T01:58:11 | 219,004,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | # Rewrite `mark_component` to not use recursion
# and instead use the `open_list` data structure
# discussed in lecture
#
def mark_component(G, node, marked):
open_list = []
marked[node] = True
total_marked = 1
for neighbor in G[node]:
open_list.append(neighbor)
marked[neighbor] = True
total_marked += 1
while len(open_list) != 0:
current = open_list[-1]
del open_list[-1]
for neighbor in G[current]:
if neighbor not in marked:
marked[neighbor] = True
total_marked += 1
open_list.append(neighbor)
return total_marked
#########
# Code for testing
#
def make_link(G, node1, node2):
if node1 not in G:
G[node1] = {}
(G[node1])[node2] = 1
if node2 not in G:
G[node2] = {}
(G[node2])[node1] = 1
return G
def test():
test_edges = [(1, 2), (2, 3), (4, 5), (5, 6)]
G = {}
for n1, n2 in test_edges:
make_link(G, n1, n2)
marked = {}
assert mark_component(G, 1, marked) == 3
assert 1 in marked
assert 2 in marked
assert 3 in marked
assert 4 not in marked
assert 5 not in marked
assert 6 not in marked
| [
"noreply@github.com"
] | MijanurMollah.noreply@github.com |
769c233947bb21e73d616adc9283780a1161b902 | 43277f3962edfd5f16d116a3ed35cc08000a0707 | /modular/badger_utils/sacred/experiment_config_diff.py | 5d352aa5b1beb2c014775fb68bf4df78bac4dffd | [] | no_license | GoodAI/badger-2020 | 0cbeb60bf5b5fa2959504b1ba4489d5725646474 | bb3822dbcbb04ed9c153c4deffa25a81011c8ce5 | refs/heads/master | 2021-07-15T12:49:44.227988 | 2021-03-02T19:06:06 | 2021-03-02T19:06:06 | 243,016,754 | 7 | 1 | null | 2020-08-10T13:13:51 | 2020-02-25T14:15:24 | Jupyter Notebook | UTF-8 | Python | false | false | 3,324 | py | from typing import Dict, Any, List, Tuple
import pandas as pd
from badger_utils.view.config_utils import tuple_to_dict
class ExperimentConfigDiff:
_diff: Dict[List[Tuple[str, Any]], List[int]]
_common: Dict[str, Any]
def __init__(self, common: Dict[str, Any], diff: Dict[List[Tuple[str, Any]], List[int]]):
"""
Args:
common: dict of config vars, e.g. {'size': 10, 'epochs': 1000}
diff: dict with keys being list of tuples of ('name', 'value') of config and list of run_ids as value,
e.g. {[('n_experts', 4), ('n_inputs', 3)]: [23, 24], [('n_experts', 4), ('n_inputs', 2)]: [25]}
"""
self._common = common
self._diff = diff
def diff_as_df(self, explode_by_run_id: bool = False) -> pd.DataFrame:
"""
Returns:
DataFrame with columns named by config keys
plus one column "run_ids" where are stored comma separated run_ids
"""
df = pd.DataFrame([{**tuple_to_dict(r), **{'run_ids': v}} for r, v in self._diff.items()])
if explode_by_run_id:
df = df.explode('run_ids').astype({'run_ids': int}).set_index('run_ids')
df.index.name = None
return df
def diff_as_lines(self) -> List[str]:
"""
Returns:
List of one_line string representation for diff. Usable e.g. for a plot legend.
"""
return ExperimentConfigDiff.df_as_lines(self.diff_as_df())
def common_as_text(self, line_delimiter: str = '\n') -> str:
return line_delimiter.join([f'{k}: {v}' for k, v in self._common.items()])
def diff_filtered_run_ids(self, filter_dict: Dict[str, Any]) -> List[int]:
"""
Return list of run_ids for runs that match filter_dict. Only runs matching all filter conditions are selected.
Args:
filter_dict: Dict config_item -> expected_value. E.g. {'n_experts': 4, 'rollout_size': 8}
Returns:
List of run_ids
"""
filtered = self.filter_df(self.diff_as_df(), filter_dict)
return self.flatten(filtered['run_ids'])
@staticmethod
def filter_df(df: pd.DataFrame, filter_dict: Dict[str, Any]) -> pd.DataFrame:
for k, v in filter_dict.items():
df = df.loc[df[k] == v]
return df
@staticmethod
def flatten(l):
return [item for sublist in l for item in sublist]
@staticmethod
def df_as_lines(df: pd.DataFrame) -> List[str]:
"""
Convert DataFrame to list of strings representation
Args:
df: DataFrame to be converted
Returns:
List of one_line string representation for DataFrame. Usable e.g. for a plot legend.
"""
def format_config(r):
return ', '.join([f'{c}: {v}' for c, v in zip(r._fields, r)])
return [format_config(r) for r in df.itertuples(index=False, name='Row')]
@staticmethod
def df_as_description_runids_dict(df: pd.DataFrame) -> Dict[str, List[int]]:
result = {}
for idx, row in df.iterrows():
columns_values = [f'{name}: {row[name]}' for name in row.index if name != 'run_ids']
description = ', '.join(columns_values)
result[description] = row['run_ids']
return result
| [
"jaroslav.vitku@goodai.com"
] | jaroslav.vitku@goodai.com |
66335d806ccf0a4f3148e4dabc2ca3baa18b55b8 | e1fada3a9846a5593e3d3d2fdc32b23b832e38b4 | /tests/unit/cli/tools/test_eval.py | 76eee501f89daf8095a5032255767270ab304ab5 | [
"Apache-2.0"
] | permissive | GalyaZalesskaya/openvino_training_extensions | fd1ebb189900008b16b85568449e5c62d8edbad5 | 6116639caeff100b06a6c10a96c7e7f5951f20c7 | refs/heads/develop | 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 | Apache-2.0 | 2019-10-28T16:16:27 | 2019-08-15T15:41:59 | Python | UTF-8 | Python | false | false | 4,079 | py | import argparse
import pytest
from otx.cli.tools import eval as target_package
from otx.cli.tools.eval import get_args, main
from tests.test_suite.e2e_test_system import e2e_pytest_unit
@e2e_pytest_unit
def test_get_args(mocker):
mock_options = {
"--test-data-roots": "test/data/root",
"--load-weights": "weight/path",
"--save-performance": "save/path",
"--work-dir": "work/dir/path",
}
mock_command = ["otx"]
for key, value in mock_options.items():
mock_command.extend([key, value])
mocker.patch("sys.argv", mock_command)
mocker.patch.object(
target_package, "get_parser_and_hprams_data", return_value=[argparse.ArgumentParser(), {"param": "test"}, []]
)
mocker.patch.object(target_package, "add_hyper_parameters_sub_parser", return_value=argparse.ArgumentParser())
parsed_args, _ = get_args()
assert parsed_args.test_data_roots == "test/data/root"
assert parsed_args.load_weights == "weight/path"
assert parsed_args.save_performance == "save/path"
assert parsed_args.work_dir == "work/dir/path"
@pytest.fixture
def mock_args(mocker, tmp_path):
mock_args = mocker.MagicMock()
mock_args.test_data_roots = "fake_test_data_root"
mock_args.load_weights = "fake_load_weights.xml"
mock_args.save_performance = tmp_path / "save/performance.json"
mock_args.work_dir = tmp_path / "work_dir"
def mock_contains(self, val):
return val in self.__dict__
mock_args.__contains__ = mock_contains
mock_get_args = mocker.patch("otx.cli.tools.eval.get_args")
mock_get_args.return_value = [mock_args, []]
return mock_args
@pytest.fixture
def mock_config_manager(mocker):
mock_config_manager = mocker.patch.object(target_package, "ConfigManager")
mock_template = mocker.MagicMock()
mock_template.name = "fake_name"
mock_config_manager.return_value.template = mock_template
mock_config_manager.return_value.check_workspace.return_value = True
mock_config_manager.return_value.get_dataset_config.return_value = {}
mock_config_manager.return_value.get_hyparams_config.return_value = {}
return mock_config_manager
@pytest.fixture
def mock_dataset_adapter(mocker):
mock_dataset_adapter = mocker.patch("otx.cli.tools.eval.get_dataset_adapter")
mock_dataset = mocker.MagicMock()
mock_label_schema = mocker.MagicMock()
mock_dataset_adapter.return_value.get_otx_dataset.return_value = mock_dataset
mock_dataset_adapter.return_value.get_label_schema.return_value = mock_label_schema
return mock_dataset_adapter
@pytest.fixture
def mock_task(mocker):
mock_task_class = mocker.MagicMock()
mock_task = mocker.MagicMock()
mock_task_class.return_value = mock_task
mocker.patch.object(target_package, "get_impl_class", return_value=mock_task_class)
return mock_task
@e2e_pytest_unit
def test_main(
mocker,
mock_args,
mock_config_manager,
mock_dataset_adapter,
):
mocker.patch.object(
target_package,
"read_model",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"get_impl_class",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"get_dataset_adapter",
return_value=mock_dataset_adapter,
)
mocker.patch.object(
target_package,
"ResultSetEntity",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"InferenceParameters",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"Subset",
return_value=mocker.MagicMock(),
)
mocker.patch.object(
target_package,
"TaskEnvironment",
return_value=mocker.MagicMock(),
)
mocker.patch("json.dump")
mocker.patch("builtins.open")
mock_get_args = mocker.patch("otx.cli.tools.eval.get_args")
mock_get_args.return_value = [mock_args, []]
ret = main()
assert ret["retcode"] == 0
| [
"noreply@github.com"
] | GalyaZalesskaya.noreply@github.com |
ea685e6280f3ad3289add58490a51f8c7d8cb094 | c6aa0c4877aa5a14a8b7ce68c28504547d44a8bf | /DjangoProject/SensorPoll/forms.py | 13210db2f8717ca227a1a60d2cab49e1fd4345c4 | [] | no_license | Dirk7589/HouseSensor | 72215afccd0dcb040040700695437c7d02d92a9c | 3de91e23f0e99d28a7d71185daad7f06d45a719a | refs/heads/master | 2021-01-18T23:37:40.143138 | 2016-04-21T23:27:52 | 2016-04-21T23:27:52 | 40,416,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django import forms
from models import *
class ActiveSensorsForm(forms.ModelForm):
class Meta:
model = ActiveSensors
fields = ('Sensor_number', 'Sensor_name')
| [
"lamothe.max@gmail.com"
] | lamothe.max@gmail.com |
cfe29c23297e0b8167a1f1a3e388e74ad9a83c5c | c8cd3dbcb783b6daad866be07be950bbc4cd9fe9 | /boards/models.py | 95ad56c05b421ac32e2b4d6d8490dcb0569a8431 | [] | no_license | pauloendoh/django-boards | d6b42b829dee0c96c4bda676da5e2ac1977f0922 | 640c0672b049d190213f5bf318f390b40e697262 | refs/heads/master | 2020-03-13T03:43:19.604777 | 2018-04-26T02:22:58 | 2018-04-26T02:22:58 | 130,949,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | from markdown import markdown
from django.db import models
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
import math
class Board(models.Model):
name = models.CharField(max_length=30, unique=True)
description = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_posts_count(self):
return Post.objects.filter(topic__board=self).count()
def get_last_post(self):
return Post.objects.filter(topic__board=self).order_by('-created_at').first()
class Topic(models.Model):
subject = models.CharField(max_length=255)
last_updated = models.DateTimeField(auto_now_add=True)
board = models.ForeignKey(Board, related_name='topics', on_delete=models.CASCADE)
starter = models.ForeignKey(User, related_name='topics', on_delete=models.CASCADE)
views = models.PositiveIntegerField(default=0) # <- here
def __str__(self):
return self.subject
def get_page_count(self):
count = self.posts.count()
pages = count / 20
return math.ceil(pages)
def has_many_pages(self, count=None):
if count is None:
count = self.get_page_count()
return count > 6
def get_page_range(self):
count = self.get_page_count()
if self.has_many_pages(count):
return range(1, 5)
return range(1, count + 1)
def get_last_ten_posts(self):
return self.posts.order_by('-created_at')[:10]
class Post(models.Model):
message = models.TextField(max_length=4000)
topic = models.ForeignKey(Topic, related_name='posts', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, related_name='posts', on_delete=models.CASCADE)
updated_by = models.ForeignKey(User, null=True, related_name='+', on_delete=models.CASCADE)
def __str__(self):
truncated_message = Truncator(self.message)
return truncated_message.chars(30)
def get_message_as_markdown(self):
return mark_safe(markdown(self.message, safe_mode='escape'))
| [
"paulo.endoh@gmail.com"
] | paulo.endoh@gmail.com |
8c36e1ab17f6eae4d21ebc502df4c780b0ed08c6 | 4c42938d71b5c29082cb7cb8560a5bfe0751b33d | /new_0412.py | 8b714ee7a76eed55fbb8b51720018bb02a9b2e32 | [] | no_license | Pimh/pdsg_renthop | 249ea8d1c79910bb13fb153d7d46e9989143ba71 | ded77ec0d51ab4a4357ef2170d7e28f45aa80469 | refs/heads/master | 2021-05-09T01:03:53.305322 | 2018-02-01T03:17:08 | 2018-02-01T03:17:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,975 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 08:03:31 2017
@author: PimH
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import itertools
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import log_loss, confusion_matrix
from sklearn import svm
from sklearn.feature_selection import SelectFromModel
from collections import OrderedDict
from sklearn.feature_extraction.text import TfidfVectorizer
# Load data
data = pd.read_json('train.json', orient='columns')
# Add new features
dtime_ref = datetime.datetime.strptime('2017-03-06 14:40:55',
'%Y-%m-%d %H:%M:%S')
dtime = pd.to_datetime(data['created'])
dtime_delta = dtime - dtime_ref
data['delta_dtime_created'] = dtime_delta.dt.days
data['day_of_week'] = dtime.dt.weekday
data['nPhoto'] = data.loc[:, 'photos'].apply(lambda x: len(x))
data['nFeature'] = data.loc[:, 'features'].apply(lambda x: len(x))
data['description_len'] = data.loc[:, 'description'].apply(lambda x: len(x.split(' ')))
# Convert certain features to categorical values
data['interest_level'] = data['interest_level'].astype('category',
['low', 'medium', 'high'], ordered = True)
data['building_id'] = data['building_id'].astype('category')
data['manager_id'] = data['manager_id'].astype('category')
data['day_of_week'] = data['day_of_week'].astype('category')
# Store simple feature in X_simple
X_simple = data[['bathrooms', 'bedrooms', 'latitude', 'longitude',
'price', 'delta_dtime_created', 'day_of_week', 'nPhoto', 'nFeature', 'description_len']].copy()
y = np.asarray(data.loc[:,'interest_level'])
# Split data into test and train sets
Xsim_train, Xsim_test, ysim_train, ysim_test = train_test_split(X_simple, y, test_size = 0.2,
random_state = 0)
# Collect all the words under features column
data.loc[:,'features_joined'] = data.loc[:, 'features'].apply(lambda x: '.'.join(x))
df_range = [0.95, 0.9, 0.85, 0.8]
for elem in df_range:
cnt_vec = TfidfVectorizer(max_df= elem, max_features=200000,
min_df= 1-elem, stop_words='english',
use_idf=True, ngram_range=(1,2))
X_feats = cnt_vec.fit_transform(data.loc[:,'features_joined'], y)
m,n = X_feats.shape
print('For max_df', elem, 'no. of important words: ', n)
X_feat_train, X_feat_test, y_feat_train, y_feat_test = train_test_split(X_feats, y, test_size = 0.2,
random_state = 0)
GBC_feat_clf = GradientBoostingClassifier().fit(X_feat_train, y_feat_train)
GBC_feat_pred_prob = GBC_feat_clf.predict_proba(X_feat_test.toarray())
GBC_feat_logloss = log_loss(y_feat_test, GBC_feat_pred_prob)
print 'GBC features log loss: ', GBC_feat_logloss
# max_df = 0.9 is the best | [
"pimh@seas.upenn.edu"
] | pimh@seas.upenn.edu |
f60fe656d312d24398de8241d649b5008f7daeb3 | 20324cbda561c2cc3220a30e90b7583e4f554fd9 | /scripts/vesc_control.py | 59a66ac261d5d0a49436f9ed3f9f0ffa0dba8d2d | [
"MIT"
] | permissive | DIYRobocars-BLR/vesc_control | cc19d01fea0bfcf5d75915ea57ca76bbea47bc08 | 16eae1eaf549bee7a48825abbf8ff00786b85170 | refs/heads/master | 2020-03-27T21:20:49.345641 | 2018-09-03T01:29:35 | 2018-09-03T01:29:35 | 147,136,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import *
def talker():
rospy.init_node('vesc_control_node', anonymous=True)
pub = rospy.Publisher('/commands/motor/current', Float64, queue_size=10)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
values=1.5
rospy.loginfo(values)
pub.publish(values)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| [
"ss.shrek7@gmail.com"
] | ss.shrek7@gmail.com |
82147037ffb32a42caafdb8859d25db1cbd55b59 | 4f804508c78c331d7985db936d099522a5739303 | /dcorch/api/proxy/apps/controller.py | 53f93e012e597b6aacf6235591087795391f5261 | [
"Apache-2.0"
] | permissive | starlingx-staging/stx-kingbird | 406f6ada829fe285329670d81d5c9e4bcc58884e | 9869ad4640e76384fa14f031a59134cd439929a8 | refs/heads/master | 2020-03-18T00:56:00.772399 | 2018-05-20T04:28:15 | 2018-05-20T04:35:38 | 134,110,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,493 | py | # Copyright 2017 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import webob.dec
import webob.exc
from dcorch.api.proxy.apps.dispatcher import APIDispatcher
from dcorch.api.proxy.common import constants as proxy_consts
from dcorch.api.proxy.common.service import Middleware
from dcorch.api.proxy.common import utils as proxy_utils
from dcorch.common import consts
import dcorch.common.context as k_context
from dcorch.common import exceptions as exception
from dcorch.common import utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service.wsgi import Request
from dcorch.rpc import client as rpc_client
LOG = logging.getLogger(__name__)

# Debug switches for the proxy: when enabled, the full request and/or
# response details are written to the log (see print_request() and
# process_response() below).  Both default to off.
controller_opts = [
    cfg.BoolOpt('show_request',
                default=False,
                help='Print out the request information'),
    cfg.BoolOpt('show_response',
                default=False,
                help='Print out the response information'),
]

CONF = cfg.CONF
# Register the options so CONF.show_request / CONF.show_response resolve.
CONF.register_opts(controller_opts)
class APIController(Middleware):
    """Base WSGI middleware for the dcorch API proxy.

    Each request is dispatched downstream via an APIDispatcher; the
    response is then post-processed by a handler looked up in
    ``response_hander_map`` keyed by the configured endpoint type
    (``CONF.type``).  Subclasses populate that map with their
    endpoint-specific processing.
    """

    def __init__(self, app, conf):
        super(APIController, self).__init__(app)
        # Admin context used for RPC calls to the dcorch engine.
        self.ctxt = k_context.get_admin_context()
        # Pass-through dispatcher returned by process_request() by default.
        self._default_dispatcher = APIDispatcher(app)
        self.rpc_client = rpc_client.EngineClient()
        # endpoint type -> response handler; filled in by subclasses.
        self.response_hander_map = {}

    @staticmethod
    def get_status_code(response):
        """Returns the integer status code from the response.
        """
        return response.status_int

    @staticmethod
    def _get_resource_type_from_environ(request_environ):
        # The routing layer records the matched 'action' value, which
        # identifies the resource type being operated on.
        return proxy_utils.get_routing_match_value(request_environ, 'action')

    @staticmethod
    def get_resource_id_from_link(url):
        """Return the resource id, i.e. the last URL path component."""
        return proxy_utils.get_url_path_components(url)[-1]

    @staticmethod
    def get_request_header(environ):
        """Reconstruct the full request URL from the WSGI environ."""
        # Local import: paste is only needed for URL reconstruction.
        from paste.request import construct_url
        return construct_url(environ)

    def notify(self, environ, endpoint_type):
        """Ask the dcorch engine to start a sync for this endpoint type."""
        self.rpc_client.sync_request(self.ctxt, endpoint_type)

    def process_request(self, req):
        """Select the WSGI application handling this request.

        The base class always forwards to the default dispatcher;
        subclasses may override to route specific requests elsewhere.
        """
        return self._default_dispatcher

    def process_response(self, environ, request_body, response):
        """Run the endpoint-specific response handler and return its result."""
        if CONF.show_response:
            LOG.info("Response: (%s)", str(response))
            LOG.info("Response status: (%d)", self.get_status_code(response))
        handler = self.response_hander_map[CONF.type]
        return handler(environ, request_body, response)

    def _update_response(self, environ, request_body, response):
        """Replace per-region usage numbers in a quota response.

        The quota usage values returned by the local service are
        overwritten with the aggregated usage obtained from the dcorch
        engine for the same project/user.  On any RPC failure the
        original response is returned unmodified (best effort).
        """
        # overwrite the usage numbers with the aggregated usage
        # from dcorch
        LOG.info("Query dcorch for usage info")
        # Maps the top-level response key to the name of its usage field.
        desired_fields = {'quota_set': 'in_use',
                          'quota': 'used'}
        project_id = proxy_utils.get_tenant_id(environ)
        user_id = proxy_utils.get_user_id(environ)
        response_data = json.loads(response.body)
        # get the first match since it should only has one match
        resource_type = next((x for x in desired_fields if x in response_data),
                             None)
        if resource_type is None:
            LOG.error("Could not find the quota data to update")
            return response
        resource_info = response_data[resource_type]
        try:
            usage_dict = self.rpc_client.get_usage_for_project_and_user(
                self.ctxt, CONF.type, project_id, user_id)
        except Exception:
            # Best effort: fall back to the unmodified response.
            return response
        usage_info = json.dumps(usage_dict)
        LOG.info("Project (%s) User (%s) aggregated usage: (%s)",
                 project_id, user_id, usage_info)
        quota_usage = desired_fields[resource_type]
        # Only overwrite resources present in both the response and the
        # aggregated usage report.
        to_be_updated = [res for res in usage_dict if res in resource_info]
        for k in to_be_updated:
            resource_info[k][quota_usage] = usage_dict[k]
        response_data[resource_type] = resource_info
        response.body = json.dumps(response_data)
        return response

    @staticmethod
    def print_environ(environ):
        # Log only content-length/type and HTTP_* header entries.
        for name, value in sorted(environ.items()):
            if (name not in ['CONTENT_LENGTH', 'CONTENT_TYPE'] and
                    not name.startswith('HTTP_')):
                continue
            LOG.info(' %s: %s\n' % (name, value))

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """WSGI entry point: dispatch the request, post-process the response."""
        if CONF.show_request:
            self.print_request(req)
        environ = req.environ
        # copy the request body
        request_body = req.body
        application = self.process_request(req)
        response = req.get_response(application)
        return self.process_response(environ, request_body, response)

    @staticmethod
    def print_request_body(body):
        if body:
            LOG.info("Request body:")
            for line in body.splitlines():
                # NOTE: 'string_escape' is a Python 2 only codec.
                LOG.info(line.encode('string_escape') + '\n')

    def print_request(self, req):
        """Log the request method, URL, headers, environ and body."""
        environ = req.environ
        length = int(req.environ.get('CONTENT_LENGTH') or '0')
        LOG.info("Incoming request:(%s), content length: (%d)",
                 environ['REQUEST_METHOD'], length)
        LOG.info("Request URL: (%s)\n", self.get_request_header(environ))
        LOG.info("Request header: \n")
        # NOTE: iteritems() is Python 2 only.
        for k, v in req.headers.iteritems():
            LOG.info(" %s: %s\n", k, v)
        self.print_environ(environ)
        self.print_request_body(req.body)
class ComputeAPIController(APIController):
    """Proxy controller for the compute (nova) endpoint."""

    # Endpoint type handled by this controller; also the key used in
    # response_hander_map.
    ENDPOINT_TYPE = consts.ENDPOINT_TYPE_COMPUTE

    # Maps a dcorch resource type to the JSON key carrying it in
    # request bodies.
    RESOURCE_TYPE_MAP = {
        consts.RESOURCE_TYPE_COMPUTE_QUOTA_SET: 'quota_set',
    }

    # Status codes treated as success: only successful non-GET requests
    # are enqueued for synchronization (see _process_response).
    OK_STATUS_CODE = [
        webob.exc.HTTPOk.code,
        webob.exc.HTTPCreated.code,
        webob.exc.HTTPAccepted.code,
        webob.exc.HTTPNoContent.code
    ]
def __init__(self, app, conf):
super(ComputeAPIController, self).__init__(app, conf)
self.response_hander_map = {
self.ENDPOINT_TYPE: self._process_response
}
self._resource_handler = {
proxy_consts.FLAVOR_RESOURCE_TAG: self._process_flavor,
proxy_consts.FLAVOR_ACCESS_RESOURCE_TAG:
self._process_flavor_action,
proxy_consts.FLAVOR_EXTRA_SPECS_RESOURCE_TAG:
self._process_extra_spec,
proxy_consts.KEYPAIRS_RESOURCE_TAG:
self._process_keypairs,
proxy_consts.QUOTA_RESOURCE_TAG:
self._process_quota,
proxy_consts.QUOTA_CLASS_RESOURCE_TAG:
self._process_quota
}
@staticmethod
def _get_resource_tag_from_header(url, operation, resource_type):
result = proxy_utils.get_url_path_components(url)
if (operation == consts.OPERATION_TYPE_DELETE or
resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_SET or
resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET):
return result[-2]
else:
return result[-1]
    @staticmethod
    def _get_flavor_id_from_environ(environ):
        # The flavor id is captured by the URL routing as 'flavor_id'.
        return proxy_utils.get_routing_match_value(environ, 'flavor_id')
def _process_response(self, environ, request_body, response):
operation_type = proxy_utils.get_operation_type(environ)
if self.get_status_code(response) in self.OK_STATUS_CODE and \
operation_type != consts.OPERATION_TYPE_GET:
self._enqueue_work(environ, request_body, response)
self.notify(environ, self.ENDPOINT_TYPE)
return response
def _process_flavor(self, **kwargs):
resource_id = None
resource_info = None
resource_type = kwargs.get('resource_type')
operation_type = kwargs.get('operation_type')
if operation_type == consts.OPERATION_TYPE_POST:
operation_type = consts.OPERATION_TYPE_CREATE
resp = json.loads(kwargs.get('response_body'))
resource = json.loads(kwargs.get('request_body'))
if resource_type in resource:
resource_info = resource[resource_type]
else:
LOG.info("Can't find resource type (%s) in request (%s)",
resource_type, resource)
if resource_type in resp:
if 'links' in resp[resource_type]:
link = resp[resource_type]['links'][0]
resource_id = self.get_resource_id_from_link(link['href'])
# update the resource id if it is available
if resource_id is not None:
resource_info['id'] = resource_id
resource_info = json.dumps(resource_info)
LOG.info("Resource id: (%s)", resource_id)
LOG.info("Resource info: (%s)", resource_info)
elif operation_type == consts.OPERATION_TYPE_DELETE:
resource_id = self.get_resource_id_from_link(
kwargs.get('request_header'))
LOG.info("Resource id: (%s), resource type: (%s)",
resource_id, resource_type)
else:
# it should never happen
LOG.info("Ignore request type: (%s)", operation_type)
return operation_type, resource_id, resource_info
def _process_flavor_action(self, **kwargs):
resource_id = self._get_flavor_id_from_environ(kwargs.get('environ'))
resource_info = kwargs.get('request_body')
LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
consts.OPERATION_TYPE_ACTION, resource_id, resource_info)
return consts.OPERATION_TYPE_ACTION, resource_id, resource_info
def _process_extra_spec(self, **kwargs):
environ = kwargs.get('environ')
resource_id = self._get_flavor_id_from_environ(environ)
operation_type = kwargs.get('operation_type')
if operation_type == consts.OPERATION_TYPE_DELETE:
extra_spec = proxy_utils.get_routing_match_value(
environ, 'extra_spec')
resource_dict = {consts.ACTION_EXTRASPECS_DELETE: extra_spec}
resource_info = json.dumps(resource_dict)
else:
resource_info = kwargs.get('request_body')
LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
operation_type, resource_id, resource_info)
return consts.OPERATION_TYPE_ACTION, resource_id, resource_info
    def _process_keypairs(self, **kwargs):
        """Handle a nova keypair request.

        Derives the composite resource id ("name/user_id") and the keypair
        payload.  For POST (mapped to CREATE) the name and public key come
        from the request/response bodies; for other operations the name is
        taken from the routing match and the payload is empty.
        """
        resource_info = {}
        user_id = None
        environ = kwargs.get('environ')
        operation_type = kwargs.get('operation_type')
        if operation_type == consts.OPERATION_TYPE_POST:
            # A POST creates a keypair; report it upstream as CREATE.
            operation_type = consts.OPERATION_TYPE_CREATE
            request = json.loads(kwargs.get('request_body'))
            resource_info = request[kwargs.get('resource_type')]
            if 'public_key' not in resource_info:
                # need to get the public_key from response
                # (nova generates the key when none was supplied)
                resp = json.loads(kwargs.get('response_body'))
                resp_info = resp.get(kwargs.get('resource_type'))
                resource_info['public_key'] = resp_info.get('public_key')
            if 'user_id' in resource_info:
                user_id = resource_info['user_id']
            resource_id = resource_info['name']
        else:
            # Non-POST: keypair name comes from the routed URL segment.
            resource_id = proxy_utils.get_routing_match_value(
                environ, consts.RESOURCE_TYPE_COMPUTE_KEYPAIR)
            user_id = proxy_utils.get_user_id(environ)
        if user_id is None:
            # Fall back to the authenticated user from the request headers.
            user_id = environ.get('HTTP_X_USER_ID', '')
        # resource_id = "name/user_id"
        resource_id = utils.keypair_construct_id(resource_id, user_id)
        resource_info = json.dumps(resource_info)
        LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
                 operation_type, resource_id, resource_info)
        return operation_type, resource_id, resource_info
def _process_quota(self, **kwargs):
environ = kwargs.get('environ')
resource_id = self.get_resource_id_from_link(
kwargs.get('request_header'))
resource_type = kwargs.get('resource_type')
operation_type = kwargs.get('operation_type')
if operation_type == consts.OPERATION_TYPE_DELETE:
resource_info = {}
else:
request = json.loads(kwargs.get('request_body'))
if resource_type in self.RESOURCE_TYPE_MAP:
resource_info = request[self.RESOURCE_TYPE_MAP.get(
resource_type)]
else:
resource_info = request[resource_type]
# add user_id to resource if it is specified
user_id = proxy_utils.get_user_id(environ)
if user_id is not None:
resource_info['user_id'] = user_id
resource_info = json.dumps(resource_info)
LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
operation_type, resource_id, resource_info)
return operation_type, resource_id, resource_info
def _enqueue_work(self, environ, request_body, response):
LOG.info("enqueue_work")
request_header = self.get_request_header(environ)
operation_type = proxy_utils.get_operation_type(environ)
resource_type = self._get_resource_type_from_environ(environ)
resource_tag = self._get_resource_tag_from_header(request_header,
operation_type,
resource_type)
handler = self._resource_handler[resource_tag]
operation_type, resource_id, resource_info = handler(
environ=environ,
operation_type=operation_type,
resource_type=resource_type,
request_header=request_header,
request_body=request_body,
response_body=response.body)
try:
utils.enqueue_work(self.ctxt,
self.ENDPOINT_TYPE,
resource_type,
resource_id,
operation_type,
resource_info)
except exception.ResourceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
class SysinvAPIController(APIController):
    """Proxy controller for sysinv (platform) API requests.

    Successful mutating requests are enqueued for synchronization and
    subscribers are notified.
    """

    ENDPOINT_TYPE = consts.ENDPOINT_TYPE_PLATFORM

    # For POSTs of these resource types the id is not available in the
    # Location header; it is read from the request body under this key.
    RESOURCE_ID_MAP = {
        consts.RESOURCE_TYPE_SYSINV_SNMP_TRAPDEST: 'ip_address',
        consts.RESOURCE_TYPE_SYSINV_SNMP_COMM: 'community'
    }

    # Response statuses considered successful and worth syncing.
    OK_STATUS_CODE = [
        webob.exc.HTTPOk.code,
        webob.exc.HTTPNoContent.code
    ]

    def __init__(self, app, conf):
        super(SysinvAPIController, self).__init__(app, conf)
        # Route platform-endpoint responses to our processor.
        self.response_hander_map = {
            self.ENDPOINT_TYPE: self._process_response
        }

    def _process_response(self, environ, request_body, response):
        """Enqueue sync work and notify when the response is successful."""
        if self.get_status_code(response) in self.OK_STATUS_CODE:
            self._enqueue_work(environ, request_body, response)
            self.notify(environ, self.ENDPOINT_TYPE)
        return response

    def _enqueue_work(self, environ, request_body, response):
        """Derive (type, id, payload) from the request and queue it for sync.

        Firewall rules and certificates carry their id in the response
        body (signature fields); other resources take it from the request
        body or the Location header.
        """
        LOG.info("enqueue_work")
        resource_info = {}
        request_header = self.get_request_header(environ)
        operation_type = proxy_utils.get_operation_type(environ)
        resource_type = self._get_resource_type_from_environ(environ)
        # Firewall rule and certificate need special processing
        # p_resource_info is what gets logged; raw binary payloads are
        # suppressed from the log.
        p_resource_info = 'suppressed'
        if resource_type == consts.RESOURCE_TYPE_SYSINV_FIREWALL_RULES:
            resource_info['payload'] = request_body
            resource = json.loads(response.body)[resource_type]
            resource_id = resource['firewall_sig']
        elif resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE:
            resource_info['payload'] = request_body
            resource_info['content_type'] = environ.get('CONTENT_TYPE')
            resource = json.loads(response.body)[resource_type]
            resource_id = resource['signature']
        else:
            if (operation_type == consts.OPERATION_TYPE_POST and
                    resource_type in self.RESOURCE_ID_MAP):
                # need to get the id from the request data since it is
                # not available in the header
                rid = self.RESOURCE_ID_MAP.get(resource_type)
                resource_id = json.loads(request_body)[rid]
            else:
                resource_id = self.get_resource_id_from_link(request_header)
            if operation_type != consts.OPERATION_TYPE_DELETE:
                resource_info['payload'] = json.loads(request_body)
            p_resource_info = resource_info
        LOG.info("Resource id: (%s), type: (%s), info: (%s)",
                 resource_id, resource_type, p_resource_info)
        try:
            utils.enqueue_work(self.ctxt,
                               self.ENDPOINT_TYPE,
                               resource_type,
                               resource_id,
                               operation_type,
                               json.dumps(resource_info))
        except exception.ResourceNotFound as e:
            # Translate to a 404 for the API caller.
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class CinderAPIController(APIController):
    """Proxy controller for the cinder (volume) endpoint.

    Successful mutations are enqueued for synchronization and subscribers
    are notified; quota GETs requesting usage are rewritten instead.
    """

    ENDPOINT_TYPE = consts.ENDPOINT_TYPE_VOLUME

    # Maps the routed resource type onto the key used in request bodies.
    RESOURCE_TYPE_MAP = {
        consts.RESOURCE_TYPE_VOLUME_QUOTA_SET: 'quota_set',
    }

    # Response statuses considered successful.
    OK_STATUS_CODE = [
        webob.exc.HTTPOk.code,
    ]

    def __init__(self, app, conf):
        super(CinderAPIController, self).__init__(app, conf)
        self.response_hander_map = {self.ENDPOINT_TYPE: self._process_response}

    def _process_response(self, environ, request_body, response):
        """Post-process a cinder response; sync successful mutations."""
        if self.get_status_code(response) not in self.OK_STATUS_CODE:
            return response
        op = proxy_utils.get_operation_type(environ)
        if op == consts.OPERATION_TYPE_GET:
            # Only usage queries need their response rewritten.
            if proxy_utils.show_usage(environ):
                response = self._update_response(environ, request_body,
                                                 response)
        else:
            self._enqueue_work(environ, request_body, response)
            self.notify(environ, self.ENDPOINT_TYPE)
        return response

    def _enqueue_work(self, environ, request_body, response):
        """Queue a successful cinder mutation for synchronization."""
        header = self.get_request_header(environ)
        res_id = self.get_resource_id_from_link(header)
        res_type = self._get_resource_type_from_environ(environ)
        op = proxy_utils.get_operation_type(environ)
        if op == consts.OPERATION_TYPE_DELETE:
            payload = {}
        else:
            body = json.loads(request_body)
            payload = body[self.RESOURCE_TYPE_MAP.get(res_type, res_type)]
        payload = json.dumps(payload)
        LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
                 op, res_id, payload)
        try:
            utils.enqueue_work(self.ctxt, self.ENDPOINT_TYPE, res_type,
                               res_id, op, payload)
        except exception.ResourceNotFound as e:
            # Translate to a 404 for the API caller.
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class NeutronAPIController(APIController):
    """Proxy controller for the neutron (network) endpoint.

    Successful requests are enqueued for synchronization and subscribers
    are notified.
    """

    ENDPOINT_TYPE = consts.ENDPOINT_TYPE_NETWORK

    # Maps the routed resource type onto the key used in request bodies.
    RESOURCE_TYPE_MAP = {
        consts.RESOURCE_TYPE_NETWORK_QUOTA_SET: 'quota',
    }

    # the following fields will be inserted to the resource_info if
    # they are not presented in the request but are provided in the
    # response
    DESIRED_FIELDS = ['tenant_id', 'project_id']

    # Response statuses considered successful and worth syncing.
    OK_STATUS_CODE = [
        webob.exc.HTTPOk.code,
        webob.exc.HTTPCreated.code,
        webob.exc.HTTPNoContent.code
    ]

    def __init__(self, app, conf):
        super(NeutronAPIController, self).__init__(app, conf)
        self.response_hander_map = {
            self.ENDPOINT_TYPE: self._process_response
        }

    def _process_response(self, environ, request_body, response):
        """Enqueue sync work and notify when the response is successful."""
        if self.get_status_code(response) in self.OK_STATUS_CODE:
            self._enqueue_work(environ, request_body, response)
            self.notify(environ, self.ENDPOINT_TYPE)
        return response

    def _enqueue_work(self, environ, request_body, response):
        """Derive (type, id, payload) from the request and queue it for sync.

        POSTs take the new resource id from the response body; other
        operations take it from the Location header.  For POSTs, any
        DESIRED_FIELDS missing from the request are backfilled from the
        response so the payload carries project identification.
        """
        request_header = self.get_request_header(environ)
        resource_type = self._get_resource_type_from_environ(environ)
        operation_type = proxy_utils.get_operation_type(environ)
        if operation_type == consts.OPERATION_TYPE_POST:
            resource = json.loads(response.body)[resource_type]
            resource_id = resource['id']
        else:
            resource_id = self.get_resource_id_from_link(request_header)
        if operation_type == consts.OPERATION_TYPE_DELETE:
            resource_info = {}
        else:
            request = json.loads(request_body)
            if resource_type in self.RESOURCE_TYPE_MAP:
                original_type = self.RESOURCE_TYPE_MAP.get(
                    resource_type)
            else:
                original_type = resource_type
            resource_info = request[original_type]
            if operation_type == consts.OPERATION_TYPE_POST:
                resp_info = json.loads(response.body)[original_type]
                for f in self.DESIRED_FIELDS:
                    if f not in resource_info and f in resp_info:
                        resource_info[f] = resp_info[f]
        resource_info = json.dumps(resource_info)
        LOG.info("Operation:(%s), resource_id:(%s), resource_info:(%s)",
                 operation_type, resource_id, resource_info)
        try:
            utils.enqueue_work(self.ctxt,
                               self.ENDPOINT_TYPE,
                               resource_type,
                               resource_id,
                               operation_type,
                               resource_info)
        except exception.ResourceNotFound as e:
            # Translate to a 404 for the API caller.
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
class OrchAPIController(APIController):
    """Proxy controller for orchestration-level compute/network responses.

    Successful responses are passed through ``_update_response`` so their
    content can be rewritten; everything else is returned untouched.
    """

    # Response statuses considered successful.
    OK_STATUS_CODE = [
        webob.exc.HTTPOk.code,
    ]

    def __init__(self, app, conf):
        super(OrchAPIController, self).__init__(app, conf)
        handler = self._process_response
        self.response_hander_map = {
            consts.ENDPOINT_TYPE_COMPUTE: handler,
            consts.ENDPOINT_TYPE_NETWORK: handler
        }

    def _process_response(self, environ, request_body, response):
        """Rewrite successful responses; pass failures through untouched."""
        if self.get_status_code(response) not in self.OK_STATUS_CODE:
            return response
        return self._update_response(environ, request_body, response)
| [
"dtroyer@gmail.com"
] | dtroyer@gmail.com |
c09b4455555d767d1f1a726e68eb69f6e59bbe88 | 02f72e485de03d5bc39ed16c068b33d4ec8825e3 | /bat.py | 4d104a711a033cd8a3b4fa16ec80b7f40387faae | [] | no_license | Gatete-Bruno/Bat-Git | 3a4386f7cff014947e63933e31a46cbb004c4318 | 8ac35e0b2b480fe8fe95e6ba9c2667ddb77231bb | refs/heads/main | 2023-04-21T02:21:34.257383 | 2021-04-29T11:06:19 | 2021-04-29T11:06:19 | 362,784,684 | 0 | 0 | null | 2021-04-29T13:00:43 | 2021-04-29T10:52:48 | JavaScript | UTF-8 | Python | false | false | 32 | py | print("Hello to the Bat WOrld")
| [
"catobrunoisrael@gmail.com"
] | catobrunoisrael@gmail.com |
11108b3b42eb57c2518670f455c505757c3df7cd | 237b3c8260ffc8b8094b9974afd589af2227fbcd | /Week1/DecisionTreeExercise.py | ff464f332a0009b2da1fa8ad5dbcd8bd6646ee23 | [] | no_license | Kosernik/ML-HSE-Ya | 05b9e2da2aa3447e481c52d0c544d4dca3b1d88e | e52954041d78afa66f67bed95850318449e7e99a | refs/heads/master | 2022-11-22T18:18:13.998295 | 2020-07-27T18:33:12 | 2020-07-27T18:33:12 | 277,654,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | import pandas as pd
from sklearn.tree import DecisionTreeClassifier

# Load the Titanic dataset from a hard-coded local path.
data = pd.read_csv(r'C:\MyProjects\python\ML-HSE-Ya\data\titanic.csv')
# print(data.head(10))
# PassengerId,Survived,Name,SibSp,Parch,Ticket,Cabin,Embarked
# Drop the columns that are not used as model features
# ('Survived' is kept as the target).
data.drop('PassengerId', axis=1, inplace=True)
# data.drop('Survived', axis=1, inplace=True)
data.drop('Name', axis=1, inplace=True)
data.drop('SibSp', axis=1, inplace=True)
data.drop('Parch', axis=1, inplace=True)
data.drop('Ticket', axis=1, inplace=True)
data.drop('Cabin', axis=1, inplace=True)
data.drop('Embarked', axis=1, inplace=True)
# Remove rows with missing values (e.g. missing Age).
data.dropna(axis=0, inplace=True)
# Encode sex as a numeric feature: male -> 1, everything else -> 0.
data['Sex'] = data['Sex'].apply(lambda x: 1 if x == 'male' else 0)
print(data.head(10))
# Train a decision tree and report the two most important features.
decTree = DecisionTreeClassifier(random_state=241)
decTree.fit(data[['Pclass', 'Sex', 'Age', 'Fare']], data['Survived'])
importances = decTree.feature_importances_
lst = [i for i in importances]
lst.sort(reverse=True)
print(type(importances))
print(importances)
print(lst[:2])
| [
"kornilovsergei28@gmail.com"
] | kornilovsergei28@gmail.com |
32f18c3503462e8b9e16986958e5e764039917e6 | 06ffa1b0c56839163abe8a3224e0139aab8b37e9 | /train_slowfast.py | ddc5e2fbab3d559cf594d9be6069ad292606393f | [] | no_license | vietanhdev/pushup-counter | cb41f48c7f10ba51429bf7cae9bf8fb31eee5faf | 68c57a4fd1a414c2683b2e6328fd4d6223dbcd81 | refs/heads/master | 2023-01-09T00:20:31.581598 | 2020-11-18T04:10:39 | 2020-11-18T04:10:39 | 311,329,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,883 | py | import sys
import os
import json
from pathlib import Path
import tensorflow as tf
from models import build_slowfast_model
from losses import focal_loss
from data_sequence import DataSequence
# Check configuration file
# --- Command-line handling ---------------------------------------------
args = sys.argv
if len(args) != 2:
    print("Usage: python train.py <path-to-config-file.json>")
    exit(0)

# Check and load config file
config_file = args[1]
if not os.path.isfile(config_file):
    print("Config is not a file: {}".format(config_file))
    # BUG FIX: execution previously fell through and crashed in open();
    # abort explicitly with a non-zero status instead.
    exit(1)
with open(config_file, "r") as infile:
    config = json.load(infile)

# Create experiment folder for checkpoints and logs
experiment_folder = config["experiment_folder"]
Path(experiment_folder).mkdir(parents=True, exist_ok=True)

# Training data sequence
train_data = DataSequence(config["data"]["train_images"],
                          config["data"]["train_labels"],
                          batch_size=config["train_params"]["train_batchsize"],
                          seq_len=config["model"]["seq_len"],
                          y_steps=2)

# Validation data sequence
val_data = DataSequence(config["data"]["val_images"],
                        config["data"]["val_labels"],
                        batch_size=config["train_params"]["val_batchsize"],
                        seq_len=config["model"]["seq_len"],
                        y_steps=2)

# Build model, optionally resuming from pretrained weights
model = build_slowfast_model()
if config["train_params"]["load_weights"]:
    model.load_weights(config["train_params"]["pretrained_weights"])

# Compile for training
opt = tf.keras.optimizers.Adam(lr=config["train_params"]["learning_rate"])
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# Fit with per-epoch checkpointing and TensorBoard logging
model_callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(experiment_folder, 'model.{epoch:03d}.h5')),
    tf.keras.callbacks.TensorBoard(log_dir=os.path.join(experiment_folder, 'logs')),
]
model.fit(train_data, validation_data=val_data, epochs=config["train_params"]["n_epochs"], callbacks=model_callbacks, shuffle=True)
"vietanh.dev@gmail.com"
] | vietanh.dev@gmail.com |
33956c8ebe76c287159bd51df8bc7b8ad9dc0954 | 9b6506075cd5bd8220189aa536c29ba0b60d953d | /tests/connections.py | 63d32bc8ac2851db9eda8360b7c5b35f7c6a6655 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | julienmaffre/CCF | 5ba38d796137d7aedef544c9241a49590ac7cd41 | b571047136017a43ff548ed728c72efa550a4592 | refs/heads/master | 2020-09-08T04:57:44.258790 | 2019-11-11T16:28:43 | 2019-11-11T16:28:43 | 221,022,339 | 0 | 0 | Apache-2.0 | 2019-11-11T16:26:32 | 2019-11-11T16:26:31 | null | UTF-8 | Python | false | false | 3,261 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the Apache 2.0 License.
import sys
import os
import infra.proc
import e2e_args
import getpass
import os
import time
import logging
import multiprocessing
from random import seed
import infra.ccf
import infra.proc
import infra.jsonrpc
import json
import contextlib
import resource
import psutil
import random
from loguru import logger as LOG
def run(args):
    """Stress-test a node's file-descriptor handling.

    Lowers the primary node's RLIMIT_NOFILE, opens more client
    connections than the limit allows, verifies the node survives and
    still serves requests, then repeats after descriptors are released.
    """
    hosts = ["localhost"]
    with infra.ccf.network(
        hosts, args.build_dir, args.debug_nodes, args.perf_nodes, pdb=args.pdb
    ) as network:
        check = infra.checker.Checker()
        network.start_and_join(args)
        primary, others = network.find_nodes()
        primary_pid = primary.remote.remote.proc.pid
        num_fds = psutil.Process(primary_pid).num_fds()
        # Cap the node at 50 fds above its current usage.
        max_fds = num_fds + 50
        LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
        resource.prlimit(primary_pid, resource.RLIMIT_NOFILE, (max_fds, max_fds))
        LOG.info(f"set max fds to {max_fds} on {primary_pid}")
        # Deliberately attempt twice as many connections as can fit.
        nb_conn = (max_fds - num_fds) * 2
        clients = []
        with contextlib.ExitStack() as es:
            for i in range(nb_conn):
                try:
                    clients.append(es.enter_context(primary.user_client(format="json")))
                    LOG.info(f"Connected client {i}")
                except OSError:
                    # Expected once the fd limit is exhausted.
                    LOG.error(f"Failed to connect client {i}")
            # A random surviving client must still be able to make an RPC.
            c = clients[int(random.random() * len(clients))]
            check(c.rpc("LOG_record", {"id": 42, "msg": "foo"}), result=True)
            assert (
                len(clients) >= max_fds - num_fds - 1
            ), f"{len(clients)}, expected at least {max_fds - num_fds - 1}"
            num_fds = psutil.Process(primary_pid).num_fds()
            LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
        LOG.info(f"Disconnecting clients")
        time.sleep(1)
        num_fds = psutil.Process(primary_pid).num_fds()
        LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
        # Second pass: only as many connections as now fit under the limit.
        clients = []
        with contextlib.ExitStack() as es:
            for i in range(max_fds - num_fds):
                clients.append(es.enter_context(primary.user_client(format="json")))
                LOG.info(f"Connected client {i}")
            c = clients[int(random.random() * len(clients))]
            check(c.rpc("LOG_record", {"id": 42, "msg": "foo"}), result=True)
            assert (
                len(clients) >= max_fds - num_fds - 1
            ), f"{len(clients)}, expected at least {max_fds - num_fds - 1}"
            num_fds = psutil.Process(primary_pid).num_fds()
            LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
        LOG.info(f"Disconnecting clients")
        time.sleep(1)
        num_fds = psutil.Process(primary_pid).num_fds()
        LOG.info(f"{primary_pid} has {num_fds} open file descriptors")
if __name__ == "__main__":
def add(parser):
parser.add_argument(
"-p",
"--package",
help="The enclave package to load (e.g., libloggingenc)",
default="libloggingenc",
)
args = e2e_args.cli_args(add)
run(args)
| [
"noreply@github.com"
] | julienmaffre.noreply@github.com |
1a9d307b7fe4a46a212b922e933837a08ceb6308 | 7b995c200ebf41274d51f8cd3cedb882754e79e5 | /uploadapp/views.py | 5c57d8236f432ec6c02427484df6cd1cfedb2f03 | [] | no_license | masoomkonwar/pan-extraction-api | 62fd7cdbbf93647d2f09268bcd23205bc634a2ed | 2d6896a33fc83f1578d27815d68edd5dd4223839 | refs/heads/main | 2023-05-26T19:22:30.653686 | 2021-06-11T14:57:15 | 2021-06-11T14:57:15 | 315,983,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | from django.shortcuts import render
from .models import User
from django.core.files.storage import FileSystemStorage
import re
import cv2
import pytesseract
from django.conf import settings
from django.http import JsonResponse
def index(request):
    """Render the image-upload form page."""
    return render(request,'index.html')
def uploadImage(request):
    """Extract a PAN number from an uploaded image via OCR.

    Saves the uploaded file, runs Tesseract word-level OCR over it, and
    returns the last token matching the PAN format as JSON.  Returns a
    JSON error with HTTP 404 when no valid PAN is detected (previously
    this path crashed with an UnboundLocalError on ``pno``).
    """
    pytesseract.pytesseract.tesseract_cmd = "C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe"
    pi = request.FILES['image']
    fs = FileSystemStorage()
    print("request handling...")
    fs.save(pi.name,pi)
    img=cv2.imread(settings.MEDIA_ROOT+'\\'+pi.name)
    img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    himg , wimg, _ = img.shape
    boxes = pytesseract.image_to_data(img)
    print(boxes)
    pno = None  # BUG FIX: was left unbound when no valid PAN was found
    for x,b in enumerate(boxes.splitlines()) :
        if x!=0:
            b = b.split()
            # image_to_data rows have 12 columns; the 12th is the text token.
            if len(b)==12 and isValidPanCardNo(b[11]):
                print(b[11])
                pno = b[11]
    print(img.shape)
    print(pi.name)
    print(pi.size)
    if pno is None:
        # No token matched the PAN format anywhere in the image.
        return JsonResponse({"error": "no valid PAN number detected"},
                            status=404)
    return JsonResponse({
        "pan number" : pno
    })
# Create your views here.
def isValidPanCardNo(panCardNo):
    """Return True if *panCardNo* is a syntactically valid PAN.

    A PAN is exactly 10 characters: five uppercase letters, four digits
    and one trailing uppercase letter (e.g. "ABCDE1234F").

    Args:
        panCardNo: candidate string, or None.

    Returns:
        bool: True when the value matches the PAN format, else False.
    """
    # A missing value is never valid.
    if panCardNo is None:
        return False
    # fullmatch anchors the pattern to the whole string, which also
    # enforces the exact length of 10 — no separate len() check needed.
    return re.fullmatch(r"[A-Z]{5}[0-9]{4}[A-Z]", panCardNo) is not None
"msmkonwar@gmail.com"
] | msmkonwar@gmail.com |
83e803ba1d789263d3f30feabe3561c26ba87038 | 2331ef4f4ce155d32727a7f5de8f313eace80433 | /piannBlog/urls.py | 4546a425a1472d14fc36cf85111ee527dec571d3 | [] | no_license | piann/FirstBlog | 7fa3e465a4c81fe9dcccea0b3f4a8ef58160a348 | 4284f6524425ad59afa7bbe3cca211518935af3c | refs/heads/master | 2022-08-01T15:38:53.841660 | 2022-07-12T05:02:25 | 2022-07-12T05:02:25 | 146,428,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | """piannBlog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('', include('blog.urls')),
path('admin_admin_admin/', admin.site.urls),
]
| [
"dszzang21@korea.ac.kr"
] | dszzang21@korea.ac.kr |
a56284f885fa48ed63884b6ce71bc6af019845e8 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /Others/past/past201912-open/g/main.py | 54e6af8dac7ce0af88307994ac3cc8aa07ba57a3 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 1,159 | py | # -*- coding: utf-8 -*-
def get_pairs(group):
    """Return the non-decreasing pairs drawn from *group*.

    Pairs (p1, p2) produced by itertools.combinations are kept only when
    p1 <= p2, so the result indexes into an upper-triangular cost table.
    """
    from itertools import combinations
    return [(p1, p2) for p1, p2 in combinations(group, 2) if p1 <= p2]
def main():
    """Partition n members into 3 groups maximizing the pairwise score sum.

    Reads an upper-triangular score matrix from stdin, brute-forces every
    assignment of members to three groups (3**n cases) and prints the
    best total of within-group pair scores.
    """
    import sys
    from itertools import product

    input = sys.stdin.readline

    n = int(input())
    a = []
    for i in range(n):
        # Pad the left of each row so a[p1][p2] works for p1 < p2.
        row = [0] * (i + 1) + list(map(int, input().split()))
        a.append(row)

    best = -(10**18)
    for assignment in product(range(3), repeat=n):
        buckets = [[], [], []]
        for member, g in enumerate(assignment):
            buckets[g].append(member)
        total = 0
        for bucket in buckets:
            for p1, p2 in get_pairs(bucket):
                total += a[p1][p2]
        best = max(best, total)
    print(best)
if __name__ == "__main__":
main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
c3e09bab0bfed296d9c0504d22539054f33298af | 3b60e6f4bbc011003ac4929f01eb7409918deb79 | /Analysis_v1/Simulation/Pythia/RSG/CP2RSGPythia8/RSGfragments/RSGravitonToGammaGamma_kMpl01_M_4750_TuneCP2_13TeV_pythia8_cfi.py | fb6cecf4012af20bf07d74d1f2cf406820e124f7 | [] | no_license | uzzielperez/Analyses | d1a64a4e8730325c94e2bc8461544837be8a179d | 1d66fa94763d7847011ea551ee872936c4c401be | refs/heads/master | 2023-02-09T04:54:01.854209 | 2020-09-07T14:57:54 | 2020-09-07T14:57:54 | 120,850,137 | 0 | 0 | null | 2020-06-17T16:48:16 | 2018-02-09T03:14:04 | C++ | UTF-8 | Python | false | false | 1,157 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP2Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP2SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:all = on',
'ExtraDimensionsG*:kappaMG = 0.541643794389',
'5100039:m0 = 4750.0',
'5100039:onMode = off',
'5100039:onIfAny = 22',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP2Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"uzzie.perez@cern.ch"
] | uzzie.perez@cern.ch |
8b0b16fc46a5ad36a61dc3f734fe9c8d0615b2d7 | 1e8795d1ab1bcb58518dde710b6df94ff4df0259 | /day2/for_loop3.py | 6e6352e48d70d99fcb005bd1a71fdaa6391016e4 | [] | no_license | rcas99/BeautifulPatternsMIT | b65f491fb4a770c8c10232d8c54cb4d94d57421c | c4ec3998578c9fb5cad73d5ca1863216c4ea0fc1 | refs/heads/master | 2022-12-11T11:26:49.271260 | 2020-09-08T18:37:43 | 2020-09-08T18:37:43 | 294,443,852 | 1 | 0 | null | 2020-09-10T15:08:07 | 2020-09-10T15:08:06 | null | UTF-8 | Python | false | false | 202 | py | """
Conditionals programming exercises
For loop
"""
x = int(input("Escoge un numero positivo 'x': "))
print("La serie del 10 al numero positivo 'X' es:")
for number in range(10, x):
print(number)
| [
"a01400835@itesm.mx"
] | a01400835@itesm.mx |
5f3de75aad1afc4cfe886a3e0fe4d562ec53a65a | 1a03664e4dd5f5fb12434d32129e612a76bf6d61 | /core/loaders.py | ec96d2b6531d033017d8a3b7ea1fae80ede688e7 | [] | no_license | SergioAnd95/refactored-adventure | 6601dabaa0b7125a94f0010157e17862c84c1d32 | 553d1425b6d59f69b9c526eecff07df0f18835b4 | refs/heads/master | 2020-03-30T09:05:05.893448 | 2018-10-17T15:10:40 | 2018-10-17T15:10:40 | 151,059,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | import importlib
from settings import settings
def autodiscover_app_module(module_name):
    """Import ``<app>.<module_name>`` for every installed app.

    Invoked for its side effects: importing the module lets each app
    register its handlers at startup.
    """
    for app_name in settings.INSTALLED_APPS:
        importlib.import_module(f'{app_name}.{module_name}')
def discover_urls():
    """
    Find and return all routes
    from apps
    :return: list
    """
    patterns = []
    for app in settings.INSTALLED_APPS:
        try:
            module = __import__(f'{app}.urls', globals(), locals(),
                                ['urlpatterns'], 0)
        except ModuleNotFoundError:
            # Apps without a urls module simply contribute no routes.
            continue
        patterns += module.urlpatterns
    return patterns
| [
"developinc@yandex.ru"
] | developinc@yandex.ru |
a5f1cb849664a352aca6565f82e557a792243459 | 8dac93fb21c9ae8254c814ae55ca23b06fe85326 | /ex6.py | 753f2a2e438bed0e9fd93c880b0f012b5a8c2d72 | [] | no_license | duanxiaoyu/python_learing | 745c4a5a9dfa37da5777287c3cec4870e75032ca | 5b6f7d453417f9e4da3a3581f16600ce0c3b6576 | refs/heads/master | 2021-01-17T08:16:36.531178 | 2016-07-28T00:01:44 | 2016-07-28T00:01:44 | 64,262,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | x = "there are %d types of people" %10
binary = 'binary'
do_not = "don't"
y = "thoese who know %s and those who %s"%(binary, do_not)
# print x and y (Python 2 print statements)
print x
print y
hilarious = False
joke_evaluation = "isn't that joke so funny ?! %r"
# print the joke string with the %r value substituted in
print joke_evaluation %hilarious
w = "this is a left side of ..."
e = "a string with a right side"
# print the two strings concatenated together
print w + e
| [
"yuzhouxiaoxiao@163.com"
] | yuzhouxiaoxiao@163.com |
f975e0a4d12496012b500813cfc94786bb7d9803 | 644b13f90d43e9eb2fae0d2dc580c7484b4c931b | /2019 baekjoon/Math/1676_factorial2.py | 2481524d4c177197c939031d86cfa79bd6a652e5 | [] | no_license | yeonnseok/ps-algorithm | c79a41f132c8016655719f74e9e224c0870a8f75 | fc9d52b42385916344bdd923a7eb3839a3233f18 | refs/heads/master | 2020-07-09T11:53:55.786001 | 2020-01-26T02:27:09 | 2020-01-26T02:27:09 | 203,962,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | # def factorial(num):
# if num == 1:
# return 1
# return num * factorial(num - 1)
#
#
# def factorial_count(num):
# target = list(str(factorial(num)))
# count = 0
# for i in reversed(range(len(target))):
# if target[i] == '0':
# count += 1
# else:
# return count
#
#
# def main():
# num = int(input())
# print(factorial_count(num))
#
#
# main()
def main():
    """Count trailing zeros of n! by summing floor(n / 5**k) (Legendre)."""
    limit = int(input())
    zeros = 0
    power = 5
    while power <= limit:
        # int(limit / power) mirrors the original arithmetic exactly.
        zeros += int(limit / power)
        power *= 5
    print(zeros)
main() | [
"smr603@snu.ac.kr"
] | smr603@snu.ac.kr |
05efdcf945261287d7b9713791800d5a330653ad | 83d1cd17b8a01bc3f071009f9d234beb2e4933ff | /todo/urls.py | c2e2ceed39e35ab7b9f365bafe4a0ae192116ea1 | [] | no_license | jorgoni/proyect-djan | bd7877c16c67ca1a86f303901ba3d55843c58e5f | 9b49ada396e60eb3f7137e73abbcb460208d8d57 | refs/heads/master | 2023-05-12T11:47:06.232892 | 2021-06-03T18:24:04 | 2021-06-03T18:24:04 | 373,590,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # -*- coding: utf-8 -*-
from django.urls import path
from . import views
urlpatterns=[
path("", views.home, name="home"),
path("homei", views.home, name="homei"),
path("home/<int:arbol_nodo>", views.homen, name="homen"),
path("respuesta/<int:arbol_nodo>", views.respuesta, name="respuesta"),
path("agregar_nodo/<int:arbol_nodo>", views.agregar_nodo, name="agregar_nodo"),
path("homsi/<int:arbol_nodo>", views.homsi, name="homsi"),
path("homno/<int:arbol_nodo>", views.homno, name="homno"),
path("hompsi/<int:arbol_nodo>", views.hompsi, name="hompsi"),
path("hompno/<int:arbol_nodo>", views.hompno, name="hompno"),
path("homnose/<int:arbol_nodo>", views.homnose, name="homnose"),
path("oportunidad/<int:arbol_nodo>", views.oportunidad, name="oportunidad"),
path("agregar/", views.agregar, name="agregar"),
path("eliminar/<int:tarea_id>/", views.eliminar, name="eliminar"),
path("editar/<int:tarea_id>/", views.editar, name="editar"),
] | [
"georgecruzortiz@gmail.com"
] | georgecruzortiz@gmail.com |
7da24d5460361be30bb65e4ed99d86609b0e2ebe | cb46a971b4340e5c6720f53ce3ded5e772b170b3 | /users/pipeline.py | e539d338fde12b9bc3b6ee30925f04382eebe68b | [] | no_license | DenisBilobram/Django-course | 0a006c20409290aa3aa721837cd9eaafd115ff7e | aa04e6a92eba134978e0deb85d529dda51fb6d62 | refs/heads/master | 2023-02-09T02:39:44.668803 | 2021-01-01T14:51:07 | 2021-01-01T14:51:07 | 323,090,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | from collections import OrderedDict
from datetime import datetime
from urllib.parse import urlencode, urlunparse
from urllib.request import urlopen
from .forms import ProfileEditForm
import requests
from django.utils import timezone
from social_core.exceptions import AuthForbidden
from users.models import Profile
def save_user_profile(backend, user, response, *args, **kwargs):
    """social-auth pipeline step: populate a Profile from the VK API.

    Only runs for the vk-oauth2 backend.  On first login it fetches the
    user's VK profile, fills in gender/about/age/avatar, rejects users
    younger than 18 and saves the profile.

    Fixes applied:
    - female branch previously set ``profile.gendre`` (typo), silently
      creating a dead attribute instead of setting ``gender``;
    - age was set on ``user.profile`` (a separate instance) and never
      saved — it is now set on the ``profile`` instance that gets saved;
    - the avatar file handle was never closed (``out.close`` without
      parentheses) — a ``with`` block now guarantees closure.
    """
    if backend.name != 'vk-oauth2':
        return
    profile, created = Profile.objects.get_or_create(user=user)
    api_url = urlunparse(('https',
                          'api.vk.com',
                          '/method/users.get',
                          None,
                          urlencode(OrderedDict(fields=','.join(('bdate', 'sex', 'about', 'photo_400_orig')),
                                                access_token=response['access_token'],
                                                v='5.92')),
                          None
                          ))
    resp = requests.get(api_url)
    if resp.status_code != 200:
        return
    if created:
        data = resp.json()['response'][0]
        # VK encodes sex as 1 = female, 2 = male, 0 = unspecified.
        if data['sex']:
            if data['sex'] == 1:
                profile.gender = Profile.FEMALE
            if data['sex'] == 2:
                profile.gender = Profile.MALE
        if data['about']:
            profile.aboutMe = data['about']
        if data['bdate']:
            bdate = datetime.strptime(data['bdate'], '%d.%m.%Y').date()
            age = timezone.now().date().year - bdate.year
            profile.age = age
            if age < 18:
                # Under-age accounts are removed and authentication denied.
                user.delete()
                raise AuthForbidden('social_core.backends.vk.VKOAuth2')
        avatar = urlopen(data['photo_400_orig']).read()
        with open(f"media\\users_avatars\\{response['id']}.jpg", "wb") as out:
            out.write(avatar)
        profile.avatar = f"users_avatars/{response['id']}.jpg"
        profile.save()
        user.save()
| [
"bilobram.denis@gmail.com"
] | bilobram.denis@gmail.com |
83633aa10daa799984d01e9d3d3ade1d3c7f2a7e | 89d003625e43eec9de8ea376d3b2d8cd7206950a | /GyoiReport.py | e7f6ee60846a3200f64724711629c17405de15c1 | [
"Apache-2.0"
] | permissive | rajatkverma/GyoiThon | f263ac617402566b9c847ab8c11e633846112eb2 | 8f6d1f99b2e12875c6410ec07828cab938108ddd | refs/heads/master | 2021-04-09T13:27:52.603933 | 2018-03-16T02:07:15 | 2018-03-16T02:07:15 | 125,450,885 | 0 | 0 | null | 2018-03-16T02:10:23 | 2018-03-16T02:10:23 | null | UTF-8 | Python | false | false | 2,407 | py | #!/bin/env python
# -*- coding: utf-8 -*-
import sys
import glob
import configparser
import pandas as pd
from jinja2 import Environment, FileSystemLoader
# Create report.
# Create report.
class CreateReport:
    """Builds an HTML scan report from the CSV files GyoiThon produced."""

    def __init__(self):
        # Read config file.
        config = configparser.ConfigParser()
        try:
            config.read('./config.ini')
        except FileExistsError as err:
            # NOTE(review): configparser.read() does not raise
            # FileExistsError for a missing file (it returns an empty
            # list), so this handler likely never fires — verify intent.
            print('File exists error: {0}', err)
            sys.exit(1)
        # Output location/name, Jinja2 template, and the CSV column names
        # ('@'-separated in the config).
        self.report_path = config['GyoiReport']['report_path']
        self.report_name = config['GyoiReport']['report_name']
        self.template = config['GyoiReport']['template']
        self.header = str(config['GyoiReport']['header']).split('@')

    def create_report(self):
        """Merge all result CSVs and render them into one HTML report."""
        # Gather reporting items.
        csv_file_list = glob.glob(self.report_path + '*.csv')
        # Create DataFrame.
        content_list = []
        for file in csv_file_list:
            content_list.append(pd.read_csv(file, names=self.header, sep=','))
        # De-duplicate across files and order results by host then port.
        df_csv = pd.concat(content_list).drop_duplicates().sort_values(by=['ip', 'port'], ascending=True).reset_index(drop=True, col_level=1)
        items = []
        for idx in range(len(df_csv)):
            # '@' in the reference field separates multiple URLs; render
            # them as HTML line breaks.
            items.append({'ip_addr': df_csv.loc[idx, 'ip'],
                          'port': df_csv.loc[idx, 'port'],
                          'prod_name': df_csv.loc[idx, 'service'],
                          'vuln_name': df_csv.loc[idx, 'vuln_name'],
                          'type': df_csv.loc[idx, 'type'],
                          'description': df_csv.loc[idx, 'description'],
                          'exploit': df_csv.loc[idx, 'exploit'],
                          'target': df_csv.loc[idx, 'target'],
                          'payload': df_csv.loc[idx, 'payload'],
                          'ref': str(df_csv.loc[idx, 'reference']).replace('@', '<br>')})
        # Setting template.
        env = Environment(loader=FileSystemLoader('.'))
        template = env.get_template(self.template)
        pd.set_option('display.max_colwidth', -1)
        html = template.render({'title': 'GyoiThon Scan Report', 'items': items})
        with open(self.report_path + self.report_name, 'w') as fout:
            fout.write(html)
fout.write(html)
if __name__ == '__main__':
    # Entry point: build the HTML report from all collected CSV results.
    CreateReport().create_report()
    print('Finish!!')
| [
"noreply@github.com"
] | rajatkverma.noreply@github.com |
19d548e49df17fd5f2a942c7fc09007559ccdbf9 | 7cfa09e633c1eef8daaf9033dbbae609abe2b542 | /src/wifi/wscript | 2e5020b56bcd2a0792651f91ecd542a64512099b | [] | no_license | Ganz7/ns3_802.11aa | bb20878191d666eafd9e65e7947a9dfe5bb9ce04 | 8e655641e17376c65780f9441265ecbc4b1741bd | refs/heads/master | 2021-01-10T17:24:49.196147 | 2016-04-20T17:26:49 | 2016-04-20T17:26:49 | 55,539,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,487 | ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
def build(bld):
    """waf build rule for the ns-3 'wifi' module.

    Declares the module's source files, its unit-test library and the
    public headers it installs, then wires in optional GSL support,
    examples and Python bindings.
    """
    # The wifi module depends on these other ns-3 modules.
    obj = bld.create_ns3_module('wifi', ['network', 'internet', 'propagation', 'energy'])
    # Implementation files compiled into the module library.
    obj.source = [
        'model/wifi-information-element.cc',
        'model/wifi-information-element-vector.cc',
        'model/wifi-channel.cc',
        'model/wifi-mode.cc',
        'model/ssid.cc',
        'model/wifi-phy.cc',
        'model/wifi-phy-state-helper.cc',
        'model/error-rate-model.cc',
        'model/yans-error-rate-model.cc',
        'model/nist-error-rate-model.cc',
        'model/dsss-error-rate-model.cc',
        'model/interference-helper.cc',
        'model/yans-wifi-phy.cc',
        'model/yans-wifi-channel.cc',
        'model/wifi-mac-header.cc',
        'model/wifi-mac-trailer.cc',
        'model/mac-low.cc',
        'model/wifi-mac-queue.cc',
        'model/mac-tx-middle.cc',
        'model/mac-rx-middle.cc',
        'model/dca-txop.cc',
        'model/supported-rates.cc',
        'model/capability-information.cc',
        'model/status-code.cc',
        'model/mgt-headers.cc',
        'model/random-stream.cc',
        'model/dcf-manager.cc',
        'model/wifi-mac.cc',
        'model/regular-wifi-mac.cc',
        'model/wifi-remote-station-manager.cc',
        'model/ap-wifi-mac.cc',
        'model/sta-wifi-mac.cc',
        'model/adhoc-wifi-mac.cc',
        'model/wifi-net-device.cc',
        'model/arf-wifi-manager.cc',
        'model/aarf-wifi-manager.cc',
        'model/ideal-wifi-manager.cc',
        'model/constant-rate-wifi-manager.cc',
        'model/amrr-wifi-manager.cc',
        'model/onoe-wifi-manager.cc',
        'model/rraa-wifi-manager.cc',
        'model/aarfcd-wifi-manager.cc',
        'model/cara-wifi-manager.cc',
        'model/minstrel-wifi-manager.cc',
        'model/qos-tag.cc',
        'model/qos-utils.cc',
        'model/edca-txop-n.cc',
        'model/msdu-aggregator.cc',
        'model/amsdu-subframe-header.cc',
        'model/msdu-standard-aggregator.cc',
        'model/originator-block-ack-agreement.cc',
        'model/dcf.cc',
        'model/ctrl-headers.cc',
        'model/qos-blocked-destinations.cc',
        'model/block-ack-agreement.cc',
        'model/block-ack-manager.cc',
        'model/block-ack-cache.cc',
        'model/snr-tag.cc',
        'model/ht-capabilities.cc',
        'model/wifi-tx-vector.cc',
        'model/parf-wifi-manager.cc',
        'model/aparf-wifi-manager.cc',
        'model/ampdu-subframe-header.cc',
        'model/mpdu-aggregator.cc',
        'model/mpdu-standard-aggregator.cc',
        'model/ampdu-tag.cc',
        'model/wifi-radio-energy-model.cc',
        'model/wifi-tx-current-model.cc',
        'model/vht-capabilities.cc',
        'helper/wifi-radio-energy-model-helper.cc',
        'helper/vht-wifi-mac-helper.cc',
        'helper/ht-wifi-mac-helper.cc',
        'helper/athstats-helper.cc',
        'helper/wifi-helper.cc',
        'helper/yans-wifi-helper.cc',
        'helper/nqos-wifi-mac-helper.cc',
        'helper/qos-wifi-mac-helper.cc',
        ]

    # Companion test library built from the module's test suites.
    obj_test = bld.create_ns3_module_test_library('wifi')
    obj_test.source = [
        'test/block-ack-test-suite.cc',
        'test/dcf-manager-test.cc',
        'test/tx-duration-test.cc',
        'test/power-rate-adaptation-test.cc',
        'test/wifi-test.cc',
        'test/wifi-aggregation-test.cc',
        ]

    # Public headers installed for dependent modules/programs.
    headers = bld(features='ns3header')
    headers.module = 'wifi'
    headers.source = [
        'model/wifi-information-element.h',
        'model/wifi-information-element-vector.h',
        'model/wifi-net-device.h',
        'model/wifi-channel.h',
        'model/wifi-mode.h',
        'model/ssid.h',
        'model/wifi-preamble.h',
        'model/wifi-phy-standard.h',
        'model/yans-wifi-phy.h',
        'model/yans-wifi-channel.h',
        'model/wifi-phy.h',
        'model/interference-helper.h',
        'model/wifi-remote-station-manager.h',
        'model/ap-wifi-mac.h',
        'model/sta-wifi-mac.h',
        'model/adhoc-wifi-mac.h',
        'model/arf-wifi-manager.h',
        'model/aarf-wifi-manager.h',
        'model/ideal-wifi-manager.h',
        'model/constant-rate-wifi-manager.h',
        'model/amrr-wifi-manager.h',
        'model/onoe-wifi-manager.h',
        'model/rraa-wifi-manager.h',
        'model/aarfcd-wifi-manager.h',
        'model/cara-wifi-manager.h',
        'model/minstrel-wifi-manager.h',
        'model/wifi-mac.h',
        'model/regular-wifi-mac.h',
        'model/supported-rates.h',
        'model/error-rate-model.h',
        'model/yans-error-rate-model.h',
        'model/nist-error-rate-model.h',
        'model/dsss-error-rate-model.h',
        'model/wifi-mac-queue.h',
        'model/dca-txop.h',
        'model/wifi-mac-header.h',
        'model/wifi-mac-trailer.h',
        'model/wifi-phy-state-helper.h',
        'model/qos-utils.h',
        'model/edca-txop-n.h',
        'model/msdu-aggregator.h',
        'model/amsdu-subframe-header.h',
        'model/qos-tag.h',
        'model/mgt-headers.h',
        'model/status-code.h',
        'model/capability-information.h',
        'model/dcf-manager.h',
        'model/mac-tx-middle.h',
        'model/mac-rx-middle.h',
        'model/mac-low.h',
        'model/originator-block-ack-agreement.h',
        'model/dcf.h',
        'model/ctrl-headers.h',
        'model/block-ack-agreement.h',
        'model/block-ack-manager.h',
        'model/block-ack-cache.h',
        'model/snr-tag.h',
        'model/ht-capabilities.h',
        'model/parf-wifi-manager.h',
        'model/aparf-wifi-manager.h',
        'model/wifi-tx-vector.h',
        'model/ampdu-subframe-header.h',
        'model/mpdu-aggregator.h',
        'model/mpdu-standard-aggregator.h',
        'model/ampdu-tag.h',
        'model/wifi-radio-energy-model.h',
        'model/wifi-tx-current-model.h',
        'model/vht-capabilities.h',
        'helper/wifi-radio-energy-model-helper.h',
        'helper/vht-wifi-mac-helper.h',
        'helper/ht-wifi-mac-helper.h',
        'helper/athstats-helper.h',
        'helper/wifi-helper.h',
        'helper/yans-wifi-helper.h',
        'helper/nqos-wifi-mac-helper.h',
        'helper/qos-wifi-mac-helper.h',
        ]

    # Link against GSL when it was detected at configure time.
    if bld.env['ENABLE_GSL']:
        obj.use.extend(['GSL', 'GSLCBLAS', 'M'])
        obj_test.use.extend(['GSL', 'GSLCBLAS', 'M'])

    # Build the example programs only when examples are enabled.
    if (bld.env['ENABLE_EXAMPLES']):
        bld.recurse('examples')

    bld.ns3_python_bindings()
| [
"ganzse7en@gmail.com"
] | ganzse7en@gmail.com | |
1727b51b8a28a9c8a1d664f9248c2b26dbc432c1 | 8b9a4c4008bcc2516b4871d0fcb649d12b73afb3 | /mysite/blog/migrations/0001_initial.py | c3c4d7095c6effd9d738ba6eb9aac4ac9d9a7254 | [] | no_license | lalalajiangbiyaosi/Django-by-example | d90913598f7885735497df42909c131ddab74abd | b624b67bb6d194771259670b135f2959c847e778 | refs/heads/master | 2021-04-28T06:01:37.423814 | 2018-02-22T12:45:16 | 2018-02-22T12:45:16 | 122,191,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-17 07:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial schema for the blog app: creates Post."""

    initial = True

    dependencies = [
        # Post.author references the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                # Slug only needs to be unique per publish date.
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-publish',),
            },
        ),
    ]
| [
"825936328@qq.com"
] | 825936328@qq.com |
d4b66625608a7e4c23263c810ef9efd20b4ffe98 | f85485e03a96173b9d858b728b07ce1fe9641f49 | /FinancialManagement.py | 4ca5c15beaede82926e174ca048b90502227f1cf | [] | no_license | AbdulrhmanSobhyAlsayed/Financial-management | 8d123d2f14fd2a408d13eedc0be022a939ca9dcd | 871d329c6a1b73c73119850315e8ca3f49265eeb | refs/heads/master | 2022-12-08T12:12:56.742267 | 2020-08-23T09:51:24 | 2020-08-23T09:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | import os
class FinancialManagement:
    """Track a person's income, expenses and debts and compute a balance.

    Each category is a dict mapping a label (salary type, expense type or a
    person's name) to a numeric amount; category totals are the sum of the
    dict values.
    """

    def __init__(self):
        self.name = "null"
        # Bug fix: every category was seeded with the *string* "0"
        # ({"null": "0"}), which made sum(...) in every getter raise
        # TypeError. Seed a numeric 0 instead; the "null" key is kept.
        self.basicSalary = {"null": 0}
        self.expenses = {"null": 0}
        self.balance = 0.0
        self.extraIncome = {"null": 0}
        self.taker = {"null": 0}
        self.giver = {"null": 0}

    def setName(self, name):
        self.name = name

    def getName(self):
        return self.name

    def setBasicSalary(self, typeOfSalary, valueOfSalary):
        # Adds or overwrites one salary entry.
        self.basicSalary[typeOfSalary] = valueOfSalary

    def getBasicSalary(self):
        """Return the total of all salary entries."""
        return sum(self.basicSalary.values())

    def setExpenses(self, typeOfExpense, valueOfExpense):
        self.expenses[typeOfExpense] = valueOfExpense

    def getExpenses(self):
        """Return the total of all expense entries."""
        return sum(self.expenses.values())

    def getBalance(self):
        """Net position: income plus incoming debts minus outgoings."""
        return (self.getBasicSalary() + self.getGiver() + self.getExtraIncome()
                - self.getExpenses() - self.getTaker())

    def setExtraIncome(self, typeOfIncome, valueOfIncome):
        self.extraIncome[typeOfIncome] = valueOfIncome

    def getExtraIncome(self):
        return sum(self.extraIncome.values())

    def setTaker(self, person, value):
        # Debt this person will pay back to us.
        self.taker[person] = value

    def getTaker(self):
        return sum(self.taker.values())

    def setGiver(self, person, value):
        # Debt we will receive from this person.
        self.giver[person] = value

    def getGiver(self):
        return sum(self.giver.values())

    def toString(self):
        """Return a multi-line human-readable summary of the account."""
        return str("Your Name Is :%s\nYour Basic Salary Is : %f \nYour Extra Income Is : %f \nYour Expenses Is : %f \nYour Debit That You Will Have : %f \nYour Debit That You Will pay : %f \nYour Balance Is:%f\n" % (self.getName(), self.getBasicSalary(), self.getExtraIncome(), self.getExpenses(), self.getGiver(), self.getTaker(), self.getBalance()))
| [
"noreply@github.com"
] | AbdulrhmanSobhyAlsayed.noreply@github.com |
38f9f64be6b857c9631741c3b5835175b4d4b478 | 77b1b3e510076c55e428044bf5a260d1d32869ed | /Image filtering and edge detection/Task2.1P.py | c3b18ccc9b3aae85c9e293e18ed08c7f79812806 | [] | no_license | applecrumble123/ComputerVision | 604ae50e40a49fb1f0d527ce1caa84f66c17cd97 | 22881001bd5f9c3032ed8c461d8e3944df385626 | refs/heads/master | 2022-12-31T08:35:50.603070 | 2020-10-20T08:19:45 | 2020-10-20T08:19:45 | 305,631,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import math
#load image
# --- Load image and convert to grayscale --------------------------------
img = cv.imread('/Users/johnathontoh/Desktop/SIT789 - Applications of Computer Vision and Speech Processing/Week 2/Task 2.1P/Resources_2.1/empire.jpg')
img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
plt.imshow(img_gray, 'gray')
plt.show()

# --- 5x5 averaging (box) kernel -----------------------------------------
avg_kernel = np.ones((5, 5), np.float32) / 25
# ddepth=-1 keeps the output depth equal to the input depth.
avg_result = cv.filter2D(img_gray, -1, avg_kernel)
plt.imshow(avg_result, 'gray')
cv.imwrite('k_kernel.jpg', avg_result)
#plt.show()

# --- Gaussian kernel -----------------------------------------------------
gaussian_kernel = np.float32([[1, 4, 6, 4, 1], [4, 16, 24, 16, 4], [6, 24, 36, 24, 6], [4, 16, 24, 16, 4], [1, 4, 6, 4, 1]]) / 256
gaussian = cv.filter2D(img_gray, -1, gaussian_kernel)
plt.imshow(gaussian, 'gray')
cv.imwrite('gaussian_kernel.jpg', gaussian)
#plt.show()

# --- Sobel kernel (horizontal gradient) ----------------------------------
sobel_kernel = np.float32([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) / 8
sobel = cv.filter2D(img_gray, -1, sobel_kernel)
plt.imshow(sobel, 'gray')
cv.imwrite('sobel_kernel.jpg', sobel)
reverse_sobel_kernel = 255 - sobel
cv.imwrite('reverse_sobel_kernel.jpg', reverse_sobel_kernel)
#plt.show()

# --- Corner kernel -------------------------------------------------------
corner_kernel = np.float32([[1, -2, 1], [-2, 4, -2], [1, -2, 1]]) / 4
corner = cv.filter2D(img_gray, -1, corner_kernel)
plt.imshow(corner, 'gray')
cv.imwrite('corner_kernel.jpg', corner)
# Bug fix: this previously inverted `sobel` (copy-paste from the section
# above), so reverse_corner_kernel.jpg never showed the corner response.
reverse_corner_kernel = 255 - corner
cv.imwrite('reverse_corner_kernel.jpg', reverse_corner_kernel)
#plt.show()

# --- Median filter on the shot-noise image -------------------------------
img_noise = cv.imread('/Users/johnathontoh/Desktop/SIT789 - Applications of Computer Vision and Speech Processing/Week 2/Task 2.1P/Resources_2.1/empire_shotnoise.jpg')
img_noise_gray = cv.cvtColor(img_noise, cv.COLOR_BGR2GRAY)
ksize = 5  # neighbourhood of ksize x ksize; ksize must be an odd number
med_result = cv.medianBlur(img_noise_gray, ksize)
plt.imshow(med_result, 'gray')
cv.imwrite('median_result.jpg', med_result)
#plt.show()

# --- Bilateral filter ----------------------------------------------------
# radius to determine neighbourhood
rad = 5
# standard deviation for spatial distance
sigma_s = 10
# standard deviation for colour difference
sigma_c = 30
bil_result = cv.bilateralFilter(img_noise_gray, rad, sigma_c, sigma_s)
plt.imshow(bil_result, 'gray')
cv.imwrite('bilateral_result.jpg', bil_result)
#plt.show()

# --- Gaussian filter on the noisy image ----------------------------------
gaussian_noise = cv.filter2D(img_noise_gray, -1, gaussian_kernel)
plt.imshow(gaussian_noise, 'gray')
cv.imwrite('gaussian_kernel_noise.jpg', gaussian_noise)
#plt.show()

# --- Edge detection using Sobel derivatives ------------------------------
D_x = np.float32([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) / 8
der_x = cv.filter2D(img_gray, -1, D_x)
plt.imshow(der_x, 'gray')
D_y = np.float32([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) / 8
der_y = cv.filter2D(img_gray, -1, D_y)
plt.imshow(der_y, 'gray')

# Per-pixel gradient magnitude: sqrt(dx^2 + dy^2), truncated to int.
height, width = img_gray.shape
mag_img_gray = np.zeros((height, width), np.float32)
for i in range(0, height):
    for j in range(0, width):
        square_der_x = float(der_x[i, j]) * float(der_x[i, j])
        square_der_y = float(der_y[i, j]) * float(der_y[i, j])
        mag_img_gray[i, j] = int(math.sqrt(square_der_x + square_der_y))
plt.imshow(mag_img_gray, 'gray')
cv.imwrite('sobel_edges.jpg', mag_img_gray)
reverse_sobel = 255 - mag_img_gray
plt.imshow(reverse_sobel, 'gray')
cv.imwrite('reverse_sobel_edges.jpg', reverse_sobel)
plt.show()

# --- Canny edge detector --------------------------------------------------
minVal = 100  # minVal used in hysteresis thresholding
maxVal = 200  # maxVal used in hysteresis thresholding
Canny_edges = cv.Canny(img_gray, minVal, maxVal)
plt.imshow(Canny_edges, 'gray')
cv.imwrite('canny_edges.jpg', Canny_edges)
reverse_canny = 255 - Canny_edges
plt.imshow(reverse_canny, 'gray')
cv.imwrite('reverse_canny_edges.jpg', reverse_canny)
plt.show()
"jhtoh@deakin.edu.au"
] | jhtoh@deakin.edu.au |
c575f9ab8bb0f669d6f36c30755a4f948d8ecd2e | 826ef921c6ea02e532398b87991353a61cb2804f | /api/serializers/experiment.py | 2b88bebc58d6a5700e4f30cfecbdd1b5dcb7a513 | [
"MIT"
] | permissive | betagouv/peps | c3823405b27fa73e3e9bf63292622d68021d622b | 6a28aac1c6daca530775db7cd173cbca2441ab54 | refs/heads/master | 2022-02-14T08:06:15.759531 | 2022-01-25T15:01:34 | 2022-01-25T15:01:34 | 202,151,130 | 6 | 7 | MIT | 2022-01-24T14:03:21 | 2019-08-13T13:31:01 | Python | UTF-8 | Python | false | false | 7,449 | py |
from rest_framework import serializers
from drf_base64.fields import Base64ImageField, Base64FileField
from data.models import Experiment
from data.models import ExperimentImage, ExperimentVideo
from api.serializers import MediaListSerializer
class ExperimentImageSerializer(serializers.ModelSerializer):
    """Writable serializer for one experiment image.

    The image payload is exchanged as a base64-encoded string; the explicit
    `id` field lets list updates match existing rows.
    """
    image = Base64ImageField()
    id = serializers.IntegerField(required=False)

    class Meta:
        model = ExperimentImage
        fields = (
            'image',
            'label',
            'copyright',
            'id',
        )
class ExperimentImageFastSerializer(serializers.Serializer):
    """
    Serializer to be used in retrieval actions. By bypassing
    the overhead of the ModelSerializer it is significantly
    faster : https://hakibenita.com/django-rest-framework-slow
    """
    # Fields mirror ExperimentImageSerializer, declared explicitly.
    image = Base64ImageField()
    id = serializers.IntegerField(required=False)
    label = serializers.CharField()
    copyright = serializers.CharField()
class ExperimentVideoSerializer(serializers.ModelSerializer):
    """Writable serializer for one experiment video.

    The video payload is exchanged as a base64-encoded file; the explicit
    `id` field lets list updates match existing rows.
    """
    video = Base64FileField()
    id = serializers.IntegerField(required=False)

    class Meta:
        model = ExperimentVideo
        fields = (
            'video',
            'label',
            'copyright',
            'id'
        )
class ExperimentVideoFastSerializer(serializers.Serializer):
    """
    Serializer to be used in retrieval actions. By bypassing
    the overhead of the ModelSerializer it is significantly
    faster : https://hakibenita.com/django-rest-framework-slow
    """
    # Fields mirror ExperimentVideoSerializer, declared explicitly.
    video = Base64FileField()
    id = serializers.IntegerField(required=False)
    label = serializers.CharField()
    copyright = serializers.CharField()
class ExperimentFastSerializer(serializers.Serializer):
    """
    Serializer to be used in retrieval actions. By bypassing
    the overhead of the ModelSerializer it is significantly
    faster : https://hakibenita.com/django-rest-framework-slow
    """
    # Nested media, rendered with the matching "fast" child serializers.
    images = MediaListSerializer(required=False, child=ExperimentImageFastSerializer(required=False))
    videos = MediaListSerializer(required=False, child=ExperimentVideoFastSerializer(required=False))
    # Identity fields are read-only; all others are plain declarations that
    # mirror the Experiment model attributes one-to-one.
    id = serializers.UUIDField(read_only=True)
    sequence_number = serializers.IntegerField(read_only=True)
    external_id = serializers.CharField(read_only=True)
    tags = serializers.ListField()
    approved = serializers.BooleanField()
    state = serializers.CharField()
    name = serializers.CharField()
    short_name = serializers.CharField()
    objectives = serializers.CharField()
    equipment = serializers.CharField()
    control_presence = serializers.BooleanField()
    ongoing = serializers.BooleanField()
    results = serializers.CharField()
    results_details = serializers.CharField()
    links = serializers.ListField()
    description = serializers.CharField()
    investment = serializers.CharField()
    surface = serializers.CharField()
    surface_type = serializers.ListField()
    xp_type = serializers.CharField()
    cultures = serializers.ListField()
    creation_date = serializers.DateTimeField()
    modification_date = serializers.DateTimeField()
class ExperimentSerializer(serializers.ModelSerializer):
    """Full read/write serializer for Experiment with nested images/videos.

    create() and update() pop the nested media out of validated_data,
    persist the experiment itself via the parent ModelSerializer, then
    delegate the media rows to the MediaListSerializer children.
    """
    images = MediaListSerializer(required=False, child=ExperimentImageSerializer(required=False))
    videos = MediaListSerializer(required=False, child=ExperimentVideoSerializer(required=False))

    class Meta:
        model = Experiment
        read_only_fields = [
            'id',
            'sequence_number',
            'external_id',
        ]
        fields = (
            'id',
            'sequence_number',
            'external_id',
            'tags',
            'approved',
            'state',
            'name',
            'short_name',
            'objectives',
            'equipment',
            'control_presence',
            'ongoing',
            'results',
            'results_details',
            'links',
            'description',
            'investment',
            'surface',
            'surface_type',
            'xp_type',
            'images',
            'videos',
            'cultures',
            'creation_date',
            'modification_date',
        )

    def create(self, validated_data):
        """Create the experiment, then its nested images/videos."""
        # Fast path: no nested media to handle.
        if 'images' not in validated_data and 'videos' not in validated_data:
            return super().create(validated_data)

        # Nested media must be stripped before the base create(), then
        # re-attached with a back-reference to the new experiment.
        image_validated_data = validated_data.pop('images', None)
        video_validated_data = validated_data.pop('videos', None)
        experiment = super().create(validated_data)

        if image_validated_data is not None:
            experiment_image_serializer = self.fields['images']
            for item in image_validated_data:
                item['experiment'] = experiment
            experiment_image_serializer.create(image_validated_data)

        if video_validated_data is not None:
            experiment_video_serializer = self.fields['videos']
            for item in video_validated_data:
                item['experiment'] = experiment
            experiment_video_serializer.create(video_validated_data)

        return experiment

    def update(self, instance, validated_data):
        """Update the experiment, then sync its nested images/videos."""
        # Fast path: no nested media to handle.
        if 'images' not in validated_data and 'videos' not in validated_data:
            return super().update(instance, validated_data)

        image_validated_data = validated_data.pop('images', None)
        video_validated_data = validated_data.pop('videos', None)
        experiment = super().update(instance, validated_data)

        if image_validated_data is not None:
            experiment_image_serializer = self.fields['images']
            for item in image_validated_data:
                item['experiment'] = experiment
            # The list serializer reconciles incoming items against the
            # experiment's current image queryset.
            experiment_image_serializer.update(experiment.images.all(), image_validated_data)

        if video_validated_data is not None:
            experiment_video_serializer = self.fields['videos']
            for item in video_validated_data:
                item['experiment'] = experiment
            experiment_video_serializer.update(experiment.videos.all(), video_validated_data)

        return experiment
class ExperimentBriefsFastSerializer(serializers.Serializer):
    """
    Serializer to be used in retrieval actions. Only limited
    information is exposed, meant for a card display.
    """
    images = MediaListSerializer(required=False, child=ExperimentImageFastSerializer(required=False))
    id = serializers.UUIDField(read_only=True)
    sequence_number = serializers.IntegerField(read_only=True)
    tags = serializers.ListField()
    name = serializers.CharField()
    short_name = serializers.CharField()
    cultures = serializers.ListField()
    creation_date = serializers.DateTimeField()
    modification_date = serializers.DateTimeField()
    # Farmer info is flattened onto the card via slug lookups on the
    # related farmer object.
    farmer = serializers.PrimaryKeyRelatedField(read_only=True)
    farmer_url_slug = serializers.SlugRelatedField(source="farmer", slug_field='url_slug', read_only=True)
    livestock_types = serializers.SlugRelatedField(source="farmer", slug_field='livestock_types', read_only=True)
    postal_code = serializers.SlugRelatedField(source="farmer", slug_field='postal_code', read_only=True)
    farmer_name = serializers.SlugRelatedField(source="farmer", slug_field='name', read_only=True)
    agriculture_types = serializers.SlugRelatedField(source="farmer", slug_field='agriculture_types', read_only=True)
    objectives = serializers.CharField()
| [
"alejandro@amguillen.dev"
] | alejandro@amguillen.dev |
5a90f5b43138ebf000953a0e137ceac4e764177a | 38dec837a5063d0111b1e92ef678e04094ea8f90 | /form/migrations/0031_emp_form.py | 35c50e203231c7a5b8bfb488440801988c1b569c | [] | no_license | Nurlan1/ranking | 47321205f0408ab4978a8fa0d3e74c60698710e7 | 88e297a1217574947cb11da405132a640f80371b | refs/heads/master | 2022-05-08T12:55:40.669941 | 2021-06-04T07:48:04 | 2021-06-04T07:48:04 | 254,357,305 | 0 | 0 | null | 2022-04-22T23:11:46 | 2020-04-09T11:50:49 | HTML | UTF-8 | Python | false | false | 2,672 | py | # Generated by Django 2.2 on 2020-12-28 10:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Emp_form (employer survey) model."""

    dependencies = [
        ('form', '0030_student_form_own_skills'),
    ]

    operations = [
        migrations.CreateModel(
            name='Emp_form',
            fields=[
                ('Id', models.AutoField(primary_key=True, serialize=False)),
                # Respondent details.
                ('fio', models.TextField()),
                ('age', models.IntegerField()),
                ('gender', models.TextField()),
                ('raddate', models.TextField()),
                ('major', models.TextField()),
                ('job', models.TextField()),
                ('position', models.TextField()),
                ('salary', models.TextField()),
                # Skill-rating answers (integer scores).
                ('synthesis', models.IntegerField()),
                ('using', models.IntegerField()),
                ('time_management', models.IntegerField()),
                ('base_knowledge', models.IntegerField()),
                ('preparing', models.IntegerField()),
                ('communication', models.IntegerField()),
                ('lang_knowledge', models.IntegerField()),
                ('comp_work', models.IntegerField()),
                ('exploring', models.IntegerField()),
                ('studying', models.IntegerField()),
                ('inf_work', models.IntegerField()),
                ('criticism', models.IntegerField()),
                ('adapt', models.IntegerField()),
                ('new_ideas', models.IntegerField()),
                ('solving', models.IntegerField()),
                ('decision', models.IntegerField()),
                ('team_work', models.IntegerField()),
                ('personal', models.IntegerField()),
                ('leadership', models.IntegerField()),
                ('team_disc_work', models.IntegerField()),
                ('nospec_work', models.IntegerField()),
                ('internat_work', models.IntegerField()),
                ('indep_work', models.IntegerField()),
                ('development', models.IntegerField()),
                ('initiate', models.IntegerField()),
                ('ethics', models.IntegerField()),
                ('caring', models.IntegerField()),
                ('ambition', models.IntegerField()),
                # Lookup references; kept when the referenced row is deleted.
                ('University_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='form.University', verbose_name='Университет')),
                ('Year_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='form.Year', verbose_name='Год')),
            ],
        ),
    ]
| [
"toktorbekov1999@gmail.com"
] | toktorbekov1999@gmail.com |
42571309656e4894e9a6eaf76de3b09b53d83ede | 9a37461f5ee5e6ca6e2f43b88394bf7b66f2a95b | /app/app/schema.py | 9805c62eb5645eb2888e22b4a0a22a711a0f77b3 | [] | no_license | goughjo02/react-tracks | c7587039dae7bc649b0fb8570f78fd8f1f4062a4 | a394a3cd79b7accf69a87144f39986f508afea08 | refs/heads/master | 2023-04-11T04:51:17.336810 | 2021-04-17T20:37:50 | 2021-04-17T20:37:50 | 356,690,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import graphene
import graphql_jwt
import tracks.schema
import users.schema
# Root GraphQL query type: combines the tracks and users queries via
# mixin inheritance (a '#' comment is used on purpose -- a class docstring
# would become the type's description in the GraphQL schema).
class Query(tracks.schema.Query, users.schema.Query, graphene.ObjectType):
    pass
class Mutation(tracks.schema.Mutation, users.schema.Mutation, graphene.ObjectType):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
# Executable schema combining the root query and mutation types.
schema = graphene.Schema(query=Query, mutation=Mutation)
| [
"goughjo02@gmail.com"
] | goughjo02@gmail.com |
46dbda76f22183920083b4a4bf2b8d279781fb8e | 21068e56ade178d4df5c579af9b39214375a6a60 | /Basic/Loop/while.py | 68011c054dd73d9d5801384267dec2c780fcdcde | [] | no_license | torikraju/python | c91da5e365ab4909fbe5417815f1c3908f23db40 | 662e2dfd84773e792319f1283566853b234d9617 | refs/heads/master | 2020-04-09T11:40:53.116281 | 2019-01-01T12:19:51 | 2019-01-01T12:19:51 | 160,319,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | blockchain = []
def add_value(transaction_amount='default value', last_transaction=None):
    """Append a block [last_transaction, transaction_amount] to the chain.

    Fix: the previous default `last_transaction=[1]` was a mutable default
    argument (one shared list across all calls). Use the None sentinel and
    build a fresh [1] per call; observable behavior is unchanged because
    the list was never mutated.
    """
    if last_transaction is None:
        last_transaction = [1]
    blockchain.append([last_transaction, transaction_amount])
def get_last_blockchain_value():
    """Return the most recently appended block on the chain."""
    latest_block = blockchain[-1]
    return latest_block
def get_user_input():
    """Prompt the user and return the entered transaction amount as a float."""
    amount = input("your transaction amount please ")
    return float(amount)
# Seed the chain with the first user-supplied transaction.
tx_amount = get_user_input()
add_value(tx_amount)
# NOTE(review): this loop has no break or exit condition, so it prompts
# forever -- everything below it is unreachable as written.
while True:
    tx_amount = get_user_input()
    add_value(tx_amount, get_last_blockchain_value())
# Dump every block of the chain (never reached, see note above).
for element in blockchain:
    print('Outputting Block')
    print(element)
print('Done')
| [
"torikraju@hotmail.com"
] | torikraju@hotmail.com |
61a56e8f561435c4a10d86df63ea689a20d4c8df | 49f61714a6f78d984fd2194d6064d84e891bc5b7 | /2019-1/220/users/4266/codes/1693_1879.py | 55a2e7d2215d87de2d3bec79e75dfecd21f22dde | [] | no_license | psbarros/Variaveis3 | b5c4e1517e7d94a846ee03791d25d5821a1c651c | 3dcf6f810709ce03c78335acf9533e008a2ae125 | refs/heads/master | 2023-06-13T07:05:00.878430 | 2021-07-06T17:51:37 | 2021-07-06T17:51:37 | 383,549,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | # Ao testar sua solução, não se limite ao caso de exemplo.
extras = float(input("Digite o numero de horas extras: "))
faltou = float(input("Digite o numero de horas que faltou: "))
h = extras - ((1/4)*faltou)
if (h>400):
g = 500.0
else:
g = 100.0
print(extras, " extras e ", faltou, " de falta")
print("R$ ",g)
| [
"psb@icomp.ufam.edu.br"
] | psb@icomp.ufam.edu.br |
ef3fab69eb592ef269063a47815bcf415a20ec5a | cae9bf9cfc911309a58dbf68736649422736cf16 | /SVM.py | 5b279013201d10ce02cc169cad4f1770702e29e3 | [] | no_license | SobhanDash/SVM | b3ecc644a1ae0fadb6a508047d14a416ed1ab38a | 2710927b2dba2392e867e2500f8f3f67fd64b3aa | refs/heads/master | 2022-12-14T19:54:26.705620 | 2020-09-06T07:47:35 | 2020-09-06T07:47:35 | 293,228,035 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | import sklearn
from sklearn import datasets
from sklearn import svm
from sklearn import metrics
# Bundled Wisconsin breast-cancer dataset (binary classification,
# 30 numeric features per sample).
cancer = datasets.load_breast_cancer()
#print("Features: ", cancer.feature_names)
#print("Labels: ", cancer.target_names)
x = cancer.data #All of the features
y = cancer.target #All of the labels
# Hold out 20% of the samples for evaluation (random split each run).
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size = 0.2)
# Linear-kernel SVM; C is the regularization parameter (larger C
# means a harder margin / less regularization).
clf = svm.SVC(kernel="linear", C=3)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
# Fraction of held-out samples classified correctly.
acc = metrics.accuracy_score(y_test, y_pred)
print(acc)
| [
"noreply@github.com"
] | SobhanDash.noreply@github.com |
976024538682c39ea666ce8a446262993b29caed | 2a8a6327fb9a7ce8696aa15b197d5170661fb94f | /test/test_get_credit_memo_item_typewith_success_finance_information.py | 7a33e49537112879bd23b6dc4151bca3264a0e5d | [] | no_license | moderndatainc/zuora-client | 8b88e05132ddf7e8c411a6d7dad8c0baabaa6dad | d50da49ce1b8465c76723496c2561a3b8ebdf07d | refs/heads/master | 2021-09-21T19:17:34.752404 | 2018-08-29T23:24:07 | 2018-08-29T23:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,982 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. 
If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. 
If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. 
For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). 
These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. 
See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/G_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. ### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. 
For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/REST_API/A_REST_basics/3_Responses_and_errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. 
Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. 
If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. 
| | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. 
Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. | | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. 
**Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. 
| | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. | #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. 
The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation and Export ZOQL queries only. Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | 
Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue 
Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | `RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2018-08-23
Contact: docs@zuora.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.get_credit_memo_item_typewith_success_finance_information import GETCreditMemoItemTypewithSuccessFinanceInformation # noqa: E501
from zuora_client.rest import ApiException
class TestGETCreditMemoItemTypewithSuccessFinanceInformation(unittest.TestCase):
    """Unit-test stub for the GETCreditMemoItemTypewithSuccessFinanceInformation model."""

    def setUp(self):
        # No fixtures required yet; kept as a hook for future tests.
        pass

    def tearDown(self):
        # Nothing to clean up while the test remains a stub.
        pass

    def testGETCreditMemoItemTypewithSuccessFinanceInformation(self):
        """Placeholder test for GETCreditMemoItemTypewithSuccessFinanceInformation.

        FIXME: construct the model with example values for its mandatory
        attributes, e.g.
        model = zuora_client.models.get_credit_memo_item_typewith_success_finance_information.GETCreditMemoItemTypewithSuccessFinanceInformation()  # noqa: E501
        """
        pass
# Allow the test stubs to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"jairo.velasco@alertlogic.com"
] | jairo.velasco@alertlogic.com |
056be20ed1e3365c7fdde9a90eaa63c5dcb36b19 | cce1e235c2c8e58d83af6dbadeb471ca62b710a1 | /hackerrank/data_structures/linked_lists/print_in_reverse.py | 2ce2ada1121b872bcca83b376be97f1e9c07e040 | [] | no_license | SebastianThomas1/coding_challenges | 6b51ce046b458c44db809687b6809d16d066566f | bd3bc6be7a975b6255e4b2198c953d56bd74e75a | refs/heads/master | 2023-03-03T00:18:00.147369 | 2021-02-08T21:52:02 | 2021-02-08T21:52:02 | 336,688,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | # Sebastian Thomas (coding at sebastianthomas dot de)
# https://www.hackerrank.com/challenges/print-the-elements-of-a-linked-list-in-reverse
#
# Print in Reverse
class SinglyLinkedListNode:
    """One link of a singly linked list: a payload plus a successor pointer."""

    def __init__(self, node_data):
        self.data = node_data  # payload stored at this node
        self.next = None       # successor node; None marks the tail
def reverse(head):
    """Reverse the linked list in place and return its new head node."""
    new_head = None
    node = head
    while node is not None:
        # Re-point the current node at the already-reversed portion, then
        # advance; RHS is evaluated before any target is assigned.
        node.next, new_head, node = new_head, node, node.next
    return new_head
def reverse_print(head):
    """Print each node's data from the tail back to the head.

    The list is left exactly as it was handed in.
    """
    values = []
    node = head
    while node is not None:
        values.append(node.data)
        node = node.next
    for value in reversed(values):
        print(value)
| [
"sigma.online@gmx.de"
] | sigma.online@gmx.de |
e45195c3021c89716d44b391b1ecb44dbda515df | d9d61b364487ddf4e7a2fd871059ec26ecaf3fb7 | /accounts/admin.py | 9a3a61cf853c6134062d1708a473d5005971ca67 | [] | no_license | fortune/django-auth-session-sample | 623e31a06232f1c33e340bfee484677c09aa6383 | 070e400f6de30a02ef40f8a4b2c272d533772e43 | refs/heads/master | 2020-05-17T08:03:23.603123 | 2019-05-27T10:15:53 | 2019-05-27T10:15:53 | 183,596,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | from django.contrib import admin
from django.contrib.auth.forms import UserChangeForm
from django.contrib.auth.admin import UserAdmin
from .models import User
# Register your models here.
class MyUserChangeForm(UserChangeForm):
    # Admin change form bound to the project's custom ``User`` model instead
    # of the stock ``django.contrib.auth`` user.
    class Meta(UserChangeForm.Meta):
        # Inherit every stock Meta option; only swap the model class.
        model = User
class MyUserAdmin(UserAdmin):
    # ModelAdmin for the custom User model: reuses the stock UserAdmin but
    # plugs in the custom change form and exposes the extra ``gender`` field.
    form = MyUserChangeForm
    fieldsets = UserAdmin.fieldsets + (
        # Extra, unnamed section appended after the default auth fieldsets.
        (None, {'fields': ('gender', )}),
    )
admin.site.register(User, MyUserAdmin) | [
"fortunefield@gmail.com"
] | fortunefield@gmail.com |
38a89f777e8bbe9356f75f9a636906d2bcb8c7b1 | 3427decdf9d761c6cc9933662b6eb9e408c39064 | /dictionary.py | 9b711a7867bfa4e49f30b9343710338761a591e7 | [] | no_license | yunshang/python | b74add81404a68b806931e5da57b5bf51f7d1085 | 1ddbdf76640196e46e39e74d26d2b000c54a19af | refs/heads/master | 2021-01-10T21:01:05.948851 | 2014-07-24T03:40:42 | 2014-07-24T03:40:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | #!/usr/bin/env python
# encoding: utf-8
import wx
import urllib2
import codecs
import re
import fileinput
import sys
# Python 2 hack: reload() restores the hidden sys.setdefaultencoding() so
# implicit str<->unicode conversions in the GUI code below assume UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
class Example(wx.Frame):
    """Main window: a one-line query box above a multi-line result box."""

    def __init__(self, parent, title):
        super(Example, self).__init__(parent, title=title, size=(300, 250))
        self.InitUI()
        self.SetTransparent(200)  # slightly see-through window
        self.Centre()
        self.Show()

    def onEnter(self, event):
        """Key handler: on Enter, look up the typed word and show the result."""
        key = event.GetKeyCode()
        if key in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
            word = self.display.GetValue()
            # tran() returns a UTF-8 byte string; decode it for display.
            self.display2.Value = tran(word).decode('utf-8')
        event.Skip()  # let other handlers see the key event too

    def InitUI(self):
        """Build the vertical layout: query field above the result field."""
        layout = wx.BoxSizer(wx.VERTICAL)

        # Single-line entry where the word to look up is typed.
        self.display = wx.TextCtrl(self, size=(300, 35))
        self.display.SetMaxLength(100)
        self.display.SetFont(wx.Font(18, wx.FONTFAMILY_ROMAN, wx.ITALIC, wx.NORMAL))
        self.display.Bind(wx.EVT_KEY_DOWN, self.onEnter)
        layout.Add(self.display, flag=wx.EXPAND | wx.TOP | wx.BOTTOM, border=4)

        # Multi-line area that shows the fetched explanation.
        self.display2 = wx.TextCtrl(self, style=wx.TE_MULTILINE, size=(300, 180))
        self.display2.SetFont(wx.Font(10, wx.FONTFAMILY_ROMAN, wx.ITALIC, wx.NORMAL))
        layout.Add(self.display2, flag=wx.BOTTOM, border=4)

        self.SetSizer(layout)
def tran(word):
    """Look *word* up on dict.baidu.com and return its explanation.

    Returns a UTF-8 byte string with the ``<br />`` tags of the raw
    explanation collapsed to spaces.  Raises AttributeError if the page
    contains no ``explain: "..."`` snippet.
    """
    url = 'http://dict.baidu.com/s?wd={0}&tn=dict'.format(word)
    req = urllib2.Request(url)
    resp = urllib2.urlopen(req)
    try:
        resphtml = resp.read()
    finally:
        # BUG FIX: the response was never closed, leaking the HTTP
        # connection on every lookup.
        resp.close()
    text = re.search(r'explain: "(.*)"', resphtml)
    return text.group(1).replace('<br />', ' ')
# Script entry point: build the wx application and start its event loop.
if __name__ == '__main__':
    app = wx.App()
    Example(None, title='dictionary')
    app.MainLoop()
| [
"Bingbug1992@gmail.com"
] | Bingbug1992@gmail.com |
15aa12ee133e35281060e4580a88fe2e75fd98f9 | 7f0481f1f3508b6a957f71bf49478ceb592fe945 | /F9744/Keras/Ch02/Ch2_3.py | 2d96fd756a615e339fc8234a3768a8308657e512 | [] | no_license | dsalearning/tf.keras_python | d8c8174fac793cd5266e4aded9e83c2631311c15 | f06950d035c6aff0fd518eafcc09caffa348aefe | refs/heads/main | 2023-01-20T03:23:10.826040 | 2020-12-01T01:56:00 | 2020-12-01T01:56:00 | 309,405,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | num = 10
print(num)
if num >= 10:
print("數字是10") | [
"alvinlin98@gmail.com"
] | alvinlin98@gmail.com |
828390637851af3ac878569b4c3b034030f07415 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-css/huaweicloudsdkcss/v1/model/setting.py | 0c3511543b950ff7b9b7c9005a2fe6122a67016d | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 9,314 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Setting:
    """Logstash pipeline runtime settings (huaweicloud CSS SDK model).

    ``openapi_types`` maps attribute names to their declared types and
    ``attribute_map`` maps attribute names to their JSON keys.
    """

    sensitive_list = []

    openapi_types = {
        'workers': 'int',
        'batch_size': 'int',
        'batch_delay_ms': 'int',
        'queue_type': 'str',
        'queue_check_point_writes': 'int',
        'queue_max_bytes_mb': 'int'
    }

    attribute_map = {
        'workers': 'workers',
        'batch_size': 'batchSize',
        'batch_delay_ms': 'batchDelayMs',
        'queue_type': 'queueType',
        'queue_check_point_writes': 'queueCheckPointWrites',
        'queue_max_bytes_mb': 'queueMaxBytesMb'
    }

    def __init__(self, workers=None, batch_size=None, batch_delay_ms=None,
                 queue_type=None, queue_check_point_writes=None,
                 queue_max_bytes_mb=None):
        """Build a Setting model.

        :param workers: worker threads running the Filters+Outputs pipeline
            stages (defaults to the number of CPU cores).
        :param batch_size: maximum events one worker collects from the inputs
            before running its Filters and Outputs; larger is usually more
            efficient at the cost of memory (default 125).
        :param batch_delay_ms: minimum time, in milliseconds, an event waits
            to be scheduled by the pipeline.
        :param queue_type: internal event-buffer model -- ``memory``
            (default) or the disk-backed ACKed ``persisted`` queue. Required.
        :param queue_check_point_writes: with a persisted queue, maximum
            events written before a checkpoint is forced (default 1024).
        :param queue_max_bytes_mb: with a persisted queue, its total capacity
            in MB (default 1024); must not exceed the disk capacity.
        """
        self._workers = None
        self._batch_size = None
        self._batch_delay_ms = None
        self._queue_type = None
        self._queue_check_point_writes = None
        self._queue_max_bytes_mb = None
        self.discriminator = None

        # Optional fields are only assigned when explicitly provided;
        # queue_type is required and therefore always set.
        if workers is not None:
            self.workers = workers
        if batch_size is not None:
            self.batch_size = batch_size
        if batch_delay_ms is not None:
            self.batch_delay_ms = batch_delay_ms
        self.queue_type = queue_type
        if queue_check_point_writes is not None:
            self.queue_check_point_writes = queue_check_point_writes
        if queue_max_bytes_mb is not None:
            self.queue_max_bytes_mb = queue_max_bytes_mb

    @property
    def workers(self):
        """Worker threads for the Filters+Outputs pipeline stages."""
        return self._workers

    @workers.setter
    def workers(self, workers):
        self._workers = workers

    @property
    def batch_size(self):
        """Maximum events a single worker collects before processing them."""
        return self._batch_size

    @batch_size.setter
    def batch_size(self, batch_size):
        self._batch_size = batch_size

    @property
    def batch_delay_ms(self):
        """Minimum scheduling wait per event, in milliseconds."""
        return self._batch_delay_ms

    @batch_delay_ms.setter
    def batch_delay_ms(self, batch_delay_ms):
        self._batch_delay_ms = batch_delay_ms

    @property
    def queue_type(self):
        """Event-buffer model: ``memory`` or disk-backed ``persisted``."""
        return self._queue_type

    @queue_type.setter
    def queue_type(self, queue_type):
        self._queue_type = queue_type

    @property
    def queue_check_point_writes(self):
        """Persisted queue: max events written before a forced checkpoint."""
        return self._queue_check_point_writes

    @queue_check_point_writes.setter
    def queue_check_point_writes(self, queue_check_point_writes):
        self._queue_check_point_writes = queue_check_point_writes

    @property
    def queue_max_bytes_mb(self):
        """Persisted queue: total capacity in megabytes."""
        return self._queue_max_bytes_mb

    @queue_max_bytes_mb.setter
    def queue_max_bytes_mb(self, queue_max_bytes_mb):
        self._queue_max_bytes_mb = queue_max_bytes_mb

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                # Sensitive attributes are masked in the exported dict.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Serialize the model to a JSON string."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """Debug representation used by ``print``."""
        return self.to_str()

    def __eq__(self, other):
        """Two Setting models are equal when all their attributes match."""
        return isinstance(other, Setting) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
5fdd169dfde0cf56687a3d1233957fba84ef3e90 | c5f86a009e52e43456ab2c1d80d6ce4d5855206d | /foundation/python/bst/node.py | 86eedfdd8b8aa1224519e02d119bb463fef5c4ea | [] | no_license | MirTalpur/cs-fundamentals | 149ad0ffb571f28d666cd68b5dd93ef5870e99fc | 76b8cfafa5303110a3b539c16c00144b7ab728df | refs/heads/master | 2021-09-22T11:42:11.048807 | 2018-07-24T06:59:06 | 2018-07-24T06:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | class Node(object):
"""docstring for Node"""
    def __init__(self, data):
        """Create a leaf node holding *data*; both children start empty."""
        self.data = data
        self.leftChild = None
        self.rightChild = None
def insert(self, data):
if data < self.data:
if not self.leftChild:
self.leftChild = Node(data)
else:
self.leftChild.insert(data)
else:
if not self.rightChild:
self.rightChild = Node(data)
else:
self.rightChild.insert(data)
    def remove(self, data, parentNode):
        """Delete the node holding *data* from the subtree rooted here.

        ``parentNode`` must be the parent of ``self``; it is needed to splice
        out nodes with fewer than two children.  NOTE(review): removing a
        node with fewer than two children when ``parentNode`` is None (i.e.
        the tree root) raises AttributeError -- confirm callers always pass
        the real parent or wrap the root.
        """
        if data < self.data:
            # Target, if present, lives in the left subtree.
            if self.leftChild is not None:
                self.leftChild.remove(data, self)
        elif data > self.data:
            # Target, if present, lives in the right subtree.
            if self.rightChild is not None:
                self.rightChild.remove(data, self)
        else:
            if self.leftChild is not None and self.rightChild is not None:
                # Two children: overwrite with the in-order successor (min of
                # the right subtree) and delete that successor instead.
                self.data = self.rightChild.getMin()
                self.rightChild.remove(self.data, self)
            elif parentNode.leftChild == self:
                # At most one child: splice it (or None) into the parent.
                if self.leftChild is not None:
                    tempNode = self.leftChild
                else:
                    tempNode = self.rightChild
                parentNode.leftChild = tempNode
            elif parentNode.rightChild == self:
                if self.leftChild is not None:
                    tempNode = self.leftChild
                else:
                    tempNode = self.rightChild
                parentNode.rightChild = tempNode
def getMin(self):
if self.leftChild is None:
return self.data
else:
return self.leftChild.getMin()
def getMax(self):
if self.rightChild is None:
return self.data
else:
return self.rightChild.getMax()
# numerical order or the alphabetical ordering
# if it's a string
def traverseInOrder(self):
if self.leftChild is not None:
self.leftChild.traverseInOrder()
print(self.data)
if self.rightChild is not None:
self.rightChild.traverseInOrder()
| [
"hskalee123@gmail.com"
] | hskalee123@gmail.com |
ec9956db768eacf326814720cf23bfac4a22d5e0 | c7279eb0f50a87c0d80e1d461d9349070c0a193f | /docker_version/mqtt_app/message_eater/mongo.py | 7bbda5ed347f9e9759383ab0e1151692a4ed73cc | [] | no_license | rurentero/MQTT-PersistedEnv | 079caa56f35461b83a7cf0a4f327449835d1965b | 4213aeba896d05e61d7cb42501ad6799ebe188dc | refs/heads/master | 2022-11-06T23:31:20.588551 | 2020-06-24T18:26:12 | 2020-06-24T18:26:12 | 260,447,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | from typing import List
from datetime import datetime
import paho.mqtt.client as mqtt
import pymongo
import pymongo.database
import pymongo.collection
import pymongo.errors
import threading
import os
#MONGO_URI = "mongodb://localhost:27017" # mongodb://user:pass@ip:port || mongodb://ip:port
MONGO_URI = "mongodb://mongodb:27017" # Line for docker images
# Default database/collection names, server-selection timeout and the
# format used for the human-readable "datetime" field of stored documents.
MONGO_DB = "mqttdb"
MONGO_COLLECTION = "testingEnv"
MONGO_TIMEOUT = 1 # Time in seconds
MONGO_DATETIME_FORMAT = "%d/%m/%Y %H:%M:%S"
# Each setting may be overridden through an environment variable of the same
# name; the timeout is coerced to float so fractional seconds are accepted.
MONGO_URI = os.getenv("MONGO_URI", MONGO_URI)
MONGO_DB = os.getenv("MONGO_DB", MONGO_DB)
MONGO_COLLECTION = os.getenv("MONGO_COLLECTION", MONGO_COLLECTION)
MONGO_TIMEOUT = float(os.getenv("MONGO_TIMEOUT", MONGO_TIMEOUT))
MONGO_DATETIME_FORMAT = os.getenv("MONGO_DATETIME_FORMAT", MONGO_DATETIME_FORMAT)
class Mongo(object):
    """Persists MQTT messages into a MongoDB collection.

    Messages that cannot be stored (no connection, or an unacknowledged
    write) are only counted for now; the actual retry queue is a TODO.
    """

    def __init__(self):
        self.client: pymongo.MongoClient = None
        self.database: pymongo.database.Database = None
        self.collection: pymongo.collection.Collection = None
        self.queue: List[mqtt.MQTTMessage] = list()  # reserved for the future retry queue
        self.queued_msgs = 0  # messages that could not be stored yet

    def connect(self):
        """Open the client and resolve the configured database/collection."""
        print("Connecting Mongo")
        self.client = pymongo.MongoClient(
            MONGO_URI, serverSelectionTimeoutMS=MONGO_TIMEOUT * 1000.0)
        self.database = self.client.get_database(MONGO_DB)
        self.collection = self.database.get_collection(MONGO_COLLECTION)

    def disconnect(self):
        """Close and forget the client, if one is open (idempotent)."""
        print("Disconnecting Mongo")
        if not self.client:
            return
        self.client.close()
        self.client = None

    def connected(self) -> bool:
        """Ping the server; True only when a live client answers."""
        if not self.client:
            return False
        try:
            self.client.admin.command("ismaster")
        except pymongo.errors.PyMongoError:
            return False
        return True

    def _enqueue(self, msg: mqtt.MQTTMessage):
        """Count a message that could not be stored.

        TODO: keep the message itself (self.queue) and replay it later; for
        now only the counter is maintained, for testing purposes.
        """
        print("Enqueuing")
        self.queued_msgs += 1
        print("Queued messages: ", self.queued_msgs)

    def __store_thread_f(self, msg: mqtt.MQTTMessage):
        """Worker body: build the document and insert it (runs in a thread)."""
        print("Storing")
        now = datetime.now()
        try:
            document = {
                "topic": msg.topic,
                "payload": msg.payload.decode(),
                # "retained": msg.retain,
                "qos": msg.qos,
                "timestamp": int(now.timestamp()),
                "datetime": now.strftime(MONGO_DATETIME_FORMAT),
                # TODO datetime must be fetched right when the message is
                # received; it will be wrong when a queued message is stored.
            }
            result = self.collection.insert_one(document)
            print("Saved in Mongo document ID", result.inserted_id)
            if not result.acknowledged:
                # The write was not acknowledged: count it as queued.
                self._enqueue(msg)
        except Exception as ex:
            # Best-effort persistence: log the failure and carry on.
            print(ex)

    def _store(self, msg):
        """Store *msg* asynchronously on a daemon thread."""
        worker = threading.Thread(target=self.__store_thread_f, args=(msg,))
        worker.daemon = True
        worker.start()

    def save(self, msg: mqtt.MQTTMessage):
        """Entry point: persist *msg*, or count it when Mongo is unreachable."""
        print("Saving")
        if msg.retain:
            print("Skipping retained message")
            return
        if self.connected():
            self._store(msg)
        else:
            self._enqueue(msg)
| [
"noreply@github.com"
] | rurentero.noreply@github.com |
602086de4515f698b533636b2648b870169d37ec | d1413683b9e4d49799096dfde2d6527ec6bac1b5 | /lib/gencode_icedb/tsl/evidenceDataDb.py | 135eaa458c995096e194c38e2d9db24551010d1e | [] | no_license | diekhans/gencode-icedb | 24f612feab4f467f40059a64b59dc1344d91ae1b | 9e2784c6a1ed8ac86343ee3a11f5704f5d2dc0c1 | refs/heads/master | 2021-05-04T11:33:04.310537 | 2020-02-22T00:46:05 | 2020-02-22T00:46:05 | 52,420,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,018 | py | """
Read evidence alignments from tabix files.
"""
import pysam
from pycbio.sys.symEnum import SymEnum, auto
from pycbio.sys.objDict import ObjDict
from pycbio.hgdata.psl import Psl
from gencode_icedb.general.evidFeatures import EvidencePslFactory, EvidenceSamFactory
from gencode_icedb.general.transFeatures import ExonFeature
import pipettor
class EvidenceSource(SymEnum):
    """Source of evidence used in support"""
    __slots__ = ()  # no per-instance attributes beyond the enum machinery
    # Members name the dataset/technology the evidence alignments came from.
    # FIXME: move to metadata or drop
    UCSC_RNA = auto()
    ENSEMBL_RNA = auto()
    MIXED_RNA = auto()
    UCSC_EST = auto()
    NANOPORE_DRNA = auto()
    NANOPORE_CDNA = auto()
    ISOSEQ_CDNA = auto()
def evidenceAlignsIndexPsl(pslFile):
    """Tabix-index *pslFile* (tName/tStart/tEnd columns); it must be sorted."""
    cmd = ["tabix", "--force", "--sequence=14", "--begin=16", "--end=17",
           "--zero-based", pslFile]
    pipettor.run(cmd)
class EvidenceAlignsReader(object):
    """Object for accessing overlapping alignment evidence data from a data source. Either PSL file
    that is bgzip compressed and tabix indexed or a BAM file.
    """
    def __init__(self, evidSetUuid):
        self.evidSetUuid = evidSetUuid
        self.nameSubset = None  # optional frozenset filter; used for testing and debugging.

    def setNameSubset(self, nameSubset):
        """Set a filter on query names.  Can be a string, list, or set, or
        None to clear.  This is used for testing and debugging."""
        if isinstance(nameSubset, str):
            nameSubset = [nameSubset]
        if nameSubset is not None:
            # BUG FIX: the frozenset was previously built and discarded,
            # leaving whatever mutable sequence the caller passed in.
            nameSubset = frozenset(nameSubset)
        self.nameSubset = nameSubset
class _PslEvidenceAlignsReader(EvidenceAlignsReader):
    "Reader implementation for PSL tabix"
    def __init__(self, evidSetUuid, evidPslTabix, genomeReader=None, genbankProblems=None):
        super(_PslEvidenceAlignsReader, self).__init__(evidSetUuid)
        self.tabix = pysam.TabixFile(evidPslTabix)
        # contigs present in the index; queries on other contigs yield nothing
        self.contigs = frozenset(self.tabix.contigs)
        self.genbankProblems = genbankProblems
        self.evidFactory = EvidencePslFactory(genomeReader)
    def close(self):
        # Idempotent: safe to call more than once.
        if self.tabix is not None:
            self.tabix.close()
            self.tabix = None
    def _makeTrans(self, psl):
        # Convert one PSL alignment into transcript features, attaching any
        # known GenBank problem for the query plus the evidence-set UUID.
        genbankProblem = self.genbankProblems.getProblem(psl.qName) if self.genbankProblems is not None else None
        attrs = ObjDict(genbankProblem=genbankProblem, evidSetUuid=self.evidSetUuid)
        return self.evidFactory.fromPsl(psl, attrs=attrs, orientChrom=True)
    def _usePsl(self, psl, strands):
        # Filter by the optional query-name subset and by alignment strand.
        return (((self.nameSubset is None) or (psl.qName in self.nameSubset))
                and (psl.strand in strands))
    # PSL strand values grouped by the transcription strand they imply.
    _posStrands = frozenset(('+', '++'))
    _negStrands = frozenset(('-', '+-', '-+', '--'))
    _allStrands = _posStrands.union(_negStrands)
    def _getSelectStrands(self, transcriptionStrand):
        # strand -- is used when PSLs of 3' ESTs have been reversed.
        if transcriptionStrand is None:
            return self._allStrands
        elif transcriptionStrand == '+':
            return self._posStrands
        else:
            return self._negStrands
    def _genOverlapping(self, coords, strands, minExons):
        # Tabix yields raw tab-separated rows; parse each into a Psl record.
        for line in self.tabix.fetch(coords.name, coords.start, coords.end):
            psl = Psl.fromRow(line.split('\t'))
            if self._usePsl(psl, strands):
                trans = self._makeTrans(psl)
                if len(trans.getFeaturesOfType(ExonFeature)) >= minExons:
                    yield trans
    def genOverlapping(self, coords, transcriptionStrand=None, minExons=0):
        """Generator of overlapping alignments as TranscriptFeatures, possibly filtered
        by nameSubset, transcription strand, and minimum exon count.
        """
        if coords.name in self.contigs:
            yield from self._genOverlapping(coords, self._getSelectStrands(transcriptionStrand), minExons)
class _BamEvidenceAlignsReader(EvidenceAlignsReader):
    "Reader implementation for a BAM file"
    def __init__(self, evidSetUuid, evidBam, genomeReader=None):
        super(_BamEvidenceAlignsReader, self).__init__(evidSetUuid)
        self.bamfh = pysam.AlignmentFile(evidBam)
        # References known to the BAM header; other contigs yield nothing.
        self.contigs = frozenset([self.bamfh.get_reference_name(i) for i in range(self.bamfh.nreferences)])
        self.evidFactory = EvidenceSamFactory(genomeReader)
    def close(self):
        # Idempotent: safe to call more than once.
        if self.bamfh is not None:
            self.bamfh.close()
            self.bamfh = None
    def _makeTrans(self, alnseg):
        # Convert one aligned segment into transcript features, tagging it
        # with the evidence-set UUID.
        attrs = ObjDict(evidSetUuid=self.evidSetUuid)
        return self.evidFactory.fromSam(self.bamfh, alnseg, attrs=attrs, orientChrom=True)
    def _useAln(self, alnseg, transcriptionStrand):
        # Filter by the optional query-name subset and, when requested, by
        # the read's alignment strand.
        strand = '-' if alnseg.is_reverse else '+'
        return (((self.nameSubset is None) or (alnseg.query_name in self.nameSubset))
                and ((transcriptionStrand is None) or (strand == transcriptionStrand)))
    def _genOverlapping(self, coords, transcriptionStrand, minExons):
        for alnseg in self.bamfh.fetch(coords.name, coords.start, coords.end):
            if self._useAln(alnseg, transcriptionStrand):
                trans = self._makeTrans(alnseg)
                if len(trans.getFeaturesOfType(ExonFeature)) >= minExons:
                    yield trans
    def genOverlapping(self, coords, transcriptionStrand=None, minExons=0):
        """Generator of overlapping alignments as TranscriptFeatures, possibly filtered
        by nameSubset, transcription strand, and minimum exon count.
        """
        if coords.name in self.contigs:
            yield from self._genOverlapping(coords, transcriptionStrand, minExons)
def evidenceAlignsReaderFactory(evidSetUuid, evidFile, genomeReader=None, genbankProblems=None):
    """Construct the reader matching the evidence file's extension.

    ``*.psl.gz`` selects the tabix-indexed PSL reader; ``*.bam`` selects the
    BAM reader (which does not use genbankProblems).

    :raises ValueError: if the file name has an unsupported extension.
    """
    if evidFile.endswith(".psl.gz"):
        return _PslEvidenceAlignsReader(evidSetUuid, evidFile, genomeReader, genbankProblems)
    if evidFile.endswith(".bam"):
        return _BamEvidenceAlignsReader(evidSetUuid, evidFile, genomeReader)
    # ValueError is an Exception subclass, so existing `except Exception`
    # callers keep working while the failure is more precisely typed.
    raise ValueError("Expected file name ending in .psl.gz or .bam, got {}".format(evidFile))
| [
"markd@ucsc.edu"
] | markd@ucsc.edu |
02e1485ac84e75fe3e43d1e153b0d258fcaf093f | 486b405835469bab887be73f62a7bfd191bd98ae | /SetupGUI.py | 21038f616ae909ff2a47132cee1501a3a31ede87 | [] | no_license | dot1991/RotMG-Headless-Launcher | d2529668625eed7d077fe43ba0fe2fc44cc23b9e | 6982ea3af659f44d94e9106bc4ab3d5ed02b1bc9 | refs/heads/main | 2023-03-14T21:14:47.731411 | 2021-03-27T21:03:38 | 2021-03-27T21:03:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,374 | py | import os
import tkinter as tk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
window = tk.Tk()
window.title('Shortcut Script Setup')
lbl_email = tk.Label(text="Email")
ent_email = tk.Entry(width=50)
lbl_password = tk.Label(text="Password")
ent_password = tk.Entry(width=50, show='\u2022')
lbl_email.grid(row=0, column=0, columnspan=2, padx=3, pady=3)
ent_email.grid(row=0, column=2, sticky='nesw', columnspan=3, padx=3, pady=3)
lbl_password.grid(row=1, column=0, columnspan=2, padx=3, pady=3)
ent_password.grid(row=1, column=2, sticky='nesw', columnspan=3, padx=3, pady=3)
lbl_name = tk.Label(text='Shortcut Name')
ent_name = tk.Entry()
lbl_name.grid(row=2, column=0, columnspan=2, padx=3, pady=3)
ent_name.grid(row=2, column=2, columnspan=3, padx=3, pady=3)
exe_path = f'C:/Users/{os.getlogin()}/Documents/RealmOfTheMadGod/Production/RotMG Exalt.exe'
ent_path = tk.Entry(width=100)
ent_path.insert(0, exe_path)
ent_path.grid(row=3, column=1, columnspan=4, padx=3, pady=3)
def exe_path_callback():
filename = askopenfilename(initialdir = f'C:\\Users\\{os.getlogin()}\\Documents\\RealmOfTheMadGod\\Production', title = "Find your RotMG Client")
ent_path.delete(0, 'end')
ent_path.insert(0, filename)
exe_path = filename
btn_browse = tk.Button(text='Browse', command=exe_path_callback, padx=3, pady=3)
btn_browse.grid(row=3, column=0)
shortcut_name = 'test'
def write_shortcut():
shortcut_name = ent_name.get()
if not os.path.exists('generated_shortcuts'):
os.makedirs('generated_shortcuts')
with open(f'C:/Users/{os.getlogin()}/Desktop/{shortcut_name}.bat', 'w') as f:
f.write('@ECHO OFF\n')
f.write(f'set ROTMG_EMAIL={ent_email.get()}\n')
f.write(f'set ROTMG_PASSWORD={ent_password.get()}\n')
f.write(f'set ROTMG_PATH={exe_path}\n')
f.write('"' + os.getcwd() + '/headless_launch.exe"')
ent_email.delete(0, 'end')
ent_password.delete(0, 'end')
ent_name.delete(0, 'end')
tk.messagebox.showinfo(title='Shortcut Created', message=f"Successfully created the shortcut {shortcut_name}.bat on your Desktop. Feel free to move the created shortcut wherever you'd like.")
btn_done = tk.Button(text='Make Shortcut Script', command=write_shortcut)
btn_done.grid(row=4, column=1, columnspan=3)
window.eval('tk::PlaceWindow . center')
window.mainloop()
| [
"rumbleqwop@gmail.com"
] | rumbleqwop@gmail.com |
3ba689c12014c59821f299b2c336a5260ccc761e | 9487973dee0ad29d7e5199ab64230311fca5556d | /Old files/webapp-Completed-Maria/webapp/routes.py | 2be8d58c2e4d5540b2dfd8a5db232b04df60e789 | [] | no_license | ahussaini821/Bioinformatics-project | 3a1be8f0a552fcf8bbe279d573ab596fd770d0c1 | 1635d61db00d81d85b14d23667a8c435fd18da31 | refs/heads/master | 2022-04-16T05:31:41.779121 | 2020-02-14T09:17:56 | 2020-02-14T09:17:56 | 232,339,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,217 | py | #Import the dependencies that we will be using in this file
import os
from webapp import app
from flask import render_template, request, redirect, url_for, send_file, send_from_directory
from werkzeug.utils import secure_filename
from webapp.dataAccess import searchProtein, searchGene, searchSubstrate, inhibitor
from webapp.dataAccess import characteristics, domains, targets, sequence, targetsKAccession, inhibitorAccession
#from webapp.KSEA_analysis import KSEA_analysis, bar_plot, bar_plot1, volcano, components
ALLOWED_EXTENSIONS = {'tsv'}  # upload formats accepted for analysis


def allowed_file(filename):
    """Check whether an uploaded file name is acceptable for processing.

    The name must contain a dot and its (case-insensitive) extension must
    be one of ALLOWED_EXTENSIONS.

    Arguments:
        filename {str} -- Name of the file being uploaded to the server.
    Returns:
        bool -- True if the file may be processed further.
    """
    # The free-floating "[summary]" string that used to sit above this
    # function was a plain expression statement, not a docstring; it has
    # been folded into a real docstring here.
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
"""[summary]
Route definition for index page in web application.
Arguments:
NA
Returns:
[render_template] -- The template for index page.
"""
@app.route('/')
@app.route('/index')
def index():
user = {'username': 'Maria'}
posts = [
{
'author': {'username': 'Maria'},
'body': 'Beautiful day!'
}
]
return render_template('index.html', title='Home', user=user, posts=posts)
"""[summary]
Route definition for Search Results page in web application.
Arguments:
NA
Returns:
[render_template] -- Based upon the search category, different templates are being rendered.
For Kianese and Gene search -- searchresults.html is rendered with related data.
For Inhibitor search -- inhibitorresults.html is rendered with related data.
For Substrate search -- substrateresults.html is rendered with related data.
"""
@app.route('/searchresults', methods = ['GET', 'POST'])
def searchResults():
searchString = ""
searchCategory = "1"
# If a post request, get the form field values for Search Query and Category
if request.method == 'POST':
formData = request.form.items()
for key, value in formData:
if key == "searchString":
searchString = value
if key == "selectSearchCategory":
searchCategory = value
# Based on the category, query database and render the results on template
if searchCategory == "1":
hkinase, dkinase = searchProtein(searchString.upper())
return render_template('searchresults.html', title='Kinase Details', header=hkinase, data=dkinase, search=searchString)
elif searchCategory == "3":
hinhibitor, dinhibitor = inhibitor(searchString)
return render_template('inhibitorresults.html', title='Inhibitor Details', header=hinhibitor, data=dinhibitor, search=searchString)
elif searchCategory == "4":
hsubstrate, dsubstrate = searchSubstrate(searchString)
return render_template('substrateresults.html', title='Substrate Details', hsubstrate=hsubstrate, dsubstrate=dsubstrate, search=searchString)
else:
header, data = searchGene(searchString.upper())
return render_template('searchresults.html', title='Similar Result', header=header, data=data, search=searchString)
"""[summary]
Route definition for File Upload page in web application.
Arguments:
NA
Returns:
[render_template] -- fileupload.html with error message if any, else kActivityAnalysis,html.
"""
@app.route('/fileUpload', methods=['GET', 'POST'])
def kActivityFileUpload():
status = ''
# If a file has been uploaded
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
status = 'No file part'
file = request.files['file']
# If no file was selected, return error message
if file.filename == '':
status = "No selected file"
# If not in allowed file format, return error message
if not allowed_file(file.filename):
status = "File format not supported."
# If everything is ok, save the file to server and call kActivityAnalysis() for proccessing it.
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FILE_PATH'], filename))
status = "File Uploaded Successfully...!!!"
return redirect(url_for('kActivityAnalysis', filename=filename))
return render_template('fileupload.html', title='File Upload', status=status)
"""[summary]
Route definition for Kinase Activity Analysis page in web application.
Arguments:
filename [{str}] -- The Complete path of the file at server.
Returns:
[render_template] -- kactivityanalysis.html with the graphs and table.
"""
@app.route('/kactivityanalysis/<filename>')
def kActivityAnalysis(filename):
    """Run the KSEA kinase-activity analysis on an uploaded file and render it.

    NOTE(review): KSEA_analysis, bar_plot, bar_plot1, volcano and components
    come from webapp.KSEA_analysis, whose import at the top of this module is
    currently commented out -- as written, this route raises NameError at
    runtime; re-enable that import before using this page.
    """
    # Get the path of files to process
    file_path = os.path.join(app.config['UPLOAD_FILE_PATH'], filename)
    file_location = os.path.join(os.getcwd() , 'webapp/db/kinase_substrate_PHOSPHO')
    # Making the analysis
    KSEA_results= KSEA_analysis(file_path, file_location)
    name= KSEA_results.get("inhibitor_name")  # inhibitor under analysis
    # Create the plots (embedded into the page below via components())
    plot = bar_plot(KSEA_results.get("z_score"))
    plot1 = bar_plot1(KSEA_results.get("z_score_sig"))
    plot3 = volcano(file_path)
    # Count how many substrates could not be matched to a kinase
    Substrates_with_no_kinases = KSEA_results.get("df_all_SUBSTRATES_NO_KINASE")
    amount = Substrates_with_no_kinases["control_mean"].count()
    # z-score table shown alongside the plots
    kinase_table= KSEA_results.get("z_score")
    # Embed plot into HTML via Flask Render
    script, div = components(plot)
    script1, div1 = components(plot1)
    script2, div2 = components(plot3)
    return render_template('kactivityanalysis.html', title='Kinases Activity Analysis', \
        script=script, div=div, script1=script1, div1=div1, amount = amount, \
        name= name, kinase_table=kinase_table, Substrates_with_no_kinases = Substrates_with_no_kinases, \
        script2=script2, div2=div2)
"""[summary]
Route definition for Protein Details page in web application.
Arguments:
NA
Returns:
[render_template] -- protein.html with data to be displayed in each tab.
hcharacteristics, dcharacteristics - Characteristics Tab - Selected by default
hdomains, ddomains - Domains Tab
htargets, dtargets - Phosphosites Tab
htargetsKAccess, dtargetsKAccess - Targets Tab
hsequence, dsequence - Sequence Tab
hinhibitor, dinhibitor - Inhibitor Tab
"""
@app.route('/protein')
def protein():
    """Render the Protein Details page for ?searchString=...&accession=...

    Each h*/d* pair below is the (header, data) content for one tab of
    protein.html (Characteristics, Domains, Phosphosites, Targets,
    Sequence, Inhibitor).
    """
    searchString = ""
    accession = ""
    # Get the values for query params of GET method to the page
    if request.args :
        searchString = request.args['searchString']
        accession = request.args['accession']
    # Build up the variables to be passed to protein.html for details
    hcharacteristics, dcharacteristics = characteristics(accession)
    hdomains, ddomains = domains(accession)
    htargets, dtargets = targets(accession)
    htargetsKAccess, dtargetsKAccess = targetsKAccession(accession)
    hsequence, dsequence = sequence(accession)
    hinhibitor, dinhibitor = inhibitorAccession(accession)
    hsearchProtein, dsearchProtein = searchProtein(searchString)
    return render_template('protein.html', title='Protein Details', \
        hcharacteristics=hcharacteristics, dcharacteristics=dcharacteristics, \
        hdomains=hdomains, ddomains=ddomains, \
        htargets=htargets, dtargets=dtargets, \
        hsequence=hsequence, dsequence=dsequence, \
        hsearchProtein=hsearchProtein, dsearchProtein=dsearchProtein, \
        hinhibitor=hinhibitor, dinhibitor=dinhibitor, \
        htargetsKAccess=htargetsKAccess, dtargetsKAccess=dtargetsKAccess, searchString=searchString)
"""[summary]
Route definition for Inhibitor Details page in web application.
Arguments:
NA
Returns:
[render_template] -- inhibitor.html with data to be displayed.
"""
@app.route('/inhibitor')
def inhibitorDetails():
    """Render the Inhibitor Details page for ?searchInhibitor=<name>."""
    searchInhibitor = ""
    # Get the values for query params of GET method to the page
    if request.args :
        searchInhibitor = request.args['searchInhibitor']
    # Get the inhibitor details from databse
    hinhibitor, dinhibitor = inhibitor(searchInhibitor)
    # Build the image name based upon the CNumber.
    # NOTE(review): assumes the query returned at least one row; an unknown
    # inhibitor would raise IndexError here -- confirm upstream validation.
    structureImgName = str(dinhibitor[0][0]) + '.png'
    return render_template('inhibitor.html', title='Inhibitor Details', \
        hinhibitor=hinhibitor, dinhibitor=dinhibitor, searchInhibitor=searchInhibitor, structureImgName=structureImgName)
"""[summary]
Route definition for Substrate Details page in web application.
Arguments:
NA
Returns:
[render_template] -- substrate.html with data to be displayed in each tab.
hcharacteristics, dcharacteristics - Characteristics Tab - Selected by default
htargets, dtargets - Phosphosites Tab
hsequence, dsequence - Sequence Tab
"""
@app.route('/substrate')
def substrate():
    """Render the Substrate Details page for ?searchString=...&accession=...

    Each h*/d* pair is the (header, data) content for one tab of
    substrate.html (Characteristics, Phosphosites, Sequence).
    """
    searchString = ""
    accession = ""
    # Get the values for query params of GET method to the page
    if request.args :
        searchString = request.args['searchString']
        accession = request.args['accession']
    # Build up the variables from db methods to be passed to protein.html for details
    hcharacteristics, dcharacteristics = characteristics(accession)
    hsearchProtein, dsearchProtein = searchProtein(searchString)
    htargets, dtargets = targets(accession)
    hsequence, dsequence = sequence(accession)
    return render_template('substrate.html', title='Substrate Details', \
        hcharacteristics=hcharacteristics, dcharacteristics=dcharacteristics, \
        hsearchProtein=hsearchProtein, dsearchProtein=dsearchProtein, \
        htargets=htargets, dtargets=dtargets, \
        hsequence=hsequence, dsequence=dsequence, \
        searchString=searchString)
"""[summary]
Route definition for Genome Browser page in web application.
Arguments:
NA
Returns:
[render_template] -- chromosomes.html with data to be displayed.
"""
@app.route('/chromosome')
def genomeBrowser():
    """Render the genome-browser landing page listing every chromosome."""
    # Autosomes 1-22 followed by the two sex chromosomes.
    chromosomes = ["Chromosome %d" % number for number in range(1, 23)]
    chromosomes += ["Chromosome X", "Chromosome Y"]
    return render_template('chromosomes.html', title="Chromosome", chromosomes=chromosomes)
"""[summary]
Route definition for Chromosome Details page in web application.
Arguments:
NA
Returns:
[render_template] -- chromosome<number>.html with data to be displayed.
"""
@app.route('/chromosomedetails')
def genomeDetails():
    """Render the per-chromosome detail template chosen by ?chromosome=..."""
    chromosome = ""
    # Get the selected chromosome
    if request.args :
        chromosome = request.args['chromosome']
    # NOTE(review): the template name comes straight from user input; values
    # outside the expected "Chromosome N" set raise TemplateNotFound --
    # consider validating against a whitelist.
    return render_template(chromosome+'.html')
"""[summary]
Route definition for About Us page in web application.
Arguments:
NA
Returns:
[render_template] -- about.html with data to be displayed.
"""
@app.route('/about')
def about():
    """Render the static About Us page."""
    return render_template('about.html', title='About Us')
"""[summary]
Route definition for About Us page in web application.
Arguments:
NA
Returns:
[render_template] -- about.html with data to be displayed.
"""
@app.route('/contact')
def contact():
    """Render the static Contact Us page."""
    return render_template('contact.html', title='Contact Us')
| [
"55877793+ahussaini821@users.noreply.github.com"
] | 55877793+ahussaini821@users.noreply.github.com |
56e254566c6f66a6fe32334746ee9ded25d30f35 | 5b56f7a244310a4a387868309baaba7cd303be34 | /MemeEngine/MemeEngine.py | 2ac66c5a319f3507bb3c31c133533dc5e76c3aac | [] | no_license | saltamay/Udacity_Intermediate_Python_Meme_Generator | a4e5067d9bcc3961a8fc6c2ab2e4e26a85e1c8dc | ffb57ccd662da3f6ab832b2934eb8df962bedc8a | refs/heads/master | 2023-07-14T15:31:18.357474 | 2021-08-26T02:16:31 | 2021-08-26T02:16:31 | 397,812,386 | 1 | 0 | null | 2021-08-24T23:02:53 | 2021-08-19T04:09:32 | Python | UTF-8 | Python | false | false | 1,932 | py | import textwrap
import random
from PIL import Image, ImageDraw, ImageFont
class MemeEngine:
    """Generate meme images: resize a source picture and draw a caption."""

    def __init__(self, output_dir: str) -> None:
        # Directory the generated memes are written to (assumed to exist).
        self.output_dir = output_dir

    def make_meme(
            self,
            img_path: str,
            text: str,
            author: str,
            width=500) -> str:
        """Create a meme with a caption
        Arguments:
            img_path {str} -- the file location for the input image.
            text {str} -- text for caption's body (no caption drawn if None).
            author {str} -- text for caption's author
            width {int} -- The pixel width value. Default=500.
        Returns:
            str -- the file path to the output image.
        """
        img = Image.open(img_path)
        # Preserve aspect ratio while scaling to the requested width.
        ratio = width / float(img.size[0])
        height = int(ratio * float(img.size[1]))
        img = img.resize((width, height), Image.NEAREST)
        if text is not None:
            # Wrap the body text at 30 characters per line.
            wrapper = textwrap.TextWrapper(width=30)
            draw = ImageDraw.Draw(img)
            font_body = ImageFont.truetype(
                './fonts/LilitaOne-Regular.ttf', size=32)
            font_author = ImageFont.truetype(
                './fonts/LilitaOne-Regular.ttf', size=20)
            # Random caption position in the middle band of the image.
            # NOTE(review): height / 4 is a float; random.randint requires
            # int bounds on modern Python -- confirm with `height // 4`.
            top = random.randint(height / 4, 3 * height / 4)
            left = random.randint(10, 30)
            for word in wrapper.wrap(text=text):
                # Advance one line per wrapped chunk.
                top = top + font_body.size
                draw.text((left, top), word,
                          font=font_body,
                          fill='black', stroke_width=1, stroke_fill='white')
            # Author credit just below the body text.
            draw.text((left + 10, top + font_body.size), f'- {author}',
                      font=font_author,
                      fill='black', stroke_width=1, stroke_fill='white')
        # Random numeric filename; collisions are possible but unlikely.
        img_name = f'{random.randint(0, 1000000)}.jpg'
        output_path = f'{self.output_dir}/{img_name}'
        img.save(output_path)
        return output_path
| [
"sal.tamay@outlook.com"
] | sal.tamay@outlook.com |
718f5c466bcb072ac392b31093e06e95e765a778 | c065ff2a6a377aea2303b7b8482558049958a7ec | /toydrill/1562239369/tactile.tac | bc08dcae3533cc9df2a88b7b7c9676aaa4f9f19f | [] | no_license | waedbara/vision2tactile | 7bc9861eecb4247fd254ea58dc508ed18a03b1af | edbc9dfee61b4a4b1f0caebb2f16faef090dff32 | refs/heads/master | 2022-04-02T20:43:16.621687 | 2019-12-11T08:07:39 | 2019-12-11T08:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | tac | ,3504,3675,3514,3654,3568,3467,3700,3569,3528,3402,3457,3599,3046,3296,3498,3420,3527,3532,3031,2852,3668,3676,3627,3561,2031,2034,1725,2050,2640,3328,3514,3353,3403,3289,3248,3462,3366,3348,3171,3256,3488,3320,3340,3234,3339,3344,3299,2877,2753,3504,3489,3478,3352,1909,1901,3056,2093,2473 | [
"brayan.inf@gmail.com"
] | brayan.inf@gmail.com |
10dc596ee3806327d1ccc373c0416d36df7405ed | 6ca8795838b63f932a03734a3b14cf322171b933 | /blog/admin.py | c2cf3f7be3e20faba6c2a782913585f831e97125 | [] | no_license | jesusvita/benihanaTipOut2 | 4a18b4dc763665c11ac6a3d5613112a6dcec601c | fd186e73fbf21bd47384b0b7f2b4ad632f4d8f21 | refs/heads/master | 2022-02-27T11:55:28.636086 | 2019-10-09T04:17:56 | 2019-10-09T04:17:56 | 212,521,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from .models import Post, tipOut
# Expose both models in the Django admin interface.
admin.site.register(Post)
admin.site.register(tipOut)
| [
"jesusvitaa@gmail.com"
] | jesusvitaa@gmail.com |
8585c66c448b22e2add5a38e02bc37cc636d7286 | 395b2e9718eeb5035995130b2377c47b8df05614 | /tests/attack/test_pgd.py | 87973fe5b11c7e424ec3918db6a543beef229bab | [] | no_license | gatheluck/fourier-attack | 0a6d773e268bf1e480f04a43dcc72905af804b43 | 1668f0d2eed6182cb69904c49fe223e78cb5d0cc | refs/heads/master | 2023-03-10T05:15:10.897205 | 2021-03-01T08:19:10 | 2021-03-01T08:19:10 | 320,191,916 | 1 | 0 | null | 2021-03-01T08:19:11 | 2020-12-10T07:21:19 | Python | UTF-8 | Python | false | false | 2,035 | py | import pathlib
from typing import Final
import torch
import torchvision
import fourier_attack.attack.pgd
from fourier_attack.util import Denormalizer
class TestPgdAttack:
    """Tests for fourier_attack.attack.pgd.PgdAttack."""

    def test__forward(
        self, pretrained_cifar10_resnet50, cifar10_stats, normalize_cifar10_loader
    ):
        """Smoke-test the attack forward pass for every norm/device pair.

        Pytest fixtures supply a pretrained CIFAR-10 ResNet-50, the dataset
        normalization stats (mean, std), and a normalized DataLoader.
        For each combination the adversarial batch is saved as a PNG and
        its shape is checked.
        """
        # Attack hyper-parameters.
        input_size: Final = 32
        num_iteration: Final = 8
        eps_max: Final = 16.0
        step_size: Final = eps_max / num_iteration
        rand_init: Final = True
        scale_eps: Final = True
        scale_each: Final = True
        avoid_target: Final = True
        norms = {"linf", "l2"}
        # Only exercised on CUDA hosts; on CPU-only machines the device set
        # is empty and the loops below are skipped entirely.
        devices = set(["cuda"]) if torch.cuda.is_available() else set()
        output_root: Final = pathlib.Path("logs/test/")
        output_root.mkdir(exist_ok=True, parents=True)
        model = pretrained_cifar10_resnet50
        criterion_func = torch.nn.functional.cross_entropy
        mean, std = cifar10_stats
        for norm in norms:
            for device in devices:
                attacker = fourier_attack.attack.pgd.PgdAttack(
                    input_size,
                    mean,
                    std,
                    num_iteration,
                    eps_max,
                    step_size,
                    norm,
                    rand_init,
                    scale_eps,
                    scale_each,
                    avoid_target,
                    criterion_func,
                    device,
                )
                for x, t in normalize_cifar10_loader:
                    x, t = x.to(device), t.to(device)
                    batch_size = x.size(0)
                    x_adv = attacker(model, x, t)
                    # Save a denormalized grid for visual inspection.
                    denormalizer = Denormalizer(input_size, mean, std, device, False)
                    torchvision.utils.save_image(
                        denormalizer(x_adv), output_root / f"forward-pgd-{norm}.png"
                    )
                    assert x_adv.size() == torch.Size([batch_size, 3, 32, 32])
                    break  # test only first batch
| [
"gatheluck+tech@gmail.com"
] | gatheluck+tech@gmail.com |
ee47a4b6d23e2e42d141640137a6287efceccf21 | 951f4f4611e5bf2dc3970cc38aa545a54b78690b | /google/cloud/billing/budgets_v1/services/budget_service/pagers.py | 7dd43d59a473e66b6fa0df2247a29735ee27397a | [
"Apache-2.0"
] | permissive | renovate-bot/python-billingbudgets | e54771361e1e9239697e23255b00f6551a1d18b7 | 2b1e66fa19415f56e33713d57fcc516efca6d03a | refs/heads/master | 2023-06-08T01:23:04.490451 | 2021-08-18T15:42:00 | 2021-08-18T15:42:00 | 227,000,311 | 0 | 0 | Apache-2.0 | 2019-12-10T01:13:52 | 2019-12-10T01:13:51 | null | UTF-8 | Python | false | false | 5,860 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.billing.budgets_v1.types import budget_model
from google.cloud.billing.budgets_v1.types import budget_service
class ListBudgetsPager:
    """A pager for iterating through ``list_budgets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``budgets`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListBudgets`` requests and continue to iterate
    through the ``budgets`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., budget_service.ListBudgetsResponse],
        request: budget_service.ListBudgetsRequest,
        response: budget_service.ListBudgetsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.billing.budgets_v1.types.ListBudgetsRequest):
                The initial request object.
            response (google.cloud.billing.budgets_v1.types.ListBudgetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = budget_service.ListBudgetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[budget_service.ListBudgetsResponse]:
        # Yield the current page, then keep fetching while the server
        # returns a continuation token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[budget_model.Budget]:
        # Flatten pages into a stream of Budget messages.
        for page in self.pages:
            yield from page.budgets

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBudgetsAsyncPager:
    """A pager for iterating through ``list_budgets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``budgets`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListBudgets`` requests and continue to iterate
    through the ``budgets`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.billing.budgets_v1.types.ListBudgetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[budget_service.ListBudgetsResponse]],
        request: budget_service.ListBudgetsRequest,
        response: budget_service.ListBudgetsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.billing.budgets_v1.types.ListBudgetsRequest):
                The initial request object.
            response (google.cloud.billing.budgets_v1.types.ListBudgetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = budget_service.ListBudgetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[budget_service.ListBudgetsResponse]:
        # Yield the current page, then keep awaiting new pages while the
        # server returns a continuation token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterable[budget_model.Budget]:
        # Flatten async pages into an async stream of Budget messages.
        async def async_generator():
            async for page in self.pages:
                for response in page.budgets:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"noreply@github.com"
] | renovate-bot.noreply@github.com |
c764dffcf73e377fbeab0b1e3fe032ab8004b975 | 5b19ced6bd173baf11c4b5e9d1c08f17ca635773 | /Python/数字在排序数组中出现的次数.py | 3f6a48c6ebcbfd2edb992331e21d261b5f5d29a5 | [] | no_license | zhaoqun05/Coding-Interviews | 8efe579b6a1a6186107f599a31a9e96389df52f3 | e05c1e6390b3df49dd02571e13fb8a3822eae649 | refs/heads/master | 2022-01-08T13:30:06.542796 | 2019-06-18T14:00:55 | 2019-06-18T14:00:55 | 282,934,693 | 2 | 0 | null | 2020-07-27T15:13:54 | 2020-07-27T15:13:53 | null | UTF-8 | Python | false | false | 1,253 | py | '''
统计一个数字在排序数组中出现的次数。
'''
# -*- coding:utf-8 -*-
class Solution:
def GetNumberOfK(self, data, k):
# 直观的想法从前到后的顺序遍历,但是算法题几乎不会将顺序查找作为考察要点……
def getFirst(nums):
start, end = 0, len(nums) - 1
while start <= end:
mid = (start + end) // 2
if data[mid] >= k: # 注意前后两个二分查找条件不一致
end = mid - 1
else:
start = mid + 1
# 导致两个函数越界的指针不一致,应该返回的指针是非越界指针
return start if start < len(nums) and nums[start] == k else -1
def getLast(nums):
start, end = 0, len(nums) - 1
while start <= end:
mid = (start + end) // 2
if data[mid] <= k:
start = mid + 1
else:
end = mid - 1
return end if end < len(nums) and nums[end] == k else -1
if not data: return 0
first, last = getFirst(data), getLast(data)
return last - first + 1 if first != -1 and last != -1 else 0
| [
"1215470124@qq.com"
] | 1215470124@qq.com |
2f561e222d595c6c365156517d18d9b6348e040b | 1d0576ee4a2cda4cc9a5ebe37664d468534f6389 | /algorithms/number_theory.py | a23c6b1daf940afd9910e83e7e5b5c2dae244e83 | [
"MIT"
] | permissive | vadimadr/python-algorithms | 2090aca095b5295cf4bf0bac2e5cf1041e812df8 | a9716308ccf9ebf54d7472da4700b5acffbea547 | refs/heads/master | 2021-11-24T23:32:15.363990 | 2021-11-05T16:48:12 | 2021-11-05T16:48:12 | 66,856,752 | 4 | 0 | MIT | 2021-11-06T20:58:10 | 2016-08-29T15:39:48 | Jupyter Notebook | UTF-8 | Python | false | false | 14,506 | py | from functools import reduce
from itertools import chain
from math import sqrt
from operator import mul
from random import randint
def even(n):
    """True iff integer n is even (n == 2k)."""
    return n & 1 == 0
def odd(n):
    """True iff integer n is odd (n == 2k + 1)."""
    return n & 1 == 1
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm.

    Invariants: gcd(a, b) == gcd(b, a % b) and gcd(a, 0) == a; the
    remainders strictly decrease, so the loop terminates.
    """
    while b:
        a, b = b, a % b
    return a
def lcm(a, b):
    """Least common multiple, computed from the identity |a*b| = gcd * lcm."""
    g = gcd(a, b)
    return abs(a * b) // g
def extended_euclidian(a, b):
    """Extended Euclidean algorithm.

    Return (x, y, d) with a*x + b*y == d and d == gcd(a, b).

    Recursive derivation: if (x1, y1, d) solves
    b*x1 + (a % b)*y1 == d, then since a % b == a - b*(a // b),
    substituting gives a*y1 + b*(x1 - y1*(a // b)) == d.
    Runs in O(log^2 a).
    """
    if b == 0:
        # gcd(0, 0) is taken as 0; otherwise a*1 + 0*0 == a.
        return (0, 0, 0) if a == 0 else (1, 0, a)
    x1, y1, d = extended_euclidian(b, a % b)
    return y1, x1 - y1 * (a // b), d
def binomial(n, k):
    """Binomial coefficient C(n, k): ways to choose k objects from n.

    Computed multiplicatively as prod_{i=1..k} (n - i + 1) / i; the
    running product is divisible by i at step i, so ``//`` is exact.

    Fix: k < 0 now returns 0 (the empty loop used to return 1).
    For k > n >= 0 the product reaches a zero factor and returns 0.
    """
    if k < 0:
        return 0
    res = 1
    for i in range(1, k + 1):
        res = res * (n - i + 1) // i
    return res
def binomial_table(n, k):
    """Compute C(n, k) by filling Pascal's triangle row by row.

    Uses the recurrence C(i, j) = C(i-1, j-1) + C(i-1, j) with
    C(i, 0) = C(i, i) = 1; only columns up to k are filled.
    """
    rows = [[0] * (i + 1) for i in range(n + 1)]
    for i in range(n + 1):
        top = min(i, k)
        for j in range(top + 1):
            rows[i][j] = 1 if j in (0, i) else rows[i - 1][j - 1] + rows[i - 1][j]
    return rows[n][k]
def factorial(n):
    """Return n! = 1 * 2 * ... * n, with 0! == 1! == 1.

    Bug fix: the upper bound must be inclusive -- the original
    ``range(1, n)`` computed (n-1)!.
    """
    return reduce(mul, range(2, n + 1), 1)
def fibonacci(n):
    """Return the n-th Fibonacci number, with F(0)=0 and F(1)=1.

    Iterative: keeps only the last two values of the sequence.
    """
    if n < 2:
        return n
    prev, cur = 1, 1  # F(1), F(2)
    for _ in range(n - 2):
        prev, cur = cur, prev + cur
    return cur
def isqrt(x):
    """Return floor(sqrt(x)) exactly for any non-negative integer.

    Bug fix: ``int(sqrt(x))`` relies on float precision and can be off by
    one for x beyond ~2**52.  We still use the float value as a seed but
    correct it with exact integer comparisons.
    """
    if x < 0:
        # math.sqrt would raise ValueError too; keep the same exception type.
        raise ValueError("isqrt is undefined for negative numbers")
    if x == 0:
        return 0
    r = int(sqrt(x))  # float seed, then exact correction below
    while r * r > x:
        r -= 1
    while (r + 1) * (r + 1) <= x:
        r += 1
    return r
def coprime(a, b):
    """
    Check if two integers are coprime.  Integers are coprime if the only
    positive integer dividing both of them is 1, i.e. gcd(a, b) == 1.
    Parameters
    ----------
    a : int
        input value
    b : int
        input value
    Returns
    -------
    bool
        Whether the integers are coprime
    """
    return gcd(a, b) == 1
def is_prime(n: int) -> bool:
    """Deterministic primality check by 6k +- 1 trial division.

    After excluding multiples of 2 and 3, every prime is of the form
    6k +- 1, so only candidates q and q + 2 for q = 5, 11, 17, ... are
    tried.

    Fix: the loop bound uses exact integer comparison ``q * q <= n``
    instead of the float-backed ``isqrt`` helper, which can be off by one
    for very large n.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    q = 5
    while q * q <= n:
        if n % q == 0 or n % (q + 2) == 0:
            return False
        q += 6
    return True
def sieve(n):
    """Sieve of Eratosthenes: return all primes <= n in increasing order.

    Improvements: crossing-out starts at p*p (smaller multiples were
    already marked by smaller primes), and the bound uses exact integer
    arithmetic instead of the float-backed ``isqrt`` helper.
    """
    is_comp = [False] * (n + 1)
    p = 2
    while p * p <= n:
        if not is_comp[p]:
            for m in range(p * p, n + 1, p):
                is_comp[m] = True
        p += 1
    return [i for i in range(2, n + 1) if not is_comp[i]]
def factorize(n):
    """
    Prime decomposition
    Decomposes integer n into
    n = p1^a1 * p2^a2 * pn^an
    where p_i are primes and a_i are their exponents
    Parameters
    ----------
    n : int
        integer to factorize
    Returns
    -------
    factors : list
        list of the prime factors, together with their exponents.
        Note: n in {0, 1} is reported as [(n, 1)], and a negative n
        contributes a leading (-1, 1) factor.
    Examples
    --------
    >>> factorize(2434500)
    [(2, 2), (3, 2), (5, 3), (541, 1)]
    """
    if n in (0, 1):
        return [(n, 1)]
    factors = []
    if n < 0:
        factors.append((-1, 1))
        n = -n
    # check 2, 3, then all integers in form q = 6k +- 1.
    # The range bound is fixed at isqrt of the ORIGINAL n; as n shrinks the
    # extra iterations only cost time, not correctness.
    for q in chain((2, 3), range(5, isqrt(n) + 1, 6)):
        # q = 6k - 1
        a = 0
        while n % q == 0:
            # q is prime because n already divided by its prime factors
            n //= q
            a += 1
        if a > 0:
            factors.append((q, a))
        # 6k + 1 companion candidate (for q=2,3 this probes 4 and 5, which
        # is harmless: 4 can no longer divide n, and 5 is re-tested later).
        q += 2
        a = 0
        while n % q == 0:
            n //= q
            a += 1
        if a > 0:
            factors.append((q, a))
    # Whatever remains above the sqrt bound is a single prime cofactor.
    if n != 1:
        factors.append((n, 1))
    return factors
def prime_pi(n):
    """Prime-counting function: pi(n) = number of primes <= n."""
    return 0 if n < 2 else len(sieve(n))
def euler_phi(n):
    """Euler's totient: how many 1 <= k <= n are coprime to n.

    Uses phi(n) = n * prod_{p | n} (1 - 1/p), applied in integer form as
    phi -= phi // p for each distinct prime p of n.
    """
    if n == 1:
        return 1
    phi = n
    for prime, _exponent in factorize(n):
        phi -= phi // prime
    return phi
def binpow(x, r):
    """Raise x to the non-negative integer power r by binary exponentiation.

    Scans the bits of r from least to most significant, squaring x at each
    step and multiplying it in when the current bit is set.
    """
    result = 1
    while r > 0:
        if r & 1:
            result *= x
        x *= x
        r >>= 1
    return result
def linear_diophantine(a, b, c):
    """Solve ax + by = c, where x, y are integers
    1. solution exists iff c % gcd(a,b) = 0
    2. all solutions have form (x0 + b'k, y0 - a'k) for integer k
    Returns
    -------
    None if no solution exists
    (x0, y0, a', b') otherwise, where (x0, y0) is one particular solution
    and a' = a/d, b' = b/d parameterize the full solution family
    """
    # Bezout coefficients: d = pa + qb (d == 0 only when a == b == 0).
    p, q, d = extended_euclidian(a, b)
    if d == 0 or c % d != 0:
        return None
    # Divide through by d: ax + by = c <=> a'x + b'y = c'.
    a, b, c = a // d, b // d, c // d
    # Scale the Bezout solution of a'x + b'y = 1 by c'.
    return p * c, q * c, a, b
def linear_sieve(max_n):
    """Linear (Euler) sieve over [2, max_n).

    Returns
    -------
    primes : list of all primes strictly below max_n.
    smallest_factors : list where smallest_factors[v] is the least prime
        factor of v (entries below 2 stay 0).

    Every composite v*p is marked exactly once -- via its smallest prime
    p -- which is what makes the total work O(max_n).
    """
    least = [0] * max_n
    found = []
    for v in range(2, max_n):
        if least[v] == 0:
            # v was never marked as a multiple, so it is prime.
            least[v] = v
            found.append(v)
        for p in found:
            # Stop once p exceeds v's smallest factor (v*p would then be
            # marked again later via a smaller prime) or leaves the range.
            if p > least[v] or v * p >= max_n:
                break
            least[v * p] = p
    return found, least
# Precomputed tables used by Ballie_PSW_test: all primes below 10**6 and
# each integer's smallest prime factor.
primes, spf = linear_sieve(int(1e6))
def powmod(x, k, m):
    """Compute x**k mod m by binary exponentiation, O(log k) multiplies.

    Bug fix: the original halved the exponent with float division
    (``k /= 2``), which silently corrupts exponents above 2**53; integer
    shifts are used throughout here.
    """
    x %= m
    ans = 1
    while k > 0:
        if k & 1:
            ans = ans * x % m
        x = x * x % m
        k >>= 1
    return ans
def factor_twos(x):
    """Split x as x = 2**s * d with d odd; return (d, s).

    Robustness fix: x == 0 now returns (0, 0) instead of looping forever
    (0 is divisible by 2 indefinitely).
    """
    if x == 0:
        return 0, 0
    d, s = x, 0
    while d % 2 == 0:
        d >>= 1
        s += 1
    return d, s
def fermat_test(n, a):
    """Fermat probable-prime test base a: passes iff a^(n-1) = 1 (mod n),
    with n == 2 accepted unconditionally."""
    return n == 2 or powmod(a, n - 1, n) == 1
def fermat_strong_test(n, a):
    """Strong probable-prime (Miller-Rabin) test of odd n to base a.

    Returns True if n is a strong probable prime to base a.
    For a composite integer n it returns True with probability ~ 1/4.
    For a prime integer n it always returns True.
    """
    if n == 2:
        return True
    # n - 1 = d * 2 ^ s with d odd.
    d, s = factor_twos(n - 1)
    # by Fermat theorem, if n is prime then
    # (a^d - 1)(a^d + 1)(a^2d + 1)(a^4d + 1)...(a^2^(s-1)d + 1) = 0 (mod n),
    # i.e. a^d = +-1 or some repeated squaring of a^d hits -1 (mod n).
    a = powmod(a, d, n)
    if a == 1 or a == n - 1:
        return True
    for _ in range(s):
        a = a * a % n
        if a == n - 1:
            return True
    return False
def jacobi(a, n):
    """Jacobi symbol (a/n) for odd positive n.

    Returns:
     0 if a is not coprime to n (gcd(a,n) != 1)
    -1 if a is NOT a perfect square modulo n
     1 if a may be a perfect square
    """
    if n == 1:
        return 1
    if a % n == 0:
        return 0
    if a < 0:
        # Shift a into [0, n) by adding a suitable multiple of n.
        return jacobi(a + n * (1 + (-a - 1) // n), n)
    # Factor out powers of two: a = 2^k * a1 with a1 odd.
    a1, k = factor_twos(a)
    # (2/n) = 1 when n = +-1 (mod 8), else -1; an even power of two
    # contributes nothing.
    j_even = 1 if n % 8 == 1 or n % 8 == 7 else -1
    if even(k):
        j_even = 1
    # law of quadratic reciprocity: flip sign iff both are 3 (mod 4).
    if n % 4 == 3 and a1 % 4 == 3:
        return -j_even * jacobi(n % a1, a1)
    return j_even * jacobi(n % a1, a1)
def lucas_strong_test(n, p, q):
    """Strong Lucas probable-prime test of n with parameters (p, q).

    Works with the Lucas sequences U_k, V_k for discriminant
    D = p^2 - 4q; n passes if U_d = 0 (mod n) or V_{d*2^r} = 0 (mod n)
    for some 0 <= r < s, where n - Jacobi(D, n) = 2^s * d with d odd.
    """
    if even(n):
        return n == 2
    D = p ** 2 - 4 * q
    def div2(x):
        """Performs division by 2 modulo n (n is odd, so 2 is invertible)."""
        if odd(x):
            x += n
        return x // 2 % n
    def lucas_double(u_k, v_k, k):
        # computes U_k, V_k -> U_2k, V_2k
        return u_k * v_k % n, (v_k * v_k + -2 * powmod(q, k, n)) % n
    def lucas_sum(u_k, v_k, u_m, v_m):
        # computes U_{k+m}, V_{k+m}
        u_km = div2(u_k * v_m + u_m * v_k)
        v_km = div2(v_k * v_m + D * u_k * u_m)
        return u_km, v_km
    # n - J(D/n) = 2^s*d with d odd.
    d, s = factor_twos(n - jacobi(D, n))
    # Compute U_d, V_d by "double and add" over the binary digits of d.
    u, v = 0, 2 # u0, v0
    u_k, v_k, k, d_rem = 1, p, 1, d # u1, v1
    while d_rem:
        if d_rem & 1:
            u, v = lucas_sum(u, v, u_k, v_k)
        u_k, v_k = lucas_double(u_k, v_k, k)
        k *= 2
        d_rem >>= 1
    if u == 0:
        return True
    # Check V_{d * 2^r} = 0 (mod n) by repeated doubling.
    for _ in range(s + 1):
        if v == 0:
            return True
        u, v = lucas_double(u, v, d)
        d *= 2
    # If Q != +-1 we may additionally check congruences
    # V_{n+1} = 2Q (mod n) and Q^(n+1)/2 = Q*J(Q, n) (mod n)
    # (Frobenius probable prime)
    return False
def lucas_selfridge_test(n):
    """Strong Lucas test with Selfridge's "method A" parameters.

    Picks D from the sequence 5, -7, 9, -11, 13, ... as the first value
    with Jacobi(D, n) == -1, then runs the strong Lucas test with p = 1
    and q = (1 - D) / 4.
    """
    # Perfect squares never yield J(D/n) == -1, so reject them up front
    # (this also keeps the D-search loop from running forever).
    if isqrt(n) ** 2 == n:
        return False
    D = 5
    while jacobi(D, n) != -1:
        # Alternate sign while growing |D| by 2 each step.
        if D > 0:
            D = -(D + 2)
        else:
            D = -(D - 2)
    return lucas_strong_test(n, 1, (1 - D) // 4)
# Deterministic Miller-Rabin witness set: correct for all n < 3 * 10^9
BASES_1 = [2, 3, 5, 7]
# Deterministic witness set: correct for all n < 9 * 10^18 (covers 64-bit)
BASES_2 = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]
def Miller_Rabin_test(n, bases=None, num_trials=5):
    """Miller-Rabin primality test of n over the given witness bases.

    With the default deterministic base sets the answer is exact within
    the ranges documented on BASES_1/BASES_2; ``num_trials`` only matters
    for the (commented-out) random-base variant.
    NOTE(review): n < 2 is not guarded against -- callers should ensure
    n >= 2.
    """
    if bases is None:
        # with random base probability of false positive is 1/4!
        # bases = [randint(2, n - 1) for _ in range(num_trials)]
        if n <= 1e9:
            bases = BASES_1
        else:
            bases = BASES_2
    for a in bases:
        # A base equal to n proves nothing; skip it.
        if n == a:
            continue
        # Divisible by a small witness => composite.
        if n % a == 0:
            return False
        if not fermat_strong_test(n, a):
            return False
    return True
def Ballie_PSW_test(n, max_trivial_trials=100):
    """BPSW probable primality test.
    There is no known pseudo-prime that passes this test.
    Lower bound for a potential pseudoprime is 10^10000.

    Strategy: trial-divide by the first ``max_trivial_trials`` entries of
    the module-level ``primes`` table, then require n to pass both a
    base-2 strong Fermat test and the strong Lucas-Selfridge test.
    """
    for i in range(max_trivial_trials):
        if primes[i] == n:
            return True
        if n % primes[i] == 0:
            return False
        if primes[i] ** 2 >= n:
            # n < p^2 and n survived division by all primes <= p => prime.
            return True
    if not fermat_strong_test(n, 2):
        return False
    if not lucas_selfridge_test(n):
        return False
    return True
def Pollard_rho_Floyd(n, x0=2, c=1):
    """Pollard's Rho method
    Attempts to find a divisor using the rho iteration
    with Floyd's cycle-finding algorithm.
    Returns either a nontrivial factor of n or n itself.
    If n = p*q the expected running time is about O(p^(1/4)).
    """
    def f(x):
        # Pseudo-random iteration map x -> x^2 + c (mod n).
        return (x * x + c) % n
    # Floyd: x advances one step per iteration, y two; a collision modulo
    # an unknown factor p surfaces as gcd(|x - y|, n) > 1.
    x, y, g = x0, x0, 1
    while g == 1:
        x = f(x)
        y = f(f(y))
        g = gcd(abs(x - y), n)
    return g
def Pollard_rho_factor(n, check_prime=False):
    """Runs the Pollard Rho method with different random
    initial values and intercepts (x0, c) until
    some nontrivial factor is found.

    NOTE(review): with check_prime=False a prime n (and n in {0, 1})
    makes this loop forever -- callers should pre-screen.
    """
    if check_prime and Miller_Rabin_test(n):
        return n
    if even(n):
        return 2
    while True:
        x0 = randint(2, n - 1)
        c = randint(1, n - 1)
        g = Pollard_rho_Floyd(n, x0, c)
        # g == n means this (x0, c) pair cycled without a split; retry.
        if g != n:
            return g
def Pollard_pm1(n, primes, max_B=1000000):
    """Pollard's p - 1 method
    Attempts to find some factor p of n whose p - 1 is B-powersmooth:
    p - 1 = p1^d1 * ... * pn^dn with every pi^di <= B.
    Parameters
    ----------
    n : int
        integer to factorize
    primes : list
        sorted list of primes < max_B
    max_B : int
        maximal powersmoothness of extracted factor
    Returns 1 when no factor was found up to max_B.
    """
    B = 10
    g = 1
    # Double the smoothness bound until a factor appears or max_B is hit.
    while B < max_B and g < n:
        a = randint(2, n - 2)
        # A lucky shared factor with n ends the search immediately.
        g = gcd(a, n)
        if g != 1:
            return g
        for p in primes:
            if p >= B:
                break
            pd = 1  # p^d: largest power of p not exceeding B
            while pd * p <= B:
                pd *= p
            # Accumulate a^(lcm of prime powers <= B); by Fermat's little
            # theorem a^{p-1} = 1 (mod p) for a B-powersmooth p - 1, so
            # gcd(a - 1, n) then exposes p.
            a = powmod(a, pd, n)
            g = gcd(a - 1, n)
            if g != 1 and g != n:
                return g
        B *= 2
    return 1
def log_modulo(a, b, m):
    """Computes the discrete logarithm, i.e.
    finds x such that a^x = b (mod m), or returns None.

    Shanks' baby-step/giant-step: write x = n*p - q with n ~ sqrt(m);
    then a^(n*p) = b * a^q (mod m).  O(sqrt(m)) time and memory.
    """
    a, b = a % m, b % m
    n = isqrt(m) + 1
    # Baby steps: tabulate b * a^q for q = 0..n.  A later q overwrites an
    # earlier one for the same residue, biasing toward a smaller x.
    rhs = {}
    for q in range(n + 1):
        rhs[b] = q
        b = b * a % m
    an = powmod(a, n, m)
    lhs = 1
    # Giant steps: scan a^(n*p) for p = 1..n and look for a match.
    for p in range(1, n + 1):
        lhs = lhs * an % m
        if lhs in rhs:
            return n * p - rhs[lhs]
    # some numbers do not have a discrete log (example: log_2 3 (mod 7))
    return None
def primitive_root(n):
    """Finds a generator of the multiplicative group of integers modulo n,
    i.e. an integer g such that for each a coprime to n there is some k with
    a = g^k (mod n).
    g exists if and only if:
    1) n is p^d  2) n is 2*p^d (p an odd prime)  3) n <= 4  (Gauss theorem)
    """
    if n == 2:
        return 1
    phi = euler_phi(n)
    phi_divisors = factorize(phi)
    for g in range(2, n + 1):
        # Only units can generate the group.
        if gcd(g, n) != 1:
            continue
        # g is a generator iff g^(phi/p) != 1 for every prime p | phi.
        for d, _ in phi_divisors:
            if powmod(g, phi // d, n) == 1:
                # g can not be a generator
                break
        else:
            return g
    # generator not found:
    # the Gauss theorem conditions were not met
    return None
def kth_root_modulo(a, k, n):
    """Computes a discrete k-th root,
    i.e. finds x such that x^k = a (mod n).

    Writing x = g^y for a primitive root g reduces the problem to
    y = log_{g^k} a (mod n), then x = g^y.
    NOTE(review): fails (TypeError on None) when n has no primitive root
    or when the discrete log does not exist -- confirm callers pre-check.
    """
    g = primitive_root(n)
    gk = powmod(g, k, n)
    y = log_modulo(gk, a, n)
    return powmod(g, y, n)
| [
"vadimadr@gmail.com"
] | vadimadr@gmail.com |
104aa7236b2381b4af5aa9d9053a24f682ac2631 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/39/usersdata/86/15445/submittedfiles/dec2bin.py | 0fccfb359d06440edc18ae24c0887321b3568f73 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # -*- coding: utf-8 -*-
from __future__ import division
d = int(input('d:'))
soma=0
cont=0
while d>0:
soma=soma+d%2*10**cont
d=d//2
cont=cont+1
print(soma) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
90a2f233f880dee86c7b4757134d73a96ca8b4ed | 4bd46c3c9ee7c7f86ee1ee86782ca965c6fdf107 | /schdule.py | 53d79f652342474d8b2b853197f3614b86393509 | [] | no_license | jarvis7164/RISAPI_PACS | 282432b26bd579b259aae9ec65f56e31004dd992 | af1009c1bc34cbe55fbc8176cbd9b4be487a9e1e | refs/heads/master | 2023-04-30T14:13:13.890773 | 2021-05-20T07:11:02 | 2021-05-20T07:11:02 | 365,457,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/12/4 17:12
# @Author : jarvis
# @Email : 309194437@qq.com
# @File : schdule.py
# @Software: PyCharm
import datetime
import schedule
import time
def job():
print("I'm working...")
print(datetime.datetime.now())
schedule.every(5).seconds.do(job)
# schedule.every(10).minutes.do(job)
# schedule.every().hour.do(job)
# schedule.every().day.at("10:30").do(job)
# schedule.every(5).to(10).days.do(job)
# schedule.every().monday.do(job)
# schedule.every().wednesday.at("13:15").do(job)
while True:
schedule.run_pending()
time.sleep(2) | [
"309194437@qq.com"
] | 309194437@qq.com |
3f1983f3b77851249aa095044152d90113409543 | 72010b3a43f865abb4e5b6815b1daeba82049909 | /aulas_2019/moduloT.py | c4624d9c0ec19c58de331c00397111641718e9e3 | [] | no_license | malbouis/Python_intro | c8f7246839f08039c3e91d3468df523e4fb7884a | 26d961bf3091faa754c752bde1a060280269915a | refs/heads/master | 2022-02-08T04:24:34.593032 | 2022-02-03T13:46:02 | 2022-02-03T13:46:02 | 149,350,743 | 9 | 15 | null | 2019-04-16T18:03:43 | 2018-09-18T20:52:37 | Jupyter Notebook | UTF-8 | Python | false | false | 393 | py | # modulo para transpor matrices
def transpor(matriz):
    """Return the transpose of a 2-D matrix (list of equal-length rows).

    Parameters
    ----------
    matriz : list of list
        Matrix as a list of rows; all rows must have the same length.

    Returns
    -------
    list of list
        A new matrix with rows and columns swapped.  An empty matrix
        transposes to an empty matrix (the original crashed on ``[]``
        because it indexed ``matriz[0]``).
    """
    # zip(*matriz) yields column i as a tuple of the i-th element of every
    # row; converting each tuple back to a list preserves the original
    # list-of-lists return type.
    return [list(coluna) for coluna in zip(*matriz)]
if __name__=='__main__':
    # Smoke test: print a sample 3x3 matrix and its transpose.
    B=[[0,1,0],[3,5,8],[12,15,55]]
    print(B)
    print(transpor(B))
| [
"noreply@github.com"
] | malbouis.noreply@github.com |
c10f14d4bda28ddbc62ce16813f148700c8eb2b9 | 6be58abe26943d3bda744c93a47225639c1cdf70 | /chapter4/janken.py | a0bc40f915b93e05c7ef26e60fe5623141da1bfd | [] | no_license | sasakisho/python | 2f25b09db82309754f138b496735a58736b861bd | 45892010cffb52866bd08fdd17236994d99bdc59 | refs/heads/master | 2021-01-20T06:29:32.505209 | 2017-05-02T05:29:52 | 2017-05-02T05:29:52 | 89,887,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import random
# Rock-paper-scissors (janken) against the computer.
# Encoding used throughout: 1 = rock, 2 = scissors, 3 = paper.
#str = {"","グー","チョキ","パー"}
while True:
    com = random.randint(1,3)  # computer's hand, chosen at random
    ply = int(input("グー:1、チョキ:2、パー:3 "))  # prompt: "rock:1, scissors:2, paper:3"
    print("com=",com," ply=",ply)
    if(ply == 1):  # player played rock
        if(com == 1):
            print("あいこ")  # draw -> replay without asking to continue
            continue
        elif(com==2):
            print("勝ち")  # win (rock beats scissors)
        else:
            print("負け")  # lose (paper beats rock)
    elif(ply == 2):  # player played scissors
        if(com == 2):
            print("あいこ")  # draw
            continue
        elif(com==3):
            print("勝ち")  # win (scissors beat paper)
        else:
            print("負け")  # lose
    elif(ply == 3):  # player played paper
        if(com == 3):
            print("あいこ")  # draw
            continue
        elif(com==1):
            print("勝ち")  # win (paper beats rock)
        else:
            print("負け")  # lose
    else:
        print("入力が不正です")  # "invalid input" -> ask again
        continue
    ret = input("続けますか y/n ")  # "continue? y/n"
    if(ret == "n"):
        break
    else:
        continue
| [
"sshou@systembase.co.jp"
] | sshou@systembase.co.jp |
2d31bd6cb034de10a39b3cf3411ca4b73d910d4e | 6b01cf04fd583ad821ce0c29efdd1eee6a6eb36c | /10daysStatistics/day0WeightedMean.py | 4dbedf9e95c0dc17f818811076a971e5b6bed4ad | [] | no_license | hedayet13/practiceCoding | 453d0fa99be4261159140d3d05f057984f056eac | 79cebc92785058f3bc5655656236b427beb5cd79 | refs/heads/master | 2023-03-17T10:57:46.473764 | 2021-03-11T10:22:08 | 2021-03-11T10:22:08 | 276,454,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | lst =[1,2.3,4,5]
print(lst[2:]) | [
"islamhedayet67@gmail.com"
] | islamhedayet67@gmail.com |
b3bf3a06e6df18a8d69fda1e98d00b80eb759424 | c1c0684d6a292ba6ff672a991acb3a81b766fd83 | /the_modules/subprocess/sub_call/example1.py | 583cd7c978815ac3827f3d2eaf50153d61635f74 | [] | no_license | chenc19920308/python_untitled | c75dd6885ef72926ff71a5f3b984d3bee64828ef | 5ffec03ee5cb3692abb06eef6687a25b82e728d9 | refs/heads/master | 2020-03-18T21:58:09.247241 | 2018-05-29T15:27:17 | 2018-05-29T15:27:17 | 135,316,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,105 | py | import subprocess
print ("################## subprocess.call ###############")
print (u"call方法调用系统命令进行执行,如果出错不报错")
# NOTE: `dir` executed via shell=True is a Windows shell builtin.
subprocess.call(['dir'],shell=True)
# (translated) The parent process waits while the child runs the command and
# receives the child's exit status code; if the command fails, no Python
# exception is raised.
# ["Returns the status code" means: with res = subprocess.call(['dir'], shell=True)
# we only obtain the exit status (res == 0/1, success or failure), not the
# command's output.  The output is still printed in the console -- it simply
# is not captured.  To capture the output, see check_output.]
# ["No exception is raised" means: if the OS does not recognise the command
# (e.g. a nonexistent `abc` command), the error text is shown in the console
# but the Python interpreter stays silent.  To make Python raise on failure,
# see check_call.]
"chencong192@168.com"
] | chencong192@168.com |
da52d624ace9b40de13646230566d52bf7b6ba9c | ac39b8fddeb092324d63e3eda0e083da0a24ef46 | /LearnSecurity/migrations/0008_auto_20160815_1508.py | ecdf0dcde0da4374df684ae103fbe1c86db6f77b | [] | no_license | CezCz/LearnSecurity | 8bce1d6b38133d15dfe105be3718529907e3f2bf | 40ca9f64f471db85c52712a40f15e9c6685a011b | refs/heads/master | 2020-07-03T00:35:31.451200 | 2017-02-04T21:28:06 | 2017-02-04T21:28:06 | 74,207,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-15 13:08
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops `Level.program_usage` and
    # re-declares `Level.program_description` as a PostgreSQL JSONField.
    dependencies = [
        ('LearnSecurity', '0007_auto_20160815_1506'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='level',
            name='program_usage',
        ),
        migrations.AlterField(
            model_name='level',
            name='program_description',
            field=django.contrib.postgres.fields.jsonb.JSONField(),
        ),
    ]
| [
"czarek.czernecki@gmail.com"
] | czarek.czernecki@gmail.com |
20f1574ea5c294dcb2faad0799f85dc2559b77e4 | fe7811b0c7dc311ecf7dfeb2f533225c09d21ac3 | /node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi | c14fe4a0ab5a073bbbfbc1a7a651ee95febde4e0 | [
"MIT"
] | permissive | jessedrg/lista-actividades | 3592ab9ef3fc61ff275f40d65c8c23a1df9ba734 | dafb5754d8fd1d784ad79fe9ea875eba72f5f98b | refs/heads/main | 2023-05-15T05:21:44.537951 | 2021-05-30T07:06:15 | 2021-05-30T07:06:15 | 372,147,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,741 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/jessedragstra/Library/Caches/node-gyp/14.15.3",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/jessedragstra/.npm-init.js",
"userconfig": "/Users/jessedragstra/.npmrc",
"cidr": "",
"node_version": "14.15.3",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/jessedragstra/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.9 node/v14.15.3 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/h8/cbm6fty51gv09k43q8ylj9d40000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"76160818+jessedrg@users.noreply.github.com"
] | 76160818+jessedrg@users.noreply.github.com |
847428dbe3d202faf10a5e562519c1f606de2698 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/catapult/telemetry/bin/run_snap_it_unittest | 288474f3a3d87e0d7a4684e5fbb8c7beafe91870 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 1,727 | #!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
TELEMETRY_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.append(TELEMETRY_DIR)
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.util import wpr_modes
_SNAP_IT_TEST_URL = 'file:///%s' % (os.path.join(
util.GetCatapultThirdPartyDir(), 'snap-it', 'tests', 'tests.html'))
def RunSnapItUnittest(finder_options):
  """Runs the snap-it JavaScript test page in a browser and reports results.

  Args:
    finder_options: BrowserFinderOptions used to locate and launch a browser.

  Returns:
    0 if every test passed, 1 otherwise (shell-style exit code).
  """
  possible_browser = browser_finder.FindBrowser(finder_options)
  with possible_browser.BrowserSession(
      finder_options.browser_options) as browser:
    tab = browser.tabs[0]
    tab.Navigate(_SNAP_IT_TEST_URL)
    # The test page sets window.allTestDone once the JS suite has finished.
    tab.WaitForJavaScriptCondition('window.allTestDone')
    num_tests = tab.EvaluateJavaScript('window.total')
    failed_tests = tab.EvaluateJavaScript('window.failedTests')
    for test in failed_tests:
      print "Test '%s' failed" % test
    if failed_tests:
      print 'Failed %s tests (out of %s tests)' % (len(failed_tests), num_tests)
      return 1
    else:
      print 'All %s tests passed' % num_tests
      return 0
def main(args):
  """Parses command-line args and runs the snap-it unittests headlessly."""
  binary_manager.InitDependencyManager([])
  options = browser_options.BrowserFinderOptions()
  # Run without a visible browser window.
  options.browser_options.extra_browser_args.add('--headless')
  parser = options.CreateParser(usage="Run snap-it's unittests")
  parser.parse_args(args)
  return RunSnapItUnittest(options)
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| [
"arnaud@geometry.ee"
] | arnaud@geometry.ee | |
d1bde920b1e734daefc323b4ba4c5edaf06f158f | 2a80db83a93ab898ef4972f34d31e06fbe6ab2c5 | /test.py | 8629e9bda0a13e5b7def981ffc7a01e122b7a647 | [
"MIT"
] | permissive | khalooei/holiday-checkup | 1e8272de3b76a1eef66777f659a76a6a34e3a467 | 683ea8aeb9b3103ffd1c9bb3d21101cd08d431a9 | refs/heads/main | 2023-04-12T09:21:10.173869 | 2021-05-06T21:34:29 | 2021-05-06T21:34:29 | 365,010,533 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from SalavatiHolidayCheckup import HolidayCheck
from hijri_converter import convert
import jdatetime
# Query the holiday status of the same kind of lookup for dates expressed
# in two different calendars, converted to Gregorian first.
hc = HolidayCheck()
# 1 Farvardin 1400 in the Jalali (Persian) calendar, as a Gregorian date.
specific_datatime = jdatetime.date(1400, 1, 1, locale='fa_IR').togregorian()
print(hc.get_holiday_status_of_datetime(specific_datatime))
# 15 Sha'ban 1442 in the Hijri (Islamic) calendar, as a Gregorian date.
specific_datatime = convert.Hijri(1442, 8, 15).to_gregorian()
print(hc.get_holiday_status_of_datetime(specific_datatime))
"mkhlaooei90@gmail.com"
] | mkhlaooei90@gmail.com |
259d9c82c7a3f8b1eaad6a4ad7f776fc135aefa0 | f848ba0b886d0f9788a959ef53ee2f3b17fe3048 | /catalyst/contrib/data/transforms.py | 3d3a71b778e12a5d273f9797ef2d2ebdb2d36e42 | [
"Apache-2.0"
] | permissive | alyaxey/catalyst | d250e180267d395472101552c19f30207c4be16a | 4b2e182a2acc6538a7768eb1a5c174245773c84d | refs/heads/master | 2023-04-11T11:55:44.337373 | 2020-06-24T15:03:26 | 2020-06-24T15:03:26 | 273,520,003 | 1 | 1 | Apache-2.0 | 2023-03-28T21:55:29 | 2020-06-19T14:57:02 | Python | UTF-8 | Python | false | false | 4,910 | py | """
This subpackage was borrowed from
torchvision(https://github.com/pytorch/vision).
"""
import numpy as np
import torch
def to_tensor(pic: np.ndarray) -> torch.Tensor:
    """Convert a ``numpy.ndarray`` image to a ``torch.Tensor``.

    The input is interpreted as H x W (grayscale) or H x W x C and is
    returned as a C x H x W tensor.  ``uint8`` inputs are converted to
    float and rescaled from [0, 255] to [0.0, 1.0]; other dtypes keep
    their values unchanged.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        torch.Tensor: Converted image.

    Raises:
        TypeError: if `pic` is not np.ndarray
        ValueError: if `pic` is not 2/3 dimensional.
    """
    if not isinstance(pic, np.ndarray):
        raise TypeError(f"pic should be ndarray. Got {type(pic)}")
    if pic.ndim not in {2, 3}:
        raise ValueError(
            f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions."
        )
    # Grayscale images receive a trailing singleton channel axis.
    array = pic if pic.ndim == 3 else pic[:, :, None]
    # HWC -> CHW layout change, sharing memory with the source array.
    tensor = torch.from_numpy(array.transpose((2, 0, 1)))
    # Backward compatibility: byte images are rescaled to [0, 1] floats.
    if isinstance(tensor, torch.ByteTensor):
        return tensor.float().div(255)
    return tensor
def normalize(tensor: torch.Tensor, mean, std, inplace=False):
    """Normalize a 3-D tensor image channel-wise.

    Each channel ``c`` becomes ``(tensor[c] - mean[c]) / std[c]``.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): When True, modify ``tensor`` itself
            instead of a copy.

    Returns:
        torch.Tensor: Normalized Tensor image.

    Raises:
        TypeError: if `tensor` is not torch.Tensor
    """
    if not (torch.is_tensor(tensor) and tensor.ndimension() == 3):
        raise TypeError("tensor is not a torch image.")
    out = tensor if inplace else tensor.clone()
    mean_t = torch.as_tensor(mean, dtype=out.dtype, device=out.device)
    std_t = torch.as_tensor(std, dtype=out.dtype, device=out.device)
    # Broadcast the per-channel statistics over the H and W axes.
    out.sub_(mean_t[:, None, None]).div_(std_t[:, None, None])
    return out
class Compose:
    """Chain several transforms into a single callable."""

    def __init__(self, transforms):
        """
        Args:
            transforms (List): list of transforms to compose.

        Example:
            >>> Compose([ToTensor(), Normalize()])
        """
        self.transforms = transforms

    def __call__(self, img):
        """Apply every transform, in order, to ``img``."""
        out = img
        for transform in self.transforms:
            out = transform(out)
        return out

    def __repr__(self):
        """One indented transform per line, wrapped in ``ClassName(...)``."""
        body = "".join("\n    {0}".format(t) for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
class ToTensor(object):
    """Callable transform wrapping :func:`to_tensor`.

    Converts a ``numpy.ndarray`` (H x W x C) in the range [0, 255] to a
    ``torch.FloatTensor`` of shape (C x H x W) in the range [0.0, 1.0]
    when the array dtype is ``np.uint8``; other dtypes are converted
    without scaling.
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            torch.Tensor: Converted image.
        """
        return to_tensor(pic)

    def __repr__(self):
        """Class name with empty parens, e.g. ``ToTensor()``."""
        return f"{self.__class__.__name__}()"
class Normalize(object):
    """Channel-wise normalization transform.

    Given per-channel ``mean`` and ``std``, maps each channel of a
    (C, H, W) ``torch.Tensor`` to
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``.

    .. note::
        Acts out of place by default; the input tensor is not mutated
        unless ``inplace=True``.
    """

    def __init__(self, mean, std, inplace=False):
        """
        Args:
            mean (sequence): Sequence of means for each channel.
            std (sequence): Sequence of standard deviations for each channel.
            inplace (bool, optional): Bool to make this operation in-place.
        """
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        """Delegate to :func:`normalize` with the stored parameters.

        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.

        Returns:
            torch.Tensor: Normalized Tensor image.
        """
        return normalize(tensor, self.mean, self.std, self.inplace)

    def __repr__(self):
        """Class name plus the configured mean and std."""
        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std})"
__all__ = ["Compose", "Normalize", "ToTensor", "normalize", "to_tensor"]
| [
"noreply@github.com"
] | alyaxey.noreply@github.com |
77632dadb07288923599339f046b2e666610bf45 | 6679fd1102802bf190294ef43c434b6047840dc2 | /openconfig_bindings/routing_policy/policy_definitions/policy_definition/statements/statement/conditions/match_tag_set/state/__init__.py | 3afc3b819cdfb034d5fee85b9dba1b707dd4a68d | [] | no_license | robshakir/pyangbind-openconfig-napalm | d49a26fc7e38bbdb0419c7ad1fbc590b8e4b633e | 907979dc14f1578f4bbfb1c1fb80a2facf03773c | refs/heads/master | 2023-06-13T17:17:27.612248 | 2016-05-10T16:46:58 | 2016-05-10T16:46:58 | 58,091,515 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,728 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
# NOTE(review): pyangbind-generated container class.  Code left byte-for-byte
# untouched; only comments were added.  Regenerate from the YANG model rather
# than hand-editing.
class state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-routing-policy - based on the path /routing-policy/policy-definitions/policy-definition/statements/statement/conditions/match-tag-set/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Operational state data tag-set conditions
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__tag_set','__match_set_options',)

  _yang_name = 'state'
  _pybind_generated_by = 'container'

  # Builds the two YANG leaves with their defaults and, when a template
  # object is passed positionally, copies every changed element from it.
  def __init__(self, *args, **kwargs):
    helper = kwargs.pop("path_helper", None)
    if helper is False:
      self._path_helper = False
    elif helper is not None and isinstance(helper, xpathhelper.YANGPathHelper):
      self._path_helper = helper
    elif hasattr(self, "_parent"):
      helper = getattr(self._parent, "_path_helper", False)
      self._path_helper = helper
    else:
      self._path_helper = False
    self._extmethods = False
    self.__match_set_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INVERT': {}, u'ANY': {}},), default=unicode("ANY"), is_leaf=True, yang_name="match-set-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='oc-pol-types:match-set-options-restricted-type', is_config=False)
    self.__tag_set = YANGDynClass(base=ReferenceType(referenced_path='/routing-policy/defined-sets/tag-sets/tag-set/tag-set-name', caller=self._path() + ['tag-set'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="tag-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='leafref', is_config=False)

    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  # Absolute YANG path of this container; the static list is the fallback
  # when the instance is not attached to a parent.
  def _path(self):
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'routing-policy', u'policy-definitions', u'policy-definition', u'statements', u'statement', u'conditions', u'match-tag-set', u'state']

  # --- tag-set leaf: getter / setter / reset -------------------------------
  def _get_tag_set(self):
    """
    Getter method for tag_set, mapped from YANG variable /routing_policy/policy_definitions/policy_definition/statements/statement/conditions/match_tag_set/state/tag_set (leafref)

    YANG Description: References a defined tag set
    """
    return self.__tag_set

  def _set_tag_set(self, v, load=False):
    """
    Setter method for tag_set, mapped from YANG variable /routing_policy/policy_definitions/policy_definition/statements/statement/conditions/match_tag_set/state/tag_set (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_tag_set is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tag_set() directly.

    YANG Description: References a defined tag set
    """
    try:
      t = YANGDynClass(v,base=ReferenceType(referenced_path='/routing-policy/defined-sets/tag-sets/tag-set/tag-set-name', caller=self._path() + ['tag-set'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="tag-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='leafref', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """tag_set must be of a type compatible with leafref""",
          'defined-type': "leafref",
          'generated-type': """YANGDynClass(base=ReferenceType(referenced_path='/routing-policy/defined-sets/tag-sets/tag-set/tag-set-name', caller=self._path() + ['tag-set'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="tag-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='leafref', is_config=False)""",
        })

    self.__tag_set = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_tag_set(self):
    self.__tag_set = YANGDynClass(base=ReferenceType(referenced_path='/routing-policy/defined-sets/tag-sets/tag-set/tag-set-name', caller=self._path() + ['tag-set'], path_helper=self._path_helper, require_instance=True), is_leaf=True, yang_name="tag-set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='leafref', is_config=False)

  # --- match-set-options leaf: getter / setter / reset ---------------------
  def _get_match_set_options(self):
    """
    Getter method for match_set_options, mapped from YANG variable /routing_policy/policy_definitions/policy_definition/statements/statement/conditions/match_tag_set/state/match_set_options (oc-pol-types:match-set-options-restricted-type)

    YANG Description: Optional parameter that governs the behaviour of the
match operation. This leaf only supports matching on ANY
member of the set or inverting the match. Matching on ALL is
not supported)
    """
    return self.__match_set_options

  def _set_match_set_options(self, v, load=False):
    """
    Setter method for match_set_options, mapped from YANG variable /routing_policy/policy_definitions/policy_definition/statements/statement/conditions/match_tag_set/state/match_set_options (oc-pol-types:match-set-options-restricted-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_match_set_options is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_match_set_options() directly.

    YANG Description: Optional parameter that governs the behaviour of the
match operation. This leaf only supports matching on ANY
member of the set or inverting the match. Matching on ALL is
not supported)
    """
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INVERT': {}, u'ANY': {}},), default=unicode("ANY"), is_leaf=True, yang_name="match-set-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='oc-pol-types:match-set-options-restricted-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """match_set_options must be of a type compatible with oc-pol-types:match-set-options-restricted-type""",
          'defined-type': "oc-pol-types:match-set-options-restricted-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INVERT': {}, u'ANY': {}},), default=unicode("ANY"), is_leaf=True, yang_name="match-set-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='oc-pol-types:match-set-options-restricted-type', is_config=False)""",
        })

    self.__match_set_options = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_match_set_options(self):
    self.__match_set_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'INVERT': {}, u'ANY': {}},), default=unicode("ANY"), is_leaf=True, yang_name="match-set-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='oc-pol-types:match-set-options-restricted-type', is_config=False)

  # Read-only (config: false) properties exposed to callers.
  tag_set = property(_get_tag_set)
  match_set_options = property(_get_match_set_options)

  _pyangbind_elements = {'tag_set': tag_set, 'match_set_options': match_set_options, }
| [
"rjs@jive.com"
] | rjs@jive.com |
cc7db9291a6b845cc18ac0c5621b3ac3019f67ca | 4c62149fe5db6652a9ee90dbc6ae1f676f5ff71b | /src/LCExtract/utilities.py | 91c5250888fcf202a368567b0e04d4a2303b9cc6 | [
"MIT"
] | permissive | Pommers/LCExtract | a69d3721714836cab79b267a044b0b6341876cc3 | e9cbe4f0057fd4288b2b9feba44135e4a32df65c | refs/heads/master | 2023-07-14T10:33:04.291168 | 2021-04-22T06:13:11 | 2021-04-22T06:13:11 | 354,827,150 | 0 | 0 | MIT | 2021-04-22T06:13:12 | 2021-04-05T12:28:37 | Python | UTF-8 | Python | false | false | 1,016 | py | import itertools
import sys
import threading
import time
from numbers import Number
class Namespace(object):
    """Attribute-style access to a dict: ``ns.key`` instead of ``d['key']``."""

    def __init__(self, adict):
        """Copy every key/value pair of *adict* onto the instance."""
        vars(self).update(adict)
class Spinner:
    # Context manager that shows a rotating ASCII spinner on stdout while the
    # body of the `with` block runs, driven by a background thread.
    busy = False   # class-level default; set per use in __enter__/__exit__
    delay = 0.2    # seconds between spinner frames (class-level default)
    def __init__(self, delay=None):
        # Endless cycle of the four spinner frames: - / | \
        self.spinner_generator = itertools.cycle('-/|\\')
        # Only accept a positive numeric delay; shadows the class default.
        if delay and isinstance(delay, Number):
            self.delay = delay
    def spinner_task(self):
        """Worker loop: draw a frame, wait, then backspace over it."""
        while self.busy:
            sys.stdout.write(next(self.spinner_generator))
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()
    def __enter__(self):
        # Start the spinner thread; it runs until busy is cleared.
        self.busy = True
        threading.Thread(target=self.spinner_task).start()
    def __exit__(self, exception, value, tb):
        self.busy = False
        # Give the worker one frame interval to notice busy == False and exit.
        time.sleep(self.delay)
        if exception is not None:
            # Returning False propagates the exception to the caller.
            return False
| [
"richard@the-poms.com"
] | richard@the-poms.com |
5d6debb8875e78c9c69f8c9bcf19bac4b366f7be | f621926352a3dac1f8a3597206ff7412bb937531 | /SW expert/늘어지는 소리.py | b45a2b6ce685cc77aff8e5a49b026d295aa0dc90 | [] | no_license | leeseungkyung/algo | 3953982d867f7d17700bbba0820565f3439c565c | 722c4ce34282d106b2a8727460daea6c5775ab50 | refs/heads/master | 2021-02-10T15:06:17.689414 | 2020-06-04T07:35:47 | 2020-06-04T07:35:47 | 244,392,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | T = int(input())
# 여러개의 테스트 케이스가 주어지므로, 각각을 처리합니다.
for test_case in range(1, T + 1):
case = list(input())
h = int(input())
a = list(map(int, input().split()))
a=sorted(a, reverse=True)
for i in a:
case.insert(i, '-')
print(case) | [
"lee03190@naver.com"
] | lee03190@naver.com |
b1d441760af7a3cffac97948b1f4f725896d5b45 | 8d88db74123300ddffd6765c700d57b1608e1a03 | /ResolventSolver/Trajectory.py | 67955a93ddaccd3205b1605733865f2abab2b378 | [] | no_license | ZhouwenfengTyrantasteroid/ResolventSolver | dcf865ba7b564f074cc871453205f63f28dbcdb0 | f28a791e7353c23bd266372c0f82a8cf794a689c | refs/heads/main | 2023-08-31T21:40:38.872960 | 2021-10-16T16:32:08 | 2021-10-16T16:32:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | # This file contains the class definition for a general trajectory in some
# vector space.
import numpy as np
class Trajectory:
"""
A trajectory in state-space stored as an array of Fourier modes.
Attributes
----------
modes : ndarray
2D array containing data of float type.
shape : tuple of int
Shape of the trajectory equivelent array.
"""
__slots__ = ['modes', 'shape']
__array_priority__ = 1e100
def __init__(self, curve, modes = 33):
"""
Initialise a trajectory with a curve definition.
Parameters
----------
curve : ndarray
Numpy array that defines a trajectory in state-space
modes : positive int, default=33
Number of modes to represent the trajectory, ignored is curve
is an array.
"""
self.modes = curve
self.shape = np.shape(curve)
def __add__(self, other_traj):
"""Add trajectory to current instance."""
return Trajectory(self.modes + other_traj.modes)
def __sub__(self, other_traj):
"""Substract trajectory from current instance."""
return Trajectory(self.modes - other_traj.modes)
def __mul__(self, factor):
"""Multiply current instance by scalar."""
return Trajectory(self.modes*factor)
def __rmul__(self, factor):
return self.__mul__(factor)
def matmul_left_const(self, factor):
"""Left multiply current instance by constant array."""
return Trajectory(np.transpose(np.matmul(factor, np.transpose(self.modes))))
def matmul_left_traj(self, other):
"""Left multiply current instance by another trajectory instance."""
if len(self.shape) == 2 and len(other.shape) == 2:
return Trajectory(np.diag(np.inner(other.modes, self.modes)))
elif len(self.shape) == 3 and len(other.shape) == 3:
return Trajectory(np.matmul(other.modes, self.modes))
else:
return Trajectory(np.squeeze(np.matmul(other.modes, np.reshape(self.modes, (*self.shape, 1)))))
def __eq__(self, other_traj, rtol = 1e-5, atol = 1e-8):
"""Evaluate (approximate) equality of trajectory and current instance."""
return np.allclose(self.modes, other_traj.modes, rtol = rtol, atol = atol)
def __getitem__(self, key):
"""Return the element(s) of the modes indexed by the given key."""
return self.modes[key]
def __setitem__(self, key, value):
"""Set the value(s) of the modes indexed by the given key."""
self.modes[key] = value
def __round__(self, decimals = 6):
"""Return a new trajectory with rounded modes."""
return Trajectory(np.around(self.modes, decimals = decimals))
def __abs__(self):
    """abs(traj) gives the L2 (Frobenius) norm of the mode array."""
    norm = np.linalg.norm(self.modes)
    return norm
def __repr__(self):
    """Return a NumPy-style repr of the underlying mode array."""
    return np.array_repr(self.modes)
| [
"tburton5572@gmail.com"
] | tburton5572@gmail.com |
aa40b2684114f8c5371e199b70084f9b0e8c4d5d | ba658e913a44eef5f617dee9dac3b886c1b18f15 | /ibex35/valores_app/migrations/0001_initial.py | 01ef6685a0ba97e6866ca0171cd57da1b77538d7 | [] | no_license | neburnodrog/Ibex-35-API | 5381fa65957229aa64bd122a10bb7428e3de781e | cd84a93cb2e55b02c1d0b8de084d085373de7ebd | refs/heads/main | 2023-01-20T05:17:11.291484 | 2020-11-25T16:31:48 | 2020-11-25T16:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 995 | py | # Generated by Django 3.1.3 on 2020-11-24 19:21
from django.db import migrations, models
class Migration(migrations.Migration):

    # First migration of the app: nothing to depend on yet.
    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Valor table; fields hold market statistics for one value.
        migrations.CreateModel(
            name='Valor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=20)),
                ('variacion', models.FloatField()),
                ('acumulado', models.FloatField()),
                ('maximo', models.FloatField()),
                ('minimo', models.FloatField()),
                ('volumen', models.FloatField()),
                ('capitalizacion', models.FloatField()),
                # auto_now: timestamp refreshed automatically on every save.
                ('actualizacion', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Valor',
                'verbose_name_plural': 'Valores',
            },
        ),
    ]
| [
"neburgordon@gmail.com"
] | neburgordon@gmail.com |
ee76da57c2cc4d0bd87484b4750e15a5bf4c5e1f | 38ec18dc1468783de4fe1da97c95afa38eba1533 | /django_movies/app1/migrations/0011_rater_user.py | f4c282a9781b3ee339c3bfc38d273dd1273f222e | [] | no_license | pnitto/django-movies | ed739b328552f200eb8b831cb31e9f4d937ee15b | be77780f70e627db6d97e565708d855976a3bfa8 | refs/heads/master | 2021-01-14T12:20:50.004963 | 2015-07-07T02:02:29 | 2015-07-07T02:02:29 | 38,266,572 | 0 | 0 | null | 2015-06-29T19:22:59 | 2015-06-29T19:22:58 | null | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):

    dependencies = [
        # Ensure the project's (possibly swapped) user model exists first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app1', '0010_auto_20150701_2159'),
    ]

    operations = [
        migrations.AddField(
            model_name='rater',
            name='user',
            # One-to-one link to the auth user; nullable (null=True),
            # presumably so pre-existing Rater rows survive the migration.
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True),
        ),
    ]
| [
"pnitto18@gmail.com"
] | pnitto18@gmail.com |
f80388165117e45e1386c6e6162d5b22114a3d47 | d0539a16586a9196bfda01c545b496a9336e066d | /scraping_scripts/cochrane_pubmed_mapping.py | 23bbc5b9ecfb7f2527d97686a3ab3b15015ff81f | [] | no_license | ebenjoseph/deepmed | 7bb22bd53e3222085e14a5df08d46000c21c9bc8 | 3a227c64eef3f6ee76dacbd5498261081109390c | refs/heads/master | 2020-04-05T14:05:33.986963 | 2016-09-19T03:18:43 | 2016-09-19T03:19:58 | 59,232,353 | 1 | 1 | null | 2016-08-09T19:17:23 | 2016-05-19T18:37:43 | Python | UTF-8 | Python | false | false | 19,551 | py | #Searches pubmed for the title of a study, checks that there is only 1 result, then confirms that the year matches.
#If there is a positive match, the script collects the PubMedID, metadata, and full text links
#It also tracks which Titles/UniqueIDs have already been added to the output file and skips them
import logging
import sys
from bs4 import BeautifulSoup
from time import sleep
import time
import json
import os
import requests
import csv
from multiprocessing.dummy import Pool as ThreadPool # for multithreading
import collections
from random import choice
import re
from datetime import datetime
import threading
# Pool of real-browser User-Agent strings.  One is chosen at random per
# request (see random_user_agent) so the scraper's traffic looks less uniform.
user_agents = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1',
    'Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
    'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24'
]
def random_user_agent():
    """Return one User-Agent string drawn uniformly from the canned pool."""
    agent = choice(user_agents)
    return agent
#import xml.etree.ElementTree
#SET UP LOGGING
#Config the level of information (DEBUG, INFO, WARNING, ERROR, CRITICAL), output location, and formatting of the log
#Create a second handler, change formatting, and appends it to the root to stream log to console
#set logging level from command line. python.py -log=INFO
root = logging.getLogger()
# File handler (via basicConfig): INFO and above go to the log file.
logging.basicConfig(level=logging.INFO, filename='cochrane_pubmed_mapping.log', format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
# Console handler: mirror DEBUG and above to stdout with the same format.
streamthelog = logging.StreamHandler(sys.stdout)
streamthelog.setLevel(logging.DEBUG)
streamthelog.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
root.addHandler(streamthelog)
#SET UP XPATH FUNCTION FOR MULTIPLE ATTEMPTS
def getxpath(element, searches):
    """Return the first element matched by any XPath in `searches`.

    Each candidate XPath is tried in order; the first successful
    `find_element_by_xpath` result is returned.  When none match, the
    sentinel string 'Not found' is returned (callers compare against it
    instead of catching an exception).
    """
    for xpath in searches:
        try:
            # Demoted from INFO to DEBUG for consistency with the other
            # getxpath* helpers (the INFO logs flooded the console output).
            logging.debug('Trying: %s', xpath)
            temp = element.find_element_by_xpath(xpath)
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate out of the retry loop.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def getallxpath(element, searches):
    """Return all elements matched by the first XPath in `searches` that works.

    Tries each candidate XPath in order and returns the (possibly empty)
    list from `find_elements_by_xpath` on the first success.  Returns the
    sentinel string 'Not found' when every candidate raises.
    """
    for xpath in searches:
        try:
            logging.debug('Trying: %s', xpath)
            temp = element.find_elements_by_xpath(xpath)
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:` so process-control exceptions
            # are not swallowed by the retry loop.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def getxpathatt(element, attribute, searches):
    """Return `attribute` of the first element matched by any XPath in `searches`.

    Tries each candidate XPath in order; on the first element found, its
    `attribute` value is returned.  Returns the sentinel string 'Not found'
    when every candidate fails.
    """
    for xpath in searches:
        try:
            logging.debug('Trying: %s', xpath)
            temp = element.find_element_by_xpath(xpath).get_attribute(attribute)
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:`; only genuine lookup failures
            # should trigger the fallback to the next XPath.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def getallxpathatt(element, attribute, searches):
    """Return `attribute` of every element matched by the first working XPath.

    Bug fix: the previous version called `.get_attribute(...)` on the LIST
    returned by `find_elements_by_xpath`, which always raised and therefore
    always returned 'Not found'.  It now collects the attribute from each
    matched element and returns the list.  The sentinel string 'Not found'
    is still returned when every candidate XPath fails.
    """
    for xpath in searches:
        try:
            logging.debug('Trying: %s', xpath)
            found = element.find_elements_by_xpath(xpath)
            temp = [node.get_attribute(attribute) for node in found]
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still propagates.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def getxpathtext(element, searches):
    """Return the `.text` of the first element matched by any XPath in `searches`.

    Tries each candidate XPath in order; returns the matched element's text
    on first success, or the sentinel string 'Not found' when every
    candidate fails.
    """
    for xpath in searches:
        try:
            logging.debug('Trying: %s', xpath)
            temp = element.find_element_by_xpath(xpath).text
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:`.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def getallxpathtext(element, searches):
    """Return the `.text` of the first element matched by any XPath in `searches`.

    NOTE(review): despite the `all` in its name, this is a byte-for-byte
    copy of `getxpathtext` — it queries a single element via
    `find_element_by_xpath` and returns one text value.  Behavior is kept
    (callers may rely on the string return type); confirm whether a
    `find_elements_by_xpath` list version was intended.
    Returns the sentinel string 'Not found' when every candidate fails.
    """
    for xpath in searches:
        try:
            logging.debug('Trying: %s', xpath)
            temp = element.find_element_by_xpath(xpath).text
            logging.debug('Success! XPath found: %s', temp)
            return temp
        except Exception:
            # Narrowed from a bare `except:`.
            logging.debug('Could not find: %s', xpath)
            continue
    return 'Not found'
def hms_string(sec_elapsed):
    """Format an elapsed time in seconds as `H:MM:SS.ss`."""
    hours = int(sec_elapsed / (60 * 60))
    minutes = int((sec_elapsed % (60 * 60)) / 60)
    seconds = sec_elapsed % 60.
    return "{h}:{m:>02}:{s:>05.2f}".format(h=hours, m=minutes, s=seconds)
def getsoup(root_link):
    """Fetch `root_link` and return it parsed as a BeautifulSoup tree.

    A fresh requests session is used per call with a randomized User-Agent
    header.  On any request failure the error is logged and the sentinel
    -1 is returned (NOTE(review): callers such as getlink do not check for
    -1 and would fail later when treating it as a soup).
    """
    try:
        session = requests.Session()
        # Bug fix: the previous code assigned the bare UA string to
        # `session.headers`; requests expects a mapping, so the header was
        # never sent correctly.  Merge it into the default headers instead.
        session.headers.update({'User-Agent': random_user_agent()})
        web_page = session.get(root_link)
        # Small delay to be polite to the PubMed servers.
        time.sleep(.25)
    except Exception:
        # Narrowed from a bare `except:`.
        logging.info('Website request failed.')
        logging.debug('Requested link: %s', root_link)
        return -1
    return BeautifulSoup(web_page.content, "lxml")
def getlink(ref):
    """Map one Cochrane reference dict onto its PubMed record (thread worker).

    Searches PubMed by the reference title (falling back to an author search
    when the title search is ambiguous), scrapes the metadata of a uniquely
    matched abstract page, and appends one JSON line to the shared
    ``outfile``.  Module-level shared state (``journalsRead``,
    ``journalfile``, ``outfile``) is guarded by ``journalLock`` and
    ``writingLock`` since this runs inside a thread pool.
    """
    data = collections.OrderedDict()  # NOTE(review): rebound to a plain {} further down, so this OrderedDict is discarded
    start_time = time.time()  # NOTE(review): never read afterwards
    # Copy each expected field out of the input record, defaulting to 'N/A'
    # when the key is missing (KeyError swallowed by the bare excepts).
    try:
        coch_country = ref['coch_country']
    except:
        coch_country = 'N/A'
    try:
        coch_author = ref['coch_author']
    except:
        coch_author = 'N/A'
    try:
        coch_title = ref['coch_title']
    except:
        coch_title = 'N/A'
    try:
        coch_year = ref['coch_year']
    except:
        coch_year = 'N/A'
    try:
        coch_source = ref['coch_source']
    except:
        coch_source = 'N/A'
    try:
        coch_volume = ref['coch_volume']
    except:
        coch_volume = 'N/A'
    try:
        coch_pages = ref['coch_pages']
    except:
        coch_pages = 'N/A'
    try:
        coch_publisher = ref['coch_publisher']
    except:
        coch_publisher = 'N/A'
    try:
        coch_number = ref['coch_number']
    except:
        coch_number = 'N/A'
    try:
        coch_id = ref['coch_id']
    except:
        coch_id = 'N/A'
    #CHECK THE COCH_ID TO SEE IF WE HAVE PULLED BEFORE
    # Successful lookup => some worker already handled this id; bail out.
    journalLock.acquire()
    try:
        journalsRead[coch_id]
        journalLock.release()
        logging.info('Title found in tracker. Skipping...')
        return
    except:
        # KeyError path: claim the id in memory and persist it to the
        # tracker file.  NOTE(review): if either write raises, the lock is
        # never released (a `finally` would be safer).
        journalsRead[coch_id] = 1
        logging.info('Title not found in tracker. Adding to tracker...')
        journalfile.write(coch_id)
        journalfile.write('\n')
        journalLock.release()
    # Dead code kept for reference: earlier Selenium/XPath based scraping.
    '''
    #Grab details of the Journal
    resultNumber = getxpathtext(reportLink,[".//div[contains(@class,'rprtnum')]"])
    resultNumber = resultNumber.replace('.','')
    logging.info('Result number: %s', resultNumber)
    title = getxpathtext(reportLink,[".//div[contains(@class,'rslt')]//p[@class='title']"])
    logging.info('Journal title: %s', title)
    pmid = getxpathtext(reportLink,[".//dl[@class='rprtid']//dd"])
    logging.info('PMID: %s', pmid)
    authors = getxpathtext(reportLink,[".//p[@class='desc']"])
    logging.info('Authors: %s', authors)
    journalName = getxpathatt(reportLink,"title",[".//span[@class='jrnl']"])
    logging.info('Full journal name: %s', journalName)
    journalNameShort = getxpathtext(reportLink,[".//span[@class='jrnl']"])
    logging.info('Short journal name: %s', journalNameShort)
    citation = getxpathtext(reportLink,[".//p[@class='details']"])
    logging.info('Citation: %s', citation)
    reportPage = getxpathatt(reportLink,"href",[".//p[@class='title']/a"])
    logging.info('Loading report page: %s', reportPage)
    '''
    #Load the PubMed page (reportPage) using BeautifulSoup4
    logging.info('Target article: %s', coch_title)
    logging.info('Loading page with BS4...')
    # Quote the URL-encoded title so PubMed performs an exact-phrase search.
    search_title = coch_title.replace(" ","%20")
    reportLink = 'http://www.ncbi.nlm.nih.gov/pubmed/?term="' + search_title + '"'
    soup = getsoup(reportLink)
    logging.info('Page loaded')
    #Check for a link to a single citation
    try:
        citlinksearch = soup.find('div', class_='portlet_title').find('a', text = re.compile('citation found'))['href']
        logging.info('Found link to the citation, following it...')
        soup = getsoup('http://www.ncbi.nlm.nih.gov/' + citlinksearch)
    except:
        print ()
    #Check for only 1 result
    #if 1 result, then collect metadata (match = 1)
    #if multiple results or no results, then skip metadat (match = 0)
    match = 0
    multresult = 0
    try:
        #search for div class = "rprt abstract" > div class = "cit"
        citsearch = soup.find('div', class_='rprt abstract').find('div',class_='cit').get_text()
        logging.info('1 Result found by title')
        match = 1
        resultcount = '1'
    except:
        match = 0
        try:
            noresultsearch = soup.find('span', text = re.compile('No items found.'), attrs = {'class' : 'icon'}).get_text()
            logging.info('No results found by title')
            resultcount = 'NoResults'
            # multresult triggers the author-search fallback below for the
            # no-result case as well as the multiple-result case.
            multresult = 1
        except:
            resultcount = 'MultipleResults'
            logging.info('Multiple results found by title')
            multresult = 1
    if multresult == 1:
        #check by authors for a single match
        logging.info('Searching by author')
        search_authors = coch_author.replace(" ","%20")
        reportLink = 'http://www.ncbi.nlm.nih.gov/pubmed/?term=' + search_authors
        soup = getsoup(reportLink)
        logging.info('Page loaded')
        match = 0
        multresult = 0
        try:
            #search for div class = "rprt abstract" > div class = "cit"
            citsearch = soup.find('div', class_='rprt abstract').find('div',class_='cit').get_text()
            logging.info('1 Result found by authors')
            match = 1
            resultcount = '1'
        except:
            match = 0
            try:
                noresultsearch = soup.find('span', text = re.compile('No items found.'), attrs = {'class' : 'icon'}).get_text()
                logging.info('No results found by authors')
                resultcount = 'NoResults'
            except:
                resultcount = 'MultipleResults'
                logging.info('Multiple results found by authors')
                multresult = 1
    # Dead code kept for reference: an older result-count heuristic.
    '''
    resultcount = soup.find('div',class_='content').find('h3').get_text().replace("\n","")
    if resultcount[:4] == 'See ':
        logging.info('Number of results: %s', resultcount)
        resultcount = '1'
        match = 1
    elif resultcount[:6] == 'Items:':
        logging.info('Multiple results returned')
        resultcount = 'MultipleResults'
        match = 0
    elif resultcount == 'Author information':
        logging.info('Number of results: %s', resultcount)
        resultcount = '1'
        match = 0
    else:
        logging.info('first No results returned')
        resultcount = 'NoResults'
        match = 0
    except:
        try:
            resultcount = soup.find('li',class_='info icon').find('span',class_='icon').get_text().replace("\n","")
            if resultcount == 'No items found.':
                logging.info('second No results returned')
                resultcount = 'NoResults'
                match = 0
            else:
                logging.info('Quoted phrase not found, but matched')
                resultcount = '1'
                match = 1
        except:
            logging.info('third No results returned')
            resultcount = 'NoResults'
            match = 0
    '''
    if match == 1:
        #Grab meta data
        # Each field is scraped independently; any parse failure degrades
        # that single field to the sentinel 'Not found'.
        try:
            pmid = soup.find('a',ref='aid_type=pmid').get_text()
            logging.info('PMID: %s', pmid)
        except:
            pmid = 'Not found'
            logging.info('Could not find PMID.')
        try:
            title = soup.find('div',class_='rprt_all').find('h1').get_text()
            logging.info('Journal title: %s', title)
        except:
            title = 'Not found'
            logging.info('Could not find Journal title.')
        try:
            journalName = soup.find('div',class_="cit").find('a')['title']
            logging.info('Full journal name: %s', journalName)
        except:
            journalName = 'Not found'
            logging.info('Could not find Full journal name.')
        try:
            journalNameShort = soup.find('div',class_="cit").find('a').get_text()
            logging.info('Short journal name: %s', journalNameShort)
        except:
            journalNameShort = 'Not found'
            logging.info('Could not find Short journal name.')
        try:
            citation = soup.find('div',class_="cit").get_text()
            logging.info('Citation: %s', citation)
        except:
            citation = 'Not found'
            logging.info('Could not find citation.')
        #Grab FullTextLink
        try:
            fullTextLink = soup.find('div', class_="linkoutlist")
            fullTextLink = fullTextLink.find('h4', text=re.compile('Full Text Sources')).next_sibling
            full_links = fullTextLink.findAll('a')
            fullTextLinks = []
            # Collect [link text, href] pairs for every full-text source.
            for each in full_links:
                temp = [each.get_text(),each['href']]
                fullTextLinks.append(temp)
            logging.info('Collected full text links.')
        except:
            fullTextLinks = 'Not found'
        ##Grab publication types
        try:
            ptype = soup.find('h4', text='Publication Types').next_sibling
            ptype = ptype.findAll('li')
            pubtypes = []
            temp = -1
            for num in ptype:
                temp += 1
                pubtypes.append(ptype[temp].get_text())
        except:
            pubtypes = 'Not found'
        logging.info('Pubtypes: %s', pubtypes)
        # Parse the publication date out of the citation text, which begins
        # with the short journal name followed by the date.
        try:
            pubdate = citation[len(journalNameShort)+1:]
            try:
                temp = pubdate.find(";")
            except:
                # NOTE(review): str.find never raises (it returns -1), so
                # this '.' fallback is dead code — `index` was likely meant.
                temp = pubdate.find(".")
            pubdate = pubdate[:temp]
            try:
                pubdate = datetime.strptime(pubdate,'%Y %b %d')
            except:
                pubdate = datetime.strptime(pubdate,'%Y %b')
        except:
            pubdate = 'Not found'
        logging.info('Publication date: %s', pubdate)
        ##Grab MeSH Terms
        try:
            mterms = soup.find('h4', text='MeSH Terms').next_sibling
            mterms = mterms.findAll('li')
            meshterms = []
            temp = -1
            for num in mterms:
                temp += 1
                meshterms.append(mterms[temp].get_text().encode("utf-8"))
        except:
            meshterms = 'Not found'
        logging.info('MeSH Terms: %s', meshterms)
        ##Grab substances
        try:
            subs = soup.find('h4', text='Substances').next_sibling
            subs = subs.findAll('li')
            substances = []
            temp = -1
            for num in subs:
                temp += 1
                substances.append(subs[temp].get_text().encode("utf-8"))
        except:
            substances = 'Not found'
        logging.info('Substances: %s', substances)
        ##Grab Grant Support
        try:
            gsup = soup.find('h4', text='Grant Support').next_sibling
            gsup = gsup.findAll('li')
            grantsupport = []
            temp = -1
            for num in gsup:
                temp += 1
                grantsupport.append(gsup[temp].get_text().encode("utf-8"))
        except:
            grantsupport = 'Not found'
        logging.info('Grants: %s', grantsupport)
        ##Grab authors and affiliations
        try:
            try:
                aff = soup.find('div', class_="afflist")
                aff = aff.findAll('li')
                affiliations = []
                temp = -1
                for num in aff:
                    temp += 1
                    # [1:] strips the leading affiliation index digit.
                    affiliations.append(aff[temp].get_text()[1:])
            except:
                affiliations = 'Not found'
            ##Get authors and match with affiliation
            auth = soup.find('div', class_="auths")
            auth = auth.findAll('a', href=True)
            authorsaff = []
            temp = -1
            for num in auth:
                temp += 1
                try:
                    #check for multiple numbers and merge them if they exist
                    # Each author link is followed by superscript affiliation
                    # numbers; a trailing comma means another number follows,
                    # hence this nested walk over successive next_siblings
                    # (up to five deep).
                    idx = []
                    if "," in auth[temp].next_sibling.get_text():
                        idx.append(auth[temp].next_sibling.get_text()[:-1])
                        if "," in auth[temp].next_sibling.next_sibling.get_text():
                            idx.append(auth[temp].next_sibling.next_sibling.get_text()[:-1])
                            if "," in auth[temp].next_sibling.next_sibling.next_sibling.get_text():
                                idx.append(auth[temp].next_sibling.next_sibling.next_sibling.get_text()[:-1])
                                if "," in auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.get_text():
                                    idx.append(auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.get_text()[:-1])
                                    if "," in auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.get_text():
                                        idx.append(auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.get_text()[:-1])
                                    else:
                                        idx.append(auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.get_text())
                                else:
                                    idx.append(auth[temp].next_sibling.next_sibling.next_sibling.next_sibling.get_text())
                            else:
                                idx.append(auth[temp].next_sibling.next_sibling.next_sibling.get_text())
                        else:
                            idx.append(auth[temp].next_sibling.next_sibling.get_text())
                    else:
                        idx.append(auth[temp].next_sibling.get_text())
                    # Resolve the collected 1-based indices against the
                    # affiliation list, joining multiples with "; ".
                    fil = ""
                    counter = -1
                    for idc in idx:
                        counter += 1
                        if counter == 0:
                            fil = affiliations[int(idc)-1]
                        else:
                            fil = fil + "; " + affiliations[int(idc)-1]
                    authorsaff.append([auth[temp].get_text().encode("utf-8"),fil.encode("utf-8")])
                except:
                    authorsaff.append([auth[temp].get_text().encode("utf-8"),'Not found'])
        except:
            authorsaff = 'Not found'
        logging.info('Authors and affiliations: %s', authorsaff)
    else:
        # No unique PubMed match: placeholder no-op.
        x = 0
    #Check if the year matches
    try:
        if int(pubdate.year) == int(coch_year):
            yearmatch = 'Y'
        else:
            yearmatch = 'N'
    except:
        yearmatch = 'N/A'
    #Check if the author matches
    authormatch = 'N'
    try:
        # NOTE(review): authorsaff entries were already encoded to bytes
        # above; calling .encode on bytes raises on Python 3, which drives
        # authormatch to 'N/A' — confirm the intended Python version.
        if authorsaff[0][0].encode("utf-8") in coch_author.encode("utf-8"):
            authormatch = 'Y'
    except:
        authormatch = 'N/A'
    # Assemble the output record (Cochrane fields always pass through).
    data = {}
    data['results'] = resultcount
    data['yearmatch'] = yearmatch
    data['authormatch'] = authormatch
    #pass through the cochrane data
    data['coch_id'] = coch_id
    data['coch_title'] = coch_title
    data['coch_country'] = coch_country
    data['coch_author'] = coch_author
    data['coch_title'] = coch_title
    data['coch_year'] = coch_year
    data['coch_source'] = coch_source
    data['coch_volume'] = coch_volume
    data['coch_pages'] = coch_pages
    data['coch_publisher'] = coch_publisher
    data['coch_number'] = coch_number
    #write everything we found to JSON file
    if match == 1:
        data['pmid'] = pmid
        data['title'] = title.encode("utf-8")
        data['authors'] = authorsaff
        data['affiliations'] = affiliations
        data['journal_name'] = journalName.encode("utf-8")
        data['journal_code'] = journalNameShort.encode("utf-8")
        data['pubtypes'] = pubtypes
        data['meshterms'] = meshterms
        data['citation'] = citation.encode("utf-8")
        data['pubdate'] = str(pubdate)
        data['pubmed_page'] = reportLink
        data['fulltext_page'] = fullTextLinks
    # Serialize under the writing lock: workers share one output file.
    writingLock.acquire()
    json.dump(data, outfile) # use previously opened file (need it opened before implementing threads)
    outfile.write('\n')
    writingLock.release()
# Let's go!
logging.info('Program initiated')
logging.debug('Debugging logging active')
# define new tracker
articletracker = 'cochrane_pubmed_mapping_tracker'
# read in previously read journal links
# (one coch_id per line; lets the script resume after an interruption)
journalsRead = {}
try:
    journalfile = open(articletracker, 'r')
    for line in journalfile:
        journalsRead[line.rstrip()] = 1
    journalfile.close()
except:
    # Tracker file does not exist yet: create it empty.
    journalfile = open(articletracker, 'w')
    journalfile.close()
# Re-open the tracker in append mode; getlink writes processed ids to it.
journalfile = open(articletracker, 'a')
# Locks shared by the worker threads (tracker access and output writes).
journalLock = threading.Lock()
writingLock = threading.Lock()
# open or create output file
outfile = open('cochrane_pubmed_mapping_redo.jsonl', 'a')
refs_to_pull = []
# read in JSON
# (one JSON object per line — JSONL format)
with open('cochrane_refs.jsonl') as data_file:
    for line in data_file:
        inputdata = json.loads(line)
        refs_to_pull.append(inputdata)
# setup threadpool
# (4 worker threads, each running getlink on one reference at a time)
pool = ThreadPool(4)
results = pool.map(getlink, refs_to_pull)
pool.close()
pool.join()
# output results
logging.info(len(results))
"natewilson@Nates-MacBook-Air.local"
] | natewilson@Nates-MacBook-Air.local |
31c3f2bc5315b2434eb174584967b7ab73413cea | a85777a069297c252bb35be0ea2dc755779c9ad0 | /courses_manager/urls.py | eb7409eac3af375b39c1e142895200e0b58b3db4 | [] | no_license | igornikanovich/online-manager | cd2c1cb061dc3f5f71aaba3698f4a103260902c9 | d33779f4178798185bc558c259440957e9ba147d | refs/heads/master | 2022-12-08T23:58:15.702888 | 2019-11-01T13:06:21 | 2019-11-01T13:06:21 | 218,158,439 | 0 | 0 | null | 2022-12-08T06:47:45 | 2019-10-28T22:40:29 | Python | UTF-8 | Python | false | false | 1,114 | py | from django.conf.urls import url
from django.contrib import admin
from django.urls import path, include
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
# drf-yasg schema view: serves the OpenAPI description of the API
# (raw JSON/YAML plus the Swagger UI and ReDoc renditions routed below).
schema_view = get_schema_view(
    openapi.Info(
        title="Snippets API",
        default_version='v1',
        description="Test description",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="contact@snippets.local"),
        license=openapi.License(name="BSD License"),
    ),
    public=True,
    # The schema itself is readable without authentication.
    permission_classes=(permissions.AllowAny,),
)

urlpatterns = [
    path('admin/', admin.site.urls),
    # Versioned API routes delegated to the individual apps.
    url(r'^api/v1/', include('courses.urls', namespace='courses')),
    url(r'^api/v1/', include('authentication.urls', namespace='authentication')),
    # Raw schema download: /swagger.json or /swagger.yaml
    url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
    url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
    url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
| [
"ignikanovich@gmail.com"
] | ignikanovich@gmail.com |
e1519e2d2922f3e4f795489565e7f93deeacad01 | 1d2a25c5a2773553fadfd0c2ca2b1f58eb920574 | /venv/bin/jupyter-kernelspec | 750dc10229efb605706ce5924f21032d821c3bbb | [] | no_license | uygaryo2/Per-Title_encoding | 2a750b2817e87ecae352b7ba9657b50aac0af0c5 | a80f9d191e8c8af9d249f8795a26aafb65ac25dc | refs/heads/master | 2022-10-13T15:37:36.772754 | 2019-07-09T09:16:20 | 2019-07-09T09:16:20 | 186,696,185 | 10 | 0 | null | 2022-06-21T21:58:43 | 2019-05-14T20:34:17 | Python | UTF-8 | Python | false | false | 312 | #!/Users/sezin/PycharmProjects/Per-Title_encoding/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelspecapp import KernelSpecApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(KernelSpecApp.launch_instance())
| [
"sezinsezgin1@gmail.com"
] | sezinsezgin1@gmail.com | |
0dc165a9e8635b418496114fc62d3564c34a9886 | 80746c538d3d649fe84d6c1a37d9739228d658a8 | /core/api_views.py | bb6a7c0f555a5330c25ecf8e86276bad0f78ccd7 | [] | no_license | SnappGaming/FileShop | 50a256d7bd63f1f1b154c8b7f851f444c6f9a4ee | ae9afeb4b0220e481beedcd12a0418cf2ea98a0a | refs/heads/master | 2023-07-26T13:53:20.706578 | 2021-08-31T06:36:49 | 2021-08-31T06:36:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,395 | py | from django.shortcuts import get_object_or_404, reverse
from django.core.validators import validate_email
from django.core.exceptions import PermissionDenied, ValidationError
from requests.api import request
from rest_framework import generics, status
from rest_framework.permissions import AllowAny
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import NotFound, APIException
from .models import Product, File, Order
from .constants import SUPPORTED_CRYPTOS, SUPPORTED_CURRENCIES
from .serializers import ProductSerializer, OrderSerializer, ProductSensitiveSerializer
from .utils import email_helper, create_order_helper
from .blockonomics_utils import exchanged_rate
class AnonymousView(APIView):
permission_classes = [AllowAny, ]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_exception_handler(self):
default_handler = super().get_exception_handler()
def handle_exception(exc, context):
if not isinstance(exc, self.BaseException):
return default_handler(exc, context)
is_handled = False
response_payload = {}
if isinstance(exc, self.MissingParametersError):
is_handled = True
if hasattr(exc, 'error_fields'):
response_payload['error'] = {}
for e in exc.error_fields:
response_payload['error'][e] = ["This field is required!", ]
elif isinstance(exc, self.UnsupportedCryptoError):
is_handled = True
elif isinstance(exc, self.UnsupportedCurrencyError):
is_handled = True
if is_handled:
if hasattr(exc, 'error_key'):
if not response_payload.get('error'):
response_payload['error'] = {}
response_payload['error'][exc.error_key] = exc.default_detail
else:
response_payload['error_msg'] = exc.default_detail
response_payload['error_code'] = exc.default_code
return Response(
data = response_payload,
status = exc.status_code
)
else:
return default_handler(exc, context)
return handle_exception
def check_supported_currency(self, currency, raise_error=True):
if not currency:
if raise_error:
raise self.MissingParametersError(fields=['currency', ])
return False
if str(currency).upper() not in SUPPORTED_CURRENCIES:
if raise_error:
raise self.UnsupportedCurrencyError()
return False
def check_supported_crypto(self, currency, raise_error=True):
if not currency:
if raise_error:
raise self.MissingParametersError(fields=['crypto', ])
return False
if str(currency).upper() not in SUPPORTED_CRYPTOS:
if raise_error:
raise self.UnsupportedCurrencyError()
return False
class BaseException(APIException):
def __init__(self, fields=None, *args, **kwargs):
self.error_fields = fields
super().__init__(*args, **kwargs)
class MissingParametersError(BaseException):
status_code = 400
default_detail = 'Missing Parameters'
default_code = 'ERR_MISSING_PARAMETERS'
class UnsupportedCurrencyError(BaseException):
status_code = 400
default_detail = 'This currency is not supported yet'
default_code = 'ERR_UNSUPPORTED_CURRENCY'
error_key = 'currency'
class UnsupportedCryptoError(BaseException):
status_code = 400
default_detail = 'This crypto is not supported yet'
default_code = 'ERR_UNSUPPORTED_CRYPTO'
error_key = 'crypto'
class ProductCreateAPIView(AnonymousView, generics.CreateAPIView):
model = Product
serializer_class = ProductSerializer
def create(self, *args, **kwargs):
files = self.request.FILES.getlist('files')
if not files:
return Response({
"error": {
"files": ["Atleast 1 File is required!"]
}
}, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(data=self.request.data)
if not serializer.is_valid():
return Response({
"error": serializer.errors
}, status=status.HTTP_400_BAD_REQUEST)
product = serializer.save()
for _file in files:
File.objects.create(
product=product, file_data=_file.file.read(), file_name=_file.name
)
# self.request.session["product_id"] = product.pk # To be removed
data = {
"uuid": product.uid,
"token": product.token
}
return Response(data=data, status=status.HTTP_201_CREATED)
class ProductAPIView(generics.RetrieveAPIView, generics.UpdateAPIView):
model = Product
queryset = model.objects.all()
email_template = "emails/product_page.html"
def get_serializer_class(self):
if (self.request.method == 'GET' and self.request.query_params.get('token') != None) or \
(self.request.method != 'GET' and self.request.data.get('token') != None) :
return ProductSensitiveSerializer
return ProductSerializer
def get_object(self, *args, **kwargs):
query = {
"uid": self.kwargs.get("uid")
}
if self.request.method != 'GET':
token = self.request.data.get('token')
if not token:
raise PermissionDenied({"token": "Token is required to perform this operation"})
query['token'] = token
try:
return self.get_queryset(*args, **kwargs).get(**query)
except self.model.DoesNotExist:
raise NotFound("Product not found")
def check_email(self):
"""Check if Email was in payload, then trigger the Email
This Method does not check for Response Status and it must be checked
for a success status (20X) before calling this method.
"""
if not self.request.data.get('email'):
return
product = self.get_object()
track_uri = self.request.build_absolute_uri(
reverse("core:product_info_seller", kwargs={"token": product.token})
)
extra_email_context = {
"track_uri": track_uri,
"public_uri": self.request.build_absolute_uri(
reverse("core:product_info_buyer", kwargs={"uid": product.uid})
)
}
email_helper(
self.request,
product.email,
self.email_subject,
self.email_template,
html_email_template_name=self.email_template,
extra_email_context=extra_email_context,
)
def update(self, *args, **kwargs):
response = super().update(*args, **kwargs)
if response.status_code == 200:
self.check_email()
return response
def partial_update(self, *args, **kwargs):
response = super().partial_update(*args, **kwargs)
if response.status_code == 200:
self.check_email()
return response
# class CurrencyConverterAPIView(AnonymousView):
# def post(self, *args, **kwargs):
# currency = self.request.data.get('currency', 'USD')
# price = self.request.data.get('price')
# crypto = self.request.data.get('crypto')
# self.check_supported_currency(currency)
# self.check_supported_crypto(crypto)
# if price == None:
# raise self.MissingParametersError(fields=['price', ])
# try:
# price = float(price)
# except ValueError:
# return Response({
# "error": {
# "price": ["Must be a valid value.", ]
# }
# }, status=status.HTTP_400_BAD_REQUEST)
# bits = exchanged_rate(price, crypto, currency)
# converted_price = bits/pow(10, 8)
# return Response({
# "bits": bits,
# "price": converted_price
# })
class InitiateProductBuyAPIView(AnonymousView):
def post(self, *args, **kwargs):
product_uid = self.request.data.get('product_uid')
if not product_uid:
raise self.MissingParametersError(fields=['product_uid', ])
try:
product = Product.objects.get(uid=product_uid)
except self.model.DoesNotExist:
raise NotFound("Product not found")
crypto = self.request.data.get('crypto', 'BTC')
self.check_supported_crypto(crypto)
order: Order = create_order_helper(self.request, product, crypto, product.price)
return Response({
"order_uuid": order.uid,
})
class OrderAPIView(AnonymousView, generics.RetrieveAPIView):
model = Order
serializer_class = OrderSerializer
queryset = model.objects.all()
def get_object(self):
try:
return self.get_queryset().get(uid=self.kwargs['uid'])
except self.model.DoesNotExist:
raise NotFound("Order Not Found")
class OrderConfirmCallbackAPIView(AnonymousView):
    """Callback endpoint that advances an order's transaction status.

    The status is monotonic: it only ever moves forward.  A backwards status
    is rejected with HTTP 400.
    """

    def post(self, *args, **kwargs):
        status_of_transaction = self.request.data.get("status", None)
        # NOTE(review): falsy statuses (missing, but also 0) are rejected here —
        # confirm 0 is never a legitimate status value.
        if not status_of_transaction:
            raise self.MissingParametersError(fields=['status_of_transaction', ])
        order = get_object_or_404(Order, uid=kwargs['uid'])
        if status_of_transaction >= order.status_of_transaction:
            # The guard above already guarantees the incoming value is the
            # maximum, so the previous max(...) call was redundant.
            order.status_of_transaction = status_of_transaction
            order.save()
            return Response()
        return Response({
            "error": {
                "status": ["Order status wasn't changed.", ]
            }
        }, status=status.HTTP_400_BAD_REQUEST)
| [
"thisisayushaa@gmail.com"
] | thisisayushaa@gmail.com |
4e2d44d096408a816838502d7c6b3b8ddca6a483 | 737a67744a98a536eccf5e2012628271f9120f79 | /django/integration/apps/coursesApp/urls.py | 6d242cf48c1525964f3352f9758845e37abff9d8 | [] | no_license | iota-cohort-dc/Daniel-Perez | 31d581cf6494d69404925685ca55ec9a9b97611c | 57b6a69e4f6e02f8b0694787ab195e08ad5dc52b | refs/heads/master | 2021-01-20T16:59:30.730460 | 2017-07-03T04:12:16 | 2017-07-03T04:12:16 | 82,850,732 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | from django.conf.urls import url, include
from . import views

# Route table for the courses app; named routes are usable with
# reverse() / {% url %}.
urlpatterns = [
    url(r'^$', views.index, name="my_index"),
    url(r'^addcourse$', views.addcourse),
    url(r'^remove/(?P<id>\d+)$', views.remove, name="remove"),  # <id> captures a numeric pk
    url(r'^remove/nah$', views.nah),
    url(r'^delete/(?P<id>\d+)$', views.delete, name="delete"),
    url(r'^choose$', views.choose, name="choose"),
    url(r'^regUser$', views.regUser, name='regUser')
]
| [
"perez0231@yahoo.com"
] | perez0231@yahoo.com |
047b4e987fbda8f11734ed7c999d5776a8ab9eff | efa84f8a83bcf62f1c3b8b183cfe7e9d18dd2274 | /inicio/views.py | b7b4989b5fcc82bfc67189bc3b38816b32913fd1 | [] | no_license | comunidad/comunidad | 25815210175af45386d12a60fa7acb6c71da7317 | 76dcaf1c9cd27669d9213f855f5c274895566ddb | refs/heads/master | 2016-09-15T23:03:37.461476 | 2014-04-03T20:51:31 | 2014-04-03T20:51:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, response
def _render_page(request, template):
    """Shared helper: render *template* with the current user passed to the
    context as 'usuario' (all pages in this app use the same pattern)."""
    usuario = request.user
    return render_to_response(template, {'usuario': usuario},
                              context_instance=RequestContext(request))


def inicio(request):
    return _render_page(request, 'inicio/index.html')


def nosotros(request):
    return _render_page(request, 'inicio/nosotros.html')


def aliados(request):
    return _render_page(request, 'inicio/aliados.html')


def contactenos(request):
    return _render_page(request, 'inicio/contactenos.html')


def unete(request):
    return _render_page(request, 'inicio/unete.html')


def historia(request):
    return _render_page(request, 'inicio/historia.html')


def proyectos(request):
    return _render_page(request, 'inicio/proyectos.html')
| [
"jordanzet@gmail.com"
] | jordanzet@gmail.com |
f6348d0b72dc155330508be752dc840819e56af6 | 144b2cd272ccfdee88e44ea9d9c9d5b9d5e0f10a | /src/apps/news/frontend_articles.py | 9b68b7d64d9d8e05293c92377b095202bddf336c | [] | no_license | taerwin/redmap-org-au | 9aef2735cedf593b571d4ae05c4b17b35c724e0d | 8d727ef156a8fba79dae02494642530f1acaccf1 | refs/heads/master | 2020-04-05T04:28:39.458909 | 2013-03-08T05:08:18 | 2013-03-08T05:08:18 | 37,229,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.conf.urls.defaults import *
from news.views import ArticleView

# Old-style (pre-Django 1.8) patterns() call; the first argument is the
# view-name prefix for string views (unused here since ArticleView is a
# callable). <slug> matches anything up to the trailing slash.
urlpatterns = patterns(
    'article',
    url(r'^(?P<slug>.*)/$', ArticleView, name='article_view'),
)
| [
"olivergeorge@gmail.com"
] | olivergeorge@gmail.com |
fe2a9c358cea73e24517be3faf5332941075728f | 7f706ec1f1c68184e4cb27efb7011ce5adbbb7c8 | /source/input_error.py | 7da0f5e36f4d7bdbbb300db26cf278f35f79d902 | [] | no_license | DrewWeth/Etsy_Proj | 28bc1ac360b13ed4c2564be3e3cda4e72d8c3117 | f54273978b3c628254dad3726e08fe8dffc25dae | refs/heads/master | 2021-05-01T21:27:26.260997 | 2016-11-03T05:39:36 | 2016-11-03T05:39:36 | 72,502,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | class InputError(Exception):
    def __init__(self, *args):
        """Build the error message by joining all positional args with spaces."""
        self.value = ' '.join(map(str, args))

    def __str__(self):
        # repr() so the joined message is displayed quoted, e.g. "'bad input'".
        return repr(self.value)
| [
"dgwetherington@gmail.com"
] | dgwetherington@gmail.com |
0973e303387c642ff368400ddf1818de97ed61e4 | 454e3eea5e0c231c9e0d282a1f4d12f96fd48d2e | /vislice.py | 9000cce719f0797714a4f566edade6f22e7ea6a7 | [
"MIT"
] | permissive | ZupancicMatej/Vislice | 7bab16ae6382fb3aa956b5923c38c0e62b7e56d6 | a053e24a3b236b21477e809d06ce0079cf5e92ea | refs/heads/master | 2022-12-06T18:44:22.318063 | 2020-08-25T10:38:40 | 2020-08-25T10:38:40 | 261,460,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | import bottle, model
SKRIVNI_KLJUC = 'Uganil si najbolj skrit ključ vseh časov.'  # cookie-signing secret
DATOTEKA_S_STANJEM = 'stanje.json'  # file where the game registry is persisted

bottle.TEMPLATE_PATH.insert(0, 'views')

# Single shared game registry, backed by the state file.
vislice = model.Vislice(DATOTEKA_S_STANJEM)


@bottle.get("/")
def index():
    """Landing page."""
    return bottle.template('index')


@bottle.get('/img/<picture>')
def static_file(picture):
    """Serve static images from the img/ directory."""
    return bottle.static_file(picture, 'img')


@bottle.post('/nova_igra')
def nova_igra():
    """Create a new game, store its id in a signed cookie, redirect to it."""
    vislice.nalozi_igre_iz_datoteke()
    id_igre = vislice.nova_igra()
    vislice.zapisi_igre_v_datoteko()
    bottle.response.set_cookie("id_igre", id_igre, secret=SKRIVNI_KLJUC, path='/')
    bottle.redirect('/igra/')


@bottle.get("/igra/")
def pokazi_igro():
    """Show the current game identified by the signed cookie."""
    vislice.nalozi_igre_iz_datoteke()
    id_igre = bottle.request.get_cookie("id_igre", secret=SKRIVNI_KLJUC)
    igra, stanje = vislice.igre[id_igre]
    return bottle.template('igra', igra=igra, stanje=stanje, id_igre=id_igre,
                           ZMAGA=model.ZMAGA, PORAZ=model.PORAZ)


@bottle.post("/igra/")
def ugibaj():
    """Apply one guessed letter to the current game, then re-display it."""
    vislice.nalozi_igre_iz_datoteke()
    id_igre = bottle.request.get_cookie("id_igre", secret=SKRIVNI_KLJUC)
    crka = bottle.request.forms.crka
    vislice.ugibaj(id_igre, crka)
    vislice.zapisi_igre_v_datoteko()
    # BUG FIX: was redirect(f'igra/') — a relative path the browser resolves
    # against /igra/ to /igra/igra/ (404). Use the absolute path, consistent
    # with nova_igra(). (The f-string also had no placeholder.)
    bottle.redirect('/igra/')
bottle.run(reloader=True, debug=True) | [
"matej.zupancic.187@gmail.com"
] | matej.zupancic.187@gmail.com |
842fb2d463be662ae8287ffdcf8036943afe3972 | 06a34c24f91fe5b6f7511290c423cd0f574c309a | /two/categories/models.py | e880e7f66a60ad5939b7c2336de9b8dacb30de39 | [] | no_license | memoer/nomadcoder-challenger-airbnb-django | d3916f82891a75f56cf3e0dcf2c906bc74b4ba94 | 07158c406e33ed48f410454883bf341335f7b8ec | refs/heads/master | 2023-02-14T04:40:56.185206 | 2021-01-07T17:19:52 | 2021-01-07T17:19:52 | 327,678,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.db import models
from core import models as core_models
"""
Here are the models you have to create:
- Category
name
kind (book/movie/both)
"""
class Category(core_models.TimeStampedModel):
KIND_BOOK = "book"
KIND_MOVIE = "movie"
KIND_BOTH = "both"
KIND_CHOICES = (
(KIND_BOOK, "BOOK"),
(KIND_MOVIE, "Movie"),
(KIND_BOTH, "Both"),
)
name = models.CharField(max_length=64)
kind = models.CharField(
max_length=6, choices=KIND_CHOICES, default=KIND_BOOK)
def __str__(self):
return self.name
| [
"hanjn2842@naver.com"
] | hanjn2842@naver.com |
16d3dcfb87619f27adf5368a93a879583955a7e4 | b84bf301f318b6869d1cff2dfa670e9bffc52fc6 | /flask_test/sql_test.py | 9d2e9ac4ac8c4a90c58b8489128847cbaf5320c9 | [] | no_license | cui7616/first | de33d6a3d984c08fbd664ff988b9d04ec605448f | 83f3422570b22571ae2f5e577de6b8b50b41ecb1 | refs/heads/master | 2020-04-05T21:03:38.900897 | 2018-11-12T12:27:23 | 2018-11-12T12:27:23 | 157,205,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 11:40:10 2018
@author: czmy
"""
import pymysql
from sqlalchemy import create_engine
import pandas as pd
connect = create_engine('mysql+pymysql://root:cuizhen@localhost:3306/uwb?charset=utf8')
pd.io.sql.to_sql(database,'databse',connect,schema='uwb',if_exists='append')
## 打开数据库连接
#db = pymysql.connect("localhost","root","cuizhen","uwb" )
#
## 使用 cursor() 方法创建一个游标对象 cursor
#cursor = db.cursor()
#
## 使用 execute() 方法执行 SQL 查询
#cursor.execute("SELECT VERSION()")
#
## 使用 fetchone() 方法获取单条数据.
#data = cursor.fetchone()
#
#print ("Database version : %s " % data)
#
## 关闭数据库连接
#db.close() | [
"18810906151@163.com"
] | 18810906151@163.com |
14a71d44b821acf98471c1793c6519564bfb9a51 | 76569c8ac1b94e7872132728ada6ed5d5a5f7263 | /UI_Framework/__init__.py | 63c1d1f988b3670775dc9d8b5d1a97e7c83f6762 | [] | no_license | wushuang-cn/hogoworts | b63b66693034ace3489df76e50c125a86d6dee52 | 6b3a5cc6ce8b04b0318ca426a6bab36c2378049b | refs/heads/master | 2023-06-15T22:42:28.087692 | 2021-07-11T10:29:25 | 2021-07-11T10:29:25 | 342,494,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
# date:2021-03-12 11:09
# author:WuShuang5 | [
"wushuang@qq.com"
] | wushuang@qq.com |
4597f1e2e257ceefb60a45c4aaf2c97df6ad2444 | 17d68fd18f56b0dc0868dd09c023ed7d939445a4 | /labs/src/lorawan-nano-gateway/node/main.py | 586689e550de0598456c19f84088cdf7fd31b424 | [] | no_license | marcozennaro/iot-uaa-isoc | 770614c23a8d9bd93b38496c1107518357065d95 | 5284f94df26c6d0ebf5aa44c66f37963c45c6df4 | refs/heads/master | 2021-01-23T16:53:47.008230 | 2017-09-07T11:29:46 | 2017-09-07T11:29:46 | 102,751,582 | 0 | 0 | null | 2017-09-07T15:07:11 | 2017-09-07T15:07:11 | null | UTF-8 | Python | false | false | 1,469 | py | """ OTAA Node example compatible with the LoPy Nano Gateway """
from network import LoRa
import socket
import binascii
import struct
import time
# Initialize LoRa in LORAWAN mode.
lora = LoRa(mode=LoRa.LORAWAN)

# create an OTA authentication params
# NOTE(review): real device credentials are committed to source — rotate them
# and keep them out of version control.
dev_eui = binascii.unhexlify('00694156A5471B72'.replace(' ',''))
app_eui = binascii.unhexlify('70B3D57EF0005B3B'.replace(' ',''))
app_key = binascii.unhexlify('28073B6B55E1D6C9706609C695ABAEE9'.replace(' ',''))

# set the 3 default channels to the same frequency (must be before sending the OTAA join request)
lora.add_channel(0, frequency=868100000, dr_min=0, dr_max=5)
lora.add_channel(1, frequency=868100000, dr_min=0, dr_max=5)
lora.add_channel(2, frequency=868100000, dr_min=0, dr_max=5)

# join a network using OTAA
lora.join(activation=LoRa.OTAA, auth=(dev_eui, app_eui, app_key), timeout=0)

# wait until the module has joined the network (poll every 2.5 s)
while not lora.has_joined():
    time.sleep(2.5)
    print('Not joined yet...')

# remove all the non-default channels
for i in range(3, 16):
    lora.remove_channel(i)

# create a LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)

# set the LoRaWAN data rate
s.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)

# make the socket NON-blocking (original comment said "blocking", but the
# call passes False)
s.setblocking(False)

time.sleep(5.0)

# Send 200 numbered packets; after each, wait then poll for a downlink.
for i in range(200):
    s.send(b'PKT #' + bytes([i]))
    print('STA node sending packet ...')
    time.sleep(4)
    rx = s.recv(256)
    if rx:
        print(rx)
    time.sleep(6)
| [
"franckalbinet@gmail.com"
] | franckalbinet@gmail.com |
985e07d5d52b8101f2054d267b452b5d3d8e17d9 | 8d9bd54ea1642f78b20e89e7197e35de1390b808 | /problem575/problem575.py | f0d095d541cd68cc7a0b12777949564bc12f32a9 | [] | no_license | nanjakorewa/MyLeetcode | 6511e59a19b957e62eb3e62a38f9c5f0296a473d | 4c98264a880e9fdfadeb888226ffbe990568fb33 | refs/heads/master | 2022-04-12T12:13:59.302383 | 2022-03-28T06:43:32 | 2022-03-28T06:43:32 | 208,409,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | class Solution:
def distributeCandies(self, candies: List[int]) -> int:
    """Return the maximum number of distinct candy kinds the sister can get.

    She receives exactly half of the candies, so the answer is the smaller of
    the number of distinct kinds and half the total count.  Uses integer
    floor division instead of the original float division + int()/list
    round-trip; the result is identical (floor of the min equals the min of
    the floors when one operand is already an int).
    """
    return min(len(set(candies)), len(candies) // 2)
"arukuhoriguchi@gmail.com"
] | arukuhoriguchi@gmail.com |
a6ccdabf25478b632e6731a32f33b67960c479e9 | faa856f1cbdd57a5dc4634c3ff6bd01baaca62b8 | /tableros/migrations/0008_auto_20201004_2357.py | 7bc9664ccb217f6444a08fe52cb4f93f20b81d92 | [] | no_license | nelsonpenha/NNHR_PP2 | 98284ec1efee93c6939a2e3c4271edb7b9d01e3d | 481477a8f66d4bc24481527389baea1acc847566 | refs/heads/master | 2023-02-17T10:36:29.890954 | 2021-01-15T20:26:44 | 2021-01-15T20:26:44 | 296,743,006 | 0 | 0 | null | 2021-01-15T20:26:45 | 2020-09-18T22:37:23 | Python | UTF-8 | Python | false | false | 1,335 | py | # Generated by Django 3.1.2 on 2020-10-05 02:57
import datetime
from django.db import migrations, models
# Auto-generated by Django; schema edits should normally come from
# `makemigrations`, not manual changes to this file.
class Migration(migrations.Migration):
    """Schema migration for the `tableros` app.

    Creates the `Fases` model, removes the fase-related fields from
    `Tarjeta`, and makes `Tarjeta.idTarjeta` an auto-increment primary key.
    """

    dependencies = [
        ('tableros', '0007_tarjeta'),
    ]

    operations = [
        migrations.CreateModel(
            name='Fases',
            fields=[
                ('idFases', models.AutoField(primary_key=True, serialize=False)),
                ('nombreFases', models.CharField(max_length=256)),
                # Callable default: evaluated at row-creation time.
                ('fechaRegistro', models.DateField(default=datetime.datetime.now)),
                ('fechaLimite', models.DateField()),
                ('nombreTarjeta', models.CharField(max_length=256)),
                ('idUsuario', models.IntegerField()),
                ('idTarjeta', models.IntegerField()),
                ('estado', models.CharField(choices=[('Activo', 'Activo'), ('Inactivo', 'Inactivo')], default='Activo', max_length=15)),
            ],
        ),
        migrations.RemoveField(
            model_name='tarjeta',
            name='idFases',
        ),
        migrations.RemoveField(
            model_name='tarjeta',
            name='nombreFases',
        ),
        migrations.AlterField(
            model_name='tarjeta',
            name='idTarjeta',
            field=models.AutoField(primary_key=True, serialize=False),
        ),
    ]
"nelsonpenha10@gmail.com"
] | nelsonpenha10@gmail.com |
70db7b9152f138caa09c16117577af4c0a45a6ca | 9a9950616e0b4baa548a50eb4db58141e5a7d744 | /test.py | fc0ee7c029dbb871c6075d7a2fadf24d55841603 | [] | no_license | NCL-LIMIT/RABT-rainfall | 6c51ba646e8fbb567e6690d7f0b36075d226a38d | e60f64df3c895d553acf62d67a37d98f2f6f7d38 | refs/heads/main | 2023-04-03T02:39:33.627162 | 2021-03-24T11:27:22 | 2021-03-24T11:27:22 | 317,844,460 | 0 | 0 | null | 2021-03-24T11:27:23 | 2020-12-02T11:44:39 | Python | UTF-8 | Python | false | false | 69 | py | def hello(event, context):
    print(event)  # log the raw trigger payload
    return event['data']  # echo the 'data' field back; KeyError if it is absent
| [
"nkc124@ncl.ac.uk"
] | nkc124@ncl.ac.uk |
520194aa7cbe54c13c5886ca3b5a0c3f833c6097 | ebfd7b5a5de933f074ccc08583b41c8a1e34f643 | /bccc/client/channel.py | ea6d368478cfcfd13f21238bef1377fefce38001 | [
"Apache-2.0"
] | permissive | Schnouki/bccc | 92f90c67fe63da83da14700061888059e02de3d1 | c3143e8b4b2a81393ed59b046ce6e353007314e8 | refs/heads/master | 2016-09-10T18:00:48.132575 | 2012-10-30T10:23:23 | 2012-10-30T10:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,302 | py | # Copyright 2012 Thomas Jost
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software stributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import datetime
from xml.etree import cElementTree as ET
import logging
import threading
import dateutil.parser
from bccc.client import Atom, ATOM_NS, ATOM_THR_NS, UpdatableAtomsList
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# {{{ Exceptions
class ChannelError(Exception):
    """A generic error in a buddycloud channel.

    Base class for all channel-related exceptions raised by this module.
    """
    pass
class InvalidChannelName(ChannelError):
    """Invalid channel name, such as ``user@`` or ``@topics.buddycloud.org``."""

    def __init__(self, jid):
        # Logged at warning level: a malformed JID is usually bad input,
        # not a programming error.
        msg = "Invalid channel name: {}".format(jid)
        log.warning(msg)
        super().__init__(msg)
# }}}
# {{{ Channels
class Channel:
    """A buddycloud channel, tied to a client.

    Wraps the pubsub nodes of one channel JID (posts/status/subscriptions),
    keeps a local, newest-first list of Atom entries, and dispatches incoming
    pubsub events to user-registered callbacks.
    """

    # {{{ Channel init
    # Maps keys of the config dict exposed to callbacks -> pubsub form fields.
    CONFIG_MAP = (
        ("title", "pubsub#title"),
        ("description", "pubsub#description"),
        ("creation", "pubsub#creation_date"),
        ("type", "buddycloud#channel_type")
    )

    def __init__(self, client, jid):
        """Bind the channel *jid* to *client*; reject malformed JIDs."""
        log.debug("Initializing channel %s", jid)

        # Avoid invalid channel names (empty user or domain part).
        user, domain = jid.split("@", 1)
        if len(user) == 0 or len(domain) == 0:
            raise InvalidChannelName(jid)

        self.client = client
        self.jid = jid

        # All the channels items, newest first (as returned by the server)
        self.atoms = UpdatableAtomsList()
        self.atoms_lock = threading.RLock()
        self.load_event = threading.Event()
        self.loading = False
        self.oldest_id = None

        # Callbacks (set via set_callbacks); all optional.
        self.callback_config = None
        self.callback_post = None
        self.callback_retract = None
        self.callback_status = None

    def __iter__(self):
        """Iterate over the locally cached atoms, newest first."""
        return iter(self.atoms)

    def __repr__(self):
        return "<bccc.client.Channel {}>".format(self.jid)

    def set_callbacks(self, cb_config=None, cb_post=None, cb_retract=None, cb_status=None):
        """Register event callbacks; only non-None arguments are updated."""
        if cb_config is not None:
            self.callback_config = cb_config
        if cb_post is not None:
            self.callback_post = cb_post
        if cb_retract is not None:
            self.callback_retract = cb_retract
        if cb_status is not None:
            self.callback_status = cb_status
    # }}}
    # {{{ Subscriptions/affiliations
    def get_subscriptions(self):
        """Return the list of Channel objects this channel subscribes to.

        Channels that fail to load (ChannelError) are silently skipped.
        """
        channels = []
        subnode = "/user/" + self.jid + "/subscriptions"
        items = self.client.ps.get_items(self.client.inbox_jid, subnode, block=True)
        for item in items["pubsub"]["items"]:
            try:
                chan = self.client.get_channel(item["id"])
                channels.append(chan)
            except ChannelError:
                pass
        return channels
    # }}}
    # {{{ PubSub event handlers
    def handle_post_event(self, entries):
        """Add incoming post entries to the local list; notify callback_post
        with the atoms that were actually new."""
        # Incoming entries: add them and trigger the callback
        if len(entries) == 0:
            return
        atoms = []
        for elt in entries:
            a = self.atoms.add(elt)
            if a is not None:
                atoms.append(a)
        if len(atoms) > 0 and self.callback_post is not None:
            self.callback_post(atoms)

    def handle_retract_event(self, entries):
        """Drop retracted item ids from the local list; notify callback_retract."""
        if len(entries) == 0:
            return
        # Remove retracted items from self.atoms
        with self.atoms_lock:
            for id_ in entries:
                self.atoms.remove(id_)
        if self.callback_retract is not None:
            self.callback_retract(entries)

    def handle_status_event(self, entries):
        """Wrap the first status entry in an Atom and pass it to callback_status."""
        if len(entries) == 0:
            return
        elt = entries[0]
        if elt is not None:
            a = Atom(elt)
            if self.callback_status is not None:
                self.callback_status(a)

    def handle_config_event(self, config_events):
        """Convert each pubsub config form into a plain dict (per CONFIG_MAP,
        with 'creation' parsed to a datetime) and pass it to callback_config."""
        for conf in config_events:
            # Convert conf to a dict
            val = conf["form"]["values"]
            config = {}
            for (dk, ik) in self.CONFIG_MAP:
                if ik in val:
                    config[dk] = val[ik].strip()
            if "creation" in config:
                config["creation"] = dateutil.parser.parse(config["creation"])
            if self.callback_config is not None:
                self.callback_config(config)
    # }}}
    # {{{ Internal helpers
    def _items_to_atoms(self, items, callback=None):
        """Add the payloads of a pubsub items result to the local list.

        Returns the newly added atoms; invokes *callback* with them when at
        least one is new.
        """
        atoms = []
        with self.atoms_lock:
            for item in items["pubsub"]["items"]:
                elt = item.get_payload()
                a = self.atoms.add(elt)
                if a is not None:
                    atoms.append(a)
        if len(atoms) > 0 and callback is not None:
            callback(atoms)
        return atoms
    # }}}
    # {{{ PubSub requests
    def pubsub_get_items(self, node, callback, max=None, before=None, after=None):
        """
        Request the contents of a node's items.

        This is based on sleekxmpp.plugins.xep_0060.pubsub.xep_0060.get_items(),
        but uses XEP-0059 instead of the "max_items" attribute (cf.
        XEP-0060:6.5.7).
        """
        iq = self.client.ps.xmpp.Iq(sto=self.client.inbox_jid, stype="get")
        iq["pubsub"]["items"]["node"] = node
        if max is not None:
            iq["pubsub"]["rsm"]["max"] = str(max)
        if before is not None:
            iq["pubsub"]["rsm"]["before"] = before
        if after is not None:
            iq["pubsub"]["rsm"]["after"] = after
        iq.send(callback=callback)

    def pubsub_get_post(self, item_id):
        """Asynchronously fetch a single post by id; new atoms go to callback_post."""
        node = "/user/{}/posts".format(self.jid)
        cb = lambda items: self._items_to_atoms(items, self.callback_post)
        self.client.ps.get_item(self.client.inbox_jid, node, item_id, block=False, callback=cb)

    def pubsub_get_posts(self, max=None, before=None, after=None):
        """Asynchronously fetch a page of posts (XEP-0059 paging parameters)."""
        node = "/user/{}/posts".format(self.jid)
        cb = lambda items: self._items_to_atoms(items, self.callback_post)
        return self.pubsub_get_items(node, cb, max, before, after)

    def pubsub_get_status(self):
        """Asynchronously fetch the latest status item for this channel."""
        def _status_cb(items):
            # Drop empty payloads before dispatching.
            entries = [item.get_payload() for item in items["pubsub"]["items"]]
            while None in entries:
                entries.remove(None)
            if len(entries) > 0:
                self.handle_status_event(entries)
        node = "/user/{}/status".format(self.jid)
        self.pubsub_get_items(node, callback=_status_cb, max=1)

    def pubsub_get_config(self):
        """Asynchronously fetch the posts-node configuration form."""
        def _config_cb(iq):
            conf = iq["pubsub_owner"]["configure"]
            self.handle_config_event([conf])
        node = "/user/{}/posts".format(self.jid)
        self.client.ps.get_node_config(self.client.inbox_jid, node, callback=_config_cb)
    # }}}
    # {{{ Thread loading
    def get_partial_thread(self, first_id, last_id):
        """Fetch all posts from *first_id* (exclusive anchor) back to *last_id*
        by chaining paged requests through async callbacks."""
        # Hard to read. Sorry.
        node = "/user/{}/posts".format(self.jid)

        # Callback for items after first_id. Requests more items until last_id is found.
        def _other_posts_cb(atoms):
            if self.callback_post is not None:
                self.callback_post(atoms)
            ids = [a.id for a in atoms]
            if last_id not in ids:
                # Request next
                self.pubsub_get_items(node, cb2, max=20, before=ids[0])

        # Callback for first item. If found, will request the next ones.
        def _first_post_cb(atoms):
            if self.callback_post is not None:
                self.callback_post(atoms)
            ids = [a.id for a in atoms]
            if first_id in ids:
                self.pubsub_get_items(node, cb2, max=20, before=first_id)

        cb1 = lambda items: self._items_to_atoms(items, _first_post_cb)
        cb2 = lambda items: self._items_to_atoms(items, _other_posts_cb)

        # Request first item
        self.client.ps.get_item(self.client.inbox_jid, node, first_id, block=False, callback=cb1)
    # }}}
    # {{{ Items publishing
    def _make_atom(self, text, author_name=None, id_=None, in_reply_to=None, update_time=None):
        """Build an Atom <entry> element for publishing.

        Defaults: author is the client's bare JID, update time is now (UTC).
        ``in_reply_to`` adds a XEP/Atom-Threading reference.
        """
        # Build something that looks like an Atom and return it
        entry = ET.Element("entry", xmlns=ATOM_NS)
        if author_name is None:
            author_name = self.client.boundjid.bare
        if update_time is None:
            update_time = datetime.datetime.utcnow().isoformat()

        content = ET.SubElement(entry, "content")
        author = ET.SubElement(entry, "author")
        name = ET.SubElement(author, "name")
        updated = ET.SubElement(entry, "updated")

        content.text = text
        name.text = author_name
        updated.text = update_time

        if id_ is not None:
            # Probably not necessary: added by the server.
            id_el = ET.SubElement(entry, "id")
            id_el.text = id_
        if in_reply_to is not None:
            irt = ET.SubElement(entry, "{{{}}}in-reply-to".format(ATOM_THR_NS), ref=in_reply_to)
        return entry

    def publish(self, text, author_name=None, id_=None, in_reply_to=None):
        """Publish a post to this channel; return the server-assigned item id."""
        log.debug("Publishing to channel %s...", self.jid)
        entry = self._make_atom(text, author_name=author_name, id_=id_, in_reply_to=in_reply_to)
        node = "/user/{}/posts".format(self.jid)
        res = self.client.ps.publish(self.client.inbox_jid, node, payload=entry, id=id_)
        new_id = res["pubsub"]["publish"]["item"]["id"]
        log.info("Published to channel %s with id %s", self.jid, new_id)
        return new_id

    def retract(self, id_):
        """Retract (delete) a post by item id, notifying subscribers."""
        log.debug("Retracting %s from channel %s", id_, self.jid)
        node = "/user/{}/posts".format(self.jid)
        self.client.ps.retract(self.client.inbox_jid, node, id_, notify=True)

    def set_status(self, text, author_name=None):
        """Publish a new status entry; return the server-assigned item id."""
        log.debug("Setting status for channel %s...", self.jid)
        entry = self._make_atom(text, author_name=author_name)
        node = "/user/{}/status".format(self.jid)
        res = self.client.ps.publish(self.client.inbox_jid, node, payload=entry)
        id_ = res["pubsub"]["publish"]["item"]["id"]
        log.info("Status set for channel %s with id %s", self.jid, id_)
        return id_

    def update_config(self, **kwds):
        """Update the channel config; keyword names follow CONFIG_MAP keys
        (title, description, creation, type)."""
        # Create config form
        form = self.client.data_forms.make_form(ftype="submit")
        form.add_field(var="FORM_TYPE", ftype="hidden", value="http://jabber.org/protocol/pubsub#node_config")
        for (dk, ik) in self.CONFIG_MAP:
            if dk in kwds:
                form.add_field(var=ik, value=kwds[dk])
        log.info("Updating config for channel %s", self.jid)
        node = "/user/{}/posts".format(self.jid)
        self.client.ps.set_node_config(self.client.inbox_jid, node, form)
    # }}}
# }}}
# Local Variables:
# mode: python3
# End:
| [
"schnouki@schnouki.net"
] | schnouki@schnouki.net |
4135c8b2ebcd5938ccfb85de98194737134f303f | e6822cc0213b376f4a03c9b53cf6d516ec90dadb | /eventex/core/models.py | d406707d6aba1369fbe456da06081182363b7b0b | [] | no_license | miquelini/eventex | 15c20264edaa5ad2e6a21d45328ad9e15af47d7c | 51ed2101dbae461010a7da920e892d4e8b3b00b8 | refs/heads/master | 2016-09-05T10:57:13.421486 | 2013-05-22T17:32:23 | 2013-05-22T17:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,397 | py | # coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
from eventex.core.managers import KindContactManager, PeriodManager
class Speaker(models.Model):
    """A conference speaker with a public detail page (looked up by slug)."""
    name = models.CharField(_('Nome'), max_length=255)
    slug = models.SlugField(_('Slug'))
    url = models.URLField(_('Url'))
    description = models.TextField(_(u'Descrição'), blank=True)

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        # Resolved through the named URL 'core:speaker_detail'.
        return ('core:speaker_detail', (), {'slug': self.slug})
class Contact(models.Model):
    """A contact entry (phone / e-mail / fax) attached to a Speaker."""
    KINDS = (
        ('P', _('Telefone')),
        ('E', _('E-mail')),
        ('F', _('Fax')),
    )
    speaker = models.ForeignKey('Speaker', verbose_name=_('palestrante'))
    kind = models.CharField(_('tipo'), max_length=1, choices=KINDS)
    value = models.CharField(_('valor'), max_length=255)

    # Default manager plus per-kind pre-filtered managers
    # (see core.managers.KindContactManager).
    objects = models.Manager()
    email = KindContactManager('E')
    phones = KindContactManager('P')
    faxes = KindContactManager('F')

    def __unicode__(self):
        return self.value
class Talk(models.Model):
    """A scheduled talk, linked to one or more speakers and related media."""
    title = models.CharField(max_length=200)
    description = models.TextField()
    start_time = models.TimeField(blank=True)
    speakers = models.ManyToManyField('Speaker', verbose_name=_('palestrante'))

    # Custom manager imported from core.managers.
    objects = PeriodManager()

    class Meta:
        verbose_name = _('palestra')
        verbose_name_plural = _('palestras')

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return "/palestras/%d/" % self.pk

    @property
    def slides(self):
        # Related Media rows flagged as SlideShare ('SL').
        return self.media_set.filter(kind='SL')

    @property
    def videos(self):
        # Related Media rows flagged as YouTube ('YT').
        return self.media_set.filter(kind='YT')
class Course(Talk):
    """A Talk subclass adding enrollment slots and free-form notes."""
    slots = models.IntegerField()
    notes = models.TextField()

    objects = PeriodManager()
class Media(models.Model):
    """External media (YouTube video or SlideShare deck) attached to a Talk."""
    MEDIAS = (
        ('YT', _('YouTube')),
        ('SL', _('SlideShare')),
    )
    talk = models.ForeignKey('Talk')
    kind = models.CharField(_('Tipo'), max_length=2, choices=MEDIAS)
    # NOTE(review): max_length=2 for a title looks like a copy-paste typo from
    # `kind` above — confirm and widen (changing it requires a migration).
    title = models.CharField(_(u'Título'), max_length=2)
    media_id = models.CharField(_('Ref'), max_length=255)

    def __unicode__(self):
        return u'%s - %s' % (self.talk.title, self.title)
| [
"antoniomiquelini@gmail.com"
] | antoniomiquelini@gmail.com |
b6bc5885b935c612f5064b8d00b3d7925617802f | 01b01d1a4bac32fbe0421e1962daae4605f5378c | /app/drf_project/settings.py | b2616529155158a8cd35cc5323aaa55845d875de | [
"Apache-2.0"
] | permissive | khazelton/django-tdd-docker | 393359b238c1499d8f5dafc0d8e164cadbf46ba4 | 970846fbcdda9176d47f1f6ec4dd045d779e679d | refs/heads/main | 2023-05-28T05:46:27.607041 | 2021-06-10T02:33:03 | 2021-06-10T02:33:03 | 375,543,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | """
Django settings for drf_project project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'v5r*bw=rxesmierb$&ebq4)xnnros5zk&sjxf_hxd+td#xfght'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',  # new
    'movies',  # new
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'drf_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'drf_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'

# Custom user model lives in the `movies` app.
AUTH_USER_MODEL = 'movies.CustomUser'
| [
""
] | |
39a69bda9c3a5974ce20f69b45032e8e56118d3b | 82c4a9489318d5740eb2b6ec42b84404a96dafbb | /tools/ntu_gen18_joints_data.py | b6e5d82f917586eeb1f614657e7aeff1c5b44155 | [
"BSD-2-Clause"
] | permissive | SKBL5694/guard | 26dc217d009200cb10885ae2458b8a1bdc7c5153 | 55fa719197b08e11729a5dcc48418c49bd142f4a | refs/heads/master | 2023-04-12T14:37:48.747624 | 2021-04-26T08:22:27 | 2021-04-26T08:22:27 | 361,659,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,071 | py | import os
import sys
import pickle
import pdb
import argparse
import numpy as np
from numpy.lib.format import open_memmap
from utils.ntu_read_skeleton import read_xyz
from utils.ntu_read_skeleton_new import read_xyc
# Subject ids used for the training split in the cross-subject benchmark.
training_subjects = [
    1, 2, 4, 5, 8, 9, 13, 14, 15, 16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38
]
# Camera ids used for the training split in the cross-view benchmark.
training_cameras = [2, 3]
max_body = 2
num_joint = 18
max_frame = 300
toolbar_width = 30


def print_toolbar(rate, annotation=''):
    """Redraw a fixed-width text progress bar on the current line.

    *rate* is the completed fraction (0..1); *annotation* is printed before
    the bar.  Ends with a carriage return so the next call overwrites it.
    """
    sys.stdout.write("{}[".format(annotation))
    for step in range(toolbar_width):
        # Dashes up to the completed fraction, spaces for the remainder.
        mark = '-' if step * 1.0 / toolbar_width <= rate else ' '
        sys.stdout.write(mark)
        sys.stdout.flush()
    sys.stdout.write(']\r')
def end_toolbar():
    """Terminate the progress-bar line by emitting a newline."""
    print()
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Convert raw NTU-RGB-D skeleton files into a memory-mapped array.

    Scans ``data_path`` for skeleton files, keeps only the 10 action classes
    of interest, splits them into train/val according to ``benchmark``, then
    writes ``<part>_label.pkl`` (sample names + labels) and ``<part>_data.npy``
    of shape (N, 3, max_frame, num_joint, max_body) into ``out_path``.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Directory receiving the .pkl/.npy outputs (must exist).
        ignored_sample_path: Optional text file listing sample names to skip,
            one per line, without the ``.skeleton`` extension.
        benchmark: 'xview' (split by camera id) or 'xsub' (split by subject).
        part: 'train' or 'val'.

    Raises:
        ValueError: If ``benchmark`` or ``part`` is not a recognised value.
    """
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [
                line.strip() + '.skeleton' for line in f.readlines()
            ]
    else:
        ignored_samples = []

    # Maps the original NTU action-class ids to contiguous labels 0..9.
    # Hoisted out of the loop: it is invariant across files.
    action2label = {7: 0, 8: 1, 9: 2, 10: 3, 22: 4,
                    23: 5, 24: 6, 26: 7, 27: 8, 31: 9}

    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Filenames encode metadata as 3-digit fields after the
        # 'A' (action), 'P' (performer/subject) and 'C' (camera) markers.
        action_class = int(
            filename[filename.find('A') + 1:filename.find('A') + 4])
        subject_id = int(
            filename[filename.find('P') + 1:filename.find('P') + 4])
        camera_id = int(
            filename[filename.find('C') + 1:filename.find('C') + 4])
        # Skip action classes outside the 10 kept ones; previously any such
        # file crashed the run with a KeyError on action2label.
        if action_class not in action2label:
            continue
        if benchmark == 'xview':
            istraining = camera_id in training_cameras
        elif benchmark == 'xsub':
            istraining = subject_id in training_subjects
        else:
            raise ValueError('unknown benchmark: {}'.format(benchmark))
        if part == 'train':
            issample = istraining
        elif part == 'val':
            issample = not istraining
        else:
            raise ValueError('unknown part: {}'.format(part))
        if issample:
            sample_name.append(filename)
            sample_label.append(action2label[action_class])

    # NOTE(review): a leftover `pdb.set_trace()` breakpoint was removed here;
    # it halted every run before any output file was written.
    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # fp shape: (num_samples, channels, max_frame, num_joint, max_body)
    fp = open_memmap(
        '{}/{}_data.npy'.format(out_path, part),
        dtype='float32',
        mode='w+',
        shape=(len(sample_label), 3, max_frame, num_joint, max_body))
    for i, s in enumerate(sample_name):
        print_toolbar(i * 1.0 / len(sample_label),
                      '({:>5}/{:<5}) Processing {:>5}-{:<5} data: '.format(
                          i + 1, len(sample_name), benchmark, part))
        # data: (3, num_frame, num_joint, max_body); clips shorter than
        # max_frame leave the remaining frames zero-filled by the fresh memmap.
        data = read_xyc(
            os.path.join(data_path, s), max_body=max_body, num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data
    end_toolbar()
if __name__ == '__main__':
    # CLI entry point: converts the raw skeleton files for every
    # benchmark/part combination, one output directory per benchmark.
    cli = argparse.ArgumentParser(description='NTU-RGB-D Data Converter.')
    cli.add_argument(
        '--data_path', default='data/NTU-RGB-D/nturgb+d_skeletons')
    # parser.add_argument(
    #     '--ignored_sample_path',
    #     default='resource/NTU-RGB-D/samples_with_missing_skeletons.txt')
    cli.add_argument('--out_folder', default='data/NTU-RGB-D')
    args = cli.parse_args()

    for split in ['xsub', 'xview']:
        # The target directory depends only on the benchmark, so it is
        # created once per split rather than once per part.
        target_dir = os.path.join(args.out_folder, split)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        for subset in ['train', 'val']:
            gendata(
                args.data_path,
                target_dir,
                None,
                benchmark=split,
                part=subset)
| [
"1776539381@qq.com"
] | 1776539381@qq.com |
722b57c78a30f9c81869bfc7ed632dad388fd1c2 | e794830865d3b7f3b655a87f7830501d1728a2b1 | /blog/models.py | 257809642007767922685aa9aaa1068ba5c5ad11 | [] | no_license | wooyeon11/pyneersat1 | c30f63046ed94dae790256498b5b2a937ad90dea | fbd210b7a68418599199f2fac7f6c4bc971dcbaa | refs/heads/master | 2020-03-25T04:08:16.420594 | 2018-08-03T10:03:44 | 2018-08-03T10:03:44 | 143,379,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A single blog entry authored by a site user."""

    # Posts are removed together with their author.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Creation is stamped automatically; publication stays empty until
    # publish() is called.
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(null=True, blank=True)

    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        """Use the post title as the human-readable representation."""
        return self.title
# Create your models here.
| [
"woo4675@gmail.com"
] | woo4675@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.