blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
06cfe0fa19da2a26141394e50c8f8718a685650d | 1ed2a9652de283276f3e74ecdedf36bb93c1e018 | /blog_website/blog_website/urls.py | 8618aac87443d72334bfc78fb7ad7c3c6ce630df | [] | no_license | robcamp-code/django_blog | 827c567043a22df3809222cabb1b30eb9b6293a8 | 0f688d0286c4fd123f00350c8e3d571fa21ca3db | refs/heads/master | 2023-06-06T07:46:39.353483 | 2021-06-26T14:12:20 | 2021-06-26T14:12:20 | 380,522,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | """blog_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register', user_views.register, name='register'),
path('blog/', include('blog.urls')),
]
| [
"robcampbell26042604@gmail.com"
] | robcampbell26042604@gmail.com |
7d5d3566f8081503f4981349ffda1b3bdbd985ce | 469456a0bf4c7c040a8136b111195a82de17cc6b | /rgb_module.py | 40e20d6431581e0f70174a91bfb45c03869c9133 | [] | no_license | 6reg/function_fun | d2e73387e78396618d074c97a2b0088647693b4e | 64bf2d6211f9bdcc030922a7f5292c37a31eddd9 | refs/heads/main | 2023-07-07T23:42:46.426225 | 2021-08-07T22:10:47 | 2021-08-07T22:10:47 | 370,854,414 | 0 | 1 | null | 2021-07-07T02:20:18 | 2021-05-25T23:39:07 | Python | UTF-8 | Python | false | false | 666 | py | DATA_FILE = "./rgb_values.txt"
def load_data():
    """Read DATA_FILE (CSV rows of ``name,r,g,b``) into a dict.

    Returns a mapping of colour name -> list of [r, g, b] triples and
    prints the number of distinct names alongside the total row count.
    """
    data = {}
    source = open(DATA_FILE)
    total_rows = 0
    for raw_line in source:
        add_color(data, raw_line.strip())
        total_rows += 1
    print(len(data), total_rows)
    source.close()
    return data
def add_color(data, line):
    """Append the RGB triple parsed from *line* under its colour name."""
    color_name = line.split(',')[0]
    rgb = color_from_line(line)
    data.setdefault(color_name, []).append(rgb)
def color_from_line(line):
    """Parse a CSV record ``name,r,g,b[,...]`` and return ``[r, g, b]`` as ints.

    Raises ValueError if a colour field is not an integer and IndexError if
    fewer than four comma-separated fields are present.

    Fix: removed a leftover debug ``print(parts)`` that echoed every raw
    record to stdout on each call.
    """
    parts = line.split(',')
    return [int(parts[1]), int(parts[2]), int(parts[3])]
# Build the colour table immediately when the module is imported/executed.
load_data()
| [
"mathiasgreg@gmail.com"
] | mathiasgreg@gmail.com |
f6019c2598b79ad89fa55066e16fc9be6482d77c | ab49b10919255f935831568dc62948cae12afbcd | /database.py | c5b50793bdff9d6c253bfb137dadfdb20c6042e4 | [] | no_license | huangwenxi/ubc | 34ab1d3fdd446299a83287dfc4a4f926f0b75724 | ffae3c0a1e9634bda19300cd4701d19c456716a7 | refs/heads/master | 2020-03-17T20:50:10.161536 | 2018-05-28T13:48:40 | 2018-05-28T13:48:40 | 133,930,876 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,602 | py | #coding=utf-8
import sqlite3
import logging
import traceback
from pprint import pprint
from logconfig import logger
class MyDataBase (object):
    """Minimal sqlite3 wrapper for tables with a single
    ``IMSI INTEGER PRIMARY KEY`` column.

    Note: table names are interpolated into the SQL text (identifiers
    cannot be bound as parameters), so they must come from trusted code.
    Row values are now bound as proper SQL parameters.
    """

    def __init__(self, database_name="test.db"):
        # Open (creating if necessary) the backing database file.
        self.conn = sqlite3.connect(database_name)

    def _commit(self):
        self.conn.commit()

    def _execute(self, sql, params=()):
        """Execute one statement: commit on success, roll back and re-raise on error."""
        try:
            self.conn.execute(sql, params)
            self.conn.commit()
        except Exception:
            self.conn.rollback()
            raise

    def create_table(self, table_name):
        """Create `table_name` (fixed IMSI schema) if it does not already exist."""
        self._execute("CREATE TABLE IF NOT EXISTS " + str(table_name) +
                      " (IMSI INTEGER PRIMARY KEY NOT NULL);")

    def drop_table(self, table_name):
        """Drop `table_name` if it exists."""
        self._execute("DROP TABLE IF EXISTS " + str(table_name) + ";")

    def insert(self, table_name, value):
        """Insert one IMSI row into `table_name`."""
        self._execute("INSERT INTO " + str(table_name) + " (IMSI) VALUES(?);",
                      (value,))

    def delete(self, table_name, value):
        """Delete the row whose IMSI equals `value`.

        Bug fix: the original built INSERT-style syntax
        (``DELETE FROM t (IMSI) VALUES(...)``), which is invalid SQL and
        always raised an OperationalError.
        """
        self._execute("DELETE FROM " + str(table_name) + " WHERE IMSI = ?;",
                      (value,))

    def update(self, table_name, value):
        """Set IMSI to `value` on every row (no WHERE clause, as originally)."""
        self._execute("UPDATE " + str(table_name) + " SET IMSI = ?;", (value,))

    def query(self, table_name, value):
        """Return the first row whose IMSI equals `value`, or None.

        On database errors the exception is logged and None is returned
        (matching the original behaviour); the cursor is now always closed,
        not only on the error path.
        """
        cu = self.conn.cursor()
        try:
            cu.execute("SELECT IMSI FROM " + str(table_name) +
                       " WHERE IMSI = ?", (value,))
            return cu.fetchone()
        except Exception as e:
            logger.error(e)
        finally:
            cu.close()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
if __name__ == '__main__':
    # Smoke test: look up a known IMSI in the default database file.
    database = MyDataBase()
    matched = database.query('mobilecomm', '11111111111')
    if matched is not None:
        print("existed!")
| [
"389125903@qq.com"
] | 389125903@qq.com |
83a06f4dcd736336f0501d6bd4aa3a13a87113b8 | 7024d0fab7adee2937ddab28a2b69481ef76f9a8 | /llvm-archive/hlvm/build/bytecode.py | e21978e91a6f32943d5ac37306068592af7b29a3 | [] | no_license | WeilerWebServices/LLVM | 5b7927da69676a7c89fc612cfe54009852675450 | 1f138562730a55380ea3185c7aae565d7bc97a55 | refs/heads/master | 2022-12-22T09:05:27.803365 | 2020-09-24T02:50:38 | 2020-09-24T02:50:38 | 297,533,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | from SCons.Environment import Environment as Environment
import re,fileinput,os
from string import join as sjoin
from os.path import join as pjoin
def AsmFromBytecodeMessage(target, source, env):
    """Progress message shown while lowering bytecode to native assembly."""
    prefix = "Generating Native Assembly From LLVM Bytecode"
    return prefix + source[0].path
def AsmFromBytecodeAction(target, source, env):
    """Run llc to lower an LLVM bytecode file into native assembly."""
    llc = env['with_llc']
    command = "%s -f -fast -o %s %s" % (llc, target[0].path, source[0].path)
    action = env.Action(command)
    env.Depends(target, llc)
    return env.Execute(action)
def BytecodeFromAsmMessage(target, source, env):
    """Progress message shown while assembling LLVM IR into bytecode."""
    return "Generating Bytecode From LLVM Assembly " + source[0].path
def BytecodeFromAsmAction(target, source, env):
    """Assemble an LLVM .ll file into bytecode with llvm-as."""
    assembler = env['with_llvmas']
    command = "%s -f -o %s %s %s" % (
        assembler, target[0].path, source[0].path, env['LLVMASFLAGS'])
    action = env.Action(command)
    env.Depends(target, assembler)
    return env.Execute(action)
def BytecodeFromCppMessage(target, source, env):
    """Progress message shown while compiling C++ into bytecode."""
    return "Generating Bytecode From C++ Source " + source[0].path
def BytecodeFromCppAction(target, source, env):
    """Compile a C++ source file to LLVM bytecode with llvm-g++."""
    includes = ""
    for inc in env['CPPPATH']:
        # A leading '#' marks a path relative to the absolute source root.
        if inc[0] == '#':
            inc = env['AbsSrcRoot'] + inc[1:]
        includes += " -I" + inc
    defines = ""
    for macro in env['CPPDEFINES'].keys():
        value = env['CPPDEFINES'][macro]
        if value is None:
            defines += " -D" + macro
        else:
            defines += " -D" + macro + "=" + value
    command = (env['with_llvmgxx'] + ' $CXXFLAGS ' + includes + defines +
               " -c -emit-llvm -g -O3 -x c++ " + source[0].path +
               " -o " + target[0].path)
    action = env.Action(command)
    env.Depends(target, env['with_llvmgxx'])
    return env.Execute(action)
def BytecodeArchiveMessage(target, source, env):
    """Progress message shown while archiving bytecode modules."""
    return "Generating Bytecode Archive From Bytecode Modules"
def BytecodeArchiveAction(target, source, env):
    """Bundle bytecode modules into an archive with llvm-ar."""
    paths = "".join(" " + node.path for node in source)
    action = env.Action(env['with_llvmar'] + ' cr ' + target[0].path + paths)
    env.Depends(target[0], env['with_llvmar'])
    return env.Execute(action)
def Bytecode(env):
    """Attach the LLVM bytecode builders (ll2bc, cpp2bc, bc2s and
    BytecodeArchive) to the SCons environment.  Returns 1 so callers can
    treat the registration as a success flag."""
    # Pair each action with its human-readable progress message.
    bc2s = env.Action(AsmFromBytecodeAction,AsmFromBytecodeMessage)
    ll2bc = env.Action(BytecodeFromAsmAction,BytecodeFromAsmMessage)
    cpp2bc = env.Action(BytecodeFromCppAction,BytecodeFromCppMessage)
    arch = env.Action(BytecodeArchiveAction,BytecodeArchiveMessage)
    bc2s_bldr = env.Builder(action=bc2s,suffix='s',src_suffix='bc',
        single_source=1)
    ll2bc_bldr = env.Builder(action=ll2bc,suffix='bc',src_suffix='ll',
        single_source=1)
    cpp2bc_bldr = env.Builder(action=cpp2bc,suffix='bc',src_suffix='cpp',
        single_source=1)
    # The archive builder can chain through the .ll/.cpp builders for inputs.
    arch_bldr = env.Builder(action=arch,suffix='bca',src_suffix='bc',
        src_builder=[ cpp2bc_bldr,ll2bc_bldr])
    env.Append(BUILDERS = {
        'll2bc':ll2bc_bldr, 'cpp2bc':cpp2bc_bldr, 'bc2s':bc2s_bldr,
        'BytecodeArchive':arch_bldr
    })
    return 1
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
ed8de41b57686f39ab6621b10dee6f3dbbf38f75 | c528c03fc1598c563ab2225f61c54775e72f83c3 | /src/monitoring-service/client_mqtt.py | 288a88f4b21605cf7ea69d5eeff88221ff6a4a0a | [] | no_license | mespinasti/TFG-SmartGateway | 19533b899a03fc35d4820cb9c0a252b8ca7ef387 | 0d752decd8945baebda59cc3723f04c96deddcb7 | refs/heads/main | 2023-07-27T02:47:54.012289 | 2021-09-14T07:25:14 | 2021-09-14T07:25:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
import json
import sys
# Identity of this bridge client on the broker.
client_name = "shapes-smart-sensors"
# Per-room topics that normalised sensor data is republished to.
topic_living_room = "zigbee2mqtt/smart-sensors/living-room"
topic_kitchen = "zigbee2mqtt/smart-sensors/kitchen"
topic_bedroom = "zigbee2mqtt/smart-sensors/bedroom"
topic_bathroom = "zigbee2mqtt/smart-sensors/bathroom"
# Wildcard topic the bridge subscribes to for raw zigbee2mqtt sensor messages.
topic = "zigbee2mqtt/+/SENSOR"
broker = "localhost"
port = 1883
keepalive = 60
# Not referenced elsewhere in this file -- TODO confirm before removing.
time_day = 10
def connect_mqtt() -> mqtt:
    """Create an MQTT client, connect it to the module-level broker and
    return it.  (The return annotation names the paho module itself, as in
    the original code.)"""
    def on_connect(client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection.
        if rc == 0:
            print('Connected to MQTT Broker {}:{} with return code {}!'.format(broker, port, rc))
        else:
            print('Failed to connect MQTT Broker, return code {}\n'.format(rc))

    client = mqtt.Client(client_name)
    client.on_connect = on_connect
    client.connect(broker, port, keepalive)
    return client
def subscribe(client: mqtt):
    """Subscribe `client` to the wildcard sensor topic and install a
    message handler that normalises payloads and republishes them per room."""
    def on_message(client, userdata, msg):
        print('Received {} from {} topic!'.format(msg.payload.decode(), msg.topic))
        convert_message(client, msg)

    def convert_message(client, msg):
        # Normalise boolean fields (door contact / motion occupancy) to 0/1
        # before republishing.
        sensor_data = json.loads(msg.payload.decode())
        if 'door' in msg.topic:
            if sensor_data["contact"] == True:
                sensor_data["contact"] = 1
                print_message(msg, sensor_data)
                publish(client, sensor_data, msg)
            else:
                sensor_data["contact"] = 0
                print_message(msg, sensor_data)
                publish(client, sensor_data, msg)
        elif 'motion' in msg.topic:
            if sensor_data["occupancy"] == True:
                sensor_data["occupancy"] = 1
                print_message(msg, sensor_data)
                publish(client, sensor_data, msg)
            else:
                sensor_data["occupancy"] = 0
                print_message(msg, sensor_data)
                publish(client, sensor_data, msg)
        else:
            # Any other sensor type is forwarded unchanged.
            print_message(msg, sensor_data)
            publish(client, sensor_data, msg)

    def print_message(msg, sensor_data):
        # NOTE(review): only door/motion messages are echoed here; other
        # sensor types pass through silently -- confirm that is intended.
        if 'door' in msg.topic:
            print('Sensor data: {}'.format(sensor_data))
        elif 'motion' in msg.topic:
            print('Sensor data: {}'.format(sensor_data))

    client.subscribe(topic)
    client.on_message = on_message
def publish(client, sensor_data, msg):
    """Republish the converted sensor payload to the per-room topic that
    matches msg.topic."""
    def on_publish(client, userdata, result):
        print('Sensor data published to {} topic!\n'.format(topic_publish))

    def check_room(msg):
        # Map the incoming topic to the outgoing per-room topic.
        # NOTE(review): returns None when no room name matches, so publish()
        # would then be called with topic=None -- confirm every incoming
        # topic contains one of these room names.
        if 'living-room' in msg.topic:
            return topic_living_room
        elif 'kitchen' in msg.topic:
            return topic_kitchen
        elif 'bedroom' in msg.topic:
            return topic_bedroom
        elif 'bathroom' in msg.topic:
            return topic_bathroom

    data = json.dumps(sensor_data)
    topic_publish = check_room(msg)
    client.publish(topic_publish, payload=data, qos=0, retain=False)
    client.on_publish = on_publish
def run():
    """Connect, subscribe and loop forever; disconnect cleanly on Ctrl-C.

    Bug fix: if KeyboardInterrupt fired before connect_mqtt() returned, the
    original handler referenced `client` before assignment and raised
    UnboundLocalError; `client` is now pre-initialised and checked.
    """
    client = None
    try:
        client = connect_mqtt()
        subscribe(client)
        client.loop_forever()
    except KeyboardInterrupt:
        if client is not None:
            client.disconnect()

run()
| [
"43780193+MariaEspinosaAstilleros@users.noreply.github.com"
] | 43780193+MariaEspinosaAstilleros@users.noreply.github.com |
444a6eed71517393dc2d96006997c11065029854 | 1101120d931216546797fefce7051ed13b61311a | /gyro.py | ceb489db0246709dd679ee8f5acd2368a51cb9d1 | [] | no_license | denisvitez/ev3rvp | 04289615921ae849f8da923d5b4378483ceba7b3 | a1eafbede62ac33222878340b06143abc618fd27 | refs/heads/master | 2021-01-19T21:21:46.168360 | 2017-05-23T08:04:07 | 2017-05-23T08:04:07 | 88,646,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | #!/usr/bin/env python3
import ev3dev.ev3 as ev3
from time import sleep
btn = ev3.Button()
gy = ev3.GyroSensor()
#Gyro can be reset with changing between modes :) (WTF!)
gy.mode = 'GYRO-ANG'
gy.mode = 'GYRO-RATE'
gy.mode='GYRO-ANG'
while not btn.any():
print("Gyro loop")
angle = gy.value()
print(str(angle))
sleep(0.1)
| [
"denis.vitez@gmail.com"
] | denis.vitez@gmail.com |
4d3a28630f6c2527f6ee7323c474a70b2160e19d | 16cd1387a0b187172ef94bd8cfa95f25e2e00ff5 | /plugin.video.Zamir.latest/plugin.video.Zamir/Zamir.py | ee593e3a283aa2de4610183d239f984d97b38809 | [] | no_license | t5i/XBMCAddons | d8752249d08ba085d062044aa506dcb9c6b643aa | 97114e15191962ceb838204cdc164b8e5b50670e | refs/heads/master | 2016-09-05T12:48:24.711908 | 2015-05-06T08:45:08 | 2015-05-06T08:45:08 | 24,891,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,732 | py | # -*- coding: utf-8 -*-
"""
Plugin for streaming video content from Hidabroot
"""
import urllib, urllib2, re, os, sys
import xbmcaddon, xbmc, xbmcplugin, xbmcgui
import HTMLParser
import BeautifulSoup
import urlparse
import json
from xml.sax import saxutils as su
##General vars
### http://www.hidabroot.org/he/vod_categories/10/all/all/all/0?SupplierID=1&page=0%2C99
__plugin__ = "addons://sources/video/Zamir"
__author__ = "t5i"
# Remote endpoints: JSON listing webservice plus the media/site hosts.
base_domain="www.hidabroot.org"
baseVOD_url = "http://rabbizamircohen.org/Webservices/GetAllVideo.php"
baseVOD_urlGlobl="http://www.hidabroot.org"
# sys.argv[0] is this plugin's own URL prefix when launched by XBMC.
base_url = sys.argv[0]
__settings__ = xbmcaddon.Addon(id='plugin.video.Zamir')
__PLUGIN_PATH__ = __settings__.getAddonInfo('path')
__DEBUG__ = __settings__.getSetting('DEBUG') == 'true'
LIB_PATH = xbmc.translatePath( os.path.join( __PLUGIN_PATH__, 'resources', 'appCaster' ) )
import CommonFunctions
common = CommonFunctions
common.plugin = "plugin.video.Zamir"
def build_XBMCurl(query):
    """Return a plugin-routing URL: base_url plus the urlencoded query dict."""
    return base_url + '?' + urllib.urlencode(query)
def playMovie(url):
    """Fetch `url` and scrape a media link from the response.

    NOTE(review): shadowed by the later playMovie(uri, title) definition
    below, so this version is effectively dead code; it also references the
    undefined names `addFinalLink` and `name` and would crash if called.
    """
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36')
    #req.add_header('Cookie', 'visid_incap_172050=p38EPb70SDKdiG5ET58Q1Y1qv1QAAAAAQUIPAAAAAAAZe7rcEcv5x02MxQ2ETK0o; incap_ses_264_172050=oTvpMEi/eBHltm1z3eqpA3tvGVUAAAAAO2gzlNYBuURCjCKde3pqqQ==; _gat=1; has_js=1; _ga=GA1.2.1227030176.1421830788')
    #req.add_header('referer','http://www.hidabroot.org/he/video/70494')
    print req
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    media=re.compile('<a href="(.*?)"<span style').findall(link)
    print ("the vk link is "+ str(media))
    addFinalLink(media[0],str(name))
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict of key/value
    strings.  Returns an empty list (original quirk preserved) when no
    parameters are present."""
    param=[]
    paramstring=sys.argv[2]
    hndl=sys.argv[1]
    print "sys.argv[1] ->"+hndl
    if len(paramstring)>=2:
        params=sys.argv[2]
        cleanedparams=params.replace('?','')
        # Strip a trailing slash, if any, before splitting.
        if (params[len(params)-1]=='/'):
            params=params[0:len(params)-2]
        pairsofparams=cleanedparams.split('&')
        param={}
        for i in range(len(pairsofparams)):
            splitparams={}
            splitparams=pairsofparams[i].split('=')
            if (len(splitparams))==2:
                param[splitparams[0]]=splitparams[1]
    return param
def GetPageNLink(uri):
    """Download `uri` with a desktop-browser User-Agent and return the body."""
    print "req URL = " + uri
    req = urllib2.Request(uri)
    req.add_header('User-Agent', ' Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    return link
def addDir(name,url,mode,iconimage):
    """Add a directory entry that routes back into this plugin with the
    given url/mode/name.  NOTE(review): despite the name, isFolder=False --
    entries behave as items, not folders."""
    u=sys.argv[0]+"?url="+urllib.quote_plus(url) +"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
    #u= build_XBMCurl({'mode': 'folder', 'foldername': name,'httplink': url})#sys.argv[0]+ url
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo(type="Video", infoLabels={ "Title": name } )
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
    return ok
def addDirNextPage(name,url,mode,iconimage):
    """Add a folder entry used for "next page" navigation back into the plugin."""
    u=sys.argv[0]+"?url="+urllib.quote_plus(url) +"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
    #u= build_XBMCurl({'mode': 'folder', 'foldername': name,'httplink': url})#sys.argv[0]+ url
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo(type="folder", infoLabels={ "Title": name } )
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
def addLink(name,url,iconimage):
    """Add a directly playable item; `url` is the media URL itself."""
    ok=True
    liz=xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name } )
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
    return ok
#
#http://rabbizamircohen.org/Images/10072008_????_????_-_???_?`_10102011140129605.jpg
#http://rabbizamircohen.org/Videos/Pirke-Avot-part-A.mp4
def GetLinksPage (uri):
    """Fetch the JSON listing of all videos from `uri` and add one playable
    item (with thumbnail) per entry in its "Video" array."""
    links=GetPageNLink(uri)
    jsonObject = json.loads(links)
    #match=re.compile('(<div class="views-field views-field-title">.*?</div>)').findall(links) # Videos
    #print "uri ------------------------------------------------------------------------------------ views-field views-field-title "
    #print links
    print "next pageeeepageeeepageeeepageeeepageeeepageeeepageeeepageeeepageeee"
    parent = jsonObject["Video"]
    json_string = json.dumps(jsonObject,sort_keys=True,indent=2)
    for item in parent:
        sFile = item["videoFile"].encode('utf-8').strip()
        sName = item["name"].encode('utf-8').strip()
        sCat = item["category"].encode('utf-8').strip()
        sImg = item["imgThumb"].encode('utf-8').strip()
        addLink(sName,"http://rabbizamircohen.org/Videos/" + sFile, "http://rabbizamircohen.org/Images/" + sImg)#addDir(sName,sFile,"1","")
        #print sName, " -- " , sCat
    ##addDirNextPage("..הבא..",baseVOD_url+str(iPage),2,"")
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
    ##print "-------------------"
    ##sPage=uri.split("=")[2].split("C")[1]
    ##iPage=int(sPage)+1
    ##print iPage
    ##for url in match:
    ##    matchVidURL=re.compile('<a href="(/he/video/.*?)">(.*?)</a>').findall(url)
    ##    for name,uri in matchVidURL:
    ##        addDir(uri,name,"1","")
    ##        #addLink(uri,name,"")
    ##addDirNextPage("..הבא..",baseVOD_url+str(iPage),2,"")
    ##xbmcplugin.endOfDirectory(int(sys.argv[1]))
def GetLinkVideo(uri,name,iconimage):
    """Fetch a video page, scrape its first <source src> URL, add it to the
    directory, then start playback."""
    html=""
    html=GetPageNLink(uri)
    matchVid=re.compile('<source src="(.*?)" />').findall(html)
    print matchVid[0]
    addLink("",matchVid[0],"")
    playMovie(matchVid[0],"")
def playMovie(uri,title):
    """Start playback of `uri` in the XBMC player (overrides the earlier
    playMovie defined above)."""
    li = xbmcgui.ListItem('Hrav Zamir')
    xbmc.Player().play(item=uri, listitem=li)
# ---- Plugin entry point: parse the routing parameters XBMC passes via sys.argv.
params=get_params()
print "test"
args = urlparse.parse_qs(sys.argv[2][1:])
print args
mode = args.get('mode', None)
urll=args.get('url')
# Routing modes: '1' opens/plays a single video page, '2' lists the next page.
FolderMode='1'
NextPage='2'
print urll
###links=GetPageNLink(baseVOD_url)
#GetLinksPage(baseVOD_url)
print "mode="+str(mode) + " url=" + str(urll) + " len=" + str(len(str(urll)))
if mode==None or urll==None or len(urll)<1:
    # First invocation: show the full video listing.
    GetLinksPage(baseVOD_url)
else:
    if mode[0]==FolderMode:
        print "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        GetLinkVideo(baseVOD_urlGlobl+urll[0],"","")
    else:
        if mode[0] == NextPage:
            print "222"
            GetLinksPage(urll[0])
xbmcplugin.setPluginFanart(int(sys.argv[1]),xbmc.translatePath( os.path.join( __PLUGIN_PATH__, "fanart.png") ))
xbmcplugin.endOfDirectory(int(sys.argv[1]),cacheToDisc=True)
| [
"jobs.sect@gmail.com"
] | jobs.sect@gmail.com |
088b410f5a31c2d7b111041f37dc76929cbfb894 | 067735e90d676125dfe5cd58279ef300811f3b09 | /Pong.py | 9c07201380095c67921a58d9c55d5099380d6c01 | [] | no_license | Acerosa/PongGame | 255d51ae7abf85119086a2f52e467bd8e2a27f45 | 6ed5bfe567adfe2f9679cc15b60094f017c4d7f6 | refs/heads/main | 2023-01-21T08:46:16.180032 | 2020-12-01T11:56:48 | 2020-12-01T11:56:48 | 316,269,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | import pygame, sys
from objects.Ball import Ball
from objects.Paddle import Paddle
from objects.CollisionControl import CollisionControl
from objects.Score import Score
pygame.init()

# Window geometry and palette.
WIDTH, HEIGHT = 900, 500
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
def paint_screen():
    """Clear the playfield and draw the centre dividing line."""
    screen.fill(BLACK)
    pygame.draw.line(screen, WHITE, (WIDTH//2, 0),(WIDTH//2, HEIGHT), 6)
def reset():
    """Repaint the field and return scores and paddles to their initial state."""
    paint_screen()
    score1.restart_score_()
    score2.restart_score_()
    paddle1.restart_paddle_position()
    paddle2.restart_paddle_position()
# Initial draw, window title, and game-object construction.
paint_screen()

pygame.display.set_caption('Pong!')

ball = Ball(screen, WHITE, WIDTH//2, HEIGHT // 2, 16)
paddle1 = Paddle(screen, WHITE, 15, HEIGHT//2-60, 20, 120)
paddle2 = Paddle(screen, WHITE, WIDTH-20-15, HEIGHT//2-60, 20, 120)
collision = CollisionControl()
score1 = Score(screen, '0', WIDTH//4, 15)
score2 = Score(screen, '0', WIDTH - WIDTH//4, 15)

#VARIABLES
# True while a rally is in progress; goals and R pause the game again.
playing = False
#main loop
while True:
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            # P starts a rally, R resets the whole match.
            if event.key == pygame.K_p and not playing:
                ball.startMoving()
                playing = True
            if event.key == pygame.K_r:
                reset()
                playing = False
            # W/S drive the left paddle; arrow keys drive the right paddle.
            if event.key == pygame.K_w:
                paddle1.state = 'up'
            if event.key == pygame.K_s:
                paddle1.state = 'down'
            if event.key == pygame.K_UP:
                paddle2.state = 'up'
            if event.key == pygame.K_DOWN:
                paddle2.state = 'down'
        if event.type == pygame.KEYUP:
            paddle1.state = 'stopped'
            paddle2.state = 'stopped'
    if playing:
        # Repaint the playfield.
        paint_screen()
        # Advance and draw the ball.
        ball.moveBall()
        ball.showBall()
        # Paddle 1: move, keep inside the window, draw.
        paddle1.movePaddle()
        paddle1.clamp()
        paddle1.showPaddle()
        # Paddle 2
        paddle2.movePaddle()
        paddle2.clamp()
        paddle2.showPaddle()
        # Checking and resolving collisions.
        if collision.collision_ball_paddle1(ball, paddle1):
            ball.paddle_collision()
        if collision.collision_ball_paddle2(ball, paddle2):
            # NOTE(review): paddle2 bounces via margin_collision() while
            # paddle1 uses paddle_collision() -- confirm the asymmetry is
            # intentional.
            ball.margin_collision()
        if collision.collision_ball_margin(ball):
            ball.margin_collision()
        # A goal updates the score, recentres everything and pauses play.
        if collision.check_for_player1_goal(ball):
            paint_screen()
            score1.increase()
            ball.restart_ball_position()
            paddle1.restart_paddle_position()
            paddle2.restart_paddle_position()
            playing = False
        if collision.check_for_player2_goal(ball):
            paint_screen()
            score2.increase()
            ball.restart_ball_position()
            paddle1.restart_paddle_position()
            paddle2.restart_paddle_position()
            playing = False
    score1.showScore()
    score2.showScore()
| [
"rjrosa@outlook.com"
] | rjrosa@outlook.com |
e7be4e103a6dd93b9d7842801c24a3d174cbca53 | b2c94948ad2ae8367e7d82b689b5344fe62bb967 | /CalorieCloud/CalorieCloud/core/views.py | 74ebb62346c2b5fa31621efdeae31d99a7ede1a4 | [] | no_license | frenchie4111/b-calorie-cloud-mana | b0016af108594846e04543112b643cb9ac5cb9fb | e4ca3d157dc776933e0faf5f6451c21c2e08c536 | refs/heads/master | 2021-01-13T02:10:56.838523 | 2013-10-19T17:58:00 | 2013-10-19T17:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from django.http import HttpResponse
from CalorieCloud.helpers import render, redirect
def respondToHomePage( request, flash, flash_negative ):
    """Render the core home page with optional flash messages.

    Fix: removed the unused local `logged = request.user.is_authenticated()`
    left over from a disabled auth flow.
    """
    return render( request, "core/home_page.html", { "flash" : flash, "flash_negative" : flash_negative } )
def home_page( request ): # Render Homepage
# if(not request.user.is_authenticated()):
# return redirect("/register")
# if( "logged" in request.GET and request.user.is_authenticated() ):
# return respondToHomePage( request, "Logged In", False )
# if( "already_logged" in request.GET and request.user.is_authenticated() ):
# return respondToHomePage( request, "Already Logged In", True )
# if( "registered" in request.GET ):
# return respondToHomePage( request, "Account creation successful", False )
# if( "shed_created" in request.GET ):
# return respondToHomePage( request, "Shed creation successful", False )
return respondToHomePage( request, False, False ) | [
"mike@Athena-2.local"
] | mike@Athena-2.local |
56f7c3b75781176c8c211e3d1a86345e6544e8be | ae74e9e03e9b8ba1d407bd5907d3fe197ce47a44 | /ggplot/04-graphs.py | 8d7d9c715bd502c503e9f71fb44de994503d773c | [] | no_license | dylanjorgensen/modules | 3f937298df6b1da0851cfbc4cbf6f046b81b303c | 9296284d3acdb0f899ad19f013fff4d73d0fcc0b | refs/heads/master | 2021-01-11T03:23:11.591989 | 2016-10-22T01:21:53 | 2016-10-22T01:21:53 | 71,014,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from ggplot import *
import pandas as pd
import numpy as np
# Generate data
df = pd.read_csv("baseball-pitches-clean.csv")
df = df[['pitch_time', 'inning', 'pitcher_name', 'hitter_name', 'pitch_type',
'px', 'pz', 'pitch_name', 'start_speed', 'end_speed', 'type_confidence']]
print df.head()
# print df.dtypes
# Strike "scatterplot"
# print ggplot(df, aes(x='px', y='pz')) + geom_point()
# Basic "histogram"
print ggplot(df, aes(x='start_speed')) + geom_histogram()
# Basic "facet wrap"
# print ggplot(aes(x='start_speed'), data=df) + geom_histogram() + facet_wrap('pitch_name')
# Basic "bar graph"
# print ggplot(aes(x='pitch_type'), data=df) + geom_bar()
# Basic "facet grid" # This lines up the grid for comparison
# print ggplot(aes(x='start_speed'), data=df) + geom_histogram() + facet_grid('pitch_type')
# Basic "geom density" # To compare various categorical frequency's in the same field
# print ggplot(df, aes(x='start_speed')) + geom_density()
# print ggplot(df, aes(x='start_speed', color='pitch_name')) + geom_density()
| [
"dylan@dylanjorgensen.com"
] | dylan@dylanjorgensen.com |
9dc4a46f329c9eef419067bda1483d94b6bde1e4 | 6bb7208938b4fffefe0d4365e524a025dfe86733 | /test.py | 88e3f69597d8bb042313db79413a2e62d8561292 | [] | no_license | lphcreat/binning-and-woe | d673ff58e7ce3039786aa86c266d7a3922c70060 | 888a34eb3d5ebeecdc24ee1d9982776cf83bc164 | refs/heads/master | 2020-05-29T21:12:13.314703 | 2020-01-08T01:53:21 | 2020-01-08T01:53:21 | 189,374,323 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,371 | py | if __name__ == "__main__":
#分割数据
#加载数据
from sklearn.datasets import load_iris
import pandas as pd
from binning_woe.binning.sklearn_bin import NumtoCategorical as nc
from binning_woe.sklearn_woe import CattoWoe
iris = load_iris()
df=pd.concat([pd.DataFrame(iris.data),pd.DataFrame(iris.target)],ignore_index=True,axis=1)
df.columns=iris.feature_names+['target']
df=df[df['target'].isin([1,2])]
#分割数据
Sp=nc(bins_num=3,num_cols=iris.feature_names)
clf=Sp.fit(df,'target',split_func='chi')
dff=clf.transform()
Cw=CattoWoe('target')
wclf=Cw.fit(dff)
wdf=wclf.transform()
print(wdf.head())
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
cols=list(filter(lambda item:item !='target',wdf.columns))
X,x_test,Y,y_test=train_test_split(wdf[cols],wdf['target'],test_size=0.33,shuffle=True)
clf = LogisticRegression()
clf.fit(X, Y)
score_test = classification_report(y_test, clf.predict(x_test))
print(score_test)
X,x_test,Y,y_test=train_test_split(df[cols],df['target'],test_size=0.33,shuffle=True)
clf = LogisticRegression()
clf.fit(X, Y)
score_test = classification_report(y_test, clf.predict(x_test))
print(score_test) | [
"lipenghui@haoboyihai.com"
] | lipenghui@haoboyihai.com |
ce468def2393bff1ca0655ab216cd12b060d7fbd | 29c1b8ba898fd04d8fa3f8c9b9b90416a96a6079 | /Triangle.py | ece54e7d2f471437cc73f9b390dee39bf9bf5697 | [] | no_license | 3lton007/Triangle567 | 92dc4778286813f554da77668b64c66be06b718d | b7b2e25663aacaa547226f241f0c17c49e6ab6cb | refs/heads/master | 2020-12-27T21:27:24.894538 | 2020-02-05T03:20:19 | 2020-02-05T03:20:19 | 238,037,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,071 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 13:44:00 2016
Updated Jan 21, 2018
The primary goal of this file is to demonstrate a simple python program to classify triangles
@author: jrr
@author: rk
"""
def classifyTriangle(a,b,c):
    """Classify a triangle from its three integer side lengths.

    Returns one of:
      'InvalidInput'  -- a side is not an int, or is outside 1..200
      'NotATriangle'  -- the sides violate the triangle inequality
      'Equilateral'   -- all three sides equal
      'Right'         -- the sides satisfy the Pythagorean theorem
      'Scalene'       -- no two sides equal
      'Isosceles'     -- exactly two sides equal

    Fixes over the original: the documented upper bound (<= 200) is now
    enforced, and the right-triangle test covers every ordering of the
    hypotenuse (the original tested a*a + b*b == c*c twice and never
    tested a*a + c*c == b*b, so e.g. (3, 5, 4) was misclassified).
    """
    # All three inputs must be integers.
    if not(isinstance(a,int)) or not(isinstance(b,int)) or not(isinstance(c,int)):
        return 'InvalidInput'

    # Sides must be positive and no larger than 200, per the spec above.
    if a <= 0 or b <= 0 or c <= 0 or a > 200 or b > 200 or c > 200:
        return 'InvalidInput'

    # Triangle inequality: each side strictly less than the sum of the others.
    if (a >= (b + c)) or (b >= (a + c)) or (c >= (a + b)):
        return 'NotATriangle'

    # Now we know we have a valid triangle.
    if a == b == c:
        return 'Equilateral'

    # Right triangle: test with the largest side as the hypotenuse.
    x, y, z = sorted((a, b, c))
    if x * x + y * y == z * z:
        return 'Right'

    if a != b and b != c and c != a:
        return 'Scalene'

    return 'Isosceles'
| [
"eltonaloys@gmail.com"
] | eltonaloys@gmail.com |
7cf4d5fa42e5bb5f8c6671a7804cbb0e83c0c6cd | 7a343e1b7a3c2ef50d684a5ce991ebef800f32be | /shells/hapflk/VCMA/iterate.py | 14c2846986508f0aa42bbd731153c401f7894a64 | [] | no_license | melisakman/Helianthus | 04d2dc8315f8d14d2f38faa8bce7282e2cc2b439 | 5500207a2bbfe2e63c639f3194f732a41d527cb7 | refs/heads/master | 2021-11-09T05:12:31.331797 | 2021-11-03T05:30:32 | 2021-11-03T05:30:32 | 60,215,326 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import sys
# Generate one SLURM submission script per ~20 Mb window of a chromosome,
# each running hapflk on that window.
# Usage: iterate.py <chromosome> <chromosome_length>
chr = sys.argv[1]
chr_length = int(sys.argv[2])

# Number of 20 Mb windows, padded so the final partial window is covered.
bins = int(chr_length/20000000) + 2
iterations = range(1,bins)
start = 1
end = 20000000
for i in iterations:
    file_name = "hapflk_invariant_chr" + str(chr) + "_" + str(i) + ".sh"
    file = open(file_name, 'w')
    file.write("""#!/bin/bash
#SBATCH -D /clusterfs/rosalind/users/makman/GATK/bcftools_isec/secondFilter/
#SBATCH -J flk_""" + str(chr) + "_" + str(i) + """\n#SBATCH --partition=vector
#SBATCH --qos=vector_batch
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --mem=64000
#SBATCH -o /global/home/users/makman/GATK/outs/hapflk_sed_chr""" + str(chr) + "_" + str(i) + "_variantOnly.out" +
    """\n#SBATCH -e /global/home/users/makman/GATK/outs/hapflk_sed_chr""" + str(chr) + "_" + str(i) + "_variantOnly.err" +
    """\n#SBATCH --mail-user=makman@berkeley.edu
#SBATCH --mail-type=All
#SBATCH --time=800:00:00
module load hapflk/1.4
hapflk --file chr""" + str(chr) + "_intersect_variantOnly_modified --miss_pheno 0 --chr " + str(chr) + " --from " + str(start) + " --to " + str(end) + " -p chr" + str(chr) + "_" + str(i) + " --ncpu 16 -K 15")
    # Fix: close each generated script explicitly instead of relying on
    # interpreter exit to flush the buffers.
    file.close()
    start += 20000000
    end += 20000000
| [
"melisakman@Meliss-MacBook-Pro.local"
] | melisakman@Meliss-MacBook-Pro.local |
13ca447a848bd0a023408cd1481328960e470cf6 | e3dd6014dfcef3a1eb8215431b382325be86926e | /NaturalLanguageProcessing/textdomain/admin.py | 1b4e1e073b7ac7e8cd385a0e84bf3d80b9e3af68 | [] | no_license | shahabhameed/NLPTextStats | b27f1a377e5340fd0978f793a118b701daac165b | e7bcebd3678bc2929b1fa59310edf2c2770046e3 | refs/heads/master | 2020-04-03T11:45:13.889218 | 2012-12-13T22:38:24 | 2012-12-13T22:38:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from textdomain.models import Domain, Blacklist, Text, Word
from django.contrib import admin
class DomainAdmin(admin.ModelAdmin):
fields = ['name']
admin.site.register(Domain, DomainAdmin)
class TextAdmin(admin.ModelAdmin):
fields = ['name', 'text']
admin.site.register(Text, TextAdmin)
class BlacklistAdmin(admin.ModelAdmin):
fields = ['name']
admin.site.register(Blacklist, BlacklistAdmin)
admin.site.register(Word)
| [
"claybourne.david@gmail.com"
] | claybourne.david@gmail.com |
c96610c552c644c9695b00f73fcf6a9216731243 | 66c3fb74ddc601679747b164e43047f94e5d8bac | /day2.py | e97551421d1b254d0bb7878bec6e503dc4f0b239 | [] | no_license | paavop/AdventOfCode2019 | 62cde2d5ecbb81074831be3ad4a47616cae5b65c | a8d2e5b51d3b33f90e19fc3c954c5031de7cf95b | refs/heads/master | 2020-09-23T01:05:28.798274 | 2019-12-13T13:28:15 | 2019-12-14T15:04:12 | 225,361,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import operator
filepath = 'inputs/day2.txt'
with open(filepath) as fp:
line = fp.readline().rstrip()
cmdlist_mem = line.split(",")
# Part 1
def solve_with(cmd1, cmd2, cmdlist):
cmdlist[1] = cmd1
cmdlist[2] = cmd2
ops = {
"1": operator.add,
"2": operator.mul
}
index = 0
while(True):
cmd = cmdlist[index]
if cmd == '99':
break
operation = ops.get(cmd)
first = int(cmdlist[index+1])
second = int(cmdlist[index+2])
goal = int(cmdlist[index+3])
cmdlist[goal] = str(operation(int(cmdlist[first]), int(cmdlist[second])))
index +=4
return cmdlist[0]
print(solve_with('12', '2', cmdlist_mem.copy()))
# Part 2
def look_for_result(result, cmdlist):
for i in range(99):
for j in range(99):
ans = solve_with(str(i), str(j), cmdlist.copy())
if ans == result:
return(100*i+j)
print(look_for_result('19690720', cmdlist_mem))
| [
"paavo.parssinen@nokia.com"
] | paavo.parssinen@nokia.com |
efa2c847437a4664e7fb5a130f7dc6e093b737e4 | c115ba8fc9acc9bd2fd886a9507247e02e0c1035 | /_core/config/base.py | 772cce5b3eb76dbf2a447f35d3df4b365b7390fc | [] | no_license | ingafter60/completedjangoblog | bf368740f1171819689f231edd0e3ae4a61083de | df0f84630976d43e74d916dbf4a8cb4444176f7f | refs/heads/master | 2021-04-03T19:04:06.463307 | 2020-03-20T00:40:55 | 2020-03-20T00:40:55 | 248,388,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,909 | py | """
Django settings for _core project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g%fyss+5dnhw7q*3do!y(kug#_#2@1$j^4(!k19iuzqx7zbb)w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# new
'ckeditor',
'apps.base',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '_core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '_core.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"inyoman_gurnitha@yahoo.com"
] | inyoman_gurnitha@yahoo.com |
fe75aa03e43019e1066a7e638a69ca2612f323d6 | 91ca2b4215b74c3b3f2517baab9205997c9bcf62 | /maps/migrations/0009_auto_20180205_1945.py | 319e4b341f7faee495ba53495621ba57b938524c | [
"Apache-2.0"
] | permissive | swiftlabUAS/SwiftUTM | ae0ca347058563a040c24b740a5760187e507879 | caf40195b017ab980323cf88bf95e671e38a2676 | refs/heads/master | 2022-12-16T00:02:01.766221 | 2020-09-22T14:27:12 | 2020-09-22T14:27:12 | 297,254,220 | 0 | 1 | MIT | 2020-09-22T14:27:14 | 2020-09-21T06:55:30 | null | UTF-8 | Python | false | false | 487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-05 16:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('maps', '0008_locationpoints_shortcode'),
]
operations = [
migrations.AlterField(
model_name='locationpoints',
name='shortcode',
field=models.CharField(blank=True, max_length=3, null=True),
),
]
| [
"geoffreynyagak@gmail.com"
] | geoffreynyagak@gmail.com |
f0d45c4482fab4fd7e9e9807c9f0bd38de1ebd83 | aaa204ad7f134b526593c785eaa739bff9fc4d2a | /airflow/providers/cncf/kubernetes/utils/pod_manager.py | c65150b4181d4f3ada2c2247d020f2265a91a707 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | cfei18/incubator-airflow | 913b40efa3d9f1fdfc5e299ce2693492c9a92dd4 | ffb2078eb5546420864229cdc6ee361f89cab7bd | refs/heads/master | 2022-09-28T14:44:04.250367 | 2022-09-19T16:50:23 | 2022-09-19T16:50:23 | 88,665,367 | 0 | 1 | Apache-2.0 | 2021-02-05T16:29:42 | 2017-04-18T20:00:03 | Python | UTF-8 | Python | false | false | 16,175 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches PODs"""
from __future__ import annotations
import json
import math
import time
import warnings
from contextlib import closing
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Iterable, cast
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from pendulum import DateTime
from pendulum.parsing.exceptions import ParserError
from urllib3.exceptions import HTTPError as BaseHTTPError
from airflow.exceptions import AirflowException
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from kubernetes.client.models.core_v1_event_list import CoreV1EventList
class PodLaunchFailedException(AirflowException):
"""When pod launching fails in KubernetesPodOperator."""
def should_retry_start_pod(exception: BaseException) -> bool:
"""Check if an Exception indicates a transient error and warrants retrying"""
if isinstance(exception, ApiException):
return exception.status == 409
return False
class PodPhase:
"""
Possible pod phases
See https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase.
"""
PENDING = 'Pending'
RUNNING = 'Running'
FAILED = 'Failed'
SUCCEEDED = 'Succeeded'
terminal_states = {FAILED, SUCCEEDED}
def container_is_running(pod: V1Pod, container_name: str) -> bool:
"""
Examines V1Pod ``pod`` to determine whether ``container_name`` is running.
If that container is present and running, returns True. Returns False otherwise.
"""
container_statuses = pod.status.container_statuses if pod and pod.status else None
if not container_statuses:
return False
container_status = next((x for x in container_statuses if x.name == container_name), None)
if not container_status:
return False
return container_status.state.running is not None
def get_container_termination_message(pod: V1Pod, container_name: str):
try:
container_statuses = pod.status.container_statuses
container_status = next((x for x in container_statuses if x.name == container_name), None)
return container_status.state.terminated.message if container_status else None
except (AttributeError, TypeError):
return None
@dataclass
class PodLoggingStatus:
"""Used for returning the status of the pod and last log time when exiting from `fetch_container_logs`"""
running: bool
last_log_time: DateTime | None
class PodManager(LoggingMixin):
"""
Helper class for creating, monitoring, and otherwise interacting with Kubernetes pods
for use with the KubernetesPodOperator
"""
def __init__(
self,
kube_client: client.CoreV1Api = None,
in_cluster: bool = True,
cluster_context: str | None = None,
):
"""
Creates the launcher.
:param kube_client: kubernetes client
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
"""
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
self._watch = watch.Watch()
def run_pod_async(self, pod: V1Pod, **kwargs) -> V1Pod:
"""Runs POD asynchronously"""
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug('Pod Creation Request: \n%s', json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug('Pod Creation Response: %s', resp)
except Exception as e:
self.log.exception(
'Exception when attempting to create Namespaced Pod: %s', str(json_pod).replace("\n", " ")
)
raise e
return resp
def delete_pod(self, pod: V1Pod) -> None:
"""Deletes POD"""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_random_exponential(),
reraise=True,
retry=tenacity.retry_if_exception(should_retry_start_pod),
)
def create_pod(self, pod: V1Pod) -> V1Pod:
"""Launches the pod asynchronously."""
return self.run_pod_async(pod)
def await_pod_start(self, pod: V1Pod, startup_timeout: int = 120) -> None:
"""
Waits for the pod to reach phase other than ``Pending``
:param pod:
:param startup_timeout: Timeout (in seconds) for startup of the pod
(if pod is pending for too long, fails task)
:return:
"""
curr_time = datetime.now()
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase != PodPhase.PENDING:
break
self.log.warning("Pod not yet started: %s", pod.metadata.name)
delta = datetime.now() - curr_time
if delta.total_seconds() >= startup_timeout:
msg = (
f"Pod took longer than {startup_timeout} seconds to start. "
"Check the pod events in kubernetes to determine why."
)
raise PodLaunchFailedException(msg)
time.sleep(1)
def follow_container_logs(self, pod: V1Pod, container_name: str) -> PodLoggingStatus:
warnings.warn(
"Method `follow_container_logs` is deprecated. Use `fetch_container_logs` instead"
"with option `follow=True`.",
DeprecationWarning,
)
return self.fetch_container_logs(pod=pod, container_name=container_name, follow=True)
def fetch_container_logs(
self, pod: V1Pod, container_name: str, *, follow=False, since_time: DateTime | None = None
) -> PodLoggingStatus:
"""
Follows the logs of container and streams to airflow logging.
Returns when container exits.
"""
def consume_logs(*, since_time: DateTime | None = None, follow: bool = True) -> DateTime | None:
"""
Tries to follow container logs until container completes.
For a long-running container, sometimes the log read may be interrupted
Such errors of this kind are suppressed.
Returns the last timestamp observed in logs.
"""
timestamp = None
try:
logs = self.read_pod_logs(
pod=pod,
container_name=container_name,
timestamps=True,
since_seconds=(
math.ceil((pendulum.now() - since_time).total_seconds()) if since_time else None
),
follow=follow,
)
for raw_line in logs:
line = raw_line.decode('utf-8', errors="backslashreplace")
timestamp, message = self.parse_log_line(line)
self.log.info(message)
except BaseHTTPError as e:
self.log.warning(
"Reading of logs interrupted with error %r; will retry. "
"Set log level to DEBUG for traceback.",
e,
)
self.log.debug(
"Traceback for interrupted logs read for pod %r",
pod.metadata.name,
exc_info=True,
)
return timestamp or since_time
# note: `read_pod_logs` follows the logs, so we shouldn't necessarily *need* to
# loop as we do here. But in a long-running process we might temporarily lose connectivity.
# So the looping logic is there to let us resume following the logs.
last_log_time = since_time
while True:
last_log_time = consume_logs(since_time=last_log_time, follow=follow)
if not self.container_is_running(pod, container_name=container_name):
return PodLoggingStatus(running=False, last_log_time=last_log_time)
if not follow:
return PodLoggingStatus(running=True, last_log_time=last_log_time)
else:
self.log.warning(
'Pod %s log read interrupted but container %s still running',
pod.metadata.name,
container_name,
)
time.sleep(1)
def await_container_completion(self, pod: V1Pod, container_name: str) -> None:
while not self.container_is_running(pod=pod, container_name=container_name):
time.sleep(1)
def await_pod_completion(self, pod: V1Pod) -> V1Pod:
"""
Monitors a pod and returns the final state
:param pod: pod spec that will be monitored
:return: Tuple[State, Optional[str]]
"""
while True:
remote_pod = self.read_pod(pod)
if remote_pod.status.phase in PodPhase.terminal_states:
break
self.log.info('Pod %s has phase %s', pod.metadata.name, remote_pod.status.phase)
time.sleep(2)
return remote_pod
def parse_log_line(self, line: str) -> tuple[DateTime | None, str]:
"""
Parse K8s log line and returns the final state
:param line: k8s log line
:return: timestamp and log message
:rtype: Tuple[str, str]
"""
split_at = line.find(' ')
if split_at == -1:
self.log.error(
"Error parsing timestamp (no timestamp in message %r). "
"Will continue execution but won't update timestamp",
line,
)
return None, line
timestamp = line[:split_at]
message = line[split_at + 1 :].rstrip()
try:
last_log_time = cast(DateTime, pendulum.parse(timestamp))
except ParserError:
self.log.error("Error parsing timestamp. Will continue execution but won't update timestamp")
return None, line
return last_log_time, message
def container_is_running(self, pod: V1Pod, container_name: str) -> bool:
"""Reads pod and checks if container is running"""
remote_pod = self.read_pod(pod)
return container_is_running(pod=remote_pod, container_name=container_name)
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_logs(
self,
pod: V1Pod,
container_name: str,
tail_lines: int | None = None,
timestamps: bool = False,
since_seconds: int | None = None,
follow=True,
) -> Iterable[bytes]:
"""Reads log from the POD"""
additional_kwargs = {}
if since_seconds:
additional_kwargs['since_seconds'] = since_seconds
if tail_lines:
additional_kwargs['tail_lines'] = tail_lines
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container=container_name,
follow=follow,
timestamps=timestamps,
_preload_content=False,
**additional_kwargs,
)
except BaseHTTPError:
self.log.exception('There was an error reading the kubernetes API.')
raise
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_events(self, pod: V1Pod) -> CoreV1EventList:
"""Reads events from the POD"""
try:
return self._client.list_namespaced_event(
namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod(self, pod: V1Pod) -> V1Pod:
"""Read POD information"""
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except BaseHTTPError as e:
raise AirflowException(f'There was an error reading the kubernetes API: {e}')
def await_xcom_sidecar_container_start(self, pod: V1Pod) -> None:
self.log.info("Checking if xcom sidecar container is started.")
warned = False
while True:
if self.container_is_running(pod, PodDefaults.SIDECAR_CONTAINER_NAME):
self.log.info("The xcom sidecar container is started.")
break
if not warned:
self.log.warning("The xcom sidecar container is not yet started.")
warned = True
time.sleep(1)
def extract_xcom(self, pod: V1Pod) -> str:
"""Retrieves XCom value and kills xcom sidecar container"""
with closing(
kubernetes_stream(
self._client.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=['/bin/sh'],
stdin=True,
stdout=True,
stderr=True,
tty=False,
_preload_content=False,
)
) as resp:
result = self._exec_pod_command(
resp,
f'if [ -s {PodDefaults.XCOM_MOUNT_PATH}/return.json ]; then cat {PodDefaults.XCOM_MOUNT_PATH}/return.json; else echo __airflow_xcom_result_empty__; fi', # noqa
)
self._exec_pod_command(resp, 'kill -s SIGINT 1')
if result is None:
raise AirflowException(f'Failed to extract xcom from pod: {pod.metadata.name}')
return result
def _exec_pod_command(self, resp, command: str) -> str | None:
res = None
if resp.is_open():
self.log.info('Running command... %s\n', command)
resp.write_stdin(command + '\n')
while resp.is_open():
resp.update(timeout=1)
while resp.peek_stdout():
res = res + resp.read_stdout() if res else resp.read_stdout()
error_res = None
while resp.peek_stderr():
error_res = error_res + resp.read_stderr() if error_res else resp.read_stderr()
if error_res:
self.log.info("stderr from command: %s", error_res)
break
if res:
return res
return res
| [
"noreply@github.com"
] | noreply@github.com |
b1d222145db5a327a7c1884a3236e29da61c99c2 | 105ed40bc984835f6a99535ac252f211e032fed9 | /examples/pointnet++_part_seg/pointnet2_part_seg.py | ebd70a67785815c444abfd68f0083fdba0d287e1 | [
"MIT"
] | permissive | fegonda/pytorch_geometric | 844277e90b5c22493ba0c5d37b09ba301c102096 | 08f98cfee70a739605860c05a8598ffe45cc5e85 | refs/heads/master | 2020-05-23T17:08:48.671962 | 2019-05-15T16:11:45 | 2019-05-15T16:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,477 | py | r"""
Modified from https://github.com/dragonbook/pointnet2-pytorch/blob/master/model/pointnet2_part_seg.py
"""
import torch
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU, Dropout, BatchNorm1d
from torch_geometric.nn import PointConv, fps, radius, knn
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import reset
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_geometric.data.data import Data
from torch_scatter import scatter_add, scatter_max
class PointNet2SAModule(torch.nn.Module):
def __init__(self, sample_radio, radius, max_num_neighbors, mlp):
super(PointNet2SAModule, self).__init__()
self.sample_ratio = sample_radio
self.radius = radius
self.max_num_neighbors = max_num_neighbors
self.point_conv = PointConv(mlp)
def forward(self, data):
x, pos, batch = data
# Sample
idx = fps(pos, batch, ratio=self.sample_ratio)
# Group(Build graph)
row, col = radius(pos, pos[idx], self.radius, batch, batch[idx], max_num_neighbors=self.max_num_neighbors)
edge_index = torch.stack([col, row], dim=0)
# Apply pointnet
x1 = self.point_conv(x, (pos, pos[idx]), edge_index)
pos1, batch1 = pos[idx], batch[idx]
return x1, pos1, batch1
class PointNet2GlobalSAModule(torch.nn.Module):
r"""
Similar to PointNet2SAModule, and use one group with all input points. It can be viewed
as a simple PointNet module and return the only one output point(set as origin point).
"""
def __init__(self, mlp):
super(PointNet2GlobalSAModule, self).__init__()
self.mlp = mlp
def forward(self, data):
x, pos, batch = data
if x is not None: x = torch.cat([x, pos], dim=1)
x1 = self.mlp(x)
x1 = scatter_max(x1, batch, dim=0)[0] # (batch_size, C1)
batch_size = x1.shape[0]
pos1 = x1.new_zeros((batch_size, 3)) # set the output point as origin
batch1 = torch.arange(batch_size).to(batch.device, batch.dtype)
return x1, pos1, batch1
class PointConvFP(MessagePassing):
r"""
Core layer of Feature propagtaion module. It can be viewed as the reverse of PointConv
with additional skip connection layer and following mlp layers.
"""
def __init__(self, mlp=None):
super(PointConvFP, self).__init__('add', 'source_to_target')
self.mlp = mlp
self.aggr = 'add'
self.flow = 'source_to_target'
self.reset_parameters()
def reset_parameters(self):
reset(self.mlp)
def forward(self, x, pos, edge_index):
r"""
Args:
x (tuple): (tensor, tensor) or (tensor, NoneType)
features from previous layer and skip connection layer
pos (tuple): (tensor, tensor)
The node position matrix. pos from preivous layer and skip connection layer.
Note, this represents a bipartite graph.
edge_index (LongTensor): The edge indices.
"""
# Do not pass (tensor, None) directly into propagate(), since it will check each item's size() inside.
x_tmp = x[0] if x[1] is None else x
# Uppool
aggr_out = self.propagate(edge_index, x=x_tmp, pos=pos)
# Fusion with skip connection layer
i, j = (0, 1) if self.flow == 'target_to_source' else (1, 0)
x_target, pos_target = x[i], pos[i]
add = [pos_target,] if x_target is None else [x_target, pos_target]
aggr_out = torch.cat([aggr_out, *add], dim=1)
# Apply mlp
if self.mlp is not None: aggr_out = self.mlp(aggr_out)
return aggr_out
def message(self, x_j, pos_j, pos_i, edge_index):
dist = (pos_j - pos_i).pow(2).sum(dim=1).pow(0.5)
dist = torch.max(dist, torch.Tensor([1e-10]).to(dist.device, dist.dtype))
weight = 1.0 / dist # (E,)
row, col = edge_index
index = col
num_nodes = maybe_num_nodes(index, None)
wsum = scatter_add(weight, col, dim=0, dim_size=num_nodes)[index] + 1e-16 # (E,)
weight /= wsum
return weight.view(-1, 1) * x_j
def update(self, aggr_out):
return aggr_out
class PointNet2FPModule(torch.nn.Module):
def __init__(self, knn_num, mlp):
super(PointNet2FPModule, self).__init__()
self.knn_num = knn_num
self.point_conv = PointConvFP(mlp)
def forward(self, in_layer_data, skip_layer_data):
in_x, in_pos, in_batch = in_layer_data
skip_x, skip_pos, skip_batch = skip_layer_data
row, col = knn(in_pos, skip_pos, self.knn_num, in_batch, skip_batch)
edge_index = torch.stack([col, row], dim=0)
x1 = self.point_conv((in_x, skip_x), (in_pos, skip_pos), edge_index)
pos1, batch1 = skip_pos, skip_batch
return x1, pos1, batch1
def make_mlp(in_channels, mlp_channels, batch_norm=True):
assert len(mlp_channels) >= 1
layers = []
for c in mlp_channels:
layers += [Lin(in_channels, c)]
if batch_norm: layers += [BatchNorm1d(c)]
layers += [ReLU()]
in_channels = c
return Seq(*layers)
class PointNet2PartSegmentNet(torch.nn.Module):
r"""
Pointnet++ part segmentaion net example.
ref:
- https://github.com/charlesq34/pointnet2/blob/master/models/pointnet2_part_seg.py
- https://github.com/rusty1s/pytorch_geometric/blob/master/examples/pointnet++.py
"""
def __init__(self, num_classes):
super(PointNet2PartSegmentNet, self).__init__()
self.num_classes = num_classes
# SA1
sa1_sample_ratio = 0.5
sa1_radius = 0.2
sa1_max_num_neighbours = 64
sa1_mlp = make_mlp(3, [64, 64, 128])
self.sa1_module = PointNet2SAModule(sa1_sample_ratio, sa1_radius, sa1_max_num_neighbours, sa1_mlp)
# SA2
sa2_sample_ratio = 0.25
sa2_radius = 0.4
sa2_max_num_neighbours = 64
sa2_mlp = make_mlp(128+3, [128, 128, 256])
self.sa2_module = PointNet2SAModule(sa2_sample_ratio, sa2_radius, sa2_max_num_neighbours, sa2_mlp)
# SA3
sa3_mlp = make_mlp(256+3, [256, 512, 1024])
self.sa3_module = PointNet2GlobalSAModule(sa3_mlp)
##
knn_num = 3
# FP3, reverse of sa3
fp3_knn_num = 1 # After global sa module, there is only one point in point cloud
fp3_mlp = make_mlp(1024+256+3, [256, 256])
self.fp3_module = PointNet2FPModule(fp3_knn_num, fp3_mlp)
# FP2, reverse of sa2
fp2_knn_num = knn_num
fp2_mlp = make_mlp(256+128+3, [256, 128])
self.fp2_module = PointNet2FPModule(fp2_knn_num, fp2_mlp)
# FP1, reverse of sa1
fp1_knn_num = knn_num
fp1_mlp = make_mlp(128+3, [128, 128, 128])
self.fp1_module = PointNet2FPModule(fp1_knn_num, fp1_mlp)
self.fc1 = Lin(128, 128)
self.dropout1 = Dropout(p=0.5)
self.fc2 = Lin(128, self.num_classes)
def forward(self, data):
assert hasattr(data, 'pos')
if not hasattr(data, 'x'): data.x = None
data_in = data.x, data.pos, data.batch
sa1_out = self.sa1_module(data_in)
sa2_out = self.sa2_module(sa1_out)
sa3_out = self.sa3_module(sa2_out)
fp3_out = self.fp3_module(sa3_out, sa2_out)
fp2_out = self.fp2_module(fp3_out, sa1_out)
fp1_out = self.fp1_module(fp2_out, data_in)
fp1_out_x, fp1_out_pos, fp1_out_batch = fp1_out
x = self.fc2(self.dropout1(self.fc1(fp1_out_x)))
x = F.log_softmax(x, dim=-1)
return x, fp1_out_batch
if __name__ == '__main__':
def make_data_batch():
# batch_size = 2
pos_num1 = 1000
pos_num2 = 1024
data_batch = Data()
# data_batch.x = None
data_batch.pos = torch.cat([torch.rand(pos_num1, 3), torch.rand(pos_num2, 3)], dim=0)
data_batch.batch = torch.cat([torch.zeros(pos_num1, dtype=torch.long), torch.ones(pos_num2, dtype=torch.long)])
return data_batch
data = make_data_batch()
print('data.pos: ', data.pos.shape)
print('data.batch: ', data.batch.shape)
num_classes = 10
net = PointNet2PartSegmentNet(num_classes)
print('num_classes: ', num_classes)
out_x, out_batch = net(data)
print('out_x: ', out_x.shape)
print('out_batch: ', out_batch.shape)
| [
"573260700@qq.com"
] | 573260700@qq.com |
51c2210f18a6592316646b8a32cb0c2d5056fd0a | fef45773908bb886dc8a583b2430cc3d598e25dc | /CS 5001 HW/word_game.py | 7741be5d3b1fdae18eedb238b8a2fb5a087ad920 | [] | no_license | LukeParkhurst/CS5001_IntensiveFoundationsOfComputerScience | c3c21853005e28ef6ac2b69caa4d603fb6c3b712 | c283e4db8f4d866d6253308e5bb80399b79f06ce | refs/heads/main | 2023-06-03T08:36:48.887144 | 2021-02-28T07:31:02 | 2021-02-28T07:31:02 | 377,648,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,769 | py | '''
Luke Parkhurst
CS 5001
HW 6
Fall 2019
'''
'''
TEST CASES:
Test Case 1: Word: LET Points: 3
Test Case 2: Word: COT Points: 5
Test Case 3: Word: FAME Points: 9
Test Case 4: Word: WI(blank)D Points: 8
'''
# import all needed functions and random to help with choice function later
from scrabble_points import bag_of_letters
from scrabble_points import get_word_value
import random
# start of the create_alphabet function
def create_alphabet():
    """Build the tile bag contents as a dict mapping letter -> tile count.

    Uses the standard Scrabble English tile distribution (100 tiles total),
    with 'blank' standing in for the two blank tiles.
    """
    # Table of tile count -> letters that occur with that count.
    distribution = {
        12: ['E'],
        9: ['A', 'I'],
        8: ['O'],
        6: ['N', 'R', 'T'],
        4: ['D', 'S', 'L', 'U'],
        3: ['G'],
        2: ['B', 'C', 'F', 'H', 'M', 'P', 'V', 'W', 'Y', 'blank'],
        1: ['J', 'K', 'Q', 'X', 'Z'],
    }
    alphabet = {}
    for count, letters in distribution.items():
        for letter in letters:
            alphabet[letter] = count
    return alphabet
# beginning of the create word_list function
def create_word_list():
    """Load the game dictionary from 'wordlist.txt' in the working directory.

    Returns:
        list of str: one entry per line of the file, with trailing
        whitespace (including the newline) stripped from each entry.
    """
    # 'with' guarantees the file is closed even if reading raises,
    # unlike the manual open()/close() pairing.
    with open('wordlist.txt', 'r') as infile:
        return [line.rstrip() for line in infile]
# start of the user menu function
def menu():
    """Prompt the player for a command and return their choice uppercased.

    Valid commands are D (draw), W (make a word), P (print score), and
    Q (quit). Invalid input triggers a warning message, but the typed
    value is still returned (uppercased) so the caller decides what to do.
    """
    valid_commands = ['W', 'P', 'D', 'Q']
    choice = input('Your options are as follows: D to draw 7 more letters \n W to make a new word to from your current letters \n P to print the list of currently made words, their point values, and your current score! \n Q to quit this expiriment (but who would!?) \n').upper()
    if choice not in valid_commands:
        print('That is, not, an option!')
    return choice
# beginning of the current_letters_mixer function, meant to help create a new current_letters list
def current_letters_mixer(current_letters, letters):
    """Draw a fresh hand of 7 tiles from the bag.

    Args:
        current_letters: the player's existing hand; returned unchanged
            when the bag holds fewer than 7 tiles.
        letters: the bag of remaining tiles. Drawn tiles are removed
            from it in place.

    Returns:
        A new list of 7 randomly drawn tiles, or the original hand when
        the bag is too small for a full draw.
    """
    # Guard clause: can't deal a full hand from an under-stocked bag.
    if len(letters) < 7:
        print('Unable to give a new full set, you only have', len(letters), 'letters left!')
        return current_letters
    new_hand = []
    for _ in range(7):
        tile = random.choice(letters)
        letters.remove(tile)  # take the tile out of the bag
        new_hand.append(tile)
    return new_hand
# beginning of the make_words command where the user can make words
def make_words(current_letters, word_list,words_made, letters, total_score, alphabet):
total_score = total_score # sets in function total score to total score
if len(current_letters) == 0: # if length of current_letters is 0, tells player to go draw some letters
print('You need some letters first!')
else: # otherwise
print(current_letters) # prints current letters
for x in current_letters: # for loop on each current letter
if x == 'blank': # if one of the letters is a 'blank' token
inp = '' # empty variable for while loop
while inp not in ['Y','N']: # while loop for userinp to by yes (Y) or no (N)
blank_counter = 1 # sets blank number to one in case user has 2 blank tokens in current letters
inp = input("Would you like to change 'blank' to a letter? Y or N ") # asks user if they would like to change 'blank' token
inp = inp.upper() # sets inp to uppercase
if inp not in ['Y','N']: # if user does not type y or n
print('Please choose yes (Y), or no (N)') # asks user to choose y or n
if inp == 'Y': # if inp is y
letter = 'letter' # creates letter variable for while loop
while len(letter) > 1 or letter not in alphabet: # while letter is greater than 1 (forces user to have a 1 character input)
letter = input("What would you like 'blank' to be as a letter? ") # sets letter to input for user choice
letter = letter.upper() # puts letter into uppercase
if len(letter) > 1 or letter not in alphabet: # checks to see if input is both one character and an allowed letter
print('A single letter that is a letter!') # notifies if it isn't a valid choice
current_letters.remove('blank') # removes the 'blank' token
current_letters.append(letter) # adds the new letter
print("For blank", blank_counter, 'it is replaced with letter', letter) # notifies blank was changed for requested input
blank_counter += 1 # adds one to blank counter in case a 2nd blank is in list
print(current_letters) # prints new current letters
if inp == 'N': # if userinp is N
print('okay!') # say 'Okay' and move on
word = input('Make a word with your letters! ') # set variable word to input
word = word.lower() # turn word into lower case to compare to word_list
if word not in word_list(): # checks to see if word is in word_list
print('Sorry, this was not an accounted for word (or possibly even a word at all...)') # if not, tells user so
elif word in words_made: # also checks if the word was already made
print('Hey, you already made this word! Try again!') # and tells user if so
else: # otherwise
word = word.upper() # sets word to uppercase
score = get_word_value(word, current_letters) # uses the get_word_value function to get score
if score == 0: # if score is returned zero
return 0 # then return 0
else: # otherwise
words_made[word] = score # uses the words_made function on word to find score
print('You current points from', word, 'are', score,'points!') # tells user of points earned and from what word
total_score += score # adds new score from score to total_Score
word.split() # splits the word so letters can be easily removed
for x in word: # for loop for letters in word
current_letters.remove(x) # removes letters in word from current letters
while len(current_letters) < 7: # while loop for current_letters if length is not 7
t = random.choice(letters) # sets variable t to a random choosen letter from letters
letters.remove(t) # removes the letter from letters list
current_letters.append(t) # adds the letter to the current letters list
print('New set of current letters: \n', current_letters) # print the new current letters
return total_score # return the new total score
# beginning of the main function
def main():
# creates all needed variables for the game by calling on respective functions
alphabet = create_alphabet(); word_list = create_word_list; letters = bag_of_letters(alphabet); current_letters = []; words_made = {}; userinp = ''; total_score = 0
# welcomes the user to the game and explains the rules
print("Welcome to this Frakenstein word game of countdown and scrabble! You have been unfortunately chosen to partake in this expiriment!")
print('The point of this game is to use the letters given to you (7 at a time) to make words. Each word is rewarded with points. \n After a word is made, the used letters disappear and new ones will replace them. \n There are 100 letters in total, and you must make as many words as possible. \n If a word is not recognized, your letters are returned and no points are given. \n If, however, you can not make a word, you can draw 7 more letters and discard your current letters.')
while userinp != 'Q': # while userinp is not Q
userinp = menu() # sets userinp to the menu function
if userinp == 'D': # if userinp is D
current_letters = current_letters_mixer(current_letters, letters) # creates the current letters lists with the current_letters_mixer function
print('New set of current letters: \n', current_letters) # prints the user's new current letters
if userinp == 'W': # if userinp is W, then calls the make_word function allowing user to make words
total_score = make_words(current_letters, word_list, words_made, letters, total_score, alphabet)
if userinp == 'P': # if userinp is P
for k, v in words_made.items(): # starts a for loop for each key, value pair in words_made dictionary
print(k,':', v, 'points') # prints the key (words made), value (points for that word) pairs
print('Current score:', total_score) # prints current total score
if userinp == 'Q': # if userinp is Q, thanks player for playing and exits giving total score
print('Thanks for playing! Your total score was', total_score, 'points!')
break
if len(letters) == 0 and len(current_letters) == 0: # if both letters list and current_letters list are empty
print('You are completely out of letters!') # tells user they are out of letters
print('Thanks for playing! Your total score was', total_score, 'points!') # thanks user and prints points
break # breaks in case loop goes funky
main() | [
"noreply@github.com"
] | noreply@github.com |
4982f0ff301fd8bff6303553caeab2a4c11b895a | 8500335ee7950993826ada37b0397f6ac5182e27 | /spider/html_outputer.py | 4f4f07ff706931f141e748918f4bd191a96ed2d1 | [] | no_license | yichengjie/spider_demo | 4a0b6420d6570049051ed72497f0e1d1a8edccfa | 4dc7f38b94c7706499e45986b450b8641dd486d9 | refs/heads/master | 2020-04-11T16:07:09.277467 | 2018-12-16T14:03:12 | 2018-12-16T14:03:12 | 161,913,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | '''
Created on Dec 15, 2018
@author: yicj
'''
class HtmlOutputer(object):
def __init__(self, *args, **kwargs):
object.__init__(self, *args, **kwargs)
self.datas = []
def collect_data(self,data):
if data is None:
return
self.datas.append(data)
def output_html(self):
with open('output.txt','w',encoding='utf-8') as fout:
for data in self.datas:
title = data['title']
print('title : ' , title)
summary = data['summary']
url = data['url']
msg = 'title : % s ,url: %s , summary :%s\n' %(title, url,summary)
fout.write(msg)
| [
"626659321@qq.com"
] | 626659321@qq.com |
e278f85b5f95eba97a0d0fa0fa215eb486b22c84 | e504cff04c7f3e8eb3b7f985a658b68a792a6760 | /controller/myController.py | 7d06b9cc3f966d05517d9a8e40e01cba68ddbe53 | [] | no_license | majk91/lab2_DB | 69397206ec0ee41e85b0c093fe0310c40dcf641a | 402a3f3e425cb720344db875fcfba432b4896022 | refs/heads/master | 2023-05-01T19:17:07.629664 | 2021-05-19T21:46:17 | 2021-05-19T21:46:17 | 369,008,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | import time
import psycopg2
def getDataFromDb(conn, query):
cur = conn.cursor()
try:
millis_start = int(round(time.time() * 1000))
cur.execute(query)
rows = cur.fetchall()
millis_end = int(round(time.time() * 1000))
print("Rows: ")
for row in rows:
res = ""
for item in row:
res += str(item) + "||"
print(res)
print("Request time: 1" + str(millis_end - millis_start) + "ms")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def addDataToDb(conn, query):
cur = conn.cursor()
try:
cur.execute(query)
# rows = cur.fetchall()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def updateData(conn, query):
cur = conn.cursor()
try:
cur.execute(query)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def removeCityByPostCode(conn, code):
cur = conn.cursor()
try:
cur.execute("DELETE FROM cities WHERE postal_code = '" + code + "'")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def removeSityByCode(conn, code):
cur = conn.cursor()
try:
cur.execute("DELETE FROM venues WHERE venues.postal_code = '"+code+"'")
cur.execute("DELETE FROM cities WHERE cities.postal_code = '"+code+"'")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
def generateRandomCityPostalCode(conn, name, code):
cur = conn.cursor()
try:
cur.execute("INSERT INTO cities(name, postal_code, country_code) "
"VALUES ('"+name+"', "
"(SELECT trunc(random()*1000)::int from generate_series(1,1)),"
" '"+code+"')")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
| [
"m.yurishenec@unit.com.ua"
] | m.yurishenec@unit.com.ua |
6de8a8e58c7239a48d420c9f23e548175002d66e | 44eb40bf7bbd006f441b22d149dbb06eebe97506 | /src/chap05/gradient_check.py | f9819365dec71b653a1702cc28bc95aa3ac59923 | [] | no_license | hoonest/Deep_Learning | 56939f983c81e75b79d5474c11649dd57bf7107b | dd94f46ff886f20a47b09a54593e5fd2d53f0ed4 | refs/heads/master | 2020-04-19T22:52:03.640247 | 2019-02-19T03:34:16 | 2019-02-19T03:34:16 | 168,481,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | # coding: utf-8
import sys, os
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
import numpy as np
from src.dataset.mnist import load_mnist
from src.chap05.two_layer_net import TwoLayerNet
# 데이터 읽기
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = x_train[:3]
t_batch = t_train[:3]
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
# 각 가중치의 절대 오차의 평균을 구한다.
for key in grad_numerical.keys():
diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]) )
print(key + ":" + str(diff))
| [
"hoonest01@gmail.com"
] | hoonest01@gmail.com |
4a72d59aa652021f7867b07f5a9bdb54184338ac | bfce08da3cd919cac3d974a6009bc2ebcac3b6a5 | /11_problem.py | d5661e1509ee457a18cc3d3fc62b0e7f7e4ecabf | [] | no_license | JongRE2/algorithm | 8cd8d7ec33ed2da6710bd2538ce2f8207133f584 | 56b914d84ec46628610b1e72dddccaf487b50359 | refs/heads/master | 2023-03-18T01:41:45.257097 | 2021-03-14T09:38:40 | 2021-03-14T09:38:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | num=int(input())
for i in range(num):
st=input()
st=st.upper()
size=len(st)
for j in range(size//2):
if st[j]!=st[(size-1)-j]:
print("#%d NO" %(i+1))
break
else:
print("#%d YES" % (i + 1))
| [
"kjy5947@gmail.com"
] | kjy5947@gmail.com |
49bb8f49c7547d8ce6bec2d91c5babab44b368dd | ab183d4fbd81e1608d05bc296148ae5aba4fe499 | /blog/blog/urls.py | 22b9d5bcea0cec3a741a1f065fe5177aa299cdba | [] | no_license | redhanoormansyah/blog_django | 2416e88c3bed871c7f64d8b1763e8e2a4aadc7c1 | 7ef2701aea8ca5632f2a7af867f3d350b8935b1a | refs/heads/master | 2022-12-15T02:44:02.803760 | 2020-08-27T04:07:43 | 2020-08-27T04:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
from myblog import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='index'),
path('create/', views.blog_create, name='blog_create'),
path('blog/<int:blog_id>/', views.blog, name='blog'),
path('blog/<int:blog_id>/update', views.blog_update, name='blog_update'),
path('blog/<int:blog_id>/delete', views.blog_delete, name='blog_delete'),
path('add_category/', views.AddCategoryView.as_view(), name='add_category'),
path('category/<str:cats>/', views.CategoryView, name='category'),
path('search', views.search, name='search'),
path('tinymce/', include('tinymce.urls')),
path('accounts/', include('allauth.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"evantyoverdy95@gmail.com"
] | evantyoverdy95@gmail.com |
91695e2fd5bc8728cb17aeabf56b2f294714cf04 | 7208c52a3e7c91bc5454bfa43ed630322bcc8405 | /factorial.py | cc362bb7f7b8141db9f0755499d50989dc8ca605 | [] | no_license | drabdul/python-basic | 97b1b7d6cbb409d2339a2433856aee0b5b5a88d1 | 01dfc1584f005c2d77432e1dfe8494fee1f59810 | refs/heads/master | 2020-06-04T16:05:31.082959 | 2019-06-15T16:02:01 | 2019-06-15T16:02:01 | 192,095,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # factorial using iterative and recursive function
def factorial_iterative(n):
fact = 1
for i in range(n):
fact *= (i+1)
return fact
def factorial_recursive(n):
if n==1:
return 1
else:
return n * factorial_recursive(n-1)
num = int(input("Enter number for factorial"))
print("Factorial using iterative is " + str(factorial_iterative(num)))
print("Factorial using recursive is " + str(factorial_recursive(num))) | [
"drabdul.khorajiya@gmail.com"
] | drabdul.khorajiya@gmail.com |
6f69ef0b0fa21fd434e412c733642b78577c25ef | 2a8e0b84dccbac0d3d101a6d9d7cd3ca30424925 | /bottleneck/fairseq/fairseq/version.py | 538832544e7138418546e8ce89501f27c7afab99 | [
"MIT"
] | permissive | OhadRubin/swarm | 5a49b370baeda328fe5835f3c64eb26a20f42070 | fadebbab218709e1f886b58e2de6f2692144da27 | refs/heads/main | 2023-08-16T22:13:25.172887 | 2021-10-05T17:04:36 | 2021-10-05T17:04:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32 | py | __version__ = "1.0.0a0+ec5de78"
| [
"60722713+iclr2022-submit@users.noreply.github.com"
] | 60722713+iclr2022-submit@users.noreply.github.com |
4bf5d51862304f44ca8accdfdb8903cc214db56b | c6e096e902779cbc1023cfd8d44b97915bedc6da | /users/forms.py | 418304c86b28ece4ff2d719124902dc9b65b186c | [] | no_license | bandanarai233/heroku | 895ab64da5a512a0282c5bccec5d17d29359e84b | 92a0e3349fda6b077e18c2babbb493d0ff8a5335 | refs/heads/dev | 2022-12-10T19:17:42.924660 | 2019-11-20T05:14:39 | 2019-11-20T05:14:39 | 222,680,166 | 0 | 0 | null | 2022-12-08T06:54:29 | 2019-11-19T11:27:15 | Python | UTF-8 | Python | false | false | 716 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateform(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['image']
# class PasswordResetForm(forms.ModelForm):
# class Meta:
# model = User
# fields = ['email']
| [
"bandanarai233@gmail.com"
] | bandanarai233@gmail.com |
24f8010c8a814b46061549f59608bd9f0e9339a9 | 865117fbfa7a0d9a5154da702651272a70c1f521 | /assignment1/assignment1/urls.py | 6cf59a77328e972131b0c904e9c5b685d4858ac7 | [] | no_license | calvinxhk/homework | 802cde65056becf6ea853c7a3fe926ff2525d0e9 | 826f41539d8fe8941ce81627f5260013fbb215ce | refs/heads/master | 2022-11-01T09:56:07.168725 | 2018-03-08T14:55:37 | 2018-03-08T14:55:37 | 121,612,005 | 0 | 0 | null | 2022-10-24T15:34:10 | 2018-02-15T09:39:32 | Python | UTF-8 | Python | false | false | 146 | py |
from django.conf.urls import url,include
urlpatterns = [
url(r'^admin/',include('backstage.views')),
url(r'',include('user.views')),
]
| [
"32790342+calvinxhk@users.noreply.github.com"
] | 32790342+calvinxhk@users.noreply.github.com |
dcca89568be7c92c425e6cc50edb8db9f084dc87 | 535fbb1836915c0726875b855b1dce71a61be464 | /statsmodels/tsa/exponential_smoothing/base.py | 3ac4c5c6bb12507163bcea257c0f9219763442ed | [
"BSD-3-Clause"
] | permissive | vedal/statsmodels | 71e712452460f02bb53e366ebd1268e30d28df45 | 257fae36043ed099de66cdba23a0a90c0e960363 | refs/heads/master | 2022-12-02T04:28:48.308911 | 2020-08-04T22:36:45 | 2020-08-04T22:36:45 | 285,242,934 | 0 | 0 | NOASSERTION | 2020-08-05T09:32:57 | 2020-08-05T09:32:56 | null | UTF-8 | Python | false | false | 36,245 | py | from collections import OrderedDict
import contextlib
import warnings
import numpy as np
import pandas as pd
from scipy.stats import norm
from statsmodels.base.data import PandasData
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.eval_measures import aic, aicc, bic, hqic
from statsmodels.tools.sm_exceptions import PrecisionWarning
from statsmodels.tools.numdiff import (
_get_epsilon,
approx_fprime,
approx_fprime_cs,
approx_hess_cs,
)
from statsmodels.tools.tools import pinv_extended
import statsmodels.tsa.base.tsa_model as tsbase
class StateSpaceMLEModel(tsbase.TimeSeriesModel):
    """
    This is a temporary base model from ETS, here I just copy everything I need
    from statespace.mlemodel.MLEModel
    """
    def __init__(
        self, endog, exog=None, dates=None, freq=None, missing="none", **kwargs
    ):
        # TODO: this was changed from the original, requires some work when
        # using this as base class for state space and exponential smoothing
        super().__init__(
            endog=endog, exog=exog, dates=dates, freq=freq, missing=missing
        )
        # Store kwargs to recreate model
        self._init_kwargs = kwargs
        # Prepared the endog array: C-ordered, shape=(nobs x k_endog)
        self.endog, self.exog = self.prepare_data(self.data)
        self.use_pandas = isinstance(self.data, PandasData)
        # Dimensions
        self.nobs = self.endog.shape[0]
        # Setup holder for fixed parameters (populated lazily by `fix_params`)
        self._has_fixed_params = False
        self._fixed_params = None
        self._params_index = None
        self._fixed_params_index = None
        self._free_params_index = None
    @staticmethod
    def prepare_data(data):
        """Convert the model data object into (endog, exog) arrays.

        Must be implemented by subclasses.
        """
        raise NotImplementedError
    def clone(self, endog, exog=None, **kwargs):
        """Construct a new model of the same type with new data.

        Must be implemented by subclasses.
        """
        raise NotImplementedError
    def _validate_can_fix_params(self, param_names):
        """Raise ValueError if any of `param_names` is not a valid name.

        Valid names are those listed in the `param_names` property.
        """
        for param_name in param_names:
            if param_name not in self.param_names:
                raise ValueError(
                    'Invalid parameter name passed: "%s".' % param_name
                )
    @property
    def k_params(self):
        """(int) Number of parameters in the model."""
        return len(self.param_names)
    @contextlib.contextmanager
    def fix_params(self, params):
        """
        Fix parameters to specific values (context manager)

        Parameters
        ----------
        params : dict
            Dictionary describing the fixed parameter values, of the form
            `param_name: fixed_value`. See the `param_names` property for valid
            parameter names.

        Examples
        --------
        >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
        >>> with mod.fix_params({'ar.L1': 0.5}):
                res = mod.fit()
        """
        # Initialization (this is done here rather than in the constructor
        # because param_names may not be available at that point)
        if self._fixed_params is None:
            self._fixed_params = {}
            self._params_index = OrderedDict(
                zip(self.param_names, np.arange(self.k_params))
            )
        # Cache the current fixed parameters so they can be restored on exit
        # (this makes nested `fix_params` contexts compose correctly)
        cache_fixed_params = self._fixed_params.copy()
        cache_has_fixed_params = self._has_fixed_params
        cache_fixed_params_index = self._fixed_params_index
        cache_free_params_index = self._free_params_index
        # Validate parameter names and values
        self._validate_can_fix_params(set(params.keys()))
        # Set the new fixed parameters, keeping the order as given by
        # param_names
        self._fixed_params.update(params)
        self._fixed_params = OrderedDict(
            [
                (name, self._fixed_params[name])
                for name in self.param_names
                if name in self._fixed_params
            ]
        )
        # Update associated values
        self._has_fixed_params = True
        self._fixed_params_index = [
            self._params_index[key] for key in self._fixed_params.keys()
        ]
        self._free_params_index = list(
            set(np.arange(self.k_params)).difference(self._fixed_params_index)
        )
        try:
            yield
        finally:
            # Reset the fixed parameters to their state before entering
            self._has_fixed_params = cache_has_fixed_params
            self._fixed_params = cache_fixed_params
            self._fixed_params_index = cache_fixed_params_index
            self._free_params_index = cache_free_params_index
    def fit_constrained(self, constraints, start_params=None, **fit_kwds):
        """
        Fit the model with some parameters subject to equality constraints.

        Parameters
        ----------
        constraints : dict
            Dictionary of constraints, of the form `param_name: fixed_value`.
            See the `param_names` property for valid parameter names.
        start_params : array_like, optional
            Initial guess of the solution for the loglikelihood maximization.
            If None, the default is given by Model.start_params.
        **fit_kwds : keyword arguments
            fit_kwds are used in the optimization of the remaining parameters.

        Returns
        -------
        results : Results instance

        Examples
        --------
        >>> mod = sm.tsa.SARIMAX(endog, order=(1, 0, 1))
        >>> res = mod.fit_constrained({'ar.L1': 0.5})
        """
        # Temporarily fix the constrained parameters, then optimize the rest
        with self.fix_params(constraints):
            res = self.fit(start_params, **fit_kwds)
        return res
    @property
    def start_params(self):
        """
        (array) Starting parameters for maximum likelihood estimation.
        """
        if hasattr(self, "_start_params"):
            return self._start_params
        else:
            raise NotImplementedError
    @property
    def param_names(self):
        """
        (list of str) List of human readable parameter names (for parameters
        actually included in the model).
        """
        if hasattr(self, "_param_names"):
            return self._param_names
        else:
            # Fall back to generic names; if start_params is also undefined,
            # there is no way to know how many parameters there are
            try:
                names = ["param.%d" % i for i in range(len(self.start_params))]
            except NotImplementedError:
                names = []
            return names
    @classmethod
    def from_formula(
        cls, formula, data, subset=None, drop_cols=None, *args, **kwargs
    ):
        """
        Not implemented for state space models
        """
        raise NotImplementedError
    def _wrap_data(self, data, start_idx, end_idx, names=None):
        """Squeeze `data` and, for pandas input, wrap it with a date index.

        Returns a Series (1-d) or DataFrame (2-d) when the model was built
        from pandas data, otherwise the squeezed ndarray.
        """
        # TODO: check if this is reasonable for statespace
        # squeezing data: data may be:
        # - m x n: m dates, n simulations -> squeeze does nothing
        # - m x 1: m dates, 1 simulation -> squeeze removes last dimension
        # - 1 x n: don't squeeze, already fine
        # - 1 x 1: squeeze only second axis
        if data.ndim > 1 and data.shape[1] == 1:
            data = np.squeeze(data, axis=1)
        data = np.squeeze(data)
        if self.use_pandas:
            _, _, _, index = self._get_prediction_index(start_idx, end_idx)
            if data.ndim < 2:
                data = pd.Series(data, index=index, name=names)
            else:
                data = pd.DataFrame(data, index=index, columns=names)
        return data
    def _wrap_results(
        self,
        params,
        result,
        return_raw,
        cov_type=None,
        cov_kwds=None,
        results_class=None,
        wrapper_class=None,
    ):
        """Optionally wrap a raw fit result in a results/wrapper object.

        If `return_raw` is true, `result` is passed through unchanged;
        otherwise it is wrapped in `results_class` / `wrapper_class`
        (defaulting to the classes declared in `self._res_classes["fit"]`).
        """
        if not return_raw:
            # Wrap in a results object
            result_kwargs = {}
            if cov_type is not None:
                result_kwargs["cov_type"] = cov_type
            if cov_kwds is not None:
                result_kwargs["cov_kwds"] = cov_kwds
            if results_class is None:
                results_class = self._res_classes["fit"][0]
            if wrapper_class is None:
                wrapper_class = self._res_classes["fit"][1]
            res = results_class(self, params, result, **result_kwargs)
            result = wrapper_class(res)
        return result
    def _score_complex_step(self, params, **kwargs):
        """Score (gradient of `loglike`) via complex-step differentiation.

        Extra keyword arguments are forwarded to `self.loglike`.
        """
        # the default epsilon can be too small
        # inversion_method = INVERT_UNIVARIATE | SOLVE_LU
        epsilon = _get_epsilon(params, 2., None, len(params))
        kwargs['transformed'] = True
        kwargs['complex_step'] = True
        return approx_fprime_cs(params, self.loglike, epsilon=epsilon,
                                kwargs=kwargs)
    def _score_finite_difference(self, params, approx_centered=False,
                                 **kwargs):
        """Score (gradient of `loglike`) via finite differences.

        Uses centered differences when `approx_centered` is true.
        """
        kwargs['transformed'] = True
        return approx_fprime(params, self.loglike, kwargs=kwargs,
                             centered=approx_centered)
    def _hessian_finite_difference(self, params, approx_centered=False,
                                   **kwargs):
        """Hessian of `loglike` via finite differences of the score.

        Returns the Hessian scaled by 1 / nobs_effective.
        """
        params = np.array(params, ndmin=1)
        warnings.warn('Calculation of the Hessian using finite differences'
                      ' is usually subject to substantial approximation'
                      ' errors.', PrecisionWarning)
        # Use a larger step for the outer differentiation; halve it in the
        # centered case
        if not approx_centered:
            epsilon = _get_epsilon(params, 3, None, len(params))
        else:
            epsilon = _get_epsilon(params, 4, None, len(params)) / 2
        hessian = approx_fprime(params, self._score_finite_difference,
                                epsilon=epsilon, kwargs=kwargs,
                                centered=approx_centered)
        # TODO: changed this to nobs_effective, has to be changed when merging
        # with statespace mlemodel
        return hessian / (self.nobs_effective)
    def _hessian_complex_step(self, params, **kwargs):
        """
        Hessian matrix computed by second-order complex-step differentiation
        on the `loglike` function.
        """
        # the default epsilon can be too small
        epsilon = _get_epsilon(params, 3., None, len(params))
        kwargs['transformed'] = True
        kwargs['complex_step'] = True
        hessian = approx_hess_cs(
            params, self.loglike, epsilon=epsilon, kwargs=kwargs)
        # TODO: changed this to nobs_effective, has to be changed when merging
        # with statespace mlemodel
        return hessian / (self.nobs_effective)
class StateSpaceMLEResults(tsbase.TimeSeriesModelResults):
r"""
Class to hold results from fitting a state space model.
Parameters
----------
model : MLEModel instance
The fitted model instance
params : ndarray
Fitted parameters
Attributes
----------
model : Model instance
A reference to the model that was fit.
nobs : float
The number of observations used to fit the model.
params : ndarray
The parameters of the model.
"""
def __init__(self, model, params, scale=1.0):
self.data = model.data
self.endog = model.data.orig_endog
super().__init__(model, params, None, scale=scale)
# Save the fixed parameters
self._has_fixed_params = self.model._has_fixed_params
self._fixed_params_index = self.model._fixed_params_index
self._free_params_index = self.model._free_params_index
# TODO: seems like maybe self.fixed_params should be the dictionary
# itself, not just the keys?
if self._has_fixed_params:
self._fixed_params = self.model._fixed_params.copy()
self.fixed_params = list(self._fixed_params.keys())
else:
self._fixed_params = None
self.fixed_params = []
self.param_names = [
"%s (fixed)" % name if name in self.fixed_params else name
for name in (self.data.param_names or [])
]
# Dimensions
self.nobs = self.model.nobs
self.k_params = self.model.k_params
self._rank = None
    @cache_readonly
    def nobs_effective(self):
        """
        (float) Effective number of observations; implemented by subclasses.
        """
        raise NotImplementedError
@cache_readonly
def df_resid(self):
return self.nobs_effective - self.df_model
@cache_readonly
def aic(self):
"""
(float) Akaike Information Criterion
"""
return aic(self.llf, self.nobs_effective, self.df_model)
@cache_readonly
def aicc(self):
"""
(float) Akaike Information Criterion with small sample correction
"""
return aicc(self.llf, self.nobs_effective, self.df_model)
@cache_readonly
def bic(self):
"""
(float) Bayes Information Criterion
"""
return bic(self.llf, self.nobs_effective, self.df_model)
    @cache_readonly
    def fittedvalues(self):
        """
        (array) Fitted values of the model; implemented by subclasses.
        """
        # TODO
        raise NotImplementedError
@cache_readonly
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return (-2 * self.llf +
# 2 * np.log(np.log(self.nobs_effective)) * self.df_model)
return hqic(self.llf, self.nobs_effective, self.df_model)
    @cache_readonly
    def llf(self):
        """
        (float) The value of the log-likelihood function evaluated at `params`.
        """
        # Implemented by subclasses.
        raise NotImplementedError
@cache_readonly
def mae(self):
"""
(float) Mean absolute error
"""
return np.mean(np.abs(self.resid))
@cache_readonly
def mse(self):
"""
(float) Mean squared error
"""
return self.sse / self.nobs
@cache_readonly
def pvalues(self):
"""
(array) The p-values associated with the z-statistics of the
coefficients. Note that the coefficients are assumed to have a Normal
distribution.
"""
pvalues = np.zeros_like(self.zvalues) * np.nan
mask = np.ones_like(pvalues, dtype=bool)
mask[self._free_params_index] = True
mask &= ~np.isnan(self.zvalues)
pvalues[mask] = norm.sf(np.abs(self.zvalues[mask])) * 2
return pvalues
    @cache_readonly
    def resid(self):
        """
        (array) Residuals of the fitted model; implemented by subclasses.
        """
        raise NotImplementedError
@cache_readonly
def sse(self):
"""
(float) Sum of squared errors
"""
return np.sum(self.resid ** 2)
@cache_readonly
def zvalues(self):
"""
(array) The z-statistics for the coefficients.
"""
return self.params / self.bse
def _get_prediction_start_index(self, anchor):
"""Returns a valid numeric start index for predictions/simulations"""
if anchor is None or anchor == "start":
iloc = 0
elif anchor == "end":
iloc = self.nobs
else:
iloc, _, _ = self.model._get_index_loc(anchor)
if isinstance(iloc, slice):
iloc = iloc.start
iloc += 1 # anchor is one before start of prediction/simulation
if iloc < 0:
iloc = self.nobs + iloc
if iloc > self.nobs:
raise ValueError("Cannot anchor simulation outside of the sample.")
return iloc
    def _cov_params_approx(
        self, approx_complex_step=True, approx_centered=False
    ):
        """Covariance of the parameters from the numerical Hessian.

        The Hessian is evaluated at `self.params` (complex-step by default,
        finite differences otherwise); the covariance is minus its
        pseudo-inverse. Rows/columns of fixed parameters are set to NaN.
        Also sets `self._rank` (rank of the Hessian) as a side effect.
        """
        # model.hessian returns the average Hessian, so rescale by
        # nobs_effective to get the Hessian of the total loglikelihood
        evaluated_hessian = self.nobs_effective * self.model.hessian(
            params=self.params,
            transformed=True,
            includes_fixed=True,
            method="approx",
            approx_complex_step=approx_complex_step,
            approx_centered=approx_centered,
        )
        # TODO: Case with "not approx_complex_step" is not hit in
        # tests as of 2017-05-19
        if len(self.fixed_params) > 0:
            # Invert only the free-parameter sub-block; everything else NaN
            mask = np.ix_(self._free_params_index, self._free_params_index)
            if len(self.fixed_params) < self.k_params:
                (tmp, singular_values) = pinv_extended(evaluated_hessian[mask])
            else:
                # All parameters fixed: no free block to invert
                tmp, singular_values = np.nan, [np.nan]
            neg_cov = np.zeros_like(evaluated_hessian) * np.nan
            neg_cov[mask] = tmp
        else:
            (neg_cov, singular_values) = pinv_extended(evaluated_hessian)
        # Restore the model's internal state to the fitted parameters (the
        # Hessian evaluation above may have perturbed it)
        self.model.update(self.params, transformed=True, includes_fixed=True)
        if self._rank is None:
            self._rank = np.linalg.matrix_rank(np.diag(singular_values))
        return -neg_cov
@cache_readonly
def cov_params_approx(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian approximated by complex step or finite differences methods.
"""
return self._cov_params_approx(
self._cov_approx_complex_step, self._cov_approx_centered
)
    def test_serial_correlation(self, method, lags=None):
        """
        Ljung-Box test for no serial correlation of standardized residuals

        Null hypothesis is no serial correlation.

        Parameters
        ----------
        method : {'ljungbox','boxpierece', None}
            The statistical test for serial correlation. If None, an attempt is
            made to select an appropriate test.
        lags : None, int or array_like
            If lags is an integer then this is taken to be the largest lag
            that is included, the test result is reported for all smaller lag
            length.
            If lags is a list or array, then all lags are included up to the
            largest lag in the list, however only the tests for the lags in the
            list are reported.
            If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}

        Returns
        -------
        output : ndarray
            An array with `(test_statistic, pvalue)` for each endogenous
            variable and each lag. The array is then sized
            `(k_endog, 2, lags)`. If the method is called as
            `ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`
            holds the results of the Ljung-Box test (as would be returned by
            `statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i` th
            endogenous variable.

        See Also
        --------
        statsmodels.stats.diagnostic.acorr_ljungbox
            Ljung-Box test for serial correlation.

        Notes
        -----
        For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
        this test is calculated ignoring the first `d` residuals.

        Output is nan for any endogenous variable which has missing values.
        """
        if method is None:
            method = 'ljungbox'
        if self.standardized_forecasts_error is None:
            raise ValueError('Cannot compute test statistic when standardized'
                             ' forecast errors have not been computed.')
        if method == 'ljungbox' or method == 'boxpierce':
            # Local import to avoid a circular import at module load time
            from statsmodels.stats.diagnostic import acorr_ljungbox
            if hasattr(self, "loglikelihood_burn"):
                # State space case: skip burned / diffuse residuals
                d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
                # This differs from self.nobs_effective because here we want to
                # exclude exact diffuse periods, whereas self.nobs_effective
                # only excludes explicitly burned (usually approximate diffuse)
                # periods.
                nobs_effective = self.nobs - d
            else:
                nobs_effective = self.nobs_effective
            output = []
            # Default lags for acorr_ljungbox is 40, but may not always have
            # that many observations
            if lags is None:
                seasonal_periods = getattr(self.model, "seasonal_periods", 0)
                if seasonal_periods:
                    lags = min(2 * seasonal_periods, nobs_effective // 5)
                else:
                    lags = min(10, nobs_effective // 5)
                warnings.warn(
                    "The default value of lags is changing. After 0.12, "
                    "this value will become min(10, nobs//5) for non-seasonal "
                    "time series and min (2*m, nobs//5) for seasonal time "
                    "series. Directly set lags to silence this warning.",
                    FutureWarning
                )
            for i in range(self.model.k_endog):
                # NOTE(review): `d` is only defined in the loglikelihood_burn
                # branch above but is used here when filter_results exists —
                # presumably the two attributes always co-occur; confirm.
                if hasattr(self, "filter_results"):
                    x = self.filter_results.standardized_forecasts_error[i][d:]
                else:
                    x = self.standardized_forecasts_error
                results = acorr_ljungbox(
                    x, lags=lags, boxpierce=(method == 'boxpierce'),
                    return_df=False)
                if method == 'ljungbox':
                    # (test statistic, p-value) for the Ljung-Box variant
                    output.append(results[0:2])
                else:
                    # (test statistic, p-value) for the Box-Pierce variant
                    output.append(results[2:])
            output = np.c_[output]
        else:
            raise NotImplementedError('Invalid serial correlation test'
                                      ' method.')
        return output
    def test_heteroskedasticity(self, method, alternative='two-sided',
                                use_f=True):
        r"""
        Test for heteroskedasticity of standardized residuals
        Tests whether the sum-of-squares in the first third of the sample is
        significantly different than the sum-of-squares in the last third
        of the sample. Analogous to a Goldfeld-Quandt test. The null hypothesis
        is of no heteroskedasticity.
        Parameters
        ----------
        method : {'breakvar', None}
            The statistical test for heteroskedasticity. Must be 'breakvar'
            for test of a break in the variance. If None, an attempt is
            made to select an appropriate test.
        alternative : str, 'increasing', 'decreasing' or 'two-sided'
            This specifies the alternative for the p-value calculation. Default
            is two-sided.
        use_f : bool, optional
            Whether or not to compare against the asymptotic distribution
            (chi-squared) or the approximate small-sample distribution (F).
            Default is True (i.e. default is to compare against an F
            distribution).
        Returns
        -------
        output : ndarray
            An array with `(test_statistic, pvalue)` for each endogenous
            variable. The array is then sized `(k_endog, 2)`. If the method is
            called as `het = res.test_heteroskedasticity()`, then `het[0]` is
            an array of size 2 corresponding to the first endogenous variable,
            where `het[0][0]` is the test statistic, and `het[0][1]` is the
            p-value.
        Notes
        -----
        The null hypothesis is of no heteroskedasticity. That means different
        things depending on which alternative is selected:
        - Increasing: Null hypothesis is that the variance is not increasing
          throughout the sample; that the sum-of-squares in the later
          subsample is *not* greater than the sum-of-squares in the earlier
          subsample.
        - Decreasing: Null hypothesis is that the variance is not decreasing
          throughout the sample; that the sum-of-squares in the earlier
          subsample is *not* greater than the sum-of-squares in the later
          subsample.
        - Two-sided: Null hypothesis is that the variance is not changing
          throughout the sample. Both that the sum-of-squares in the earlier
          subsample is not greater than the sum-of-squares in the later
          subsample *and* that the sum-of-squares in the later subsample is
          not greater than the sum-of-squares in the earlier subsample.
        For :math:`h = [T/3]`, the test statistic is:
        .. math::
            H(h) = \sum_{t=T-h+1}^T  \tilde v_t^2
            \Bigg / \sum_{t=d+1}^{d+1+h} \tilde v_t^2
        where :math:`d` = max(loglikelihood_burn, nobs_diffuse)` (usually
        corresponding to diffuse initialization under either the approximate
        or exact approach).
        This statistic can be tested against an :math:`F(h,h)` distribution.
        Alternatively, :math:`h H(h)` is asymptotically distributed according
        to :math:`\chi_h^2`; this second test can be applied by passing
        `use_f=False` as an argument.
        See section 5.4 of [1]_ for the above formula and discussion, as well
        as additional details.
        TODO
        - Allow specification of :math:`h`
        References
        ----------
        .. [1] Harvey, Andrew C. 1990. *Forecasting, Structural Time Series*
           *Models and the Kalman Filter.* Cambridge University Press.
        """
        if method is None:
            method = 'breakvar'
        if self.standardized_forecasts_error is None:
            raise ValueError('Cannot compute test statistic when standardized'
                             ' forecast errors have not been computed.')
        if method == 'breakvar':
            # Store some values
            if hasattr(self, "filter_results"):
                squared_resid = (
                    self.filter_results.standardized_forecasts_error**2
                )
                d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
                # This differs from self.nobs_effective because here we want to
                # exclude exact diffuse periods, whereas self.nobs_effective
                # only excludes explicitly burned (usually approximate diffuse)
                # periods.
                nobs_effective = self.nobs - d
            else:
                squared_resid = self.standardized_forecasts_error**2
                if squared_resid.ndim == 1:
                    # Promote to 2-d so the per-endog indexing below works.
                    squared_resid = squared_resid[np.newaxis, :]
                nobs_effective = self.nobs_effective
                d = 0
            test_statistics = []
            p_values = []
            for i in range(self.model.k_endog):
                # h is the subsample length: one third of the effective sample.
                h = int(np.round(nobs_effective / 3))
                # Numerator: sum-of-squares over the last third of the sample.
                numer_resid = squared_resid[i, -h:]
                numer_resid = numer_resid[~np.isnan(numer_resid)]
                numer_dof = len(numer_resid)
                # Denominator: sum-of-squares over the first third (post-burn).
                denom_resid = squared_resid[i, d:d + h]
                denom_resid = denom_resid[~np.isnan(denom_resid)]
                denom_dof = len(denom_resid)
                if numer_dof < 2:
                    warnings.warn('Early subset of data for variable %d'
                                  ' has too few non-missing observations to'
                                  ' calculate test statistic.' % i)
                    # np.sum(np.nan) propagates nan through the statistic.
                    numer_resid = np.nan
                if denom_dof < 2:
                    warnings.warn('Later subset of data for variable %d'
                                  ' has too few non-missing observations to'
                                  ' calculate test statistic.' % i)
                    denom_resid = np.nan
                test_statistic = np.sum(numer_resid) / np.sum(denom_resid)
                # Setup functions to calculate the p-values
                if use_f:
                    from scipy.stats import f
                    pval_lower = lambda test_statistics: f.cdf(  # noqa:E731
                        test_statistics, numer_dof, denom_dof)
                    pval_upper = lambda test_statistics: f.sf(  # noqa:E731
                        test_statistics, numer_dof, denom_dof)
                else:
                    # Asymptotic chi-squared variant: h * H(h) ~ chi2(h).
                    from scipy.stats import chi2
                    pval_lower = lambda test_statistics: chi2.cdf(  # noqa:E731
                        numer_dof * test_statistics, denom_dof)
                    pval_upper = lambda test_statistics: chi2.sf(  # noqa:E731
                        numer_dof * test_statistics, denom_dof)
                # Calculate the one- or two-sided p-values
                alternative = alternative.lower()
                if alternative in ['i', 'inc', 'increasing']:
                    p_value = pval_upper(test_statistic)
                elif alternative in ['d', 'dec', 'decreasing']:
                    # Invert the statistic so "decreasing" reuses the upper tail.
                    test_statistic = 1. / test_statistic
                    p_value = pval_upper(test_statistic)
                elif alternative in ['2', '2-sided', 'two-sided']:
                    p_value = 2 * np.minimum(
                        pval_lower(test_statistic),
                        pval_upper(test_statistic)
                    )
                else:
                    raise ValueError('Invalid alternative.')
                test_statistics.append(test_statistic)
                p_values.append(p_value)
            output = np.c_[test_statistics, p_values]
        else:
            raise NotImplementedError('Invalid heteroskedasticity test'
                                      ' method.')
        return output
def test_normality(self, method):
"""
Test for normality of standardized residuals.
Null hypothesis is normality.
Parameters
----------
method : {'jarquebera', None}
The statistical test for normality. Must be 'jarquebera' for
Jarque-Bera normality test. If None, an attempt is made to select
an appropriate test.
See Also
--------
statsmodels.stats.stattools.jarque_bera
The Jarque-Bera test of normality.
Notes
-----
For statespace models: let `d` = max(loglikelihood_burn, nobs_diffuse);
this test is calculated ignoring the first `d` residuals.
In the case of missing data, the maintained hypothesis is that the
data are missing completely at random. This test is then run on the
standardized residuals excluding those corresponding to missing
observations.
"""
if method is None:
method = 'jarquebera'
if self.standardized_forecasts_error is None:
raise ValueError('Cannot compute test statistic when standardized'
' forecast errors have not been computed.')
if method == 'jarquebera':
from statsmodels.stats.stattools import jarque_bera
if hasattr(self, "loglikelihood_burn"):
d = np.maximum(self.loglikelihood_burn, self.nobs_diffuse)
else:
d = 0
output = []
for i in range(self.model.k_endog):
if hasattr(self, "fiter_results"):
resid = self.filter_results.standardized_forecasts_error[
i, d:
]
else:
resid = self.standardized_forecasts_error
mask = ~np.isnan(resid)
output.append(jarque_bera(resid[mask]))
else:
raise NotImplementedError('Invalid normality test method.')
return np.array(output)
    def summary(
        self,
        alpha=0.05,
        start=None,
        title=None,
        model_name=None,
        display_params=True,
    ):
        """
        Summarize the Model
        Parameters
        ----------
        alpha : float, optional
            Significance level for the confidence intervals. Default is 0.05.
        start : int, optional
            Integer of the start observation. Default is 0.
        title : str, optional
            Title for the summary table. Default is
            "Statespace Model Results".
        model_name : str
            The name of the model used. Default is to use model class name.
        display_params : bool, optional
            Whether to include the table of parameter estimates. Default is
            True.
        Returns
        -------
        summary : Summary instance
            This holds the summary table and text, which can be printed or
            converted to various output formats.
        See Also
        --------
        statsmodels.iolib.summary.Summary
        """
        # Imported lazily to avoid a circular import at module load time.
        from statsmodels.iolib.summary import Summary
        # Model specification results
        model = self.model
        if title is None:
            title = "Statespace Model Results"
        if start is None:
            start = 0
        if self.model._index_dates:
            # Date-indexed model: render the sample range as mm-dd-yyyy.
            ix = self.model._index
            d = ix[start]
            sample = ["%02d-%02d-%02d" % (d.month, d.day, d.year)]
            d = ix[-1]
            sample += ["- " + "%02d-%02d-%02d" % (d.month, d.day, d.year)]
        else:
            sample = [str(start), " - " + str(self.nobs)]
        # Standardize the model name as a list of str
        if model_name is None:
            model_name = model.__class__.__name__
        # Diagnostic tests results
        try:
            het = self.test_heteroskedasticity(method="breakvar")
        except Exception:  # FIXME: catch something specific
            het = np.array([[np.nan] * 2])
        try:
            lb = self.test_serial_correlation(method="ljungbox")
        except Exception:  # FIXME: catch something specific
            lb = np.array([[np.nan] * 2]).reshape(1, 2, 1)
        try:
            jb = self.test_normality(method="jarquebera")
        except Exception:  # FIXME: catch something specific
            jb = np.array([[np.nan] * 4])
        # Create the tables
        if not isinstance(model_name, list):
            model_name = [model_name]
        top_left = [("Dep. Variable:", None)]
        top_left.append(("Model:", [model_name[0]]))
        # Additional model-name components are shown on continuation rows.
        for i in range(1, len(model_name)):
            top_left.append(("", ["+ " + model_name[i]]))
        top_left += [
            ("Date:", None),
            ("Time:", None),
            ("Sample:", [sample[0]]),
            ("", [sample[1]]),
        ]
        top_right = [
            ("No. Observations:", [self.nobs]),
            ("Log Likelihood", ["%#5.3f" % self.llf]),
        ]
        if hasattr(self, "rsquared"):
            top_right.append(("R-squared:", ["%#8.3f" % self.rsquared]))
        top_right += [
            ("AIC", ["%#5.3f" % self.aic]),
            ("BIC", ["%#5.3f" % self.bic]),
            ("HQIC", ["%#5.3f" % self.hqic]),
        ]
        # Show the scale only when it was concentrated out of the likelihood
        # (statespace case) or when there is no filter at all.
        if hasattr(self, "filter_results"):
            if (
                self.filter_results is not None
                and self.filter_results.filter_concentrated
            ):
                top_right.append(("Scale", ["%#5.3f" % self.scale]))
        else:
            top_right.append(("Scale", ["%#5.3f" % self.scale]))
        if hasattr(self, "cov_type"):
            top_left.append(("Covariance Type:", [self.cov_type]))
        # Render a 1-d array as one comma-separated string of 2-decimal floats.
        format_str = lambda array: [  # noqa:E731
            ", ".join(["{0:.2f}".format(i) for i in array])
        ]
        diagn_left = [
            ("Ljung-Box (Q):", format_str(lb[:, 0, -1])),
            ("Prob(Q):", format_str(lb[:, 1, -1])),
            ("Heteroskedasticity (H):", format_str(het[:, 0])),
            ("Prob(H) (two-sided):", format_str(het[:, 1])),
        ]
        diagn_right = [
            ("Jarque-Bera (JB):", format_str(jb[:, 0])),
            ("Prob(JB):", format_str(jb[:, 1])),
            ("Skew:", format_str(jb[:, 2])),
            ("Kurtosis:", format_str(jb[:, 3])),
        ]
        summary = Summary()
        summary.add_table_2cols(
            self, gleft=top_left, gright=top_right, title=title
        )
        if len(self.params) > 0 and display_params:
            summary.add_table_params(
                self, alpha=alpha, xname=self.param_names, use_t=False
            )
        summary.add_table_2cols(
            self, gleft=diagn_left, gright=diagn_right, title=""
        )
        # Add warnings/notes, added to text format only
        etext = []
        if hasattr(self, "cov_type") and "description" in self.cov_kwds:
            etext.append(self.cov_kwds["description"])
        # Warn when the covariance matrix is rank-deficient over the free
        # (non-fixed) parameters.
        if self._rank < (len(self.params) - len(self.fixed_params)):
            cov_params = self.cov_params()
            if len(self.fixed_params) > 0:
                mask = np.ix_(self._free_params_index, self._free_params_index)
                cov_params = cov_params[mask]
            etext.append(
                "Covariance matrix is singular or near-singular,"
                " with condition number %6.3g. Standard errors may be"
                " unstable." % np.linalg.cond(cov_params)
            )
        if etext:
            etext = [
                "[{0}] {1}".format(i + 1, text) for i, text in enumerate(etext)
            ]
            etext.insert(0, "Warnings:")
            summary.add_extra_txt(etext)
        return summary
| [
"samuel.scherrer@posteo.de"
] | samuel.scherrer@posteo.de |
c40028e0be80e217c13d3970ba03c03ab2bcfb82 | 09ae3f372d1000f118ad80874870ae420a4be66f | /scikit-learn-master/sklearn/linear_model/logistic.py | a1d49ac570e92af42337bb68f34b6c9caceb5b80 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | lqkweb/learnMLflow | 998f80c3828879b8d542125bc95c6345b8e9b29a | 13c5decaebba95b1b90f92021be35e343b4764af | refs/heads/master | 2022-10-18T06:17:23.584172 | 2019-01-18T09:51:38 | 2019-01-18T09:51:38 | 166,145,472 | 2 | 0 | Apache-2.0 | 2022-09-30T18:26:17 | 2019-01-17T02:22:29 | Python | UTF-8 | Python | false | false | 92,589 | py | """
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
# Arthur Mensch <arthur.mensch@m4x.org
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from scipy.special import expit
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (log_logistic, safe_sparse_dot, softmax,
squared_norm)
from ..utils.extmath import row_norms
from ..utils.fixes import logsumexp
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..utils import deprecated
from ..exceptions import (NotFittedError, ConvergenceWarning,
ChangedBehaviorWarning)
from ..utils.multiclass import check_classification_targets
from ..utils._joblib import Parallel, delayed, effective_n_jobs
from ..utils.fixes import _joblib_parallel_args
from ..model_selection import check_cv
from ..metrics import get_scorer
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
    """Compute ``y * (X @ w + c)``, splitting off the intercept if present.

    If ``w`` has one more entry than ``X`` has columns, the trailing entry is
    interpreted as the intercept and stripped from the coefficient vector.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector, optionally with the intercept appended.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.

    Returns
    -------
    w : ndarray, shape (n_features,)
        Coefficient vector with any intercept entry removed.
    c : float
        The intercept (0. when ``w`` carried no intercept term).
    yz : float
        y * np.dot(X, w).
    """
    c = 0.
    if w.size == X.shape[1] + 1:
        # The trailing coefficient is the intercept.
        c, w = w[-1], w[:-1]
    yz = y * (safe_sparse_dot(X, w) + c)
    return w, c, yz
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
    """Compute the L2-penalized logistic loss together with its gradient.

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector (optionally with intercept as the last entry).
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    """
    n_samples, n_features = X.shape
    grad = np.empty_like(w)
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(n_samples)
    # Negative weighted log-likelihood plus the L2 penalty term.
    out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
    # Derivative of the per-sample loss w.r.t. the linear predictor.
    dloss_dz = sample_weight * (expit(yz) - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, dloss_dz) + alpha * w
    # Intercept gradient occupies the last slot when it is being fit.
    if grad.shape[0] > n_features:
        grad[-1] = dloss_dz.sum()
    return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
    """Compute the L2-penalized logistic loss (no gradient).

    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector (optionally with intercept as the last entry).
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.

    Returns
    -------
    out : float
        Logistic loss.
    """
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])
    # Negative weighted log-likelihood plus the L2 penalty term.
    penalty = .5 * alpha * np.dot(w, w)
    return -np.sum(sample_weight * log_logistic(yz)) + penalty
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
    """Computes the gradient and the Hessian, in the case of a logistic loss.
    Parameters
    ----------
    w : ndarray, shape (n_features,) or (n_features + 1,)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : ndarray, shape (n_samples,)
        Array of labels.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    Returns
    -------
    grad : ndarray, shape (n_features,) or (n_features + 1,)
        Logistic gradient.
    Hs : callable
        Function that takes the gradient as a parameter and returns the
        matrix product of the Hessian and gradient.
    """
    n_samples, n_features = X.shape
    grad = np.empty_like(w)
    # An extra slot in w/grad means the intercept is being fit.
    fit_intercept = grad.shape[0] > n_features
    w, c, yz = _intercept_dot(w, X, y)
    if sample_weight is None:
        sample_weight = np.ones(y.shape[0])
    z = expit(yz)
    # Derivative of the per-sample loss w.r.t. the linear predictor.
    z0 = sample_weight * (z - 1) * y
    grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
    # Case where we fit the intercept.
    if fit_intercept:
        grad[-1] = z0.sum()
    # The mat-vec product of the Hessian
    # d holds the per-sample curvature weights: w_i * sigma(yz) * (1 - sigma(yz)).
    d = sample_weight * z * (1 - z)
    if sparse.issparse(X):
        # Row-scale X by d without densifying: diag(d) @ X.
        dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
                             shape=(n_samples, n_samples)), X)
    else:
        # Precompute as much as possible
        dX = d[:, np.newaxis] * X
    if fit_intercept:
        # Calculate the double derivative with respect to intercept
        # In the case of sparse matrices this returns a matrix object.
        dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))

    def Hs(s):
        # Hessian-vector product H @ s; closes over dX, dd_intercept and d.
        ret = np.empty_like(s)
        ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
        ret[:n_features] += alpha * s[:n_features]
        # For the fit intercept case.
        if fit_intercept:
            ret[:n_features] += s[-1] * dd_intercept
            ret[-1] = dd_intercept.dot(s[:n_features])
            ret[-1] += d.sum() * s[-1]
        return ret
    return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
    """Compute the multinomial (softmax) loss and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities.
    w : ndarray, shape (n_classes, n_features)
        Reshaped param vector excluding intercept terms.

    Reference
    ---------
    Bishop, C. M. (2006). Pattern recognition and machine learning.
    Springer. (Chapter 4.3.4)
    """
    n_classes, n_features = Y.shape[1], X.shape[1]
    fit_intercept = (w.size == n_classes * (n_features + 1))
    w = w.reshape(n_classes, -1)
    sample_weight = sample_weight[:, np.newaxis]
    if fit_intercept:
        # The last column of the reshaped matrix holds the intercepts.
        intercept = w[:, -1]
        w = w[:, :-1]
    else:
        intercept = 0
    # Numerically stable log-softmax of the linear predictor.
    log_prob = safe_sparse_dot(X, w.T)
    log_prob += intercept
    log_prob -= logsumexp(log_prob, axis=1)[:, np.newaxis]
    loss = -(sample_weight * Y * log_prob).sum()
    loss += 0.5 * alpha * squared_norm(w)
    # Exponentiate in place: log-probabilities become probabilities.
    p = np.exp(log_prob, log_prob)
    return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
    """Compute the multinomial loss, its gradient and class probabilities.

    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.

    Returns
    -------
    loss : float
        Multinomial loss.
    grad : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    p : ndarray, shape (n_samples, n_classes)
        Estimated class probabilities

    Reference
    ---------
    Bishop, C. M. (2006). Pattern recognition and machine learning.
    Springer. (Chapter 4.3.4)
    """
    n_classes, n_features = Y.shape[1], X.shape[1]
    fit_intercept = (w.size == n_classes * (n_features + 1))
    n_cols = n_features + (1 if fit_intercept else 0)
    grad = np.zeros((n_classes, n_cols), dtype=X.dtype)
    loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
    # Weighted residual of the predicted probabilities vs. the labels.
    weighted_resid = sample_weight[:, np.newaxis] * (p - Y)
    grad[:, :n_features] = safe_sparse_dot(weighted_resid.T, X)
    grad[:, :n_features] += alpha * w
    if fit_intercept:
        grad[:, -1] = weighted_resid.sum(axis=0)
    return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
    """
    Computes the gradient and the Hessian, in the case of a multinomial loss.
    Parameters
    ----------
    w : ndarray, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Coefficient vector.
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    Y : ndarray, shape (n_samples, n_classes)
        Transformed labels according to the output of LabelBinarizer.
    alpha : float
        Regularization parameter. alpha is equal to 1 / C.
    sample_weight : array-like, shape (n_samples,) optional
        Array of weights that are assigned to individual samples.
    Returns
    -------
    grad : array, shape (n_classes * n_features,) or
        (n_classes * (n_features + 1),)
        Ravelled gradient of the multinomial loss.
    hessp : callable
        Function that takes in a vector input of shape (n_classes * n_features)
        or (n_classes * (n_features + 1)) and returns matrix-vector product
        with hessian.
    References
    ----------
    Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
    http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
    """
    n_features = X.shape[1]
    n_classes = Y.shape[1]
    # An extra column per class means the intercept is being fit.
    fit_intercept = w.size == (n_classes * (n_features + 1))
    # `loss` is unused. Refactoring to avoid computing it does not
    # significantly speed up the computation and decreases readability
    loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
    sample_weight = sample_weight[:, np.newaxis]
    # Hessian-vector product derived by applying the R-operator on the gradient
    # of the multinomial loss function.
    def hessp(v):
        # Closes over p (class probabilities) and sample_weight from above.
        v = v.reshape(n_classes, -1)
        if fit_intercept:
            inter_terms = v[:, -1]
            v = v[:, :-1]
        else:
            inter_terms = 0
        # r_yhat holds the result of applying the R-operator on the multinomial
        # estimator.
        r_yhat = safe_sparse_dot(X, v.T)
        r_yhat += inter_terms
        r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
        r_yhat *= p
        r_yhat *= sample_weight
        hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
        hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
        hessProd[:, :n_features] += v * alpha
        if fit_intercept:
            hessProd[:, -1] = r_yhat.sum(axis=0)
        return hessProd.ravel()
    return grad, hessp
def _check_solver(solver, penalty, dual):
if solver == 'warn':
solver = 'liblinear'
warnings.warn("Default solver will be changed to 'lbfgs' in 0.22. "
"Specify a solver to silence this warning.",
FutureWarning)
all_solvers = ['liblinear', 'newton-cg', 'lbfgs', 'sag', 'saga']
if solver not in all_solvers:
raise ValueError("Logistic Regression supports only solvers in %s, got"
" %s." % (all_solvers, solver))
all_penalties = ['l1', 'l2', 'elasticnet', 'none']
if penalty not in all_penalties:
raise ValueError("Logistic Regression supports only penalties in %s,"
" got %s." % (all_penalties, penalty))
if solver not in ['liblinear', 'saga'] and penalty not in ('l2', 'none'):
raise ValueError("Solver %s supports only 'l2' or 'none' penalties, "
"got %s penalty." % (solver, penalty))
if solver != 'liblinear' and dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if penalty == 'elasticnet' and solver != 'saga':
raise ValueError("Only 'saga' solver supports elasticnet penalty,"
" got solver={}.".format(solver))
if solver == 'liblinear' and penalty == 'none':
raise ValueError(
"penalty='none' is not supported for the liblinear solver"
)
return solver
def _check_multi_class(multi_class, solver, n_classes):
if multi_class == 'warn':
multi_class = 'ovr'
if n_classes > 2:
warnings.warn("Default multi_class will be changed to 'auto' in"
" 0.22. Specify the multi_class option to silence "
"this warning.", FutureWarning)
if multi_class == 'auto':
if solver == 'liblinear':
multi_class = 'ovr'
elif n_classes > 2:
multi_class = 'multinomial'
else:
multi_class = 'ovr'
if multi_class not in ('multinomial', 'ovr'):
raise ValueError("multi_class should be 'multinomial', 'ovr' or "
"'auto'. Got %s." % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
return multi_class
@deprecated('logistic_regression_path was deprecated in version 0.21 and '
            'will be removed in version 0.23.0')
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                             max_iter=100, tol=1e-4, verbose=0,
                             solver='lbfgs', coef=None,
                             class_weight=None, dual=False, penalty='l2',
                             intercept_scaling=1., multi_class='warn',
                             random_state=None, check_input=True,
                             max_squared_sum=None, sample_weight=None,
                             l1_ratio=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.
    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.
    .. deprecated:: 0.21
        ``logistic_regression_path`` was deprecated in version 0.21 and will
        be removed in 0.23.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.
    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.
    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.
        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.
    check_input : bool, default True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.
    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    # BUG FIX: this wrapper previously called _logistic_regression_path with
    # hard-coded default values (pos_class=None, Cs=10, ...), silently
    # ignoring every argument the caller passed in. Forward the actual
    # arguments instead.
    return _logistic_regression_path(
        X, y, pos_class=pos_class, Cs=Cs, fit_intercept=fit_intercept,
        max_iter=max_iter, tol=tol, verbose=verbose, solver=solver,
        coef=coef, class_weight=class_weight, dual=dual, penalty=penalty,
        intercept_scaling=intercept_scaling, multi_class=multi_class,
        random_state=random_state, check_input=check_input,
        max_squared_sum=max_squared_sum, sample_weight=sample_weight,
        l1_ratio=l1_ratio)
def _logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
                              max_iter=100, tol=1e-4, verbose=0,
                              solver='lbfgs', coef=None,
                              class_weight=None, dual=False, penalty='l2',
                              intercept_scaling=1., multi_class='warn',
                              random_state=None, check_input=True,
                              max_squared_sum=None, sample_weight=None,
                              l1_ratio=None):
    """Compute a Logistic Regression model for a list of regularization
    parameters.
    This is an implementation that uses the result of the previous model
    to speed up computations along the set of solutions, making it faster
    than sequentially calling LogisticRegression for the different parameters.
    Note that there will be no speedup with liblinear solver, since it does
    not handle warm-starting.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    Cs : int | array-like, shape (n_cs,)
        List of values for the regularization parameter or integer specifying
        the number of regularization parameters that should be used. In this
        case, the parameters will be chosen in a logarithmic scale between
        1e-4 and 1e4.
    fit_intercept : bool
        Whether to fit an intercept for the model. In this case the shape of
        the returned array is (n_cs, n_features + 1).
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
        will stop when ``max{|g_i | i = 1, ..., n} <= tol``
        where ``g_i`` is the i-th component of the gradient.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Numerical solver to use.
    coef : array-like, shape (n_features,), default None
        Initialization value for coefficients of logistic regression.
        Useless for liblinear solver.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, default: 'ovr'
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.
        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.
    check_input : bool, default True
        If False, the input arrays X and y will not be checked.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs,
        n_features) or (n_classes, n_cs, n_features + 1).
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.
    Notes
    -----
    You might get slightly different results with the solver liblinear than
    with the others since this uses LIBLINEAR which penalizes the intercept.
    .. versionchanged:: 0.19
        The "copy" parameter was removed.
    """
    if isinstance(Cs, numbers.Integral):
        # An integer Cs is shorthand for "this many values on a logarithmic
        # grid between 1e-4 and 1e4".
        Cs = np.logspace(-4, 4, Cs)
    # Validate the solver/penalty/dual combination before any heavy work.
    solver = _check_solver(solver, penalty, dual)
    # Preprocessing.
    if check_input:
        X = check_array(X, accept_sparse='csr', dtype=np.float64,
                        accept_large_sparse=solver != 'liblinear')
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    random_state = check_random_state(random_state)
    multi_class = _check_multi_class(multi_class, solver, len(classes))
    if pos_class is None and multi_class != 'multinomial':
        if (classes.size > 2):
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]
    # If sample weights exist, convert them to array (support for lists)
    # and check length
    # Otherwise set them to 1 for all examples
    if sample_weight is not None:
        sample_weight = np.array(sample_weight, dtype=X.dtype, order='C')
        check_consistent_length(y, sample_weight)
    else:
        sample_weight = np.ones(X.shape[0], dtype=X.dtype)
    # If class_weights is a dict (provided by the user), the weights
    # are assigned to the original labels. If it is "balanced", then
    # the class_weights are assigned after masking the labels with a OvR.
    le = LabelEncoder()
    if isinstance(class_weight, dict) or multi_class == 'multinomial':
        class_weight_ = compute_class_weight(class_weight, classes, y)
        # Fold the per-class weights into the per-sample weights.
        sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing a ovr, we need to mask the labels first. for the
    # multinomial case this is not necessary.
    if multi_class == 'ovr':
        w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
        mask_classes = np.array([-1, 1])
        mask = (y == pos_class)
        y_bin = np.ones(y.shape, dtype=X.dtype)
        y_bin[~mask] = -1.
        # for compute_class_weight
        if class_weight == "balanced":
            class_weight_ = compute_class_weight(class_weight, mask_classes,
                                                 y_bin)
            sample_weight *= class_weight_[le.fit_transform(y_bin)]
    else:
        if solver not in ['sag', 'saga']:
            lbin = LabelBinarizer()
            Y_multi = lbin.fit_transform(y)
            if Y_multi.shape[1] == 1:
                # LabelBinarizer collapses binary targets to one column;
                # expand back to two columns for the multinomial loss.
                Y_multi = np.hstack([1 - Y_multi, Y_multi])
        else:
            # SAG multinomial solver needs LabelEncoder, not LabelBinarizer
            le = LabelEncoder()
            Y_multi = le.fit_transform(y).astype(X.dtype, copy=False)
        w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
                      order='F', dtype=X.dtype)
    if coef is not None:
        # it must work both giving the bias term and not
        if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
                raise ValueError(
                    'Initialization coef is of shape %d, expected shape '
                    '%d or %d' % (coef.size, n_features, w0.size))
            w0[:coef.size] = coef
        else:
            # For binary problems coef.shape[0] should be 1, otherwise it
            # should be classes.size.
            n_classes = classes.size
            if n_classes == 2:
                n_classes = 1
            if (coef.shape[0] != n_classes or
                    coef.shape[1] not in (n_features, n_features + 1)):
                raise ValueError(
                    'Initialization coef is of shape (%d, %d), expected '
                    'shape (%d, %d) or (%d, %d)' % (
                        coef.shape[0], coef.shape[1], classes.size,
                        n_features, classes.size, n_features + 1))
            if n_classes == 1:
                # Binary multinomial: initialize symmetric +/- coefficient
                # rows for the two classes.
                w0[0, :coef.shape[1]] = -coef
                w0[1, :coef.shape[1]] = coef
            else:
                w0[:, :coef.shape[1]] = coef
    if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
        if solver in ['lbfgs', 'newton-cg']:
            w0 = w0.ravel()
        target = Y_multi
        if solver == 'lbfgs':
            # lbfgs wants (loss, gradient) together.
            func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
        elif solver == 'newton-cg':
            # newton-cg wants loss, gradient and Hessian-product separately.
            func = lambda x, *args: _multinomial_loss(x, *args)[0]
            grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
            hess = _multinomial_grad_hess
        warm_start_sag = {'coef': w0.T}
    else:
        target = y_bin
        if solver == 'lbfgs':
            func = _logistic_loss_and_grad
        elif solver == 'newton-cg':
            func = _logistic_loss
            grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
            hess = _logistic_grad_hess
        warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
    coefs = list()
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    # Warm start across the regularization path: the solution w0 found for
    # one C initializes the fit for the next C.
    for i, C in enumerate(Cs):
        if solver == 'lbfgs':
            # Map our integer verbosity onto lbfgs' iprint levels.
            iprint = [-1, 50, 1, 100, 101][
                np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
            # 1. / C: this module's C is the inverse regularization strength.
            w0, loss, info = optimize.fmin_l_bfgs_b(
                func, w0, fprime=None,
                args=(X, target, 1. / C, sample_weight),
                iprint=iprint, pgtol=tol, maxiter=max_iter)
            if info["warnflag"] == 1:
                warnings.warn("lbfgs failed to converge. Increase the number "
                              "of iterations.", ConvergenceWarning)
            # In scipy <= 1.0.0, nit may exceed maxiter.
            # See https://github.com/scipy/scipy/issues/7854.
            n_iter_i = min(info['nit'], max_iter)
        elif solver == 'newton-cg':
            args = (X, target, 1. / C, sample_weight)
            w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
                                     maxiter=max_iter, tol=tol)
        elif solver == 'liblinear':
            coef_, intercept_, n_iter_i, = _fit_liblinear(
                X, target, C, fit_intercept, intercept_scaling, None,
                penalty, dual, verbose, max_iter, tol, random_state,
                sample_weight=sample_weight)
            if fit_intercept:
                w0 = np.concatenate([coef_.ravel(), intercept_])
            else:
                w0 = coef_.ravel()
        elif solver in ['sag', 'saga']:
            if multi_class == 'multinomial':
                target = target.astype(np.float64)
                loss = 'multinomial'
            else:
                loss = 'log'
            # alpha is for L2-norm, beta is for L1-norm
            if penalty == 'l1':
                alpha = 0.
                beta = 1. / C
            elif penalty == 'l2':
                alpha = 1. / C
                beta = 0.
            else:  # Elastic-Net penalty
                alpha = (1. / C) * (1 - l1_ratio)
                beta = (1. / C) * l1_ratio
            w0, n_iter_i, warm_start_sag = sag_solver(
                X, target, sample_weight, loss, alpha,
                beta, max_iter, tol,
                verbose, random_state, False, max_squared_sum, warm_start_sag,
                is_saga=(solver == 'saga'))
        else:
            raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
                             "'newton-cg', 'sag'}, got '%s' instead" % solver)
        if multi_class == 'multinomial':
            n_classes = max(2, classes.size)
            multi_w0 = np.reshape(w0, (n_classes, -1))
            if n_classes == 2:
                # Binary multinomial: report only the positive-class row.
                multi_w0 = multi_w0[1][np.newaxis, :]
            coefs.append(multi_w0.copy())
        else:
            coefs.append(w0.copy())
        n_iter[i] = n_iter_i
    return np.array(coefs), np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
                          scoring=None, fit_intercept=False,
                          max_iter=100, tol=1e-4, class_weight=None,
                          verbose=0, solver='lbfgs', penalty='l2',
                          dual=False, intercept_scaling=1.,
                          multi_class='warn', random_state=None,
                          max_squared_sum=None, sample_weight=None,
                          l1_ratio=None):
    """Computes scores across logistic_regression_path
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target labels.
    train : list of indices
        The indices of the train set.
    test : list of indices
        The indices of the test set.
    pos_class : int, None
        The class with respect to which we perform a one-vs-all fit.
        If None, then it is assumed that the given problem is binary.
    Cs : list of floats | int
        Each of the values in Cs describes the inverse of
        regularization strength. If Cs is as an int, then a grid of Cs
        values are chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs are used.
    scoring : callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``. For a list of scoring functions
        that can be used, look at :mod:`sklearn.metrics`. The
        default scoring option used is accuracy_score.
    fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        term of each coef_ gives us the intercept.
    max_iter : int
        Maximum number of iterations for the solver.
    tol : float
        Tolerance for stopping criteria.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    verbose : int
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga'}
        Decides which solver to use.
    penalty : str, 'l1', 'l2', or 'elasticnet'
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    dual : bool
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    intercept_scaling : float, default 1.
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial'}
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
    random_state : int, RandomState instance or None, optional, default None
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' and
        'liblinear'.
    max_squared_sum : float, default None
        Maximum squared sum of X over samples. Used only in SAG solver.
        If None, it will be computed, going through all the samples.
        The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape(n_samples,) optional
        Array of weights that are assigned to individual samples.
        If not provided, then each sample is given unit weight.
    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If
        fit_intercept is set to True then the second dimension will be
        n_features + 1, where the last item represents the intercept.
    Cs : ndarray
        Grid of Cs used for cross-validation.
    scores : ndarray, shape (n_cs,)
        Scores obtained for each Cs.
    n_iter : array, shape(n_cs,)
        Actual number of iteration for each Cs.
    """
    # Slice out this cross-validation fold.
    X_train = X[train]
    X_test = X[test]
    y_train = y[train]
    y_test = y[test]
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)
        check_consistent_length(y, sample_weight)
        # Only the training portion of the weights is used for fitting.
        sample_weight = sample_weight[train]
    # Fit one coefficient vector per C on the training fold (with warm
    # starting along the path).
    coefs, Cs, n_iter = _logistic_regression_path(
        X_train, y_train, Cs=Cs, l1_ratio=l1_ratio,
        fit_intercept=fit_intercept, solver=solver, max_iter=max_iter,
        class_weight=class_weight, pos_class=pos_class,
        multi_class=multi_class, tol=tol, verbose=verbose, dual=dual,
        penalty=penalty, intercept_scaling=intercept_scaling,
        random_state=random_state, check_input=False,
        max_squared_sum=max_squared_sum, sample_weight=sample_weight)
    # Dummy estimator used only for scoring: its coef_/intercept_ are set by
    # hand for each point on the path below.
    log_reg = LogisticRegression(solver=solver, multi_class=multi_class)
    # The score method of Logistic Regression has a classes_ attribute.
    if multi_class == 'ovr':
        log_reg.classes_ = np.array([-1, 1])
    elif multi_class == 'multinomial':
        log_reg.classes_ = np.unique(y_train)
    else:
        # BUGFIX: was "%d", which raises TypeError when formatting the
        # string value of multi_class instead of the intended ValueError.
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)
    if pos_class is not None:
        # Recode the test labels into the same {-1, 1} scheme used for the
        # one-vs-rest fit.
        mask = (y_test == pos_class)
        y_test = np.ones(y_test.shape, dtype=np.float64)
        y_test[~mask] = -1.
    scores = list()
    if isinstance(scoring, str):
        scoring = get_scorer(scoring)
    # Evaluate each point on the regularization path against the test fold.
    for w in coefs:
        if multi_class == 'ovr':
            w = w[np.newaxis, :]
        if fit_intercept:
            # The intercept was fit as the last coefficient; split it out.
            log_reg.coef_ = w[:, :-1]
            log_reg.intercept_ = w[:, -1]
        else:
            log_reg.coef_ = w
            log_reg.intercept_ = 0.
        if scoring is None:
            # Default to the estimator's own score (accuracy).
            scores.append(log_reg.score(X_test, y_test))
        else:
            scores.append(scoring(log_reg, X_test, y_test))
    return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
                         SparseCoefMixin):
    """Logistic Regression (aka logit, MaxEnt) classifier.
    In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
    scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
    entropy loss if the 'multi_class' option is set to 'multinomial'.
    (Currently the 'multinomial' option is supported only by the 'lbfgs',
    'sag', 'saga' and 'newton-cg' solvers.)
    This class implements regularized logistic regression using the
    'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note
    that regularization is applied by default**. It can handle both dense
    and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit
    floats for optimal performance; any other input format will be converted
    (and copied).
    The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
    with primal formulation, or no regularization. The 'liblinear' solver
    supports both L1 and L2 regularization, with a dual formulation only for
    the L2 penalty. The Elastic-Net regularization is only supported by the
    'saga' solver.
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    penalty : str, 'l1', 'l2', 'elasticnet' or 'none', optional (default='l2')
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver. If 'none' (not supported by the
        liblinear solver), no regularization is applied.
        .. versionadded:: 0.19
           l1 penalty with SAGA solver (allowing 'multinomial' + L1)
    dual : bool, optional (default=False)
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.
    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify stronger
        regularization.
    fit_intercept : bool, optional (default=True)
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
    intercept_scaling : float, optional (default=1)
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    class_weight : dict or 'balanced', optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
        .. versionadded:: 0.17
           *class_weight='balanced'*
    random_state : int, RandomState instance or None, optional (default=None)
        The seed of the pseudo random number generator to use when shuffling
        the data. If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If None, the random number generator is the RandomState
        instance used by `np.random`. Used when ``solver`` == 'sag' or
        'liblinear'.
    solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
             optional (default='liblinear').
        Algorithm to use in the optimization problem.
        - For small datasets, 'liblinear' is a good choice, whereas 'sag' and
          'saga' are faster for large ones.
        - For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
          handle multinomial loss; 'liblinear' is limited to one-versus-rest
          schemes.
        - 'newton-cg', 'lbfgs', 'sag' and 'saga' handle L2 or no penalty
        - 'liblinear' and 'saga' also handle L1 penalty
        - 'saga' also supports 'elasticnet' penalty
        - 'liblinear' does not handle no penalty
        Note that 'sag' and 'saga' fast convergence is only guaranteed on
        features with approximately the same scale. You can
        preprocess the data with a scaler from sklearn.preprocessing.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
        .. versionchanged:: 0.20
            Default will change from 'liblinear' to 'lbfgs' in 0.22.
    max_iter : int, optional (default=100)
        Useful only for the newton-cg, sag and lbfgs solvers.
        Maximum number of iterations taken for the solvers to converge.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, optional (default='ovr')
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.
        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.
    verbose : int, optional (default=0)
        For the liblinear and lbfgs solvers set verbose to any positive
        number for verbosity.
    warm_start : bool, optional (default=False)
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
        Useless for liblinear solver. See :term:`the Glossary <warm_start>`.
        .. versionadded:: 0.17
           *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers.
    n_jobs : int or None, optional (default=None)
        Number of CPU cores used when parallelizing over classes if
        multi_class='ovr'. This parameter is ignored when the ``solver`` is
        set to 'liblinear' regardless of whether 'multi_class' is specified or
        not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
        context. ``-1`` means using all processors.
        See :term:`Glossary <n_jobs>` for more details.
    l1_ratio : float or None, optional (default=None)
        The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only
        used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent
        to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent
        to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a
        combination of L1 and L2.
    Attributes
    ----------
    classes_ : array, shape (n_classes, )
        A list of class labels known to the classifier.
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem is binary.
        In particular, when `multi_class='multinomial'`, `coef_` corresponds
        to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False).
    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero.
        `intercept_` is of shape (1,) when the given problem is binary.
        In particular, when `multi_class='multinomial'`, `intercept_`
        corresponds to outcome 1 (True) and `-intercept_` corresponds to
        outcome 0 (False).
    n_iter_ : array, shape (n_classes,) or (1, )
        Actual number of iterations for all classes. If binary or multinomial,
        it returns only 1 element. For liblinear solver, only the maximum
        number of iteration across all classes is given.
        .. versionchanged:: 0.20
            In SciPy <= 1.0.0 the number of lbfgs iterations may exceed
            ``max_iter``. ``n_iter_`` will now report at most ``max_iter``.
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegression
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegression(random_state=0, solver='lbfgs',
    ...                          multi_class='multinomial').fit(X, y)
    >>> clf.predict(X[:2, :])
    array([0, 0])
    >>> clf.predict_proba(X[:2, :]) # doctest: +ELLIPSIS
    array([[9.8...e-01, 1.8...e-02, 1.4...e-08],
           [9.7...e-01, 2.8...e-02, ...e-08]])
    >>> clf.score(X, y)
    0.97...
    See also
    --------
    SGDClassifier : incrementally trained logistic regression (when given
        the parameter ``loss="log"``).
    LogisticRegressionCV : Logistic regression with built-in cross validation
    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.
    Predict output may not match that of standalone liblinear in certain
    cases. See :ref:`differences from liblinear <liblinear_differences>`
    in the narrative documentation.
    References
    ----------
    LIBLINEAR -- A Library for Large Linear Classification
        https://www.csie.ntu.edu.tw/~cjlin/liblinear/
    SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
        Minimizing Finite Sums with the Stochastic Average Gradient
        https://hal.inria.fr/hal-00860051/document
    SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014).
        SAGA: A Fast Incremental Gradient Method With Support
        for Non-Strongly Convex Composite Objectives
        https://arxiv.org/abs/1407.0202
    Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
        methods for logistic regression and maximum entropy models.
        Machine Learning 85(1-2):41-75.
        https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
    """
    def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1, class_weight=None,
                 random_state=None, solver='warn', max_iter=100,
                 multi_class='warn', verbose=0, warm_start=False, n_jobs=None,
                 l1_ratio=None):
        # Hyper-parameters are stored as given; all validation is deferred
        # to fit() (scikit-learn convention).
        self.penalty = penalty
        self.dual = dual
        self.tol = tol
        self.C = C
        self.fit_intercept = fit_intercept
        self.intercept_scaling = intercept_scaling
        self.class_weight = class_weight
        self.random_state = random_state
        self.solver = solver
        self.max_iter = max_iter
        self.multi_class = multi_class
        self.verbose = verbose
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.l1_ratio = l1_ratio
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.
            .. versionadded:: 0.17
               *sample_weight* support to LogisticRegression.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        solver = _check_solver(self.solver, self.penalty, self.dual)
        # NOTE(review): the message says "positive", but the test only
        # rejects negative values, so C == 0 passes validation here and
        # would only fail later (1/C) -- confirm intent.
        if not isinstance(self.C, numbers.Number) or self.C < 0:
            raise ValueError("Penalty term must be positive; got (C=%r)"
                             % self.C)
        if self.penalty == 'elasticnet':
            if (not isinstance(self.l1_ratio, numbers.Number) or
                    self.l1_ratio < 0 or self.l1_ratio > 1):
                raise ValueError("l1_ratio must be between 0 and 1;"
                                 " got (l1_ratio=%r)" % self.l1_ratio)
        elif self.l1_ratio is not None:
            warnings.warn("l1_ratio parameter is only used when penalty is "
                          "'elasticnet'. Got "
                          "(penalty={})".format(self.penalty))
        if self.penalty == 'none':
            if self.C != 1.0:  # default values
                warnings.warn(
                    "Setting penalty='none' will ignore the C and l1_ratio "
                    "parameters"
                )
                # Note that check for l1_ratio is done right above
            # No regularization == l2 penalty with infinite C.
            C_ = np.inf
            penalty = 'l2'
        else:
            C_ = self.C
            penalty = self.penalty
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)
        # Only newton-cg is allowed to keep float32 input; the other solvers
        # get float64.
        if solver in ['newton-cg']:
            _dtype = [np.float64, np.float32]
        else:
            _dtype = np.float64
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=_dtype, order="C",
                         accept_large_sparse=solver != 'liblinear')
        check_classification_targets(y)
        self.classes_ = np.unique(y)
        n_samples, n_features = X.shape
        multi_class = _check_multi_class(self.multi_class, solver,
                                         len(self.classes_))
        if solver == 'liblinear':
            # liblinear handles multiclass (OvR) internally and runs
            # single-threaded, so fit and return early.
            if effective_n_jobs(self.n_jobs) != 1:
                warnings.warn("'n_jobs' > 1 does not have any effect when"
                              " 'solver' is set to 'liblinear'. Got 'n_jobs'"
                              " = {}.".format(effective_n_jobs(self.n_jobs)))
            self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
                X, y, self.C, self.fit_intercept, self.intercept_scaling,
                self.class_weight, self.penalty, self.dual, self.verbose,
                self.max_iter, self.tol, self.random_state,
                sample_weight=sample_weight)
            self.n_iter_ = np.array([n_iter_])
            return self
        if solver in ['sag', 'saga']:
            # Precompute for the SAG/SAGA step size.
            max_squared_sum = row_norms(X, squared=True).max()
        else:
            max_squared_sum = None
        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])
        if len(self.classes_) == 2:
            # Binary problems are fit as a single one-vs-rest problem for
            # the positive (second) class.
            n_classes = 1
            classes_ = classes_[1:]
        if self.warm_start:
            warm_start_coef = getattr(self, 'coef_', None)
        else:
            warm_start_coef = None
        if warm_start_coef is not None and self.fit_intercept:
            # Append the previous intercept so the warm start covers the
            # full augmented coefficient vector.
            warm_start_coef = np.append(warm_start_coef,
                                        self.intercept_[:, np.newaxis],
                                        axis=1)
        self.coef_ = list()
        self.intercept_ = np.zeros(n_classes)
        # Hack so that we iterate only once for the multinomial case.
        if multi_class == 'multinomial':
            classes_ = [None]
            warm_start_coef = [warm_start_coef]
        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes
        path_func = delayed(_logistic_regression_path)
        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        if solver in ['sag', 'saga']:
            prefer = 'threads'
        else:
            prefer = 'processes'
        # Fit one single-C path per class (OvR) or one joint path
        # (multinomial) in parallel.
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               **_joblib_parallel_args(prefer=prefer))(
            path_func(X, y, pos_class=class_, Cs=[C_],
                      l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
                      tol=self.tol, verbose=self.verbose, solver=solver,
                      multi_class=multi_class, max_iter=self.max_iter,
                      class_weight=self.class_weight, check_input=False,
                      random_state=self.random_state, coef=warm_start_coef_,
                      penalty=penalty, max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight)
            for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
        if multi_class == 'multinomial':
            self.coef_ = fold_coefs_[0][0]
        else:
            self.coef_ = np.asarray(fold_coefs_)
            self.coef_ = self.coef_.reshape(n_classes, n_features +
                                            int(self.fit_intercept))
        if self.fit_intercept:
            # The intercept was fit as the last column; split it out.
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]
        return self
    def predict_proba(self, X):
        """Probability estimates.
        The returned estimates for all classes are ordered by the
        label of classes.
        For a multi_class problem, if multi_class is set to be "multinomial"
        the softmax function is used to find the predicted probability of
        each class.
        Else use a one-vs-rest approach, i.e calculate the probability
        of each class assuming it to be positive using the logistic function.
        and normalize these values across all the classes.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        if not hasattr(self, "coef_"):
            raise NotFittedError("Call fit before prediction")
        # OvR normalization is used for explicit 'ovr', the unresolved
        # default 'warn', and 'auto' when the problem is binary or the
        # solver is liblinear; otherwise the multinomial softmax applies.
        ovr = (self.multi_class in ["ovr", "warn"] or
               (self.multi_class == 'auto' and (self.classes_.size <= 2 or
                                                self.solver == 'liblinear')))
        if ovr:
            return super()._predict_proba_lr(X)
        else:
            decision = self.decision_function(X)
            if decision.ndim == 1:
                # Workaround for multi_class="multinomial" and binary outcomes
                # which requires softmax prediction with only a 1D decision.
                decision_2d = np.c_[-decision, decision]
            else:
                decision_2d = decision
            return softmax(decision_2d, copy=False)
    def predict_log_proba(self, X):
        """Log of probability estimates.
        The returned estimates for all classes are ordered by the
        label of classes.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in ``self.classes_``.
        """
        # Simply the elementwise log of predict_proba.
        return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
                           LinearClassifierMixin):
    """Logistic Regression CV (aka logit, MaxEnt) classifier.
    See glossary entry for :term:`cross-validation estimator`.
    This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
    regularization with primal formulation. The liblinear solver supports both
    L1 and L2 regularization, with a dual formulation only for the L2 penalty.
    Elastic-Net penalty is only supported by the saga solver.
    For the grid of `Cs` values and `l1_ratios` values, the best
    hyperparameter is selected by the cross-validator `StratifiedKFold`, but
    it can be changed using the `cv` parameter. The 'newton-cg', 'sag',
    'saga' and 'lbfgs' solvers can warm-start the coefficients (see
    :term:`Glossary<warm_start>`).
    Read more in the :ref:`User Guide <logistic_regression>`.
    Parameters
    ----------
    Cs : list of floats or int, optional (default=10)
        Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values are chosen
        in a logarithmic scale between 1e-4 and 1e4.
        Like in support vector machines, smaller values specify stronger
        regularization.
    fit_intercept : bool, optional (default=True)
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
    cv : int or cross-validation generator, optional (default=None)
        The default cross-validation generator used is Stratified K-Folds.
        If an integer is provided, then it is the number of folds used.
        See the module :mod:`sklearn.model_selection` module for the
        list of possible cross-validation objects.
        .. versionchanged:: 0.20
            ``cv`` default value if None will change from 3-fold to 5-fold
            in v0.22.
    dual : bool, optional (default=False)
        Dual or primal formulation. Dual formulation is only implemented for
        l2 penalty with liblinear solver. Prefer dual=False when
        n_samples > n_features.
    penalty : str, 'l1', 'l2', or 'elasticnet', optional (default='l2')
        Used to specify the norm used in the penalization. The 'newton-cg',
        'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is
        only supported by the 'saga' solver.
    scoring : string, callable, or None, optional (default=None)
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``. For a list of scoring functions
        that can be used, look at :mod:`sklearn.metrics`. The
        default scoring option used is 'accuracy'.
    solver : str, {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}, \
             optional (default='lbfgs')
        Algorithm to use in the optimization problem.
        - For small datasets, 'liblinear' is a good choice, whereas 'sag' and
          'saga' are faster for large ones.
        - For multiclass problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs'
          handle multinomial loss; 'liblinear' is limited to one-versus-rest
          schemes.
        - 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty, whereas
          'liblinear' and 'saga' handle L1 penalty.
        - 'liblinear' might be slower in LogisticRegressionCV because it does
          not handle warm-starting.
        Note that 'sag' and 'saga' fast convergence is only guaranteed on
        features with approximately the same scale. You can preprocess the data
        with a scaler from sklearn.preprocessing.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
        .. versionadded:: 0.19
           SAGA solver.
    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria.
    max_iter : int, optional (default=100)
        Maximum number of iterations of the optimization algorithm.
    class_weight : dict or 'balanced', optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
        .. versionadded:: 0.17
           class_weight == 'balanced'
    n_jobs : int or None, optional (default=None)
        Number of CPU cores used during the cross-validation loop.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int, optional (default=0)
        For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
        positive number for verbosity.
    refit : bool, optional (default=True)
        If set to True, the scores are averaged across all folds, and the
        coefs and the C that corresponds to the best score is taken, and a
        final refit is done using these parameters.
        Otherwise the coefs, intercepts and C that correspond to the
        best scores across folds are averaged.
    intercept_scaling : float, optional (default=1)
        Useful only when the solver 'liblinear' is used
        and self.fit_intercept is set to True. In this case, x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equal to
        intercept_scaling is appended to the instance vector.
        The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.
    multi_class : str, {'ovr', 'multinomial', 'auto'}, optional (default='ovr')
        If the option chosen is 'ovr', then a binary problem is fit for each
        label. For 'multinomial' the loss minimised is the multinomial loss fit
        across the entire probability distribution, *even when the data is
        binary*. 'multinomial' is unavailable when solver='liblinear'.
        'auto' selects 'ovr' if the data is binary, or if solver='liblinear',
        and otherwise selects 'multinomial'.
        .. versionadded:: 0.18
           Stochastic Average Gradient descent solver for 'multinomial' case.
        .. versionchanged:: 0.20
            Default will change from 'ovr' to 'auto' in 0.22.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    l1_ratios : list of float or None, optional (default=None)
        The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``.
        Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to
        using ``penalty='l2'``, while 1 is equivalent to using
        ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination
        of L1 and L2.
    Attributes
    ----------
    classes_ : array, shape (n_classes, )
        A list of class labels known to the classifier.
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem
        is binary.
    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero.
        `intercept_` is of shape(1,) when the problem is binary.
    Cs_ : array, shape (n_cs)
        Array of C i.e. inverse of regularization parameter values used
        for cross-validation.
    l1_ratios_ : array, shape (n_l1_ratios)
        Array of l1_ratios used for cross-validation. If no l1_ratio is used
        (i.e. penalty is not 'elasticnet'), this is set to ``[None]``
    coefs_paths_ : array, shape (n_folds, n_cs, n_features) or \
            (n_folds, n_cs, n_features + 1)
        dict with classes as the keys, and the path of coefficients obtained
        during cross-validating across each fold and then across each Cs
        after doing an OvR for the corresponding class as values.
        If the 'multi_class' option is set to 'multinomial', then
        the coefs_paths are the coefficients corresponding to each class.
        Each dict value has shape ``(n_folds, n_cs, n_features)`` or
        ``(n_folds, n_cs, n_features + 1)`` depending on whether the
        intercept is fit or not. If ``penalty='elasticnet'``, the shape is
        ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or
        ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``.
    scores_ : dict
        dict with classes as the keys, and the values as the
        grid of scores obtained during cross-validating each fold, after doing
        an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since this is the multinomial class. Each dict value
        has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if
        ``penalty='elasticnet'``.
    C_ : array, shape (n_classes,) or (n_classes - 1,)
        Array of C that maps to the best scores across every class. If refit is
        set to False, then for each class, the best C is the average of the
        C's that correspond to the best scores for each fold.
        `C_` is of shape(n_classes,) when the problem is binary.
    l1_ratio_ : array, shape (n_classes,) or (n_classes - 1,)
        Array of l1_ratio that maps to the best scores across every class. If
        refit is set to False, then for each class, the best l1_ratio is the
        average of the l1_ratio's that correspond to the best scores for each
        fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary.
    n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
        Actual number of iterations for all classes, folds and Cs.
        In the binary or multinomial cases, the first dimension is equal to 1.
        If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds,
        n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``.
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.linear_model import LogisticRegressionCV
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegressionCV(cv=5, random_state=0,
    ...                            multi_class='multinomial').fit(X, y)
    >>> clf.predict(X[:2, :])
    array([0, 0])
    >>> clf.predict_proba(X[:2, :]).shape
    (2, 3)
    >>> clf.score(X, y) # doctest: +ELLIPSIS
    0.98...
    See also
    --------
    LogisticRegression
    """
    def __init__(self, Cs=10, fit_intercept=True, cv='warn', dual=False,
                 penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
                 max_iter=100, class_weight=None, n_jobs=None, verbose=0,
                 refit=True, intercept_scaling=1., multi_class='warn',
                 random_state=None, l1_ratios=None):
        self.Cs = Cs
        self.fit_intercept = fit_intercept
        self.cv = cv
        self.dual = dual
        self.penalty = penalty
        self.scoring = scoring
        self.tol = tol
        self.max_iter = max_iter
        self.class_weight = class_weight
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.solver = solver
        self.refit = refit
        self.intercept_scaling = intercept_scaling
        self.multi_class = multi_class
        self.random_state = random_state
        self.l1_ratios = l1_ratios
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : array-like, shape (n_samples,) optional
            Array of weights that are assigned to individual samples.
            If not provided, then each sample is given unit weight.
        Returns
        -------
        self : object
        """
        solver = _check_solver(self.solver, self.penalty, self.dual)
        if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
            raise ValueError("Maximum number of iteration must be positive;"
                             " got (max_iter=%r)" % self.max_iter)
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol=%r)" % self.tol)
        if self.penalty == 'elasticnet':
            if self.l1_ratios is None or len(self.l1_ratios) == 0 or any(
                    (not isinstance(l1_ratio, numbers.Number) or l1_ratio < 0
                     or l1_ratio > 1) for l1_ratio in self.l1_ratios):
                raise ValueError("l1_ratios must be a list of numbers between "
                                 "0 and 1; got (l1_ratios=%r)" %
                                 self.l1_ratios)
            l1_ratios_ = self.l1_ratios
        else:
            if self.l1_ratios is not None:
                warnings.warn("l1_ratios parameter is only used when penalty "
                              "is 'elasticnet'. Got (penalty={})".format(
                                  self.penalty))
            # Single placeholder so the grid below still has one l1 "column".
            l1_ratios_ = [None]
        if self.penalty == 'none':
            raise ValueError(
                "penalty='none' is not useful and not supported by "
                "LogisticRegressionCV."
            )
        X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
                         order="C",
                         accept_large_sparse=solver != 'liblinear')
        check_classification_targets(y)
        class_weight = self.class_weight
        # Encode for string labels
        label_encoder = LabelEncoder().fit(y)
        y = label_encoder.transform(y)
        if isinstance(class_weight, dict):
            class_weight = dict((label_encoder.transform([cls])[0], v)
                                for cls, v in class_weight.items())
        # The original class labels
        classes = self.classes_ = label_encoder.classes_
        encoded_labels = label_encoder.transform(label_encoder.classes_)
        multi_class = _check_multi_class(self.multi_class, solver,
                                         len(classes))
        if solver in ['sag', 'saga']:
            max_squared_sum = row_norms(X, squared=True).max()
        else:
            max_squared_sum = None
        # init cross-validation generator
        cv = check_cv(self.cv, y, classifier=True)
        folds = list(cv.split(X, y))
        # Use the label encoded classes
        n_classes = len(encoded_labels)
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes[0])
        if n_classes == 2:
            # OvR in case of binary problems is as good as fitting
            # the higher label
            n_classes = 1
            encoded_labels = encoded_labels[1:]
            classes = classes[1:]
        # We need this hack to iterate only once over labels, in the case of
        # multi_class = multinomial, without changing the value of the labels.
        if multi_class == 'multinomial':
            iter_encoded_labels = iter_classes = [None]
        else:
            iter_encoded_labels = encoded_labels
            iter_classes = classes
        # compute the class weights for the entire dataset y
        if class_weight == "balanced":
            class_weight = compute_class_weight(class_weight,
                                                np.arange(len(self.classes_)),
                                                y)
            class_weight = dict(enumerate(class_weight))
        path_func = delayed(_log_reg_scoring_path)
        # The SAG solver releases the GIL so it's more efficient to use
        # threads for this solver.
        if self.solver in ['sag', 'saga']:
            prefer = 'threads'
        else:
            prefer = 'processes'
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                               **_joblib_parallel_args(prefer=prefer))(
            path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
                      fit_intercept=self.fit_intercept, penalty=self.penalty,
                      dual=self.dual, solver=solver, tol=self.tol,
                      max_iter=self.max_iter, verbose=self.verbose,
                      class_weight=class_weight, scoring=self.scoring,
                      multi_class=multi_class,
                      intercept_scaling=self.intercept_scaling,
                      random_state=self.random_state,
                      max_squared_sum=max_squared_sum,
                      sample_weight=sample_weight,
                      l1_ratio=l1_ratio
                      )
            for label in iter_encoded_labels
            for train, test in folds
            for l1_ratio in l1_ratios_)
        # _log_reg_scoring_path will output different shapes depending on the
        # multi_class param, so we need to reshape the outputs accordingly.
        # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the
        # rows are equal, so we just take the first one.
        # After reshaping,
        # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios)
        # - coefs_paths is of shape
        #  (n_classes, n_folds, n_Cs . n_l1_ratios, n_features)
        # - n_iter is of shape
        #  (n_classes, n_folds, n_Cs . n_l1_ratios) or
        #  (1, n_folds, n_Cs . n_l1_ratios)
        coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
        self.Cs_ = Cs[0]
        if multi_class == 'multinomial':
            coefs_paths = np.reshape(
                coefs_paths,
                (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1)
            )
            # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3),
            #                                                 (1, 2, 0, 3))
            coefs_paths = np.swapaxes(coefs_paths, 0, 1)
            coefs_paths = np.swapaxes(coefs_paths, 0, 2)
            self.n_iter_ = np.reshape(
                n_iter_,
                (1, len(folds), len(self.Cs_) * len(l1_ratios_))
            )
            # repeat same scores across all classes
            scores = np.tile(scores, (n_classes, 1, 1))
        else:
            coefs_paths = np.reshape(
                coefs_paths,
                (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_),
                 -1)
            )
            self.n_iter_ = np.reshape(
                n_iter_,
                (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_))
            )
            scores = np.reshape(scores, (n_classes, len(folds), -1))
        self.scores_ = dict(zip(classes, scores))
        self.coefs_paths_ = dict(zip(classes, coefs_paths))
        self.C_ = list()
        self.l1_ratio_ = list()
        self.coef_ = np.empty((n_classes, X.shape[1]))
        self.intercept_ = np.zeros(n_classes)
        for index, (cls, encoded_label) in enumerate(
                zip(iter_classes, iter_encoded_labels)):
            if multi_class == 'ovr':
                scores = self.scores_[cls]
                coefs_paths = self.coefs_paths_[cls]
            else:
                # For multinomial, all scores are the same across classes
                scores = scores[0]
                # coefs_paths will keep its original shape because
                # logistic_regression_path expects it this way
            if self.refit:
                # best_index is between 0 and (n_Cs . n_l1_ratios - 1)
                # for example, with n_cs=2 and n_l1_ratios=3
                # the layout of scores is
                # [c1, c2, c1, c2, c1, c2]
                #   l1_1 ,  l1_2 , l1_3
                best_index = scores.sum(axis=0).argmax()
                best_index_C = best_index % len(self.Cs_)
                C_ = self.Cs_[best_index_C]
                self.C_.append(C_)
                best_index_l1 = best_index // len(self.Cs_)
                l1_ratio_ = l1_ratios_[best_index_l1]
                self.l1_ratio_.append(l1_ratio_)
                if multi_class == 'multinomial':
                    coef_init = np.mean(coefs_paths[:, :, best_index, :],
                                        axis=1)
                else:
                    coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
                # Note that y is label encoded and hence pos_class must be
                # the encoded label / None (for 'multinomial')
                w, _, _ = _logistic_regression_path(
                    X, y, pos_class=encoded_label, Cs=[C_], solver=solver,
                    fit_intercept=self.fit_intercept, coef=coef_init,
                    max_iter=self.max_iter, tol=self.tol,
                    penalty=self.penalty,
                    class_weight=class_weight,
                    multi_class=multi_class,
                    verbose=max(0, self.verbose - 1),
                    random_state=self.random_state,
                    check_input=False, max_squared_sum=max_squared_sum,
                    sample_weight=sample_weight,
                    l1_ratio=l1_ratio_)
                w = w[0]
            else:
                # Take the best scores across every fold and the average of
                # all coefficients corresponding to the best scores.
                best_indices = np.argmax(scores, axis=1)
                # BUGFIX: branch on the *resolved* multi_class, not
                # self.multi_class. With multi_class='auto' or 'warn' the
                # attribute never equals 'ovr', so the multinomial indexing
                # was applied to OvR-shaped coefs_paths, raising IndexError
                # (or silently averaging the wrong axis).
                if multi_class == 'ovr':
                    w = np.mean([coefs_paths[i, best_indices[i], :]
                                 for i in range(len(folds))], axis=0)
                else:
                    w = np.mean([coefs_paths[:, i, best_indices[i], :]
                                 for i in range(len(folds))], axis=0)
                best_indices_C = best_indices % len(self.Cs_)
                self.C_.append(np.mean(self.Cs_[best_indices_C]))
                # BUGFIX: l1_ratios_ is a plain Python list, so indexing it
                # with the best_indices ndarray raised TypeError whenever
                # refit=False. Convert to an array for elasticnet; for other
                # penalties there is only the placeholder None ratio.
                if self.penalty == 'elasticnet':
                    best_indices_l1 = best_indices // len(self.Cs_)
                    self.l1_ratio_.append(
                        np.mean(np.asarray(l1_ratios_)[best_indices_l1]))
                else:
                    self.l1_ratio_.append(None)
            if multi_class == 'multinomial':
                self.C_ = np.tile(self.C_, n_classes)
                self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes)
                self.coef_ = w[:, :X.shape[1]]
                if self.fit_intercept:
                    self.intercept_ = w[:, -1]
            else:
                self.coef_[index] = w[: X.shape[1]]
                if self.fit_intercept:
                    self.intercept_[index] = w[-1]
        self.C_ = np.asarray(self.C_)
        self.l1_ratio_ = np.asarray(self.l1_ratio_)
        self.l1_ratios_ = np.asarray(l1_ratios_)
        # if elasticnet was used, add the l1_ratios dimension to some
        # attributes
        if self.l1_ratios is not None:
            for cls, coefs_path in self.coefs_paths_.items():
                self.coefs_paths_[cls] = coefs_path.reshape(
                    (len(folds), self.Cs_.size, self.l1_ratios_.size, -1))
            for cls, score in self.scores_.items():
                self.scores_[cls] = score.reshape(
                    (len(folds), self.Cs_.size, self.l1_ratios_.size))
            self.n_iter_ = self.n_iter_.reshape(
                (-1, len(folds), self.Cs_.size, self.l1_ratios_.size))
        return self
    def score(self, X, y, sample_weight=None):
        """Returns the score using the `scoring` option on the given
        test data and labels.
        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples,)
            True labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.
        Returns
        -------
        score : float
            Score of self.predict(X) wrt. y.
        """
        if self.scoring is not None:
            warnings.warn("The long-standing behavior to use the "
                          "accuracy score has changed. The scoring "
                          "parameter is now used. "
                          "This warning will disappear in version 0.22.",
                          ChangedBehaviorWarning)
        scoring = self.scoring or 'accuracy'
        if isinstance(scoring, str):
            scoring = get_scorer(scoring)
        return scoring(self, X, y, sample_weight=sample_weight)
| [
"leiqk@dxy.cn"
] | leiqk@dxy.cn |
e52133f604e945d2b4c1d307e1b24450a342f81c | 8bbd5063f90b02238ec5f1b5ece53c513fedec38 | /venv/Scripts/easy_install-script.py | 65f8e9fce01eefb5aa0a78501b8f56f560ed2322 | [] | no_license | amandacgross/visualize | e3ce8433a648cb43acc8a25ff53f12d8538df43f | 3d89044a242918a4fb3bdc806bd75e46624a623f | refs/heads/master | 2020-05-18T22:07:29.513667 | 2019-05-03T01:20:13 | 2019-05-03T01:20:13 | 184,681,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!C:\Users\amand\OneDrive\Courses\image_for_text\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"amanda.gross6@gmail.com"
] | amanda.gross6@gmail.com |
0f904cda5cbead094cf2db0ce91bc601a881c5c8 | cd7250c70e935667dc53dddfb0ed31e020264b6a | /docsrc/source/conf.py | f71a8e5431ec11347d612498d885b88d78162070 | [] | no_license | apbeecham/finpy | 3fb26dec61e91736116c9d31e80fd56fe1898078 | 76114a842fdb7dae7d702d94425b740da8c2c2c1 | refs/heads/master | 2023-05-26T18:28:08.479939 | 2020-07-03T11:48:59 | 2020-07-03T11:48:59 | 222,422,208 | 0 | 0 | null | 2023-05-22T22:18:48 | 2019-11-18T10:27:40 | Python | UTF-8 | Python | false | false | 2,156 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'finpy'
copyright = '2020, Adam Beecham'
author = 'Adam Beecham'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'autoapi.extension'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
autoapi_type = 'python'
autoapi_dirs = ['../../finpy'] | [
"kjharper93@gmail.com"
] | kjharper93@gmail.com |
6614311229101c923dbe72e4768e313363266518 | 59ff59e365277666a4b23d1d1c935c5dcca3803a | /chat/forms.py | 8d2cb0671b43da7529e10b7a218724d4b86d1138 | [] | no_license | Atif252/Instagram-clone | 5def5c163db87bd197e341fc53d34198ff3dffb0 | 93b09186fda77e55d30c7cdd072afe1635edec4c | refs/heads/master | 2022-12-11T01:28:36.129270 | 2020-01-23T16:20:39 | 2020-01-23T16:20:39 | 224,570,739 | 1 | 0 | null | 2022-12-08T03:15:48 | 2019-11-28T04:37:37 | Python | UTF-8 | Python | false | false | 259 | py | from django import forms
from chat.models import Chat, Message
from django.db.models import Q
from django.http import request
class CreateChatMessage(forms.ModelForm):
    """ModelForm for composing a new chat ``Message``.
    Exposes the message body together with the sender and parent-chat
    relations; all other ``Message`` fields are left to the model defaults.
    """
    class Meta:
        model = Message
        fields = ['message_body', 'message_sender', 'chat']
| [
"atifsikander252@gmail.com"
] | atifsikander252@gmail.com |
0d399b3c2d49ebf9b122a374ffd30ba15918ed1c | daeebdbbce15b25975f2fdca5ed43cde36be9e05 | /src/pipelinex/hatch_dict/hatch_dict.py | 410fe9aa5745baafc8af14b107003ae9a29384dd | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | shibuiwilliam/pipelinex | 4b3e7340c027dffba310785cb900c2c503877841 | 5bc4911c6b253cf29f9b6153d11be42a7d82dd48 | refs/heads/master | 2021-05-16T19:03:32.125740 | 2021-04-28T01:19:52 | 2021-04-28T01:19:52 | 363,591,360 | 1 | 0 | NOASSERTION | 2021-05-02T07:22:06 | 2021-05-02T07:22:06 | null | UTF-8 | Python | false | false | 9,894 | py | import importlib
from typing import Any, Union, List, Iterable # NOQA
from logging import getLogger
# Module-level logger; used for non-fatal warnings (e.g. in dot_flatten).
log = getLogger(__name__)
class HatchDict:
    """Wrap a dict/list "egg" and resolve ("hatch") special keys in it into
    Python objects on access via :meth:`get`.
    Special markers (all configurable via the constructor):
    - ``obj_key`` ("="): a dict containing this key is a spec for an object,
      resolved from ``lookup`` or imported by dotted path.
    - ``self_lookup_key`` ("$"): value is looked up inside the egg itself,
      repeated until a fixed point is reached.
    - strings wrapped in parentheses are ``eval``-uated when
      ``eval_parentheses`` is True.
    """
    def __init__(
        self,
        egg,  # type: Union[dict, List]
        lookup={},  # type: dict
        support_nested_keys=True,  # type: bool
        self_lookup_key="$",  # type: str
        support_import=True,  # type: bool
        additional_import_modules=["pipelinex"],  # type: Union[List, str]
        obj_key="=",  # type: str
        eval_parentheses=True,  # type: bool
    ):
        # type: (...) -> None
        assert egg.__class__.__name__ in {"dict", "list"}
        assert lookup.__class__.__name__ in {"dict"}
        assert support_nested_keys.__class__.__name__ in {"bool"}
        assert self_lookup_key.__class__.__name__ in {"str"}
        assert additional_import_modules.__class__.__name__ in {"list", "str"}
        assert obj_key.__class__.__name__ in {"str"}
        # Augment the egg with dot-flattened keys so nested entries can be
        # fetched as "a.b.c" (original top-level keys take precedence).
        aug_egg = {}
        if isinstance(egg, dict):
            if support_nested_keys:
                aug_egg = dot_flatten(egg)
            aug_egg.update(egg)
        self.aug_egg = aug_egg
        self.egg = egg
        # Lookup table starts with the built-in helpers and is extended by
        # the user-supplied mapping.
        self.lookup = {}
        self.lookup.update(_builtin_funcs())
        self.lookup.update(lookup)
        self.self_lookup_key = self_lookup_key
        self.support_import = support_import
        # Normalize to a list of module names to try for "=" imports.
        self.additional_import_modules = (
            [additional_import_modules]
            if isinstance(additional_import_modules, str)
            else additional_import_modules or [__name__]
        )
        self.obj_key = obj_key
        self.eval_parentheses = eval_parentheses
        # Populated lazily by get(): partially-resolved egg and the
        # pre-hatch parameter snapshot.
        self.warmed_egg = None
        self.snapshot = None
    def get(
        self,
        key=None,  # type: Union[str, int]
        default=None,  # type: Any
        lookup={},  # type: dict
    ):
        # type: (...) -> Any
        """Fetch ``key`` from the egg (the whole egg when ``key`` is None)
        and resolve any special keys found in the value.
        """
        assert (key is None) or (
            key.__class__.__name__
            in {
                "str",
                "int",
            }
        ), "Received key: {}".format(key)
        assert lookup.__class__.__name__ in {"dict"}, "Received lookup: s{}".format(
            lookup
        )
        if key is None:
            d = self.egg
        else:
            if isinstance(self.egg, dict):
                d = self.aug_egg.get(key, default)
            if isinstance(self.egg, list):
                assert isinstance(key, int)
                d = self.egg[key] if (0 <= key < len(self.egg)) else default
        # Resolve "$"-references against the (flattened) egg repeatedly
        # until the value stops changing (fixed point).
        if self.self_lookup_key:
            s = dict()
            while d != s:
                d, s = _dfs_apply(
                    d_input=d,
                    hatch_args=dict(lookup=self.aug_egg, obj_key=self.self_lookup_key),
                )
            self.warmed_egg = d
        # Evaluate "( ... )" strings as Python expressions, if enabled.
        if self.eval_parentheses:
            d, s = _dfs_apply(
                d_input=d, hatch_args=dict(eval_parentheses=self.eval_parentheses)
            )
            self.warmed_egg = d
        lookup_input = {}
        lookup_input.update(self.lookup)
        lookup_input.update(lookup)
        if isinstance(self.egg, dict):
            forcing_module = self.egg.get("FORCING_MODULE", "")
            module_aliases = self.egg.get("MODULE_ALIASES", {})
        # Resolve "="-keyed specs, trying each candidate default module.
        for m in self.additional_import_modules:
            d, s = _dfs_apply(
                d_input=d,
                hatch_args=dict(
                    lookup=lookup_input,
                    support_import=self.support_import,
                    default_module=m,
                    forcing_module=forcing_module,
                    module_aliases=module_aliases,
                    obj_key=self.obj_key,
                ),
            )
        # Keep the last pre-hatch snapshot for get_params().
        self.snapshot = s
        return d
    def get_params(self):
        # Parameters as resolved by the last get() call, before the final
        # object instantiation step.
        return self.snapshot
    def keys(self):
        # Delegate to the raw (unresolved) egg.
        return self.egg.keys()
    def items(self):
        """Return ``(key, hatched_value)`` pairs; dict eggs only."""
        assert isinstance(self.egg, dict)
        return [(k, self.get(k)) for k in self.egg.keys()]
def _dfs_apply(
    d_input,  # type: Any
    hatch_args,  # type: dict
):
    # type: (...) -> Any
    """Depth-first transform of ``d_input``.
    Returns a pair ``(d, s)``: ``d`` is the value with special keys resolved
    (lookups, imports, evaluated "( ... )" strings), while ``s`` mirrors the
    structural transformation *without* the final object hatching and serves
    as a parameters snapshot.
    """
    eval_parentheses = hatch_args.get("eval_parentheses", False)  # type: bool
    lookup = hatch_args.get("lookup", dict())  # type: dict
    support_import = hatch_args.get("support_import", False)  # type: bool
    default_module = hatch_args.get("default_module", "")  # type: str
    forcing_module = hatch_args.get("forcing_module", "")  # type: str
    module_aliases = hatch_args.get("module_aliases", {})  # type: dict
    obj_key = hatch_args.get("obj_key", "=")  # type: str
    d = d_input
    s = d_input
    if isinstance(d_input, dict):
        # A dict carrying obj_key is a spec for an object/callable.
        obj_str = d_input.get(obj_key)
        d, s = {}, {}
        for k, v in d_input.items():
            d[k], s[k] = _dfs_apply(v, hatch_args)
        if obj_str:
            if obj_str in lookup:
                # Resolved from the lookup table first.
                a = lookup.get(obj_str)
                d = _hatch(d, a, obj_key=obj_key)
            elif support_import:
                # Optionally rewrite the module part of the dotted path
                # before importing.
                if forcing_module:
                    obj_path_list = obj_str.rsplit(".", 1)
                    obj_str = "{}.{}".format(forcing_module, obj_path_list[-1])
                if module_aliases:
                    obj_path_list = obj_str.rsplit(".", 1)
                    if len(obj_path_list) == 2 and obj_path_list[0] in module_aliases:
                        module_alias = module_aliases.get(obj_path_list[0])
                        if module_alias is None:
                            obj_path_list.pop(0)
                        else:
                            obj_path_list[0] = module_alias
                    obj_str = ".".join(obj_path_list)
                a = load_obj(obj_str, default_obj_path=default_module)
                d = _hatch(d, a, obj_key=obj_key)
    if isinstance(d_input, list):
        d, s = [], []
        for v in d_input:
            _d, _s = _dfs_apply(v, hatch_args)
            d.append(_d)
            s.append(_s)
    if isinstance(d_input, str):
        # Strings wrapped in parentheses are evaluated as Python expressions.
        if (
            eval_parentheses
            and len(d_input) >= 2
            and d_input[0] == "("
            and d_input[-1] == ")"
        ):
            d = eval(d)
    return d, s
def _hatch(
d, # type: dict
a, # type: Any
obj_key="=", # type: str
pos_arg_key="_", # type: str
attr_key=".", # type: str
):
d.pop(obj_key)
if d:
assert callable(a), "{} is not callable.".format(a)
pos_args = d.pop(pos_arg_key, None)
if pos_args is None:
pos_args = []
if not isinstance(pos_args, list):
pos_args = [pos_args]
attribute_name = d.pop(attr_key, None)
for k in d:
assert isinstance(
k, str
), "Non-string key '{}' in '{}' is not valid for callable: '{}'.".format(
k, d, a.__name__
)
d = a(*pos_args, **d)
if attribute_name:
d = getattr(d, attribute_name)
# if isinstance(d, MethodType):
# d = lambda *args: d(args[0])
else:
d = a
return d
def dot_flatten(d):
try:
from flatten_dict import flatten
d = flatten(d, reducer="dot")
except Exception:
log.warning("{} failed to be flattened.".format(d), exc_info=True)
return d
def pass_(*argsignore, **kwargsignore):
return None
def pass_through(*args, **kwargs):
return args[0] if args else list(kwargs.values())[0] if kwargs else None
class ToPipeline:
def __init__(self, *args):
if len(args) == 1:
args = args[0]
self.args = args
def __call__(self):
return self.args
class Construct:
def __init__(self, obj):
self.obj = obj
def __call__(self, *args, **kwargs):
return self.obj(*args, **kwargs)
class Method:
method = None
def __init__(self, *args, **kwargs):
if self.method is None:
self.method = kwargs.pop("method")
self.args = args
self.kwargs = kwargs
def __call__(self, d):
if isinstance(d, dict):
d = HatchDict(d)
attr = getattr(d, self.method, None)
if callable(attr):
return attr(*self.args, **self.kwargs)
else:
return d
class Get(Method):
method = "get"
def feed(func, args):
assert callable(func)
if isinstance(args, dict):
posargs = args.pop("_", [])
kwargs = args
elif isinstance(args, (list, tuple)):
posargs = args
kwargs = dict()
else:
posargs = [args]
kwargs = dict()
def _feed(*argsignore, **kwargsignore):
return func(*posargs, **kwargs)
return _feed
def _builtin_funcs():
return dict(
pass_=pass_,
pass_through=pass_through,
ToPipeline=ToPipeline,
Construct=Construct,
Method=Method,
Get=Get,
)
"""
Copyright 2018-2019 QuantumBlack Visual Analytics Limited
regarding `load_obj` function copied from
https://github.com/quantumblacklabs/kedro/blob/0.15.4/kedro/utils.py
"""
def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
"""Extract an object from a given path.
Args:
obj_path: Path to an object to be extracted, including the object name.
default_obj_path: Default object path.
Returns:
Extracted object.
Raises:
AttributeError: When the object does not have the given named attribute.
"""
obj_path_list = obj_path.rsplit(".", 1)
obj_path = obj_path_list.pop(0) if len(obj_path_list) > 1 else default_obj_path
obj_name = obj_path_list[0]
module_obj = importlib.import_module(obj_path)
if not hasattr(module_obj, obj_name):
raise AttributeError(
"Object `{}` cannot be loaded from `{}`.".format(obj_name, obj_path)
)
return getattr(module_obj, obj_name)
| [
"yusuke.minami86@gmail.com"
] | yusuke.minami86@gmail.com |
beda1750d055f278c7feba99c51342ec22251e02 | 2d4e020e6ab48c46e0a19cb69048d9e8d26e46a6 | /Job_Portal/job_portal/main/migrations/0005_auto_20210202_0143.py | 94b50b03f39910c2733310db1be8d839c9c1ae73 | [] | no_license | IsmailTitas1815/Learning | a92476fcf7bcd28a7dc1ab2f4eb3a5c27034728f | 207eaf4101a6d161c1044310f4b3cc54e9c514eb | refs/heads/master | 2023-07-04T20:13:07.263331 | 2021-08-07T20:07:39 | 2021-08-07T20:07:39 | 293,100,950 | 0 | 0 | null | 2021-05-07T16:55:29 | 2020-09-05T15:18:46 | Python | UTF-8 | Python | false | false | 577 | py | # Generated by Django 3.1.5 on 2021-02-01 19:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20210202_0138'),
]
operations = [
migrations.RemoveField(
model_name='candidate',
name='tag',
),
migrations.AddField(
model_name='candidate',
name='tag',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='main.tag'),
),
]
| [
"titas.sarker1234@gmail.com"
] | titas.sarker1234@gmail.com |
bc2c72df3cc0746a0cc7f021b142b01a992d27b6 | 32bb58a2bdd0ab2247d41d4a9ad322b6b917e4b4 | /pyten/UI/helios.py | 465e83768eeebf5b1ecd98aae24207e49d073724 | [] | no_license | yangjichen/PublicOpinionAnalysis-TrendPrediction-TensorCompletion | a0a5d1aea44dc963313374c0b5150b9a1c33b4b5 | f3dd34f221e29b519799970786b0be01e41f0449 | refs/heads/master | 2021-02-06T17:49:25.325990 | 2020-03-15T12:42:34 | 2020-03-15T12:42:34 | 243,936,548 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | import pyten.UI
def helios(scenario=None):
"""
Helios Main API returns decomposition or Recovery Result of All three scenario
"""
# Initialization
Ori = None # Original Tensor
full = None # Full Tensor reconstructed by decomposed matrices
Final = None # Decomposition Results e.g. Ttensor or Ktensor
Rec = None # Recovered Tensor (Completed Tensor)
# User Interface
if scenario is None:
scenario = raw_input("Please choose the scenario:\n"
" 1. Basic Tensor Decomposition/Completion 2.Tensor Decompostion/Completion with Auxiliary Information"
" 3.Dynamic Tensor Decomposition/Completion 4.Scalable Tensor Decomposition/Completion 0.Exit \n")
if scenario == '1': # Basic Tensor Decomposition/Completion
[Ori, full, Final, Rec] = pyten.UI.basic()
elif scenario == '2': # Tensor Completion with Auxiliary Information
[Ori, full, Final, Rec] = pyten.UI.auxiliary()
elif scenario == '3': # Dynamic Tensor Decomposition
[Ori, full, Final, Rec] = pyten.UI.dynamic()
elif scenario == '4': # Dynamic Tensor Decomposition
[Ori, full, Final, Rec] = pyten.UI.scalable()
elif scenario == '0':
print ('Successfully Exit')
return Ori, full, Final, Rec
else:
raise ValueError('No Such scenario')
# Return result
return Ori, full, Final, Rec
| [
"yangjichen@yangjichendeMacBook-Pro.local"
] | yangjichen@yangjichendeMacBook-Pro.local |
cf08ddf5aa4fd4f0d5bbd4c4f17f8720aa26e1c0 | bb0f5ec6ee0ed99afb09087ff0ea9bfe32b7ea49 | /utills/amount.py | 87463e68aad88f653aba56ac9ab975e44a5ea3b3 | [] | no_license | persontianshuang/lottery_admin | 49f3386d75671d0b2c43dfea3987e7fa8df84c9b | d8ebc7cf778cac24055a709886fbaa3b03325a69 | refs/heads/master | 2021-09-03T10:04:40.151502 | 2018-01-08T08:09:50 | 2018-01-08T08:09:50 | 111,886,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py | import random,time
from datetime import datetime
class AmountCommon():
def __init__(self,db):
self.db = db
def sum_amount(self, db):
return sum([x.amount for x in db])
def all(self):
return str(self.sum_amount(self.db))
def today(self):
t = time.localtime(time.time())
time1 = time.mktime(time.strptime(time.strftime('%Y-%m-%d 00:00:00', t),
'%Y-%m-%d %H:%M:%S'))
today_zero = int(time1)
now = int(time.time())
tdb = self.db.filter(created__range=(today_zero, now))
return str(self.sum_amount(tdb))
def this_month(self):
d = datetime.now()
b = '{0}-{1}-1 00:00:00'.format(d.year, d.month)
month_zero = int(time.mktime(time.strptime(b, "%Y-%m-%d %H:%M:%S")))
now = int(time.time())
tdb = self.db.filter(created__range=(month_zero, now))
return str(self.sum_amount(tdb))
def time_formater(self,year,month,day):
b = '{0}-{1}-{2} 00:00:00'.format(year, month, day)
month_zero = int(time.mktime(time.strptime(b, "%Y-%m-%d %H:%M:%S")))
return month_zero
def time_range(self,last,now):
tlast = self.time_formater(last[0], last[1], last[2])
tnow = self.time_formater(now[0], now[1], now[2])
tdb = self.db.filter(created__range=(tlast, tnow))
return str(self.sum_amount(tdb)) | [
"mengyouhan@gmail.com"
] | mengyouhan@gmail.com |
1c964cf811d980e82a8bbe2f10233965f46ad3be | 9870fe8cd8186cf94cc638a678e326a36ad9fe03 | /user.py | bf6227662585b4dda7e95fa5f4506be3a13ff659 | [] | no_license | imzboy/melytix_local_solution | e45e49923a56897fa3840148f04280647f159ea6 | 63e5bf2fd90a794c97d8bae214322ac10a33dd30 | refs/heads/master | 2022-12-26T16:26:23.495645 | 2020-09-17T07:43:00 | 2020-09-17T07:43:00 | 296,255,082 | 0 | 0 | null | 2020-09-17T07:43:35 | 2020-09-17T07:43:34 | null | UTF-8 | Python | false | false | 9,089 | py | from flask_login import UserMixin
import pymongo
import os
from hashlib import pbkdf2_hmac
# Connecting to Mogodb Atlas
collection = pymongo.MongoClient(
'mongodb+srv://MaxTeslya:7887334Mna@melytixdata'
'-ryedw.mongodb.net/test?retryWrites=true&w=majority')
db = collection.MelytixUsers.Users
class User(UserMixin):
def __init__(self, id_, email, password):
self.id = id_
self.email = email
self.password = password
# self.google_access_token = google_access_token
# self.google_refresh_token = google_refresh_token
# self.fb_access_token = fb_access_token
@staticmethod
def get_by_email(email: str):
user = db.find_one({'email': email})
if user:
log_user = User(id_=str(user['_id']), email=user['email'],
password=user['password'])
try:
log_user.google_access_token = user['g_access_token']
log_user.google_refresh_token = user['g_refresh_token']
except KeyError:
pass
try:
log_user.metrics = user['metrics']
except KeyError:
pass
return log_user
else:
return None
@staticmethod
def get_by_id(user_id):
user = db.find_one({'_id': user_id})
if user:
log_user = User(id_=str(user['_id']), email=user['email'],
password=user['password'])
try:
log_user.google_access_token = user['tokens']['g_access_token']
log_user.google_refresh_token = user['tokens']['g_refresh_token']
except KeyError:
pass
try:
log_user.metrics = user['metrics']
except KeyError:
pass
return log_user
else:
return None
return log_user
@staticmethod
def register(email: str, passwrd: str) -> None:
salt = os.urandom(24)
passwrd = pbkdf2_hmac('sha256', passwrd.encode('utf-8'), salt, 100000)
db.insert_one({
'email': email,
'password': passwrd,
'salt': salt
})
@staticmethod
def verify_password(email, inputted_pass):
user = db.find_one({'email': email})
if user:
salt = user['salt']
inputted_pass = pbkdf2_hmac(
'sha256',
inputted_pass.encode('utf-8'),
salt,
100000)
if user['password'] == inputted_pass:
return True
else:
return False
else:
return 404
@staticmethod
def add_scopes(email: str, scope: list):
"""adding scopes for google apis in the database for future usage
Args:
email (str): the email that we use to find the user
scope (list): the scopes that we are adding
"""
db.find_one_and_update(
{'email': email},
{'$set': {
'SCOPE': scope
}},
upsert=False
)
@staticmethod
def insert_tokens(email: str, access_token: str, refresh_token: str):
"""Mongodb find adn update func for adding user tokens in db
Args:
email: the email that we use to find the user
access_token: the google access token
refresh_token: the google refresh token"""
db.find_one_and_update(
{'email': email},
{'$set': {
'tokens': {'g_access_token': access_token,
'g_refresh_token': refresh_token}
}},
upsert=False
)
"""@staticmethod
def get_by_email(email):
db = get_db()
user = db.execute(
"SELECT * FROM user WHERE email = ?", (email,)
).fetchone()
if not user:
return None
user = User(
id_=user[0], email=user[1],
password=user[2], google_access_token=user[3],
google_refresh_token=user[4], fb_access_token=user[5])
return user
@staticmethod
def get_by_id(user_id):
db = get_db()
user = db.execute(
"SELECT * FROM user WHERE id = ?", user_id
).fetchone()
if not user:
return None
user = User(
id_=user[0], email=user[1],
password=user[2], google_access_token=user[3],
google_refresh_token=user[4], fb_access_token=user[5]
)
return user
@staticmethod
def create(email, password, google_access_token,
google_refresh_token, fb_access_token):
db = get_db()
db.execute(
"INSERT INTO user (email, password, google_access_token,"
"google_refresh_token, fb_access_token) "
"VALUES (?, ?, ?, ?, ?)",
(email, password, google_access_token,
google_refresh_token, fb_access_token),
)
db.commit()
@staticmethod
def get_google_token(user_id):
db = get_db()
token = db.execute(
"SELECT google_access_token FROM user WHERE id = ?", (user_id,)
).fetchone()
if not token:
return None
return token
@staticmethod
def get_google_refresh_token(user_id):
db = get_db()
refresh_token = db.execute(
"SELECT google_refresh_token FROM user WHERE id = ?", (user_id,)
).fetchone()
if not refresh_token:
return None
return refresh_token
@staticmethod
def insert_tokens(email, g_access_token, g_refresh_token):
db = get_db()
db.execute(
"UPDATE user SET google_access_token = ?,"
"google_refresh_token = ? WHERE email = ?",
(g_access_token, g_refresh_token, email)
)
db.commit()
@staticmethod
def get_fb_token(user_id):
db = get_db()
token = db.execute(
"SELECT fb_access_token FROM user WHERE id = ?", (user_id,)
).fetchone()
if not token:
return None
return token
@staticmethod
def insert_fb_token(email, fb_access_token):
db = get_db()
db.execute(
"UPDATE user SET fb_access_token = ? WHERE email = ?",
(fb_access_token, email)
)
db.commit()
@staticmethod
def create_ga_table(user_id, data):
sessions = data['sessions']
users = data['users']
pageviews = data['pageviews']
pageviewsPerSession = data['pageviewsPerSession']
avgSessionDuration = data['avgSessionDuration']
bounces = data['bounces']
percentNewSession = data['percentNewSession']
NewVisitors = data['NewVisitors']
ReturningVisitors = data['ReturningVisitors']
ga_db = get_ga_db()
ga_db.execute(
"INSERT INTO ga_data (id ,sessions, users, pageviews,"
"pageviewsPerSession, avgSessionDuration, bounces, "
"percentNewSession, NewVisitors, ReturningVisitors) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(user_id, sessions, users, pageviews,
pageviewsPerSession, avgSessionDuration,
bounces, percentNewSession, NewVisitors, ReturningVisitors)
)
ga_db.commit()
@staticmethod
def get_ga_data(user_id):
ga_db = get_ga_db()
data = ga_db.execute(
"SELECT * FROM ga_data WHERE id = ?", (user_id,)
).fetchone()
if not data:
return None
ga_data = {
'sessions': data[1],
'users': data[2],
'pageviews': data[3],
'pageviewsPerSession': data[4],
'avgSessionDuration': data[5],
'bounces': data[6],
'percentNewSession': data[7],
'NewVisitors': data[8],
'ReturningVisitors': data[9]
}
return ga_data
@staticmethod
def update_ga_data(user_id, data):
sessions = data['sessions']
users = data['users']
pageviews = data['pageviews']
pageviewsPerSession = data['pageviewsPerSession']
avgSessionDuration = data['avgSessionDuration']
bounces = data['bounces']
percentNewSession = data['percentNewSession']
NewVisitors = data['NewVisitors']
ReturningVisitors = data['ReturningVisitors']
ga_db = get_ga_db()
ga_db.execute(
"UPDATE ga_data SET sessions = ?, users = ?,"
"pageviews = ?, pageviewsPerSession = ?,"
"avgSessionDuration = ""?, bounces = ?,"
"percentNewSession = ?, NewVisitors = ?,"
"ReturningVisitors = ? WHERE id = ?",
(sessions, users, pageviews,
pageviewsPerSession, avgSessionDuration,
bounces, percentNewSession, NewVisitors,
ReturningVisitors, user_id)
)
ga_db.commit()
"""
| [
"rmastodon0603@gmail.com"
] | rmastodon0603@gmail.com |
54aa72c6ca565b7aa1d189e7744b9fcb0f24dd40 | d09c6ff7114f69a9326883c5b9fcc70fa994e8a2 | /_pycharm_skeletons/renderdoc/GLVertexAttribute.py | add4c7232eea14baa06e4a12c24a237ae897c01a | [
"MIT"
] | permissive | Lex-DRL/renderdoc-py-stubs | 3dd32d23c0c8219bb66387e6078244cff453cd83 | 75d280e4f500ded506f3315a49fc432b37ab4fa6 | refs/heads/master | 2020-08-22T16:55:39.336657 | 2019-11-03T01:21:26 | 2019-11-03T01:21:26 | 216,441,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,700 | py | # encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class GLVertexAttribute(SwigPyObject):
"""
Describes the configuration for a single vertex attribute.
.. note:: If old-style vertex attrib pointer setup was used for the vertex attributes then it will
be decomposed into 1:1 attributes and buffers.
"""
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
byteOffset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""
The byte offset from the start of the vertex data in the vertex buffer from
:data:`vertexBufferSlot`.
"""
enabled = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""``True`` if this vertex attribute is enabled."""
format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The :class:`ResourceFormat` of the vertex attribute."""
genericValue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""A :class:`PixelValue` containing the generic value of a vertex attribute."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
vertexBufferSlot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""The vertex buffer input slot where the data is sourced from."""
__dict__ = None # (!) real value is ''
| [
"drl.i3x@gmail.com"
] | drl.i3x@gmail.com |
bad614b63df9fe3d16123dcae4b47f6ee685ab23 | c1254c18878b0b759878b7c37040e67f4c51652d | /display.py | f668d7f55825f81d6b2a304226eba161409347f8 | [] | no_license | lyp694096417/ufldl_cnn | 73cd2f0babf1eda08353d54429e370fb933c8173 | 1400756f9033e86ad303173a24c6dd06bc3ffcc3 | refs/heads/master | 2021-08-22T19:17:07.168770 | 2017-12-01T01:51:52 | 2017-12-01T01:51:52 | 112,620,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,109 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.figure as fig
import os.path as p
import PIL
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# 图像保存路径
PATH = p.dirname(__file__)
"""
实现画板显示图像的类
"""
class ImgPanel:
def __init__(self, parent):
# 初始化画板的父容器和初始页面内容
self.parent = parent
self.index = 0
# 创建画板的控制按钮
btn = Button(parent, text = 'Next', command = self.change)
# 设置按钮在画板上的位置
btn.pack(side = BOTTOM, fill = BOTH)
def displayImg(self, images, figsize = (8,8)):
"""
函数功能: 创建画板并将图像数据载入到画板容器
参数说明:
images: 图像数据
figsize: 显示图像的初始大小
"""
# 图像的数量
n_images = len(images)
self.maxIndex = n_images
# 创建画板容器和图像容器
self.canvas = []
Figs = []
for i in range(n_images):
Figs.append(fig.Figure(figsize))
self.canvas.append(FigureCanvasTkAgg(Figs[i],master = self.parent))
self.canvas[0]._tkcanvas.pack()
# 载入图像数据
for i,data in enumerate(images):
axe = Figs[i].add_subplot(111)
axe.imshow(data, cmap = cm.gray, interpolation = 'bicubic')
def change(self):
"""
函数功能: 'Next'按钮的响应函数
"""
self.index+=1
if self.index == self.maxIndex:
self.index = 0
self.canvas[self.n - 1]._tkcanvas.pack_forget()
self.canvas[self.n]._tkcanvas.pack()
def normalize(image):
image = image - np.mean(image)
std_dev = 3 * np.std(image)
image = np.maximum(np.minimum(image, std_dev), -std_dev) / std_dev
image = (image + 1) * 0.5
return image
def displayNetwork(A, filename = 'features.png'):
"""
函数功能: 显示灰度图像的特征向量图像
参数说明: A.T是特征矩阵,输入数据左乘A.T相当与将数据从输入层映射到隐藏层
故A的shape[0]是输入层结点数,shape[1]是隐藏层结点数
即每一列是一个特征向量,对应于一副图像
"""
# 计算特征向量的数量以及每幅特征向量对应图像的行和列
(n_pixels, n_images) = A.shape
pixel_dim = int(np.ceil(np.sqrt(n_pixels))) # 特征图像的图像维度
n_row = int(np.ceil(np.sqrt(n_images))) # 每幅画布上显示图像的行数
n_col = int(np.ceil(n_images / n_row)) # 每幅画布上显示图像的列数
buf = 1 # 特征图像在画布上的间隔距离
images = np.ones(shape = (buf + n_row * (pixel_dim + buf), buf + n_col * (pixel_dim + buf)))
k = 0
for i in range(n_row):
for j in range(n_col):
if k >= n_images:
break
x_i = buf + i * (pixel_dim + buf)
x_j = buf + j * (pixel_dim + buf)
y_i = x_i + pixel_dim
y_j = x_j + pixel_dim
imgData = normalize(A[:,k])
images[x_i:y_i, x_j:y_j] = imgData.reshape(pixel_dim,pixel_dim)
k+=1
plt.imshow(images, cmap = cm.gray, interpolation='bicubic')
plt.show()
def displayColorNetwork(A, filename = 'colorfeatures.png'):
"""
函数功能:显示RGB图像的特征图像
"""
# 计算特征向量的数量以及每幅特征向量对应图像的行和列
(n_pixels, n_images) = A.shape
n_pixels = int(n_pixels / 3)
pixel_dim = int(np.ceil(np.sqrt(n_pixels))) # 特征图像的图像维度
n_row = int(np.ceil(np.sqrt(n_images))) # 每幅画布上显示图像的行数
n_col = int(np.ceil(n_images / n_row)) # 每幅画布上显示图像的列数
buf = 1
# 拆分RGB的三个通道数据
R = A[0:n_pixels,:]
G = A[n_pixels:2 * n_pixels,:]
B = A[2 * n_pixels:3 * n_pixels,:]
images = np.ones(shape = (buf + n_row * (pixel_dim + buf), buf + n_col * (pixel_dim + buf), 3))
k = 0
for i in range(n_row):
for j in range(n_col):
if k>=n_images:
break
x_i = i * (pixel_dim + buf)
y_i = x_i + pixel_dim
x_j = j * (pixel_dim + buf)
y_j = x_j + pixel_dim
R_data = normalize(R[:,k])
G_data = normalize(G[:,k])
B_data = normalize(B[:,k])
images[x_i:y_i,x_j:y_j,0] = R_data.reshape(pixel_dim,pixel_dim)
images[x_i:y_i,x_j:y_j,1] = G_data.reshape(pixel_dim,pixel_dim)
images[x_i:y_i,x_j:y_j,2] = B_data.reshape(pixel_dim,pixel_dim)
k+=1
Fig,axes = plt.subplots(1,1)
axes.imshow(images)
axes.set_frame_on(False)
axes.set_axis_off()
plt.show()
def showImage(images, figsize = (8,8)):
root = Tk()
IS = ImgPanel(root)
IS.showImages(images,figsize)
root.mainloop()
| [
"694096417@qq.com"
] | 694096417@qq.com |
6693f92c19a5231ba312a431956c0819bfc7ca0b | 1b344b9b9c9d7736c13d3bb4680dc49b6443ab8b | /programadorEventos/apps.py | 84bc84d961a6e478533470f4efc38ed27d6183bd | [] | no_license | patriciouca/SD | f791d82303069642e4cb270365848db2744b75ce | 5acae164b140f7e4dbcefe296ff0f1cecfdc595a | refs/heads/master | 2020-05-15T19:45:35.979822 | 2019-05-06T15:13:45 | 2019-05-06T15:13:45 | 182,463,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.apps import AppConfig
class ProgramadoreventosConfig(AppConfig):
name = 'programadorEventos'
| [
"patricio.fernandezflorez@alum.uca.es"
] | patricio.fernandezflorez@alum.uca.es |
ac08670ebbc05a19e4c88e548ff1633c49fcbdbb | 8eec18afdceae60126a69d698031e4be4086fe5b | /backend_server/CreateQuadrantLists.py | e65a68d9ee0426605b52163d41d7d11883aa5d37 | [
"Apache-2.0"
] | permissive | matteoluzzi/ParkingFinder | 93f89542bd2448d5d28fe9da7d23933064c880c7 | dfd9990ea76abf6e40fe93423bb37a9e3581a8ee | refs/heads/master | 2020-05-19T12:43:07.635641 | 2015-04-03T13:56:07 | 2015-04-03T13:56:07 | 25,400,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,980 | py | #USATO UNA VOLTA SOLA, NON FA PARTE DEL PROGETTO
import Quadrant as quadrant
import SearchQuadrant as searchquadrant
import QuadrantTextFileLoader as loader
#import ParkingDYDBLoader as DBloader
def initializeQuadrants(searcher,parkingList):
for item in parkingList:
#print item
#print item.getLatitude()
lat = item.getLatitude()
lon = item.getLongitude()
point = [lat,lon]
targetQuadrant = searcher.searchQuadrant(point)
if targetQuadrant != -1:
targetQuadrant.addToParkList(item)
else:
raise Exception('quadrant not found for an item '+str(item.getId())+' latitude '+str(lat)+" longitude "+str(lon))
#myDBLoader = DBloader.ParkingDYDBLoader('posti',True,str("sdcc.wpvbcm.cfg.usw2.cache.amazonaws.com:11211"),600)
print "ok1"
listaposti = loader.QuadrantTextFileLoader.loadParking('postiroma.txt')
print "ok2"
listaQuadranti = searchquadrant.SearchQuadrant(loader.QuadrantTextFileLoader.load('listaquadranti.txt',0))
print "ok3"
initializeQuadrants(listaQuadranti,listaposti)
print "ok4"
#testquadrant = listaQuadranti.getQuadrantInstance(12)
#loader.QuadrantTextFileLoader.loadQuadrantParkings(testquadrant,"parkings/listquadrant62.txt",myDBLoader)
#testquadrant.getParkList()[0].update()
#quadrantParkList = testquadrant.getParkList()
#nparkings = len(quadrantParkList)
#print "chiamo batch update su "+str(nparkings)
#myDBLoader.batchUpdate(testquadrant.getParkList())
#print testquadrant.getParkList()[0]
#print "pippo"+str(testquadrant.getParkList()[0])
counter = 1
mylist = list()
for item in listaQuadranti.quadrantsList:
if(item.getNumberOfParkings()<=3000):
mylist.append(item)
else:
qnlist = item.getSplitted(3000)
for itemint in qnlist:
mylist.append(itemint)
out_file2 = open("newpark/listaquadranti.txt","w")
for item in mylist:
print str(item.getNumberOfParkings())
listapostiquadrante = item.getParkList()
quadrantID = item.getID()
out_file = open("newpark/listquadrant"+str(counter)+".txt","w")
for itemint in listapostiquadrante:
itemid = itemint.getId()
itemlat = itemint.getLatitude()
itemlon = itemint.getLongitude()
out_file.write(str(itemid)+"#"+str(itemlat)+"#"+str(itemlon)+"\n")
testl = list()
testl.append(itemlat)
testl.append(itemlon)
testr = item.inside(testl)
if (testr==False):
raise Exception("wrong quadrants")
else:
print "#"
bounds = item.getBoundaries()
NWString= str(bounds['NW'][0])+"|"+str(bounds['NW'][1])
NEString= str(bounds['NE'][0])+"|"+str(bounds['NE'][1])
SWString= str(bounds['SW'][0])+"|"+str(bounds['SW'][1])
SEString= str(bounds['SE'][0])+"|"+str(bounds['SE'][1])
myline = str(counter)+"#"+NWString+"#"+NEString+"#"+SWString+"#"+SEString+"\n"
out_file2.write(myline)
out_file.close()
counter = counter+1
out_file2.close()
#testpoint = list()
#testpoint.append(41.92)
#testpoint.append(12.34)
#print listaQuadranti.searchQuadrant(testpoint)
#print "percentuale parcheggi liberi"+str(testquadrant.getPercentageFreeParkings())
| [
"paride.casulli@gmail.com"
] | paride.casulli@gmail.com |
43b423d4e47a26ed45bc02d99ba02570d1486d22 | f0e37b5a23a44f701014c4bc13f0110fb12ff95b | /sentiment/sentiment_for_treatment.py | 5054395bf3ecb64b1d1f277afb3239515cd8436f | [] | no_license | ramon349/BIONLP_final | d411eff3600daef5c1f38776887f220014e29967 | 6de7f834adc0a12ddb0963438a5d47998ed7d4f1 | refs/heads/master | 2023-01-15T15:46:45.111613 | 2020-11-25T04:33:50 | 2020-11-25T04:33:50 | 304,123,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | import json
from collections import defaultdict
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from LDA import LDA
sentiment_data_path = "./sentiment_results.txt"
# format
# 'id':str,
# 'text':str,
# 'treatments':str,
# 'sentiment_label':['NEGATIVE','POSITIVE'],
# 'sentiment_score': float
def load_sentiment_results():
data = []
with open(sentiment_data_path, 'r', encoding='utf-8') as file:
for line in file:
data.append(json.loads(line))
return data
def split_data_by_treatment(data):
treatment_to_samples = defaultdict(list)
for sample in data:
treatment_to_samples[sample['treatments']].append(sample)
return treatment_to_samples
if __name__ == '__main__':
sentiment_data = load_sentiment_results()
treat_to_sample = split_data_by_treatment(sentiment_data)
treatment_sorted_keys = sorted(treat_to_sample.keys(), key=lambda x: -len(treat_to_sample[x]))
print('Data Distribution:')
print([(x, len(treat_to_sample[x])) for x in treatment_sorted_keys])
print('total number of treatment:', len(treatment_sorted_keys))
for treatment in treatment_sorted_keys[:2]:
print('Analysing treatment:', treatment)
print('total number of samples:', len(treat_to_sample[treatment]))
all_samples = treat_to_sample[treatment]
# LDA analysis
text_collection = [x['text'] for x in all_samples]
LDA_result = LDA(text_collection)
print('Topic Analysis:\n', LDA_result)
# Label analysis
label_collection = [1 if x['sentiment_label'] == 'POSITIVE' else 0 for x in all_samples]
print('sentiment bias:')
print('positive:', sum(label_collection))
print('negative:', len(label_collection) - sum(label_collection))
print('positive rate:', sum(label_collection)/len(label_collection))
# word cloud analysis
for number_of_topics, topics in LDA_result:
topic = '0.027*"breast" + 0.024*"cancer" + 0.016*"chemo" + 0.010*"treatment" + 0.010*"year" + 0.009*"get" + 0.009*"feel" + 0.009*"surgeri" + 0.008*"mom" + 0.008*"worri" + 0.007*"radiotherapi" + 0.006*"know" + 0.006*"good" + 0.006*"mani" + 0.006*"got" + 0.005*"like" + 0.005*"hope" + 0.005*"menopaus" + 0.004*"remov" + 0.004*"professor"'
wordcloud = WordCloud(background_color='white').generate(topic)
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
print('\n\n') | [
"zys_819@126.com"
] | zys_819@126.com |
a68196f031bbeb0ba2c2698e025995cba76ce678 | 44a724fbac833f10c73a70f140d6c6692d4c758e | /website/registration/forms.py | da51b193f26f02d67afbf28c947ebf07d916cf8e | [] | no_license | Nussy/owf2014 | 856598b414a58ef5065481dad66841fb9fb01f7d | 09224a3ab82d5ceabe286678bae77967be42537c | refs/heads/master | 2020-12-24T11:33:49.230217 | 2014-07-08T12:15:04 | 2014-07-08T12:15:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,126 | py | from flask.ext.wtf import Form, BooleanField, TextField, TextAreaField, required, email
from flask.ext.wtf.html5 import EmailField
from flask.ext.babel import lazy_gettext as _l
from website.registration.models import Track
__all__ = ['RegistrationForm']
def make_mixin_class():
class DynamicMixin(object):
pass
for track in Track.query.all():
label = "%s: %s" % (track.theme, track.title)
name = "track_%d" % track.id
field = BooleanField(label=label)
setattr(DynamicMixin, name, field)
return DynamicMixin
def make_registration_form_class():
mixin_class = make_mixin_class()
class RegistrationForm(mixin_class, Form):
email = EmailField(label=_l(u"Your email address"),
validators=[required(), email()])
coming_on_oct_3 = BooleanField(label=_l(u"Will you come on Oct. 3th? (Thursday)"))
coming_on_oct_4 = BooleanField(label=_l(u"Will you come on Oct. 4th? (Friday)"))
coming_on_oct_5 = BooleanField(label=_l(u"Will you come on Oct. 5th? (Saturday)"))
return RegistrationForm
def make_confirmation_form_class():
mixin_class = make_mixin_class()
class ConfirmationForm(mixin_class, Form):
email = EmailField(label=_l(u"Your email address"),
validators=[required(), email()])
coming_on_oct_3 = BooleanField(label=_l(u"Will you come on Oct. 3th? (Thursday)"))
coming_on_oct_4 = BooleanField(label=_l(u"Will you come on Oct. 4th? (Friday)"))
coming_on_oct_5 = BooleanField(label=_l(u"Will you come on Oct. 5th? (Saturday)"))
first_name = TextField(label=_l("First name"))
last_name = TextField(label=_l("Last name"))
organization = TextField(label=_l("Organization"))
url = TextField(label=_l("URL"))
url = TextAreaField(label=_l("Biography"))
# twitter_handle = Column(UnicodeText(100), default="", nullable=False)
# github_handle = Column(UnicodeText(200), default="", nullable=False)
# sourceforge_handle = Column(UnicodeText(200), default="", nullable=False)
# linkedin_url = Column(UnicodeText(200), default="", nullable=False)
return ConfirmationForm
| [
"sf@fermigier.com"
] | sf@fermigier.com |
c9785aeb965c051f5eef85117acfd4edc9ef9bdf | 33c65f7ec4e6bcf85bb52975ddc720cd75dea92b | /AT_Week7And8/Homework/App4.py | b5978451c7c97c5e802f2eeebb5aba11f5a78b34 | [] | no_license | Yueleng/IIPP | 797567bde4f2ec304835707a50114563c584c027 | b966553260614d39293ef85c34e974f84fc91ed6 | refs/heads/master | 2021-05-10T15:38:31.913560 | 2018-04-20T00:57:26 | 2018-04-20T00:57:26 | 118,556,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,112 | py | """
Algorithmic Thinking, Part II
Application 4.
@author: Yueleng & Kexin
"""
#DESKTOP = True
from json import dumps
from json import loads
from random import choice
from random import shuffle
from operator import itemgetter
from string import ascii_lowercase
import matplotlib.pyplot as plt
import numpy as np
from project4 import build_scoring_matrix
from project4 import compute_alignment_matrix
from project4 import compute_global_alignment
from project4 import compute_local_alignment
import math
import random
from urllib.request import urlopen
# if DESKTOP:
# import matplotlib.pyplot as plt
# import alg_project4_solution as student
# else:
# import simpleplot
# import userXX_XXXXXXX as student
# URLs for data files
PAM50_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_PAM50.txt"
HUMAN_EYELESS_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_HumanEyelessProtein.txt"
FRUITFLY_EYELESS_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_FruitflyEyelessProtein.txt"
CONSENSUS_PAX_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_ConsensusPAXDomain.txt"
WORD_LIST_URL = "http://storage.googleapis.com/codeskulptor-assets/assets_scrabble_words3.txt"
###############################################
# provided code
def read_scoring_matrix(filename):
    """
    Load a scoring matrix from the resource at *filename* (a URL).

    The first line of the resource lists the column (Y) characters; each
    following line starts with a row (X) character followed by the integer
    score for every column.

    Returns a dict of dicts mapping matrix[x][y] -> int score.
    (urlopen returns bytes in Python 3, so every line is decoded to ASCII.)
    """
    handle = urlopen(filename)
    header = handle.readline().decode('ascii')
    column_chars = header.split()
    matrix = {}
    for raw_line in handle.readlines():
        fields = raw_line.decode('ascii').split()
        row_char = fields.pop(0)
        matrix[row_char] = dict(zip(column_chars, map(int, fields)))
    return matrix
def read_protein(filename):
    """
    Fetch a protein sequence from the resource at *filename* (a URL).

    Returns the sequence as a str with trailing whitespace stripped.
    """
    raw = urlopen(filename).read()
    return raw.rstrip().decode('ascii')
def read_words(filename):
    """
    Fetch a newline-separated word list from the resource at *filename*.

    Prints the number of entries loaded and returns them as a list of str.
    The payload is bytes, so it is split on b'\\n' and decoded entry by entry.
    """
    raw = urlopen(filename).read()
    word_list = [entry.decode('ascii') for entry in raw.split(b'\n')]
    print("Loaded a dictionary with", len(word_list), "words")
    return word_list
def agreement(xs, ys, scoring, alignmnet):
    """
    Globally align *xs* against *ys* (using *scoring* and the precomputed
    alignment matrix *alignmnet*) and return the percentage of positions
    where the aligned version of *xs* matches *ys*.
    """
    _, aligned_x, _ = compute_global_alignment(xs, ys, scoring, alignmnet)
    matches = sum(1 for a, b in zip(aligned_x, ys) if a == b)
    return 100. * matches / len(aligned_x)
def rprot(n, alpha):
    """
    Return a random string of length *n* whose characters are drawn
    uniformly, with replacement, from the sequence *alpha*.
    """
    picks = []
    for _ in range(n):
        picks.append(choice(alpha))
    return ''.join(picks)
def compare(n, nh, nf, alpha, cons, scoring, align):
    '''
    Estimate how well *random* sequences agree with a consensus sequence.

    n: number of random trials
    nh: length of the random "human-like" sequence drawn from alpha
    nf: length of the random "fly-like" sequence drawn from alpha
    alpha: alphabet to draw characters from, e.g. 'ACBEDGFIHKMLNQPSRTWVYXZ'
    cons: consensus string to compare against
    scoring: scoring matrix over alpha
    align: alignment matrix forwarded unchanged to compute_local_alignment
           and agreement.
           NOTE(review): this matrix was necessarily built for some *other*
           pair of sequences, yet it is reused for every freshly generated
           random pair -- confirm compute_local_alignment tolerates/ignores
           a stale matrix, otherwise it must be recomputed per trial.

    Prints the mean agreement percentage over all trials for both sequences.
    '''
    ag1, ag2 = [], []
    for i in range(n):
        # Fresh random sequences each trial.
        x, y = rprot(nh, alpha), rprot(nf, alpha)
        _, xs, ys = compute_local_alignment(x, y, scoring, align)
        # Strip gap characters before scoring against the consensus.
        xs_nodash = ''.join([x for x in xs if x!= '-'])
        ys_nodash = ''.join([y for y in ys if y!= '-'])
        ag1.append(agreement(xs_nodash, cons, scoring, align))
        ag2.append(agreement(ys_nodash, cons, scoring, align))
    hc_agree = sum(ag1) / float(n)
    fc_agree = sum(ag2) / float(n)
    print('Random Human vs Consensus agree = %s%%' %hc_agree)
    print('Random Fly vs Consensus agree = %s%%' % fc_agree)
def question1And2():
    """
    Application questions 1 and 2.

    Q1: locally align the human and fruit-fly eyeless proteins under the
    PAM50 scoring matrix and report the score and aligned sequences.
    Q2: strip gaps from each local alignment, globally align each against
    the consensus PAX domain, and report the percentage agreement.
    """
    human = read_protein(HUMAN_EYELESS_URL)
    fly = read_protein(FRUITFLY_EYELESS_URL)
    print(len(human), len(fly))
    scoring = read_scoring_matrix(PAM50_URL)
    local_align_matrix = compute_alignment_matrix(human, fly, scoring, False)
    score, xs, ys = compute_local_alignment(human, fly, scoring, local_align_matrix)
    print('Question 1')
    print('The score of the local alignment is: ', score)
    print('The sequence for the HumanEyelessProtein is: ', xs)
    print('The sequence for the FruitflyEyelessProtein is: ', ys)
    print()
    print('Question2')
    consensus = read_protein(CONSENSUS_PAX_URL)
    # Step1: Delete any dashes '-' present in the sequence.
    human_nodash = ''.join([x for x in xs if x!= '-'])
    fly_nodash = ''.join([y for y in ys if y!= '-'])
    # Step2: Compute the global alignment of this dash-less sequence with the ConsensusPAXDomain sequence.
    hc_global_align_matrix = compute_alignment_matrix(human_nodash, consensus, scoring, True)
    fc_global_align_matrix = compute_alignment_matrix(fly_nodash, consensus, scoring, True)
    # Step3: Compare corresponding elements of these two globally-aligned sequences (local vs consensus) and
    # compute the percentage of elements in these two sequences that agree
    # NOTE: func agreement contains Stpe2 and Step3.
    hc_agree = agreement(human_nodash, consensus, scoring, hc_global_align_matrix)
    fc_agree = agreement(fly_nodash, consensus, scoring, fc_global_align_matrix)
    print('Human vs Consensus agree = %s%%' % hc_agree)
    print('Fly vs Consensus agree = %s%%' % fc_agree)
    # alpha = 'ACBEDGFIHKMLNQPSRTWVYXZ'
    # compare(1000, len(human), len(fly), consensus, scoring, local_align)
    # pirnt()
def generate_null_distribution2(seq_x, seq_y, scoring_matrix, num_trials):
    """
    Variant of generate_null_distribution that caches its result on disk.

    If 'distr.json' exists, its previously computed (distr, raw) pair is
    returned immediately, which avoids re-running the expensive trial loop
    on repeated invocations; otherwise the distribution is computed and
    written back to 'distr.json' before returning.
    """
    distr = {} # store the whole distribution {score1: count1, score2: count2, ..., scoren: countn}
    raw = [] # store all the scores: [score1, score2, ..., scoren], could be duplicate
    # Fast path: reuse a cached distribution from a previous run, if any.
    try:
        with open('distr.json') as f:
            pair = loads(f.read())
            return pair['distr'], pair['raw']
    except Exception as e:
        # No cache (or unreadable cache): fall through and recompute.
        print('can\'t open file', str(e))
    for _ in range(num_trials):
        # Shuffle seq_y to obtain a random permutation with the same
        # character composition.
        temp = list(seq_y)
        shuffle(temp)
        rand_y = ''.join(temp)
        align_matrix = compute_alignment_matrix(seq_x, rand_y, scoring_matrix, False)
        score, _, _ = compute_local_alignment(seq_x, rand_y, scoring_matrix, align_matrix)
        if score not in distr:
            distr[score] = 0
        distr[score] += 1
        raw.append(score)
    # Persist the freshly computed distribution for future calls.
    with open('distr.json', 'w') as f:
        f.write(dumps({'distr': distr, 'raw': raw}))
    return distr, raw
def generate_null_distribution(seq_x, seq_y, scoring_matrix, num_trials):
    """
    Build a null distribution of local-alignment scores.

    For each of *num_trials* iterations: shuffle seq_y into a random
    permutation, locally align it against seq_x, and record the score.

    Returns (distr, raw) where distr maps score -> occurrence count and
    raw is the flat list of all scores in trial order (may repeat).
    """
    distr = {}
    raw = []
    for _ in range(num_trials):
        shuffled = list(seq_y)
        shuffle(shuffled)
        permuted_y = ''.join(shuffled)
        matrix = compute_alignment_matrix(seq_x, permuted_y, scoring_matrix, False)
        score = compute_local_alignment(seq_x, permuted_y, scoring_matrix, matrix)[0]
        distr[score] = distr.get(score, 0) + 1
        raw.append(score)
    return distr, raw
def norm(d):
    """Return a copy of dict *d* with values rescaled to sum to 1.0."""
    total = float(sum(d.values()))
    normalized = {}
    for key, value in d.items():
        normalized[key] = value / total
    return normalized
def str_keys(d):
    """Return a copy of *d* whose keys are converted from str to int."""
    converted = {}
    for key, value in d.items():
        converted[int(key)] = value
    return converted
def question4And5(filename):
    """
    Application questions 4 and 5.

    Builds the null distribution of local-alignment scores between the
    human and fruit-fly eyeless proteins (1000 shuffled trials), plots the
    normalized distribution as a bar chart saved to *filename*, and prints
    the mean, standard deviation, and z-score of the observed score (875,
    from Question 1) against that null distribution.
    """
    human = read_protein(HUMAN_EYELESS_URL)
    fly = read_protein(FRUITFLY_EYELESS_URL)
    scoring = read_scoring_matrix(PAM50_URL)
    distr, raw = generate_null_distribution(human, fly, scoring, 1000)
    from pprint import pprint as pp
    # JSON round-trips turn int keys into strings; convert them back.
    distr = str_keys(distr)
    pp(distr)
    # Normalize counts into fractions of total trials.
    distr = norm(distr)
    pairs = list(distr.items())  # [(score, fraction), ...]
    pairs = sorted(pairs, key = itemgetter(0))  # ascending by score
    print(pairs)
    index = np.arange(len(pairs))
    # plt.bar needs a sequence, and in Python 3 map() returns a generator,
    # hence the explicit list() conversions.
    plt.bar( index, list(map(itemgetter(1), pairs)) )
    plt.xticks(index , list(map(itemgetter(0), pairs)), fontsize = 8)
    plt.xlabel('Score')
    plt.ylabel('Fraction of total trials')
    plt.title('Distribution of scores')
    # BUG FIX: the original wrote `plt.tight_layout` without parentheses --
    # a no-op attribute access that never applied the layout adjustment.
    plt.tight_layout()
    plt.savefig(filename)
    s_score = 875  # observed local-alignment score from Question 1
    n = 1000
    mean = sum(raw) / n
    std = np.sqrt(sum((x - mean) ** 2 for x in raw) / n)
    z_score = (s_score - mean) / std
    print('mean = %f' % mean)
    print('std = %f' % std)
    print('z_score = %f' % z_score)
def edit_dist(xs, ys):
    """
    Edit distance between lowercase strings *xs* and *ys*.

    Computed via global alignment over the lowercase alphabet with scores
    (diag=2, off-diag=1, dash=0), using the identity
    distance = len(xs) + len(ys) - alignment_score.
    """
    scoring = build_scoring_matrix(ascii_lowercase, 2, 1, 0)
    align = compute_alignment_matrix(xs, ys, scoring, True)  # True -> global
    score = compute_global_alignment(xs, ys, scoring, align)[0]
    return len(xs) + len(ys) - score
def check_spelling(checked_word, dist, word_list):
    '''
    Return the set of words in *word_list* whose edit distance to
    *checked_word* is at most *dist*.
    '''
    matches = set()
    for candidate in word_list:
        if edit_dist(checked_word, candidate) <= dist:
            matches.add(candidate)
    return matches
def question8():
    """
    Application question 8: edit-distance spell checking.

    Loads the word list, then prints every dictionary word within edit
    distance 1 of 'humble' and within edit distance 2 of 'firefly'.
    """
    words = read_words(WORD_LIST_URL)
    print('len = ', len(words))
    print('type = ', type(words[0]))
    near_humble = check_spelling('humble', 1, words)
    near_firefly = check_spelling('firefly', 2, words)
    print(len(near_humble), near_humble)
    print(len(near_firefly),near_firefly)
def main():
    """Entry point: run one application question at a time (uncomment as needed)."""
    # Uncomment this one by one.
    # question1And2()
    # question4And5('q4.png') # Very Slow. Try 20 iterations first to debug and then change to 1000 iterations
    question8()
if __name__ == '__main__':
main()
| [
"wangyueleng@gmail.com"
] | wangyueleng@gmail.com |
d0cca4222c8b6367b193a93bbb16784b03bdbf6d | 0bd3e809967ce2e02353a1c5559725bf3c9b6a7e | /update_bind_conf_gw.py | dc4273fb723c2b745ee9c0d667b3be3768301f8e | [] | no_license | ka-ba/backend-scripts | 79ea992852d4afaf24c1cd60146be1e3df06aa20 | 87ddce68d224a13f7062d8ec3825a46fb98fa343 | refs/heads/master | 2021-01-20T03:01:53.095776 | 2015-04-21T13:13:27 | 2015-04-21T13:13:27 | 27,978,828 | 0 | 0 | null | 2015-04-21T12:49:20 | 2014-12-14T00:50:09 | Python | UTF-8 | Python | false | false | 1,114 | py | #!/usr/bin/env python3
def update_bind_conf():
    """
    Regenerate the inter-city VPN DNS zone config for bind and reload the
    daemon if (and only if) the generated config differs from the one on disk.

    Side effects: pulls the icdns 'scripts' and 'meta' git repos, runs the
    external ./mkdns generator, may overwrite the configured bind conf file,
    and may invoke 'sudo rndc reload'.
    """
    from photon.util.files import read_file
    from common import pinit
    photon, settings = pinit('update_bind_conf', verbose=True)
    # Bring both icdns repositories up to date before generating.
    for repo in ['scripts', 'meta']:
        photon.git_handler(
            settings['icvpn']['icdns'][repo]['local'],
            remote_url=settings['icvpn']['icdns'][repo]['remote']
        )._pull()
    bind_conf = photon.template_handler('${config_content}')
    # Run the mkdns generator (excluding mainz/wiesbaden) and capture stdout.
    config_content=photon.m(
        'genarating bind conf',
        cmdd=dict(
            cmd='./mkdns -f bind -s %s -x mainz -x wiesbaden' %(settings['icvpn']['icdns']['meta']['local']),
            cwd=settings['icvpn']['icdns']['scripts']['local']\
        )
    ).get('out')
    bind_conf.sub = dict(config_content=config_content)
    conf = settings['icvpn']['icdns']['conf']
    # Only rewrite the config and reload bind when the content changed.
    if bind_conf.sub != read_file(conf):
        bind_conf.write(conf, append=False)
        photon.m(
            'reloading bind daemon',
            cmdd=dict(
                cmd='sudo rndc reload'
            )
        )
if __name__ == '__main__':
update_bind_conf()
| [
"frieder.griesshammer@der-beweis.de"
] | frieder.griesshammer@der-beweis.de |
c8681f5003d1c009cf84cdd0a7565d93d635ff44 | f155da9d88bfcd81bc107d0761e81777cd435364 | /joint_patch.py | f11e8b9c272a21864531c5e9f4d9f6fb052ccaff | [] | no_license | llk2why/RetinaSample | 1caa41bd06e06a35a5cbfa176c95a36d7dfc2489 | e968f941d9373a75b4a9069ec10c9f3cd3910998 | refs/heads/master | 2020-05-28T02:11:40.468402 | 2019-10-08T06:18:36 | 2019-10-08T06:18:36 | 188,849,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py | import os
import cv2
import yaml
import numpy as np
from utilities import LOG_INFO
# LOG_INFO('====> Begin reading yaml')
# with open('./yamls/chop.yaml') as f:
# PATCH_INFO = yaml.load(f)
# Full names of the source images this script knows how to re-assemble.
names = [
    'SDIM1895',
    'SDIM1898',
    'SDIM1899',
    'SDIM1901',
    'SDIM1905',
    'SDIM1906',
    'SDIM1912',
    'SDIM1916',
    'SDIM1918',
    'SDIM1920',
    'SDIM1925',
    'SDIM1926',
    'SDIM1927',
    'SDIM1928',
    'SDIM1929',
    'SDIM1930',
    'SDIM1931',
    'SDIM1933',
]

# Map short IDs like 's1895' to their full 'SDIM1895' name.  Derived from
# `names` so the two collections can never drift apart (the original kept a
# second hand-maintained literal dict with identical content).
name_pair = {'s' + full_name[4:]: full_name for full_name in names}
def get_psnr(x, y):
    """
    Peak signal-to-noise ratio (dB) between two equal-shape images,
    assuming an 8-bit peak value of 255.

    Fixes vs. original:
    - np.float was removed in NumPy 1.24; use an explicit float64 dtype.
    - identical inputs (MSE == 0) now return float('inf') instead of
      triggering a divide-by-zero in log10.
    """
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    mse = np.mean(np.square(x - y))
    if mse == 0:
        return float('inf')
    return 20 * np.log10(255) - 10 * np.log10(mse)
def joint_patch(patch_dir, name, tag):
    """
    Re-assemble image *name* from its patch files in *patch_dir* and write
    the result to 'joint(<tag>)/<name>.tiff'.

    Overlapping patches are averaged: each patch is accumulated into a sum
    buffer while a parallel counter buffer tracks how many patches covered
    each pixel.

    NOTE(review): depends on the module-level PATCH_INFO mapping, whose
    assignment is commented out at the top of this file -- calling this as-is
    raises NameError.  Confirm the yaml loading block should be restored.
    NOTE(review): np.float was removed in NumPy 1.24; this only runs on older
    NumPy versions.
    """
    files = [x for x in os.listdir(patch_dir) if name in x]
    fpaths = [os.path.join(patch_dir,file) for file in files]
    patch_info = PATCH_INFO[name]
    # Accumulator for pixel sums and a coverage counter for averaging.
    joint = np.zeros(patch_info['size']).astype(np.float)
    cnt = np.zeros(patch_info['size']).astype(np.float)
    for file,fpath in zip(files,fpaths):
        patch_name = os.path.splitext(file)[0]
        img = cv2.imread(fpath)
        # (row, col) corners of where this patch sits in the full image.
        (r1,c1),(r2,c2) = patch_info[patch_name]
        joint[r1:r2,c1:c2] += img
        cnt[r1:r2,c1:c2] += 1
    # Average overlapping regions, then clamp into the valid 8-bit range.
    joint = joint/cnt
    joint[joint>255]=255
    joint[joint<0]=0
    joint_dir = 'joint({})'.format(tag)
    if not os.path.exists(joint_dir):
        os.makedirs(joint_dir)
    cv2.imwrite('{}/{}.tiff'.format(joint_dir,name),joint.astype(np.uint8))
def combine(patch_dir, tag):
    """Re-assemble every image listed in `names` from its patches in *patch_dir*."""
    LOG_INFO('====> Begin joint patches')
    total = len(names)
    for index, image_name in enumerate(names):
        LOG_INFO('====> {}/{}'.format(index + 1, total))
        joint_patch(patch_dir, image_name, tag)
def compare_psnr(img_dir, suffix, tag):
    """
    Compute the PSNR of every image in *img_dir* (filtered by *suffix*)
    against its full-size reference TIF, and write per-file results plus
    the average to 'psnr_<tag>.txt'.

    NOTE(review): the reference directory is a hard-coded Windows path;
    this only runs on the original author's machine.
    NOTE(review): the `'s' in file` test matches any filename containing a
    lowercase 's' anywhere -- confirm it is meant to detect the short
    's1895_*' naming scheme specifically.
    """
    files = [x for x in os.listdir(img_dir) if suffix in x]
    psnrs = []
    with open('psnr_{}.txt'.format(tag),'w') as f:
        for file in files:
            fpath = os.path.join(img_dir,file)
            # Resolve the reference image name: short IDs ('s1895_...') go
            # through name_pair; otherwise the stem before '.' is used.
            if 's' in file:
                std_name = name_pair[os.path.splitext(file)[0].split('_')[0]]
            else:
                std_name = file.split('.')[0]
            std_fpath = os.path.join('C:\data\dataset\Sandwich 0612 fullsize',std_name+'.TIF')
            print(fpath)
            print(std_fpath)
            im_y = cv2.imread(fpath)
            im_x = cv2.imread(std_fpath)
            psnr = get_psnr(im_x,im_y)
            psnrs.append(psnr)
            f.write('{}:{}\n'.format(file,psnr))
        f.write('avg:{}\n'.format(np.mean(psnrs)))
def main():
    """Compare PSNR of the reconstructed image sets against the references."""
    # RGGB_dir = r'C:\data\dataset\result best'
    # combine(RGGB_dir,'RGGB')
    # RYYB_dir = r'C:\data\dataset\Sandwich 0612 fullsize Mosaic Reconstruct RYYB'
    # combine(RYYB_dir,'RYYB')
    compare_psnr(r'C:\Users\linc\Downloads\L01\output','png','RGGB(CS)')
    compare_psnr(r'joint(RGGB)','tiff','RGGB')
    compare_psnr(r'joint(RYYB)','tiff','RYYB')
if __name__ == '__main__':
main()
| [
"lincolnleellk@163.com"
] | lincolnleellk@163.com |
4da0991f7b39852b65f3abedb8683be21793d5ed | b10aa32006817ddd437bb24294755d1c7dc1d26f | /homeless/plotly_homeless_county.py | f0c0582feb52b07199a3774475fa34ca6c8ee720 | [] | no_license | eoincarley/python-blog | 304d5a685617ba45bdc83f8b004b3eff19d22b86 | c6d5c7b86e7559ab3ff84a04f095e2460d810ad3 | refs/heads/master | 2021-04-26T22:25:43.912151 | 2018-03-21T14:32:02 | 2018-03-21T14:32:02 | 124,090,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import time
import matplotlib.colors as colors_fun
import matplotlib.cm as cm
import plotly.plotly as py
import plotly.graph_objs as go
import pdb
# Load the (county, homeless-count) rows.  BUG FIX: the original called
# open() without ever closing the handle; the with-block guarantees the
# file is closed even if parsing fails.
with open('homeless_by_county.csv', 'rt') as file:
    reader = csv.reader(file)
    data = [x for x in reader]

county = [x[0] for x in data]
number = [x[1] for x in data]

# One colour per county, sampled along the GnBu colormap and converted to
# hex strings, the format plotly's marker spec expects.
colors = cm.GnBu(np.linspace(1, 0, len(number)))
colors = [colors_fun.rgb2hex(color) for color in colors]

trace = go.Pie(labels=county, values=number,
               hoverinfo='label+percent', textinfo='value',
               textfont=dict(size=20),
               marker=dict(colors=colors,
                           line=dict(color='#000000', width=0.5)))

# Publish the chart to the plot.ly account configured on this machine.
py.iplot([trace], filename='Homeless-by-county')
"eoincarley@gmail.com"
] | eoincarley@gmail.com |
3a3b8cdf81dc0afc8794d2cd28a19005aa3943c7 | 96eec7e71727215ab3a8bdb15aed5f1b55749486 | /Lesson_3.2.py | 1bbf77237936b45e4265a7b31c44867afb403b29 | [] | no_license | Krambelbot/Lesson_3.2_20170928 | 6a179fa81006915e62a7eb1ab4a223f0af840338 | 81737363d3b011259ce29829a24654901f33b9e7 | refs/heads/master | 2021-05-15T02:52:38.572954 | 2017-10-05T20:46:34 | 2017-10-05T20:46:34 | 105,933,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from collections import OrderedDict
# another comment
od = OrderedDict({
('pen', 3),
('pineapple', 2),
('apple', 1)
})
print(od.values())
print(od.keys())
# version 0.2
d = {
'pen': 3,
'pineapple': 2,
'apple': 1,
'fruit': 'peach'
}
print(d.values())
print(d.keys()) | [
"Mindwalk85@gmail.com"
] | Mindwalk85@gmail.com |
c401f8ee3aa47ccf7823765dca0109b97fd84f9f | 88dbd703e0d41817890c98f87602f783b037884c | /app.py | 2ec7ce424ec82cef74b11374cbfec78e161d4973 | [] | no_license | Walid-Khall/CastingAgency | e82262c7578df4c51fd304482fae0f438d693ded | 568484317f48272a0a048e8dfe4d2b8a73c85a26 | refs/heads/master | 2022-12-03T05:38:25.989852 | 2020-08-17T23:14:20 | 2020-08-17T23:14:20 | 288,305,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,257 | py | import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
from models import setup_db, Movie, Actor
from auth import AuthError, requires_auth
# app = Flask(__name__)
# setup_db(app)
# CORS(app)
def create_app(test_config=None):
    """
    Application factory for the Casting Agency API.

    Registers CRUD endpoints for movies and actors (all guarded by
    requires_auth permissions) plus JSON error handlers, and returns the
    configured Flask app.
    """
    # create and configure the app
    app = Flask(__name__)
    setup_db(app)
    CORS(app)
    # Get Movies and Actors endpoints:-----------------------------------
    @app.route('/movies')
    @requires_auth('get:movies')
    def get_movies(jwt):
        # List all movies ordered by id.
        try:
            selection = Movie.query.order_by(Movie.id).all()
            movies = [movie.format() for movie in selection]
            return jsonify({
                "success": True,
                "movies": movies
            })
        except BaseException:
            abort(404)
    @app.route('/actors')
    @requires_auth('get:actors')
    def get_actors(jwt):
        # List all actors ordered by id.
        try:
            selection = Actor.query.order_by(Actor.id).all()
            actors = [actor.format() for actor in selection]
            return jsonify({
                "success": True,
                "actors": actors
            })
        except BaseException:
            abort(404)
    # DELETE Movies and Actors endpoints
    @app.route('/movies/<id>', methods=['DELETE'])
    @requires_auth('delete:movies')
    def delete_movies(jwt, id):
        # Delete one movie by id; a missing id yields None and the
        # resulting AttributeError is reported as 404.
        try:
            movie = Movie.query.filter(Movie.id == id).one_or_none()
            movie.delete()
            return jsonify({
                "success": True,
                "delete": id
            })
        except BaseException:
            abort(404)
    @app.route('/actors/<id>', methods=['DELETE'])
    @requires_auth('delete:actors')
    def delete_actors(jwt, id):
        # Delete one actor by id (same 404-on-missing behaviour as movies).
        try:
            actor = Actor.query.filter(Actor.id == id).one_or_none()
            actor.delete()
            return jsonify({
                "success": True,
                "delete": id
            })
        except BaseException:
            abort(404)
    # POST Movies and Actors endpoints----------------------------------------
    @app.route('/movies', methods=['POST'])
    @requires_auth('post:movies')
    def post_movies(jwt):
        # Create a movie from the JSON body {title, release}.
        try:
            body = request.get_json()
            new_title = body.get('title')
            new_release = body.get('release')
            movies = Movie(title=new_title, release=new_release)
            movies.insert()
            movies = movies.format()
            return jsonify({
                "success": True,
                "movie": movies
            })
        except BaseException:
            abort(400)
    @app.route('/actors', methods=['POST'])
    @requires_auth('post:actors')
    def post_actors(jwt):
        # Create an actor from the JSON body {name, age, gender}.
        try:
            body = request.get_json()
            # NOTE(review): the trailing commas make new_name and new_age
            # one-element *tuples*, not scalars -- confirm this is not
            # corrupting the stored name/age values.
            new_name = body.get('name'),
            new_age = body.get('age'),
            new_gender = body.get('gender')
            actors = Actor(name=new_name, age=new_age, gender=new_gender)
            actors.insert()
            actors = actors.format()
            return jsonify({
                "success": True,
                "actors": actors
            })
        except BaseException:
            abort(400)
    # PATCH Movies and Actors endpoints---------------------------------------
    @app.route('/movies/<id>', methods=['PATCH'])
    @requires_auth('patch:movies')
    def update_movies(jwt, id):
        # Update title/release of the movie(s) matching id.
        body = request.get_json()
        new_title = body.get('title')
        new_release = body.get('release')
        try:
            selection = Movie.query.filter(Movie.id == id).all()
            for movies in selection:
                movies.title = new_title
                movies.release = new_release
                movies.update()
                movies = movies.format()
            return jsonify({
                "success": True,
                "movie": movies
            })
        except BaseException:
            abort(400)
    @app.route('/actors/<id>', methods=['PATCH'])
    # NOTE(review): this actor endpoint checks the 'patch:movies'
    # permission -- confirm it should be 'patch:actors'.
    @requires_auth('patch:movies')
    def update_actors(jwt, id):
        # Update name/age/gender of the actor(s) matching id.
        body = request.get_json()
        # NOTE(review): trailing commas again make new_name/new_age tuples.
        new_name = body.get('name'),
        new_age = body.get('age'),
        new_gender = body.get('gender')
        try:
            selection = Actor.query.filter(Actor.id == id).all()
            for actors in selection:
                actors.name = new_name
                actors.age = new_age
                actors.gender = new_gender
                actors.update()
                actors = actors.format()
            return jsonify({
                "success": True,
                "actor": actors
            })
        except BaseException:
            abort(404)
    # Error Handling
    '''
    Example error handling for unprocessable entity
    '''
    @app.errorhandler(422)
    def unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "unprocessable"
        }), 422
    # -------------------------------- done -----------------------------
    @app.errorhandler(404)
    def resource_not_found(error):
        return jsonify({
            "success": False,
            "error": 404,
            "message": "resource not found"
        }), 404
    @app.errorhandler(400)
    def bad_request(error):
        return jsonify({
            "success": False,
            "error": 400,
            "message": "bad request"
        }), 400
    @app.errorhandler(405)
    def not_allowd(error):
        return jsonify({
            "success": False,
            "error": 405,
            "message": "method not allowd"
        }), 405
    @app.errorhandler(500)
    def server_error(error):
        return jsonify({
            "success": False,
            "error": 500,
            "message": "internal server error"
        }), 500
    '''
    @TODO implement error handler for AuthError
    error handler should conform to general task above
    '''
    @app.errorhandler(AuthError)
    def auth_error(error):
        return jsonify({
            'code': 'invalid_header',
            'description': 'Authorization malformed.'
        }), 401
    return app
app = create_app()
if __name__ == '__main__':
app.run() | [
"khallefwalid@gmail.com"
] | khallefwalid@gmail.com |
06deec1459a143840dae93676f208d7ae5c1c75b | c3bd83c3de598e94a598f4164509db41ae94424b | /bampy/writer.py | 5103caac1621ee601d9a171cb58c19b85c92eb6a | [
"MIT"
] | permissive | innovate-invent/bampy | 680f988e0a3bc14eba44b63e0416fb4aedc23b2f | b9ea46ddc40d7954798741bddd3cf9af1df25eb4 | refs/heads/master | 2018-12-18T22:37:11.855747 | 2018-09-15T00:34:34 | 2018-09-15T00:34:34 | 116,548,666 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,235 | py | import io
from . import bam, bgzf, sam
from .bgzf import zlib
class Writer:
    """
    Base class and factory for alignment-record writers.

    The static factory methods pick a concrete stream- or buffer-backed
    writer depending on whether *output* is a file-like stream object.
    """
    def __init__(self, output):
        # The destination stream or random-access buffer.
        self._output = output

    @staticmethod
    def sam(output, offset=0, sam_header=b'', references=()) -> 'Writer':
        """
        Determines is the output is randomly accessible and returns an instance of SAMStreamWriter or SAMBufferWriter.
        :param output: The buffer or stream to output to.
        :param offset: If a buffer, the offset into the buffer to start at.
        :param sam_header: Bytes like object containing the SAM formatted header to write to the output.
        :param references: List of Reference objects to use in record references
        :return: Instance of SAMStreamWriter or SAMBufferWriter
        """
        sam_header = sam.pack_header(sam_header, references)
        if isinstance(output, (io.RawIOBase, io.BufferedIOBase)):
            output.write(sam_header)
            return SAMStreamWriter(output)
        else:
            sam_len = len(sam_header)
            output[offset: offset + sam_len] = sam_header
            return SAMBufferWriter(output, offset + sam_len)

    @staticmethod
    def bam(output, offset=0, sam_header=b'', references=()) -> 'Writer':
        """
        Write a BAM header to *output* and return the appropriate writer.

        :param output: The buffer or stream to output to.
        :param offset: If a buffer, the offset into the buffer to start at.
        :param sam_header: Bytes-like SAM formatted header to embed in the BAM header.
        :param references: List of Reference objects to use in record references.
        :return: Instance of BAMStreamWriter or BAMBufferWriter.
        """
        sam_header = sam.pack_header(sam_header, references)
        if isinstance(output, (io.RawIOBase, io.BufferedIOBase)):
            bam.header_to_stream(output, sam_header, references)
            return BAMStreamWriter(output)
        else:
            return BAMBufferWriter(output, bam.header_to_buffer(output, offset, sam_header, references))

    @staticmethod
    def bgzf(output, offset=0, sam_header=b'', references=(), level=zlib.DEFAULT_COMPRESSION_LEVEL):
        """
        Create a BGZF-compressed writer, emitting the BAM header as the
        first (immediately finished) compressed block.

        :param output: The buffer or stream to output to.
        :param offset: If a buffer, the offset into the buffer to start at.
        :param sam_header: Bytes-like SAM formatted header to embed.
        :param references: List of Reference objects to use in record references.
        :param level: zlib compression level for the BGZF blocks.
        :return: Instance of BGZFWriter.
        """
        writer = BGZFWriter(output, offset, level=level)
        writer._output(bam.pack_header(sam_header, references))
        writer._output.finish_block()
        return writer

    def __call__(self, *args, **kwargs):
        # Subclasses implement record emission via __call__.
        raise NotImplementedError()
class StreamWriter(Writer):
    """
    Base for writers that emit to a sequential stream.

    Adds no state beyond Writer; the redundant __init__ override that merely
    delegated to super() with the identical signature has been removed
    (behaviour is unchanged -- Writer.__init__ is inherited directly).
    """
class BufferWriter(Writer):
    """
    Base for writers that emit into a random-access buffer.

    Tracks the current write position in the ``_offset`` attribute, which
    subclasses advance as they serialise records.
    """
    def __init__(self, output, offset=0):
        super().__init__(output)
        # Current write position within the output buffer.
        self._offset = offset
class SAMStreamWriter(StreamWriter):
    """
    Writes SAM-formatted records to a sequential stream.

    CONSISTENCY FIX: now derives from StreamWriter (previously Writer) to
    mirror BAMStreamWriter; behaviour is unchanged since StreamWriter adds
    no state of its own.
    """
    def __call__(self, record):
        # Emit the record plus its terminating newline in a single call.
        self._output.writelines((bytes(record), b'\n'))
class SAMBufferWriter(BufferWriter):
    """Writes SAM-formatted records into a random-access buffer."""
    def __call__(self, record):
        """
        Serialise *record* as a newline-terminated SAM line at the current
        position and advance the position past it.
        """
        data = bytes(record) + b'\n'
        length = len(data)
        # BUG FIX: BufferWriter.__init__ stores the position as `_offset`;
        # the original read/advanced a nonexistent `self.offset` attribute,
        # raising AttributeError on first use.
        self._output[self._offset:self._offset + length] = data
        self._offset += length
class BAMStreamWriter(StreamWriter):
    """Writes BAM-formatted records to a sequential stream."""
    def __call__(self, record):
        # The record knows how to serialise itself in BAM binary layout.
        record.to_stream(self._output)
class BAMBufferWriter(BufferWriter):
    """Writes BAM-formatted records into a random-access buffer."""
    def __call__(self, record):
        """Serialise *record* at the current position and advance past it."""
        # BUG FIX: track the position via `_offset` (set by
        # BufferWriter.__init__); the original referenced a nonexistent
        # `self.offset`, raising AttributeError on first use.
        record.to_buffer(self._output, self._offset)
        self._offset += len(record)
class BGZFWriter(Writer):
    """
    Writes records through a BGZF (blocked-gzip) compressor.

    Wraps the destination in a bgzf.Writer; records are packed and fed to
    the compressor, starting a new block whenever the current block lacks
    room for a record that could otherwise fit in a single block.
    """
    def __init__(self, output, offset=0, level=zlib.DEFAULT_COMPRESSION_LEVEL):
        # Note: self._output is the bgzf.Writer wrapper, not the raw output.
        super().__init__(bgzf.Writer(output, offset, level=level))
    def __call__(self, record):
        data = record.pack()
        record_len = len(record)
        # If the record fits in one BGZF block but not the remainder of the
        # current block, close the block so the record is not split.
        if record_len < bgzf.MAX_CDATA_SIZE and self._output.block_remaining() < record_len:
            self._output.finish_block()
        for datum in data:
            self._output(datum)
    @property
    def offset(self):
        # While active, report the compressor's offset; after finalize()
        # has detached the compressor, report the stored final offset.
        if self._output:
            return self._output.offset
        else:
            return self._offset
    def finalize(self):
        """
        Flush the last block, append the BGZF end-of-file marker block, and
        detach the compressor.  Safe to call more than once (subsequent
        calls are no-ops because _output is cleared).
        """
        if self._output:
            self._output.finish_block()
            offset = self._output.offset
            output = self._output._output
            if isinstance(output, (io.RawIOBase, io.BufferedIOBase)):
                output.write(bgzf.EMPTY_BLOCK)
            else:
                output[offset:offset + bgzf.SIZEOF_EMPTY_BLOCK] = bgzf.EMPTY_BLOCK
            self._offset = offset + bgzf.SIZEOF_EMPTY_BLOCK
            self._output = None
    def __del__(self):
        # Best-effort finalize at garbage collection; explicit finalize()
        # is still preferred since __del__ timing is not guaranteed.
        self.finalize()
| [
"innovate.invent@gmail.com"
] | innovate.invent@gmail.com |
51fa016e1c1e8f8413a36b5d13b3ac5e585a1ade | aaddc9b334b4d265d61cd97464d9ff73f32d9bec | /12_DRF_API_ModalViewSet/DRF_API_ModalViewSet/wsgi.py | 59140f929e09da11239464ede2ab10ba8c216e53 | [] | no_license | DharmendraB/DRF-Django-RestFramework-API | f3549955e53d43f7dad2a78468ad0792ebfb70d5 | 4f12ab84ca5f69cf2bb8e392b5490247d5f00e0e | refs/heads/main | 2023-05-30T20:59:46.635078 | 2021-06-11T04:32:52 | 2021-06-11T04:32:52 | 375,905,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for DRF_API_ModalViewSet project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the
# WSGI callable that application servers will import.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRF_API_ModalViewSet.settings')
application = get_wsgi_application()
| [
"ghldharmendra@gmail.com"
] | ghldharmendra@gmail.com |
af6ad9f809ae76c6ed91c3dc648f8bfed2c86319 | 74ce452f54f51b407ee52032f55c561c1e50d3e7 | /convert_section_6_table.py | 96faddae4c5ae0f036e8cba02ca9993272faf02c | [] | no_license | artiya4u/thailand-budget-pdf2csv | 094b814dc4e8375ca0e3f2e549d89d54104d89a6 | 25b663b65d0e0a1eb80e14ee1520a33914fe4ab7 | refs/heads/main | 2023-05-17T23:47:19.574796 | 2021-06-02T11:38:26 | 2021-06-02T11:38:26 | 373,096,035 | 0 | 0 | null | 2021-06-02T08:47:14 | 2021-06-02T08:47:11 | null | UTF-8 | Python | false | false | 8,423 | py | import csv
import glob
import os
import re
# sudo apt install poppler-utils
year = '2565'
project_title_prefix = ('ผลผลิต', 'แผนงาน', 'โครงการ', 'ครงการ', 'แ นงาน')
org_prefix = ['กระทรวง', 'สานัก', 'องค์กรปกครอง', 'จังหวัดและก', 'รัฐวิสา', 'หน่ วยงาน', 'ส่วนราชการ', 'สภา']
org_prefix = [(' ' * 30) + x for x in org_prefix] # Add spaces to know organization name in center of the page
section_6_2_prefix = [
'6.2 จาแนกตามแผนงาน ผลผลิต/โครงการ และงบรายจ่าย',
'6.2 จําแนกตามแผนงาน ผลผลิต/โครงการ และงบรายจ่าย',
f'6. สรุปงบประมาณรายจ่ายประจาปี งบประมาณ พ.ศ. {year} จาแนกตามแผนงาน ผลผลิต/โครงการ และงบรายจ่าย',
'6.2 า'
]
specials = ['ุ', 'ู', 'ึ', 'ำ', 'ั', 'ี', '้', '็', '่', '๋']
def fix_align(txt):
    """
    Repair Thai combining marks displaced by pdftotext's layout mode.

    When a character from `specials` (tone marks / vowels) is immediately
    followed by a space, the mark is swapped in front of the preceding
    character and the following space is dropped.

    NOTE(review): the `idx > 1` guard means a displaced mark at index 1 is
    never repaired -- confirm whether `idx > 0` was intended.
    """
    input_txt = list(txt)
    result = list('')
    # When True, the next character (the stray space) is not copied out.
    skip_next = False
    for idx, val in enumerate(input_txt):
        if val in specials and idx > 1 and idx + 1 < len(input_txt) and input_txt[idx + 1] == ' ':
            # Swap the mark in front of the character it follows.
            temp = result.pop()
            result.append(val)
            result.append(temp)
            skip_next = True
        else:
            if not skip_next:
                result.append(val)
            skip_next = False
    return ''.join(result)
def thai_number_to_arabic(thai_number):
    """
    Convert Thai digits in *thai_number* to Arabic digits and strip
    thousands separators (',').

    Uses a single str.translate pass instead of eleven chained replace()
    calls -- one scan of the string, same result.
    """
    table = str.maketrans('๐๑๒๓๔๕๖๗๘๙', '0123456789', ',')
    return thai_number.translate(table)
def replace_dash(text):
    """Return '0' when *text* is the budget-table placeholder '-', else *text*."""
    return '0' if text == '-' else text
def convert_table_6(pdf_budget_file):
    """
    Extract section 6.2 budget tables from one Thai budget-book PDF.

    Runs `pdftotext -layout` on *pdf_budget_file*, walks the resulting text
    line by line with a small state machine (is_section_6 / is_row), and
    collects one dict per plan/project row.  If any rows were found they
    are written to budget-csv/<name>.csv.
    """
    print(f'Start convert file: {pdf_budget_file}')
    # Produces <name>.txt alongside the PDF, preserving column layout.
    os.system(f'pdftotext -layout {pdf_budget_file}')
    text_file_name = pdf_budget_file.replace('.pdf', '.txt')
    project_budgets = []
    with open(text_file_name) as text_file:
        # Document metadata harvested from header lines as we scan.
        book_year = 0
        issue_num = 0
        book_num = 0
        sub_book_num = 0
        item_num = 0
        page = 0
        count = 0
        lines = text_file.readlines()
        # State machine: inside section 6.2, and past its 'รวม' total row.
        is_section_6 = False
        is_row = False
        org_name = None
        sub_org_name = None
        project_name = ''
        personnel_budget = 0
        operational_budget = 0
        investing_budget = 0
        subsidy_budget = 0
        other_budget = 0
        sum_budget = None
        for line in lines:
            count += 1
            # Organization header: a centered org line followed by a
            # non-empty line (lines[count] is the *next* line since count
            # was already incremented).
            # NOTE(review): lines[count] can IndexError if the org prefix
            # appears on the file's last line -- confirm.
            if any(line.find(x) > 0 for x in org_prefix) and lines[count].strip() != '':
                org_name = line.strip()
                sub_org_name = lines[count].strip()
            # Issue number from the 'budget document issue' line.
            if line.find('เอกสารงบประมาณ ฉ') > 0:
                line = thai_number_to_arabic(line)
                numbers = re.findall('[0-9]+', line)
                if len(numbers) > 0:
                    issue_num = numbers[0]
            # Fiscal year (Buddhist era converted to CE by subtracting 543).
            if line.startswith('ประจ') and line.find('งบประมาณ พ.ศ.') > 0:
                line = thai_number_to_arabic(line)
                numbers = re.findall('[0-9]+', line)
                if len(numbers) > 0:
                    book_year = int(numbers[0]) - 543
            # Book / sub-book numbers from the 'เล่มที่' line.
            if line.find('เล่มท') > 0:
                line = thai_number_to_arabic(line)
                numbers = re.findall('[0-9]+', line)
                if len(numbers) == 2:
                    book_num = numbers[1]
                    sub_book_num = numbers[0]
            # ignore page number.
            # NOTE(review): startswith('') is True for *every* string, so
            # this branch (and its `continue`) swallows every line.  It
            # looks like a control character (pdftotext's '\f' page break)
            # was lost from this literal -- confirm against the original.
            if line.startswith(''):
                try:
                    num = int(line.strip())
                    if num > page:
                        page = num
                except ValueError:
                    pass
                continue
            # Split the layout-preserving line into columns, clean each
            # cell, repair displaced Thai marks, and map '-' to '0'.
            segments = line.split(' ')
            segments = list(filter(lambda x: x != '', segments))
            segments = list(map(str.strip, segments))
            segments = list(map(fix_align, segments))
            segments = list(map(replace_dash, segments))
            print(segments)
            # Condition find for section 6.2
            if any(line.startswith(x) for x in section_6_2_prefix):
                is_section_6 = True
                continue
            # Inside 6.2 section loop fill all value
            if line.startswith('รวม') and is_section_6:
                is_row = True
                continue
            if is_section_6 and is_row:
                # Title with any leading "<n>." numbering removed.
                no_number_title = re.sub(r'\d+\.', '', segments[0]).strip()
                if no_number_title.startswith(project_title_prefix) \
                        or segments[0].find('7. รายละเอียดงบประมาณจ') >= 0:
                    # A new title (or section 7) marks the end of the
                    # previous row -- flush it if complete.
                    if project_name != '' and sum_budget is not None and sum_budget != 'รวม':
                        is_plan = re.search(r'\d+\.', project_name) is not None
                        cross_func = project_name.find('แผนงานบูรณาการ') > 0
                        item_num += 1
                        ref_doc = f'{book_year}.{issue_num}.{book_num}.{sub_book_num}'
                        item_id = f'{ref_doc}.{item_num}'
                        plan = {
                            'ITEM_ID': item_id,
                            'REF_DOC': ref_doc,
                            'REF_PAGE_NO': page,
                            'MINISTRY': org_name,
                            'BUDGETARY_UNIT': sub_org_name,
                            'CROSS_FUNC': cross_func,
                            'PROJECT': project_name,
                            'is_plan': is_plan,
                            'personnel_budget': personnel_budget,
                            'operational_budget': operational_budget,
                            'investing_budget': investing_budget,
                            'subsidy_budget': subsidy_budget,
                            'other_budget': other_budget,
                            'sum_budget': sum_budget,
                        }
                        print(plan)
                        project_budgets.append(plan)
                        project_name = ''
                        sum_budget = None
                    # Section 7 ends the 6.2 table entirely.
                    if segments[0].find('7. รายละเอียดงบประมาณจ') >= 0:
                        is_row = False
                        is_section_6 = False
                        sum_budget = None
                        continue
                    # Start a new title, or append a wrapped title line.
                    if no_number_title.startswith(project_title_prefix):
                        project_name = segments[0]
                    else:
                        project_name += segments[0]
                # A full 7-column row carries the five budget figures plus
                # the row total.
                if len(segments) == 7:
                    personnel_budget = segments[1]
                    operational_budget = segments[2]
                    investing_budget = segments[3]
                    subsidy_budget = segments[4]
                    other_budget = segments[5]
                    sum_budget = segments[6]
    if len(project_budgets) > 0:
        try:
            os.makedirs('budget-csv/')
        except OSError:
            # Directory already exists.
            pass
        csv_file_name = 'budget-csv/' + pdf_budget_file.split('/')[1].replace('.pdf', '.csv')
        f = open(csv_file_name, 'w')
        w = csv.DictWriter(f, project_budgets[0].keys())
        w.writeheader()
        w.writerows(project_budgets)
        f.close()
if __name__ == '__main__':
    # Batch-convert every budget PDF found under budget-pdf/, in sorted order.
    pdf_dir = 'budget-pdf/'
    pdf_files = sorted(filter(os.path.isfile, glob.glob(pdf_dir + '*.pdf')))
    for pdf_file in pdf_files:
        if pdf_file.endswith('.pdf'):
            convert_table_6(pdf_file)
    # One-off debugging example:
    # convert_table_6('budget-pdf/10.pdf')
"artiya4u@gmail.com"
] | artiya4u@gmail.com |
dc6ac3b7f7d6dab01f3763ce82aac0a080feddcb | 6dd78838e2e148f65cd3cfaedb0775b48f31c3f9 | /esko_app/filters.py | 3285e0a35ce1f468ac33f795a3c88f1db81bee3b | [] | no_license | reg-11/122-Project | 8955400b22a26c76fe4a0810c75535fef40d9748 | 7d7cb7c043f1ac09b6a0a176a100f675d47d3ae8 | refs/heads/main | 2023-05-14T06:58:54.292631 | 2021-05-30T07:48:33 | 2021-05-30T07:48:33 | 369,425,628 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | from django import forms
import django_filters
from .models import Post, Category
class PostFilter(django_filters.FilterSet):
    """FilterSet allowing Post listings to be narrowed by category."""
    # Category slugs; each doubles as its own display label below.
    sell = 'sell'
    services = 'services/rent'
    swap = 'swap'
    find = 'find'
    # (value, label) pairs for a ChoiceFilter; currently only used by the
    # commented-out filter definitions below.
    category_choices = [
        (sell, sell),
        (services, services),
        (swap, swap),
        (find, find),
    ]
    #category = django_filters.ChoiceFilter(choices=Post.objects.all())
    #category = django_filters.ChoiceFilter(choices=category_choices, widget=forms.Select(attrs={'class':'form-select', 'style':'background:$primary'}))
    class Meta:
        # The 'category' filter is auto-generated from the Post model field.
        model = Post
        fields = [
            'category',
        ]
| [
"noreply@github.com"
] | noreply@github.com |
35140645296b6f5b8fecde4986a75d13ae4709be | 297dd54eaae9c4a14f992dac95886e21caf64860 | /app.py | 0bf3a0ca30d769b524ce53dcc21340029882d21a | [
"MIT"
] | permissive | nobuf/xls2csv-server | 79cd8b4fc9847c4b64e182578cbc5657c8a6bc2a | 0c1482301656fb5656c8c46fb0e8815e18f6c992 | refs/heads/master | 2021-06-03T19:30:55.574884 | 2020-05-03T04:54:29 | 2020-05-03T04:54:29 | 96,628,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | import os
from subprocess import call
from flask import Flask, request, Response
UPLOAD_FOLDER = '/tmp'
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
    """Convert an uploaded .xls file to CSV and return it.

    Expects a multipart POST with a `file` field and an optional `sheet`
    form field (1-based sheet index, default 1). Returns the CSV as
    text/csv, or 400 if no file was supplied.
    """
    file = request.files['file']
    sheet = request.form.get('sheet', 1, type=int)
    if not file:
        return "`file` must be included", 400
    # Fixed scratch paths: a.xls -> a.xlsx (libreoffice) -> a.csv (xlsx2csv).
    # NOTE(review): fixed filenames mean concurrent requests clobber each
    # other -- confirm the service runs single-worker before scaling.
    xls_path = os.path.join(UPLOAD_FOLDER, "a.xls")   # renamed from `input` (shadowed builtin)
    xlsx_path = os.path.join(UPLOAD_FOLDER, "a.xlsx")
    csv_path = os.path.join(UPLOAD_FOLDER, "a.csv")
    file.save(xls_path)
    call(["libreoffice", "--headless", "--convert-to", "xlsx", "--outdir", UPLOAD_FOLDER, xls_path])
    call(["xlsx2csv", "--sheet", str(sheet), xlsx_path, csv_path])
    with open(csv_path, 'r') as f:
        return Response(f.read(), mimetype="text/csv")
"nobu.funaki@gmail.com"
] | nobu.funaki@gmail.com |
4ef86447dfc34d083e46ab46a50b7495626f2834 | bbfb887631b2c438b6387025e5e7de3ac310ed92 | /get_time.py | ee5ee729bb1ef3fedf5348d36d4b3b46c2a284c9 | [] | no_license | cjx3711/timelapse-timer | 94918bb18a75c9348d4b726d04a221cd7b018f0d | fed1d1071ee48d8dcaaa5d3e6da091ac2656288c | refs/heads/master | 2023-06-05T11:36:01.048679 | 2021-06-26T06:44:25 | 2021-06-26T06:44:25 | 380,433,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # Refer to README.md for explainations
import os
import pathlib
import datetime
import time
# Sum the gaps between consecutive capture timestamps; gaps longer than the
# threshold are treated as pauses between sessions and skipped.
seconds_threshold = 45
total_seconds = 0
files = [entry for entry in os.listdir('count') if os.path.isfile(f"count/{entry}")]
print(f"Length {len(files)}")
files.sort()
# Walk consecutive filename pairs; each name begins with a
# YYYY-mm-dd_HHMMSS timestamp in its first 17 characters.
for earlier, later in zip(files, files[1:]):
    start_secs = time.mktime(time.strptime(earlier[0:17], '%Y-%m-%d_%H%M%S'))
    end_secs = time.mktime(time.strptime(later[0:17], '%Y-%m-%d_%H%M%S'))
    seconds_between = end_secs - start_secs
    if seconds_between <= seconds_threshold:
        total_seconds += seconds_between
    else:
        print(f"Skipped because {seconds_between} > {seconds_threshold}s")
print(f"Total seconds taken {total_seconds}")
print(f"Total minutes taken {total_seconds / 60}")
print(f"Total hours taken {total_seconds / 60 / 60}")
hours = int(total_seconds / 60 / 60)
minutes = int(total_seconds / 60) % 60
seconds = int(total_seconds) % 60
print(f"Total time taken {hours}:{minutes}:{seconds}")
"chaijiaxun@gmail.com"
] | chaijiaxun@gmail.com |
98aae3d685f997107b0897bee48f8b3ff514a34a | a02d8d6409abacce30accc3187c43bd0373b1e01 | /utils.py | 9ba69fe62b910780d2981f78f66b20b5e1bf0e85 | [] | no_license | piggy2008/R3Net | 39c81b530d26ee8a79a274bba5474a6b0079684e | 003fe4b20c1531c387bee0b06754742335aca81c | refs/heads/master | 2020-05-15T13:18:28.140204 | 2019-11-29T08:35:43 | 2019-11-29T08:35:43 | 182,294,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | import torch
import os
from model import R3Net
def MaxMinNormalization(x, Max, Min):
    """Linearly rescale x into [0, 1] relative to the range [Min, Max].

    Returns (x - Min) / (Max - Min); raises ZeroDivisionError when
    Max == Min (unchanged from the original behaviour).
    """
    return (x - Min) / (Max - Min)
def load_part_of_model(new_model, src_model_path, device_id=0):
    """Copy every parameter saved at src_model_path into new_model.

    The checkpoint is mapped onto cuda:<device_id>; each stored key is
    printed as it is copied. Returns the updated model.
    """
    checkpoint = torch.load(src_model_path, map_location='cuda:' + str(device_id))
    target_state = new_model.state_dict()
    for key in checkpoint.keys():
        print(key)
        # Overwrite the target tensor's data with the saved parameter.
        target_state[key].data = checkpoint.get(key)
    new_model.load_state_dict(target_state)
    return new_model
if __name__ == '__main__':
    # Smoke test: rebuild an R3Net and load a saved snapshot into it.
    ckpt_path = './ckpt'
    exp_name = 'VideoSaliency_2019-05-14 17:13:16'
    args = {
        'snapshot': '30000',  # snapshot filename (extension excluded)
        'crf_refine': False,  # whether to use CRF to refine results
        'save_results': True,  # whether to save the resulting masks
        'input_size': (473, 473)
    }
    # e.g. ./ckpt/VideoSaliency_2019-05-14 17:13:16/30000.pth
    src_model_path = os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')
    net = R3Net(motion='GRU')
    net = load_part_of_model(net, src_model_path)
"piggy2008@example.com"
] | piggy2008@example.com |
c357f2f4d90e50fb093b204e32dfe29fb03401e6 | 6728e0b7aecda8e6fc1ccd3db66a0bbc3d451b80 | /findDolphins/renderVideo.py | 05e4ce1d1a04a66b048385fcef1b6d2353610258 | [] | no_license | lewisfish/dolphin-counter | 5ddb056e128b2d00b175274bf0c79bd92476b62b | 8122e21aaf4962e8989da1fdfed381b4f3e94f64 | refs/heads/master | 2022-12-09T16:30:09.758569 | 2020-09-30T14:26:59 | 2020-09-30T14:26:59 | 233,601,005 | 2 | 0 | null | 2022-11-22T05:18:10 | 2020-01-13T13:23:45 | Python | UTF-8 | Python | false | false | 3,643 | py | import cv2
from collections import OrderedDict
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from argparse import ArgumentParser
def createDict(filename: str):
    """Parse a detection output file into {video_path: {frame: [boxes]}}.

    Lines starting with '#' name a video file; the lines that follow are
    'frame, [[x0, y0], [x1, y1]]' bounding-box records for that video.
    """
    # Context manager so the handle is always closed (the original leaked it).
    with open(filename, "r") as f:
        lines = f.readlines()
    mydict = {}
    for line in lines:
        if line[0] == "#":
            videoFile = line[1:].strip()
            mydict[videoFile] = {}
            # Opened only to probe the stream; fps is currently unused.
            cap = cv2.VideoCapture(videoFile)  # converts to BGR by default
            fps = cap.get(cv2.CAP_PROP_FPS)  # get fps
            cap.release()
        else:
            # "frame, [[x0, y0], [x1, y1]]": slicing strips the bracket
            # characters around the first and last coordinates.
            lineSplit = line.split(",")
            frameNum = int(lineSplit[0])
            x0 = int(lineSplit[1][2:])
            y0 = int(lineSplit[2])
            x1 = int(lineSplit[3])
            y1 = int(lineSplit[4][:-2])
            coords = [[x0, y0], [x1, y1]]
            # Create the per-frame list lazily on first box.
            mydict[videoFile].setdefault(frameNum, []).append(coords)
    return mydict
if __name__ == '__main__':
    # Entry point: overlay detection boxes (and a per-frame count) on the
    # source video, and optionally plot the dolphin count per frame.
    parser = ArgumentParser(description="Render video from output of dolphin detection.")
    parser.add_argument("-f", "--file", type=str,
                        help="Path to output file to be analysed.")
    parser.add_argument("-v", "--video", type=str,
                        help="Path to video file to be cut up.")
    # parser.add_argument("-d", "--debug", action="count", default=0,
    #                     help="Display debug info.")
    parser.add_argument("-pt", "--plot", action="store_true",
                        help="Display plot of dolphin count over all frames.")
    parser.add_argument("-nv", "--novideo", action="store_true",
                        help="If provided do not render video.")
    args = parser.parse_args()
    file = args.file
    genny = createDict(file)
    videoFile = args.video
    # Sort each video's frame->boxes mapping by frame number.
    genny = {k: OrderedDict(sorted(v.items())) for k, v in genny.items()}
    if not args.novideo:
        cap = cv2.VideoCapture(videoFile)  # converts to BGR by default
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        _, frame = cap.read()
        h, w, layers = frame.shape
        # 1 fps output: one rendered frame per annotated source frame.
        writer = cv2.VideoWriter("output-new-1.avi", cv2.VideoWriter_fourcc(*"XVID"), 1, (w, h))
    i = 0
    dolphinCount = []
    for video in genny:
        for time in genny[video]:
            if not args.novideo:
                # Seek directly to the annotated frame number.
                cap.set(cv2.CAP_PROP_POS_FRAMES, time)
                _, frame = cap.read()
            numDolphins = 0
            for bbox in genny[video][time]:
                # Boxes appear stored as (y, x) pairs; the +130 offset
                # presumably compensates for a crop applied during
                # detection. NOTE(review): confirm against the detector.
                x1 = int(bbox[0][1])
                x2 = int(bbox[1][1])
                y1 = int(bbox[0][0]) + 130
                y2 = int(bbox[1][0]) + 130
                if not args.novideo:
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                numDolphins += 1
            dolphinCount.append(numDolphins)
            # plt.imshow(frame)
            # plt.show()
            if not args.novideo:
                # Draw the per-frame dolphin count near the bottom left.
                org = (50, 1040-100)
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 1
                color = (255, 0, 0)
                thickness = 2
                cv2.putText(frame, f"{numDolphins}", org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
                writer.write(frame.astype("uint8"))
            print(i)
            i += 1
    if args.plot:
        plt.plot(dolphinCount)
        plt.show()
    if not args.novideo:
        cv2.destroyAllWindows()
        cap.release()
        writer.release()
| [
"lewistmcmillan@gmail.com"
] | lewistmcmillan@gmail.com |
5fe233c344d98ba662dd17f349d77e2c6e08ec0f | 9c2a814d1d9d2fd10c688bd82a86328ebaa60b39 | /benchmark/namedtuple.py | 706f6650f330d6dbc85b4ae70152d7b1d1f4913e | [] | no_license | jfgreen/python-representation-sizes | bdca1ac690a220f93e3e2f5d6f7df35f348f140d | e2a4a2bccd61bc13bd6dff3fe2cd3abddf12d493 | refs/heads/master | 2021-02-09T21:51:40.032261 | 2020-03-02T09:18:22 | 2020-03-02T09:18:22 | 244,327,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | #!/usr/bin/env python3
import sys
import collections

# List length comes from the first CLI argument.
count = int(sys.argv[1])

# Record type under measurement.
TupleFruit = collections.namedtuple('TupleFruit', ['name', 'price', 'colour'])


def fruit_as_namedtuple():
    """Return one sample fruit record."""
    return TupleFruit('mango', 123, 'red')


basket = [fruit_as_namedtuple() for _ in range(count)]
total_price = sum(f.price for f in basket)
print(total_price)
| [
"1288679+jfgreen@users.noreply.github.com"
] | 1288679+jfgreen@users.noreply.github.com |
b71dc37ebb0e299b5e5afaf20e6be5533853536a | 878ce815366d6334556586b38a3b18d2cfdeeae3 | /Player.py | b06ac0d4ed915dffd39d9737421f05b56b20d3b2 | [] | no_license | stevennhnk113/MancalaPlayer | e3b64a0e76b5afcd973a994f1b5b1ab458f85dd7 | bdff5ef6cf1e7dab2c7487e4b9f42b1f3d490188 | refs/heads/master | 2021-01-22T10:40:08.643901 | 2017-05-28T09:08:29 | 2017-05-28T09:08:29 | 92,650,790 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,039 | py | # File: Player.py
# Author:
# Date:
# Defines a simple artificially intelligent player agent
# You will define the alpha-beta pruning search algorithm
# You will also define the score function in the MancalaPlayer class,
# a subclass of the Player class.
from random import *
from decimal import *
from copy import *
from MancalaBoard import *
# a constant
INFINITY = 1.0e400
def max(a, b):
    # Intentionally shadows the builtin: the game code only ever compares
    # exactly two values. Ties return the first argument, as before.
    return a if a >= b else b
def min(a, b):
    # Intentionally shadows the builtin: two-argument minimum.
    # Ties return the first argument, as before.
    return a if a <= b else b
class Player:
""" A basic AI (or human) player """
HUMAN = 0
RANDOM = 1
MINIMAX = 2
ABPRUNE = 3
CUSTOM = 4
def __init__(self, playerNum, playerType, ply=0):
self.num = playerNum
self.opp = 2 - playerNum + 1
self.type = playerType
self.ply = ply
def __repr__(self):
return str(self.num)
def minimaxMove( self, board, ply ):
""" Choose the best minimax move. Returns (move, val) """
move = -1
score = -INFINITY
turn = self
for m in board.legalMoves( self ):
if ply == 0:
return (self.score(board), m)
if board.gameOver():
return (-1, -1) # Can't make a move, the game is over
nb = deepcopy(board)
nb.makeMove(self, m)
opp = Player(self.opp, self.type, self.ply)
s, oppMove = opp.minValue(nb, ply-1, turn)
if s > score:
move = m
score = s
return score, move
def maxValue( self, board, ply, turn):
""" Find the minimax value for the next move for this player
at a given board configuation
Returns (score, oppMove)"""
if board.gameOver():
return (turn.score( board ), -1)
score = -INFINITY
move = -1
for m in board.legalMoves( self ):
if ply == 0:
return (turn.score( board ), m)
# make a new player to play the other side
opponent = Player(self.opp, self.type, self.ply)
# Copy the board so that we don't ruin it
nextBoard = deepcopy(board)
nextBoard.makeMove( self, m )
s, oppMove = opponent.minValue(nextBoard, ply-1, turn)
if s > score:
move = m
score = s
return (score, move)
def minValue( self, board, ply, turn ):
""" Find the minimax value for the next move for this player
at a given board configuation"""
if board.gameOver():
return turn.score( board ), -1
score = INFINITY
move = -1
for m in board.legalMoves( self ):
if ply == 0:
return (turn.score( board ), m)
# make a new player to play the other side
opponent = Player(self.opp, self.type, self.ply)
# Copy the board so that we don't ruin it
nextBoard = deepcopy(board)
nextBoard.makeMove( self, m )
s, oppMove = opponent.maxValue(nextBoard, ply-1, turn)
if s < score:
score = s
move = m
return (score, move)
# The default player defines a very simple score function
# You will write the score function in the MancalaPlayer below
# to improve on this function.
def score(self, board):
""" Returns the score for this player given the state of the board """
if board.hasWon( self.num ):
return 100.0
elif board.hasWon( self.opp ):
return 0.0
else:
return 50.0
# You should not modify anything before this point.
# The code you will add to this file appears below this line.
# You will write this function (and any helpers you need)
# You should write the function here in its simplest form:
# 1. Use ply to determine when to stop (when ply == 0)
# 2. Search the moves in the order they are returned from the board's
# legalMoves function.
# However, for your custom player, you may copy this function
# and modify it so that it uses a different termination condition
# and/or a different move search order.
def alphaBetaMove( self, board, ply ):
""" Choose a move with alpha beta pruning """
move = -1
score = -INFINITY
abScore = [-INFINITY, INFINITY]
turn = self
for m in board.legalMoves( self ):
# Can't make a move, the game is over
if board.gameOver():
return (-1, -1)
if ply == 0:
return (self.score(board), m)
nextABScore = deepcopy (abScore)
nb = deepcopy(board)
nb.makeMove(self, m)
opp = version4(self.opp, self.type, self.ply)
s, oppMove, a, b = opp.minValueAB(nb, ply-1, turn, nextABScore[0], nextABScore[1])
if s > score:
move = m
score = s
return score, move
def maxValueAB( self, board, ply, turn, a, b):
""" Find the minimax value for the next move for this player
at a given board configuation
Returns (score, oppMove)"""
score = -INFINITY
move = -1
abScore = [a,b]
alpha = 0
beta = 1
#a = turn.score( board )
#print "game over returning ", turn.score( board )
# print "\n This is max ", ply
# print abScore
if board.gameOver():
# print "gameOver"
return (turn.score( board ), -1, a, b)
for m in board.legalMoves( self ):
# print "int the loop"
if ply == 0:
return (self.score(board), m, a, b)
nextABScore = deepcopy (abScore)
nb = deepcopy(board)
nb.makeMove(self, m)
opp = version4(self.opp, self.type, self.ply)
if score < abScore[beta]:
s, oppMove, a, b = opp.minValueAB(nb, ply-1, turn, nextABScore[0], nextABScore[1])
if s > score:
move = m
score = s
abScore[0] = s
# print "yes"
#print "a ", a, " b ", b, ply
#else:
#print "Beta is ", abScore[beta], ", score is ", score, ", now prune ", m
return (score, move, abScore[0], abScore[1])
def minValueAB( self, board, ply, turn, a, b):
""" Find the minimax value for the next move for this player
at a given board configuation"""
score = INFINITY
move = -1
abScore = [a,b]
alpha = 0
beta = 1
#print "\n This is min ", ply
#print abScore
if board.gameOver():
#print "gameOver"
return turn.score( board ), -1, a, b
for m in board.legalMoves( self ):
#print "int the loop"
if ply == 0:
return (self.score(board), m, a, b)
nextABScore = deepcopy (abScore)
nb = deepcopy(board)
nb.makeMove(self, m)
opp = version4(self.opp, self.type, self.ply)
if score > abScore[0]: #index 1 is beta, if my min value is smaller than the previous alpha, i stop
s, oppMove, a, b = opp.maxValueAB(nb, ply-1, turn, nextABScore[0], nextABScore[1])
if s < score:
move = m
score = s
abScore[beta] = s
#print "yes"
#print "a ", a, " b ", b, ply
#else:
#print "Beta is ", abScore[beta], ", score is ", score, ", now prune ", m
return (score, move, abScore[0], abScore[1])
def chooseMove( self, board ):
""" Returns the next move that this player wants to make """
if self.type == self.HUMAN:
move = input("Please enter your move:")
while not board.legalMove(self, move):
print move, "is not valid"
move = input( "Please enter your move" )
return move
elif self.type == self.RANDOM:
move = choice(board.legalMoves(self))
print "chose move", move, "with value", val
return move
elif self.type == self.MINIMAX:
val, move = self.minimaxMove( board, self.ply )
print "chose move", move, " with value", val
return move
elif self.type == self.ABPRUNE:
val, move = self.alphaBetaMove( board, self.ply)
print "chose move", move, " with value", val
return move
elif self.type == self.CUSTOM:
# TODO: Implement a custom player
# You should fill this in with a call to your best move choosing
# function. You may use whatever search algorithm and scoring
# algorithm you like. Remember that your player must make
# each move in about 10 seconds or less.
val, move = None, None
print "chose move", move, " with value", val
return move
else:
print "Unknown player type"
return -1
# Note, you should change the name of this player to be a custom name
# that identifies you or your team.
class version4(Player):
    """ Defines a player that knows how to evaluate a Mancala gameboard
        intelligently """

    def score(self, board):
        """ Evaluate the Mancala board for this player """
        # Heuristic score: win/loss are absolute (100/0); otherwise points
        # are accumulated from capture chances, opponent emptiness, and
        # moves that avoid giving the opponent extra turns.
        """ Returns the score for this player given the state of the board """
        if board.hasWon( self.num ):
            return 100.0
        elif board.hasWon( self.opp ):
            return 0.0
        else:
            # Reward positions where one of our stones can land exactly on
            # an empty cup of ours whose opposite cup holds stones (capture).
            score = 0
            # NOTE(review): players appear to be numbered 1/2 elsewhere
            # (opp = 2 - num + 1), so this == 0 branch may be unreachable
            # and mySpots would always be P2Cups -- confirm intended sides.
            if(self.num == 0):
                mySpots = board.P1Cups
                oppSpots = board.P2Cups
            else:
                mySpots = board.P2Cups
                oppSpots = board.P1Cups
            # For each empty cup of ours with an opposing cup that has
            # stones, check whether some earlier cup can land on it exactly.
            for emtpy_spot_index in range(len(mySpots)):
                if mySpots[emtpy_spot_index] == 0 and oppSpots[emtpy_spot_index] > 0:
                    for findingIndex in range(0, emtpy_spot_index):
                        if mySpots[findingIndex] == emtpy_spot_index - findingIndex:
                            score = score + 2*oppSpots[emtpy_spot_index]
            # Fewer empty cups on the opponent's side is better for us:
            # +2 for every opposing cup that still holds stones.
            for emtpy_spot_index in range(len(oppSpots)):
                if oppSpots[emtpy_spot_index] != 0:
                    score = score + 2
            # Reward cups that can reach (or pass) our mancala, denying the
            # opponent a second chance at those stones.
            spot_till_mancala = 6 # the first cup needs 6 moves
            # before its stones reach the mancala
            for spot in mySpots:
                if spot > spot_till_mancala:
                    score = score + 1
                elif spot == spot_till_mancala:
                    score = score + 2
                spot_till_mancala = spot_till_mancala - 1
            # for spot in mySpots:
            #     if spot > spot_till_mancala:
            #         score = score + 1
            #     elif spot == spot_till_mancala:
            #         nb = deepcopy(board)
            #         nb.makeMove(self, mySpots.index(spot))
            #         score = self.score(nb)
            #     spot_till_mancala = spot_till_mancala - 1
            return score
| [
"stevennhnk113@gmail.com"
] | stevennhnk113@gmail.com |
459ae950fbc11b3a49e9d7084e8463f627186bfa | 47fed2cd061c5c9e57255f0fc0ed19dd6843adfb | /square.py | 11c99ce7d304242fac22f9ac5823017892ef3f5b | [] | no_license | drkmbadgley/Python | bc66087889171b218ed14eee7001df52b46a8bcf | 6ddd0c89e4843365a8957f2cf8586e91f26fdec6 | refs/heads/master | 2021-08-30T09:00:13.318486 | 2017-12-17T04:52:02 | 2017-12-17T04:52:02 | 114,508,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | x = input ("type a number:")
x = int(x)
def f():
return x**2
print (f())
| [
"drkmbadgley@gmail.com"
] | drkmbadgley@gmail.com |
81cb4c85bfe472444a4f9420bbd8f16d3a1e136b | 27ad6657fe640a020fa618ada4981c5036d4f696 | /site/jslibs/libraries/views.py | 1b5b87461739ccbb185e00a2dad2eb5234d82ec5 | [] | no_license | Boldewyn/jslibs.net | b0fc7465e9ab7993508a8ad841647e9a6db77af3 | 32ed667e8b9cb36076874ec6e321cd6ba4fcb6da | refs/heads/master | 2021-01-20T05:07:41.515136 | 2011-10-16T18:52:53 | 2011-10-16T18:52:53 | 2,569,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #
#
#
from django.http import HttpResponse
from libraries.models import LibraryModel, BrowserModel, KeywordModel
def detail(request, name):
    # Placeholder detail view: echoes the requested library name.
    return HttpResponse('It Works, %s' % name)
| [
"git@manuel-strehl.de"
] | git@manuel-strehl.de |
dde1873c9874fb3cc522d0ece27b2590eca2cecb | 4fc7b3e22c3c00f47a119a03cfad5b3e6bed14d0 | /equation-solver/gaussian-elimination/gauss_elimination.py | 4b876f421b3351bf5048cf8b5856fb2afd4dd693 | [] | no_license | guhwanbae/GuLA | 54178de74ded210a35cd93fa238c5ae97a5ff6ee | 5ff775ec3e336fe8fdef46d534dd234273bfeb09 | refs/heads/master | 2020-04-13T05:45:08.735322 | 2019-09-17T13:25:21 | 2019-09-17T13:25:21 | 163,002,081 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # Author : Gu-hwan Bae
# Summary : Get a row echelon form.
import gula.gaussian as ggaussian
import numpy as np
# Case I
# All rows are linear independent. rank(M) = 3, null(M) = 0
M = np.array([[1, 2, 3],
[1, 2, 4],
[3, 7, 4]])
G = ggaussian.echelon(M)
# Matrix G has three basis.
print('>> Echelon form matrix =\n', G)
# Case II
# First and second row are linear dependent. rank(M) = 2, null(M) = 1
M = np.array([[1, 2, 4],
[1, 2, 4],
[3, 7, 4]])
G = ggaussian.echelon(M)
# Matrix G has two basis.
print('>> Echelon form matrix =\n', G)
# Case III
# Rows are standart generator of R3 field. rank(M) = 3, null(M) = 0
M = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
G = ggaussian.echelon(M)
# Matrix G has three basis.
print('>> Echelon form matrix =\n', G)
# Case IV
# All rows are linear are linear dependent. rank(M) = 1, null(M) = 2
M = np.array([[2, 2, 2],
[4, 4, 4],
[6, 6, 6]])
G = ggaussian.echelon(M)
# Matrix G has three basis.
print('>> Echelon form matrix =\n', G)
| [
"guhwan.bae@gmail.com"
] | guhwan.bae@gmail.com |
f00d8996dc10333f7dda3f51a196117af191bb41 | 760c05dbc3a322d91c98ac5f5e0fe686278fe9de | /dojoreads/settings.py | 449a639ffc2ae23e14bc60b1e27807dad6c652d3 | [] | no_license | xiao-Tan/dojoreads | d4bed09c758b864178bb4f155632f2c37f07951e | 25758c5eeb1bc4d466c2420375fa00cc8f1e64be | refs/heads/master | 2022-02-02T04:06:55.439179 | 2019-10-24T19:01:48 | 2019-10-24T19:01:48 | 209,441,356 | 0 | 0 | null | 2022-01-21T20:02:50 | 2019-09-19T02:04:19 | Python | UTF-8 | Python | false | false | 3,124 | py | """
Django settings for dojoreads project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '$6km68k(*zft@zvws4dv+8d&f%4(8*-+wwq4f=d5mfh-sjyd5-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'apps.read_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dojoreads.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'dojoreads.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file database in the project root (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"tanxiao@tanxiaodeMacBook-Pro.local"
] | tanxiao@tanxiaodeMacBook-Pro.local |
9aa250b7e4e824eb82ffaa4ef2408b16d0daa7ec | d34f2da4cfbb0f379feddd047d39a43e7cec6024 | /7. Local Variables.py | f384147cd4b839d74cfa1206cc87a6c8934bc4b8 | [] | no_license | akashgupta-ds/Python | 82297112dc5414f29ec69e1a28c423778dcbf31a | 9fa62aa3b42ccba525b7f1d39159163fb534ace7 | refs/heads/master | 2022-11-05T09:38:51.998810 | 2020-06-28T08:30:54 | 2020-06-28T08:30:54 | 275,540,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # 3. Local Variables
# Method level variables
# we should not use self,cls,classname
# local variable can not be called outside specified block/method
x=100 #Global variable
class localVariableClass():
a=10
def __init__(self):
# del localVariableClass.a # Deletion of Static variable inside constructor [this is valid]
print(self.a)
print(x)
def degreeMethod(self,radian):
# local variable
pi=22/7
x=999
degree=radian * (180/pi)
print("Degree:",degree)
print("Gloabl Variable changed:", x)
obj=localVariableClass()
# print(obj.__dict__)
obj.degreeMethod(10)
# print(obj.__dict__)
print("Gloabl Variable does not changed:", x) # Global variable call outside class
print(localVariableClass.a)
print(localVariableClass.__dict__)
objlr=localVariableClass()
print(localVariableClass.__dict__) | [
"noreply@github.com"
] | noreply@github.com |
3c5843e0690c188dca077dbecafd9946cdea29b4 | cdb12346f2ab4a5ff51aa88a83e4724a978afd11 | /arquitetura-microsservicos/marketing-api/swagger_server/__main__.py | 875046c7339291f747fa74dedc62c171181f4ee3 | [] | no_license | bylucasxdx/arquitetura-software-puc | b121fea9f72650cc576cb01311f3c13b6233671c | 138eb939d8fb37743834031fad3620876d6f52ee | refs/heads/main | 2023-04-09T10:23:48.536039 | 2021-04-13T01:03:22 | 2021-04-13T01:03:22 | 357,378,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/env python3
import connexion
from swagger_server import encoder
def main():
    """Build the Connexion app from swagger.yaml and serve it on port 8080."""
    app = connexion.App(__name__, specification_dir='./swagger/')
    # Use the generated project's encoder so swagger models serialise cleanly.
    app.app.json_encoder = encoder.JSONEncoder
    app.add_api('swagger.yaml', arguments={'title': 'MarketingApi'})
    app.run(port=8080)
if __name__ == '__main__':
    main()
| [
"lucasmedeiros1994@gmail.com"
] | lucasmedeiros1994@gmail.com |
f4dd1045102b7757bf5cba9829d5bbd26739a5b1 | 42401a8f1ad4b0566d5415edf87d0b6c8e795c6d | /shakaar.py | e70b581da3c4bd1f5e46455b723755471db83e22 | [] | no_license | recantha/ShakaarBot | 9b8e64340a11782801be5b152cd023ddbc59569a | 38985dde36aa908ed665bf9dcad70afa4c15a3c4 | refs/heads/master | 2020-03-31T02:41:38.286282 | 2019-10-27T13:40:51 | 2019-10-27T13:40:51 | 151,835,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,071 | py | # Code for Shakaar robot
#
# By Mike Horne, based on code by Tom Oinn/Emma Norling code
# Need floating point division of integers
from __future__ import division
from time import sleep
import sys
import subprocess
# Per-side speed/trim multipliers; the left motor is mounted mirrored,
# so its multiplier is negative.
g_motor_left_multiplier = -0.5
g_motor_right_multiplier = 0.5

try:
    import ThunderBorg3 as ThunderBorg
    TB = ThunderBorg.ThunderBorg()
    TB.Init()
    print("ThunderBorg initialised")

    if not TB.foundChip:
        print("No Thunderborg found")
        sys.exit()

    def set_speeds(power_left, power_right):
        """
        As we have a motor hat, we can use the motors
        :param power_left:
            Power to send to left motor (0-100)
        :param power_right:
            Power to send to right motor (0-100), will be inverted to
            reflect chassis layout
        """
        motor_left_multiplier = g_motor_left_multiplier
        motor_right_multiplier = g_motor_right_multiplier
        # Scale the incoming 0-100 values down to the -1..1 motor power
        # range, applying the per-side direction/trim multipliers.
        power_left = (motor_left_multiplier * power_left) / 100
        power_right = (motor_right_multiplier * power_right) / 100
        TB.SetMotor2(power_left)
        TB.SetMotor1(power_right)

    def stop_motors():
        """Cut power to both motors."""
        TB.MotorsOff()

except ImportError:
    print("Something occurred. Printing values instead")

    def set_speeds(power_left, power_right):
        """
        No motor hat - print what we would have sent to it if we'd had one.
        """
        motor_left_multiplier = g_motor_left_multiplier
        # Fixed: this previously read g_motor_left_multiplier (copy-paste
        # bug), so the debug output never reflected right-side trim.
        motor_right_multiplier = g_motor_right_multiplier
        power_left = (motor_left_multiplier * power_left) / 100
        power_right = (motor_right_multiplier * power_right) / 100
        print('DEBUG Left: {}, Right: {}'.format(power_left, power_right))
        sleep(0.3)

    def stop_motors():
        """
        No motor hat, so just print a message.
        """
        print('DEBUG Motors stopping')
# All we need, as we don't care which controller we bind to, is the ControllerResource
from approxeng.input.selectbinder import ControllerResource
# Enable logging of debug messages, by default these aren't shown
# import logzero
# logzero.setup_logger(name='approxeng.input', level=logzero.logging.DEBUG)
class RobotStopException(Exception):
    """Raised when the operator requests a clean shutdown of the drive loop."""
    pass
def mixer(yaw, throttle, max_power=100):
    """
    Mix a pair of joystick axes, returning a pair of wheel speeds. This is
    where the mapping from joystick positions to wheel powers is defined, so
    any changes to how the robot drives should be made here; everything else
    is really just plumbing.
    :param yaw:
        Yaw axis value, ranges from -1.0 to 1.0
    :param throttle:
        Throttle axis value, ranges from -1.0 to 1.0
    :param max_power:
        Maximum speed that should be returned from the mixer, defaults to 100
    :return:
        A pair of power_left, power_right integer values to send to the
        motor driver
    """
    raw_left = throttle + yaw
    raw_right = throttle - yaw
    # Normalise so the larger magnitude maps onto max_power (never divide
    # by less than 1).
    largest = max(1, abs(raw_left), abs(raw_right))
    scale = float(max_power) / largest
    return int(raw_left * scale), int(raw_right * scale)
# Outer try / except catches the RobotStopException we just defined, which we'll raise when we want to
# bail out of the loop cleanly, shutting the motors down. We can raise this in response to a button press
try:
    # Outer loop: keep (re)acquiring a controller until RobotStopException.
    while True:
        # Inner try / except is used to wait for a controller to become available, at which point we
        # bind to it and enter a loop where we read axis values and send commands to the motors.
        try:
            # Bind to any available joystick, this will use whatever's connected as long as the library
            # supports it.
            with ControllerResource(print_events=False, controller_class=None, hot_zone=0.2, dead_zone=0.1) as joystick:
                print('Found a controller, HOME to exit, Left Stick to drive')
                # Loop until the joystick disconnects, or we deliberately stop by raising a
                # RobotStopException
                while joystick.connected:
                    print("Connected")
                    # Get joystick values: right-stick x for yaw, left-stick y for throttle
                    x_axis, y_axis = joystick['rx', 'ly']
                    # Get power from mixer function
                    power_left, power_right = mixer(yaw=x_axis, throttle=y_axis)
                    # Set motor speeds
                    set_speeds(power_left, power_right)
                    # Get a ButtonPresses object containing everything that was pressed since the last
                    # time around this loop.
                    button_presses = joystick.check_presses()
                    # Print out any buttons that were pressed, if we had any
                    if button_presses.has_presses:
                        print('Button presses: {}'.format(button_presses))
                    # Triangle raises RobotStopException to bail out of both loops
                    if 'triangle' in button_presses:
                        raise RobotStopException()
                    # D-pad right + square: halt the Pi
                    if 'dright' in button_presses:
                        if 'square' in button_presses:
                            command = '/usr/bin/sudo /sbin/shutdown -h now'
                            process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                            output = process.communicate()[0]
                            print(output)
                    # D-pad left + circle: reboot the Pi
                    if 'dleft' in button_presses:
                        if 'circle' in button_presses:
                            command = '/usr/bin/sudo /sbin/shutdown -r now'
                            process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                            output = process.communicate()[0]
                            print(output)
                    # R1/R2 trim the left/right multipliers in 0.1 steps
                    # (left is negative, right positive -- see globals).
                    if 'r2' in button_presses:
                        g_motor_left_multiplier = g_motor_left_multiplier - 0.1
                        g_motor_right_multiplier = g_motor_right_multiplier + 0.1
                    if 'r1' in button_presses:
                        g_motor_left_multiplier = g_motor_left_multiplier + 0.1
                        g_motor_right_multiplier = g_motor_right_multiplier - 0.1
                    # Clamp multipliers: left stays in [-1, 0], right in [0, 1].
                    if g_motor_left_multiplier < -1.0:
                        g_motor_left_multiplier = -1.0
                    if g_motor_right_multiplier > 1.0:
                        g_motor_right_multiplier = 1.0
                    if g_motor_left_multiplier > 0:
                        g_motor_left_multiplier = 0
                    if g_motor_right_multiplier < 0:
                        g_motor_right_multiplier = 0
                    # NOTE(review): the next four lines run on EVERY pass of
                    # the connected loop -- the message and the 'lx' re-read
                    # look misplaced (likely meant for after the loop); the
                    # stop_motors() call halts the robot each iteration.
                    # Confirm intent before removing.
                    print('Joystick not connected')
                    stop_motors()
                    x_axis, y_axis = joystick['lx', 'ly']
                    button_presses = joystick.check_presses()
        except IOError:
            # We get an IOError when using the ControllerResource if we don't have a controller yet,
            # so in this case we just wait a second and try again after printing a message.
            print('No controller found yet')
            sleep(1)
except RobotStopException:
    # This exception will be raised when the home button is pressed, at which point we should
    # stop the motors.
    stop_motors()
    # And exit
| [
"mike@recantha.co.uk"
] | mike@recantha.co.uk |
efe0dcc2970fad7cd05e90def47afc9c1b80324d | 26e341ae4cdc76a247eee00a004f6a0707fa9856 | /module/dbmodules.py | 2a2630faed9d61b497f468d95f24057141a39e8d | [] | no_license | kjj13/Senier_project | 90755d008d1cd43bcbace8f234c06d7205d2a385 | 72d6596889f9f001e0ec78d195bf20b1c7314a27 | refs/heads/master | 2023-03-15T04:44:44.859969 | 2021-03-11T07:50:20 | 2021-03-11T07:50:20 | 313,630,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | # file name : dbModule.py
# pwd : /myflask/module/dbModule.py
import pymysql
class Database():
    """Thin convenience wrapper around a PyMySQL connection.

    NOTE(review): connection credentials (host/user/password) are hard-coded;
    consider moving them to configuration or environment variables.
    """

    def __init__(self):
        self.db = pymysql.connect(host='18.212.183.253',
                                  port=3306,
                                  user='jjoong',
                                  passwd='1234',
                                  db='pi',
                                  charset='utf8')
        # DictCursor yields rows as dicts keyed by column name.
        self.cursor = self.db.cursor(pymysql.cursors.DictCursor)

    def execute(self, query, args={}):
        """Run *query* without fetching results (``args`` is never mutated)."""
        self.cursor.execute(query, args)

    def executeOne(self, query, args={}):
        """Run *query* and return the first result row (or None)."""
        self.cursor.execute(query, args)
        row = self.cursor.fetchone()
        return row

    def executeAll(self, query, args={}):
        """Run *query* and return all result rows as a list of dicts."""
        self.cursor.execute(query, args)
        row = self.cursor.fetchall()
        return row

    def commit(self):
        # Fix: the original ``def commit():`` was missing ``self``, so calling
        # ``db.commit()`` on an instance raised TypeError.
        self.db.commit()

    def close(self):
        # Fix: same missing-``self`` bug as ``commit``.
        self.db.close()
| [
"rlawnd123@hanmail.net"
] | rlawnd123@hanmail.net |
f9f082cd98c61c0c7b1d1d46c5dc15be431fba88 | e2455e7dceaaf86aad0715c9ca43ffd89fefb21c | /python_variables.py | 95544ffce6e6f30972be30d78167759db7836c1a | [] | no_license | masterarif/python_new_project | 0d14a8bc7a8ed1c9bd40ca131f780d0a4d079ba4 | cb9c2a9107cb72766287ec12a0cccabd0999997b | refs/heads/master | 2023-01-13T05:08:42.770884 | 2020-11-16T11:17:06 | 2020-11-16T11:17:06 | 269,759,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | #
#
# # Python variables
# #
# # first_name = "Arif"
# # last_name = "khan"
# #
# # # Concatenate first and last name
# # full_name = first_name + " " + last_name
# #
# # # print full name
# # print(full_name)
#
#
# # # get user intput
# #
# # user_name = input("What is your name ")
# # print("Hi " + user_name)
# # get user input for dob, post_code, nationality, sex, status,
#
# # user_dob = input("What is you date of birth ")
# # user_postcode = input("What is your post code ")
# # user_nationality = input("What is your Sex ")
# # user_status = input("What is your Marital Status ")
# # print("Hi " + user_dob)
# # print("Hi " + user_nationality)
# # print("Hi " + user_postcode)
# # print("Hi " + user_status)
# # user_city = input("What is the name of the city")
# # user_city_upper = user_city.upper()
# # print("Hi " + user_city_upper)
# # name = "Mark"
# # print(name)
# # name = "Ace"
# # print(name)
# # thanx = "Thanks for your input!"
# # print(thanx)
#
# # original_num = 23
# # new_num = original_num + 7
# # print(new_num)
# #
# # original_num = 23
# # num_to_be_added = 7
# # new_num = original_num + num_to_be_added
# # print(new_num)
#
# #####################
# # num = .075
# # total = num + 200
# # print(total)
# # print(10/2)
# # x = 5
# # print(type(x))
# # x = 20.5
# # print(type(x))
# # x = ["apple", "banana", "cherry"]
# # print(type(x))
# # #####################
# # x = ("apple", "banana", "cherry")
# # print(type(x))
# # #####################
# # x = {"name" : "John", "age" : 36}
# # print(type(x))
# # #####################
# # x = True
# # print(type(x))
# # #####################
# # text = "Hello World1"
# # print(len(text))
# # #####################
# # txt = " Hello World "
# # print("Hi" + txt + "How")
# # txt = txt.strip()
# # print("Hi" + txt + "How")
# # #####################
# # age = 36
# # txt = "My name is John, and I am {} "
# # print(txt.format(age))
# # #####################
# # print(bool("abc"))
# # #####################
# # print(bool(0))
# # ##################### sets
# # fruits = ["apple", "banana"]
# # if "apple" in fruits:
# # print("Yes, apple is a fruit!")
# # #####################
# # fruits = ["apple", "banana", "cherry"]
# # print(fruits[1])
# # #####################
# # fruits = ("apple", "banana", "cherry")
# # print(len(fruits))
# # #####################
# # fruits = {"apple", "banana", "cherry"}
# # more_fruits = ["orange", "mango", "grapes"]
# # fruits.update(more_fruits)
# # print(fruits)
# # ##################### dictionaries
# # car = {
# # "brand": "Ford",
# # "model": "Mustang",
# # "year": 1964
# # }
# # print(car.get("model"))
# # #####################
# # car = {
# # "brand": "Ford",
# # "model": "Mustang",
# # "year": 1964
# # }
# # car["year"] = 2020
# # print(car.get("year"))
# # ######################
# # car = {
# # "brand": "Ford",
# # "model": "Mustang",
# # "year": 1964
# # }
# # car["color"] = "red"
# # print(car["color"])
# # #####################
# #
# # car = {
# # "brand": "Ford",
# # "model": "Mustang",
# # "year": 1964
# # }
# # car.pop("model")
# # print(car)
# #
# # #####################
# # car = {
# # "brand": "Ford",
# # "model": "Mustang",
# # "year": 1964
# # }
# # car.clear()
# # print(car)
# # ###################### if
# a = 50
# # b = 10
# # if a > b:
# # print("Hello World")
# #
# # ########################
# # a = 50
# # b = 10
# # if a != b:
# # print("Hello World")
# # ########################
# #
# # a = 50
# # b = 10
# # if a == b:
# # print("Yes")
# # else:
# # print("No")
# #
# # #########################
# # a = 50
# # b = 10
# # if a == b:
# # print("1")
# # elif a > b:
# # print("2")
# # else:
# # print("3")
#
# ########################
#
# '''
# Use the correct short
# hand syntax to write the following
# conditional expression in one line:
#
# if 5 > 2: print("Yes"): else print("No")
#
# print("Yes") if 5 > 2 else print("No")
# '''
#print("Yes") if 5 > 2 else print("No")
#
# city_to_check = "Tucson"
# cleanest_cities = ["Cheyenne", "Santa Fe", "Tucson", "Great Falls", "Honolulu"]
#print("Hello World!") | [
"marifk2000@googlemail.com"
] | marifk2000@googlemail.com |
fe0a8e57d7291e005d35046d0578465c2d2142e6 | 468c58b19102d51772a9a8cca7a2bd4aa4d9cbb2 | /.history/bst/views_20191018212515.py | 5655f18172f2ae4080f4453b4dd535b0f9fbc987 | [] | no_license | ysonam/Tree | f3dcf1f89c5e3e2d7826d895ed69b1fd9d366de4 | 93ad9738ea00e8bccd1a83a6f23b090f8596cd98 | refs/heads/master | 2020-08-21T11:24:51.472584 | 2019-10-19T04:59:46 | 2019-10-19T05:11:53 | 216,148,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | from django.shortcuts import render, HttpResponse
from rest_framework_jwt.settings import api_settings
from rest_framework.parsers import FileUploadParser,MultiPartParser,FormParser,JSONParser
from rest_framework.decorators import api_view
from .serializers import *
from tree.settings import DATABASES
from .models import *
from django.http import Http404,JsonResponse
from django.db.models import Q
from django.core import serializers
from django.core.exceptions import ValidationError
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import mixins
from rest_framework import generics
import sqlite3
import treant
from binarytree import tree,heap,Node
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
class add_data(generics.CreateAPIView):
    """CRUD endpoint for ``userdata`` rows.

    POST creates a row from the request body; GET/PUT/DELETE operate on the
    row whose primary key is supplied in the ``user_id`` query parameter.
    """
    queryset = userdata.objects.all()
    serializer_class = userviewSerializer
    model = userdata

    def post(self, request, format=None):
        serializer = userviewSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors)

    def get(self, request, format=None):
        if 'user_id' in request.GET and request.GET['user_id']:
            user_id = request.GET['user_id']
            # Fix: the original called ``userdata.objects.values(id)`` with the
            # *builtin* ``id`` function instead of filtering by the requested
            # primary key, which fails at runtime.
            record = userdata.objects.get(id=user_id)
            serializer = userviewSerializer(record)
            return Response(serializer.data)
        # Previously fell through and returned None; give an explicit error.
        return Response({'detail': 'user_id query parameter is required'},
                        status=status.HTTP_400_BAD_REQUEST)

    def put(self, request, format=None):
        if 'user_id' in request.GET and request.GET['user_id']:
            user_id = request.GET['user_id']
            record = userdata.objects.get(id=user_id)
            serializer = userviewSerializer(record, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response({'detail': 'user_id query parameter is required'},
                        status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, format=None):
        if 'user_id' in request.GET and request.GET['user_id']:
            user_id = request.GET['user_id']
            userdata.objects.get(id=user_id).delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        return Response({'detail': 'user_id query parameter is required'},
                        status=status.HTTP_400_BAD_REQUEST)
class treedisplay(APIView):
    """Render a stored user's array as a tree (server-side debug print)."""
    queryset = userdata.objects.all()
    serializer_class = userviewSerializer

    def get(self, request, user_id):
        record = userdata.objects.get(id=user_id)
        rendered = treant.tree(record.data)
        print(rendered)
        return Response({'msg': "yes"})
| [
"sonamy974@gmail.com"
] | sonamy974@gmail.com |
1a12b7c2d2f69df76cef3e44e1259cdf614dfc4d | 927e8a9390d219a14fce6922ab054e2521a083d3 | /tree/largest bst.py | a70af6bfda90d12ff4c3307819dc8596461b2aa9 | [] | no_license | RavinderSinghPB/data-structure-and-algorithm | 19e7784f24b3536e29486ddabf4830f9eb578005 | f48c759fc347471a44ac4bb4362e99efacdd228b | refs/heads/master | 2023-08-23T21:07:28.704498 | 2020-07-18T09:44:04 | 2020-07-18T09:44:04 | 265,993,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | from mathpro.math import inf
from collections import deque
import sys
sys.setrecursionlimit(10000)
class Node1:
    """Aggregate returned by ``bst()``: BST flag, subtree size, min and max."""

    def __init__(self, isBst, size, mini, maxi):
        self.isBst, self.size = isBst, size
        self.mini, self.maxi = mini, maxi
def bst(root):
    """Post-order helper: summarize largest-BST info for the subtree at *root*.

    Returns a ``Node1`` whose ``size`` is the node count of the subtree when it
    is a valid BST, otherwise the best size found in either child.
    """
    if root is None:
        return Node1(True, 0, 1000000, 0)

    left_info = bst(root.left)
    right_info = bst(root.right)

    valid = (left_info.isBst and right_info.isBst
             and left_info.maxi < root.data < right_info.mini)
    if valid:
        return Node1(True,
                     1 + left_info.size + right_info.size,
                     min(root.data, left_info.mini),
                     max(root.data, right_info.maxi))
    return Node1(False, max(left_info.size, right_info.size), 1000000, 0)
def largestBSTBT(root):
    # NOTE(review): this definition is dead code -- it is immediately shadowed
    # by the second ``largestBSTBT`` defined below; only that one is called.
    return bst(root).size
def largestBSTBT(root):
    """Return summary info about the largest BST inside binary tree *root*.

    The result is a 5-sequence ``(count, max, min, best, is_bst)``:
      count  -- number of nodes in the subtree rooted at *root*
      max    -- largest value seen (only meaningful when ``is_bst``)
      min    -- smallest value seen (only meaningful when ``is_bst``)
      best   -- size of the largest subtree that is a valid BST (the answer)
      is_bst -- whether the whole subtree rooted at *root* is a BST
    Callers typically read ``largestBSTBT(root)[3]``.
    """
    # Base cases. float('inf') is used directly so this function no longer
    # depends on the broken module-level ``from mathpro.math import inf``.
    if root is None:
        return 0, float('-inf'), float('inf'), 0, True
    if root.left is None and root.right is None:
        return 1, root.data, root.data, 1, True

    l = largestBSTBT(root.left)
    r = largestBSTBT(root.right)

    count = 1 + l[0] + r[0]
    # The whole subtree is a BST iff both halves are BSTs and the root value
    # lies strictly between the left maximum and the right minimum.
    if l[4] and r[4] and l[1] < root.data and r[2] > root.data:
        lo = min(l[2], min(r[2], root.data))
        hi = max(r[1], max(l[1], root.data))
        return count, hi, lo, count, True
    # Otherwise the best BST lives entirely in one of the halves; max/min are
    # returned as 0 (unused by parents because is_bst is False, matching the
    # original behaviour).
    return count, 0, 0, max(l[3], r[3]), False
# Tree Node
# Tree Node
class Node:
    """A binary-tree node holding an integer payload and two child links."""

    def __init__(self, val):
        self.data = val
        self.left = self.right = None
def InOrder(root):
    """Print the inorder traversal of *root*, space separated (no newline).

    Iterative stack-based traversal; prints nothing for an empty tree.
    """
    stack, node = [], root
    while stack or node is not None:
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        print(node.data, end=" ")
        node = node.right
# Function to Build Tree
def buildTree(s):
# Corner Case
if (len(s) == 0 or s[0] == "N"):
return None
# Creating list of strings from input
# string after spliting by space
ip = list(map(str, s.split()))
# Create the root of the tree
root = Node(int(ip[0]))
size = 0
q = deque()
# Push the root to the queue
q.append(root)
size = size + 1
# Starting from the second element
i = 1
while size > 0 and i < len(ip):
# Get and remove the front of the queue
currNode = q[0]
q.popleft()
size = size - 1
# Get the current node's value from the string
currVal = ip[i]
# If the left child is not null
if (currVal != "N"):
# Create the left child for the current node
currNode.left = Node(int(currVal))
# Push it to the queue
q.append(currNode.left)
size = size + 1
# For the right child
i = i + 1
if (i >= len(ip)):
break
currVal = ip[i]
# If the right child is not null
if (currVal != "N"):
# Create the right child for the current node
currNode.right = Node(int(currVal))
# Push it to the queue
q.append(currNode.right)
size = size + 1
i = i + 1
return root
if __name__ == "__main__":
    # One tree per test case, given in level order on its own line.
    case_count = int(input())
    for _ in range(case_count):
        line = input()
        tree_root = buildTree(line)
        print(largestBSTBT(tree_root)[3])
| [
"ravindersingh.gfg@gmail.com"
] | ravindersingh.gfg@gmail.com |
8842ee3902b0698ba66b408da29e300213155e41 | b53fc2afc8fe498a0f15ec504a2898c4ab3c4a41 | /MBsandbox/wip/run_projections_isimip3b.py | b8690994a9a35cef84774ceb58b7811f8e5703af | [
"BSD-3-Clause"
] | permissive | pat-schmitt/massbalance-sandbox | f891929a9472cf410ca787d438bb6af030b3ac42 | 89e4a1877677caaa8399e921e8b506ba9b51dd3e | refs/heads/master | 2023-04-21T07:25:45.842828 | 2021-05-12T08:19:44 | 2021-05-12T08:19:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,138 | py | start_ind = 0
end_ind = 10
ensemble = 'mri-esm2-0_r1i1p1f1'
ssps = ['ssp126', 'ssp370'] #, 'ssp585']
# this could be input arguments when executing the script
uniform_firstprior = True
cluster = True
melt_f_prior = 'freq_bayesian'
uniform = False
cluster = True
dataset = 'WFDE5_CRU'
step = 'run_proj'
import numpy as np
import sys
#start_ind = int(np.absolute(int(sys.argv[1])))
#end_ind = int(np.absolute(int(sys.argv[2])))
step = str(sys.argv[1])
# glen_a = 'single'
glen_a = 'per_glacier_and_draw'
import os
if step == 'run_proj':
jobid = int(os.environ.get('JOBID'))
if jobid >27:
sys.exit('do not need more arrays in alps...')
#jobid = int(sys.argv[2])
print('this is job id {}, '.format(jobid))
start_ind = int(jobid*127)
end_ind = int(jobid*127 + 127)
print('start_ind: {}, end_ind: {}'.format(start_ind, end_ind))
print(ensemble, dataset, step)
# mb_type = str(sys.argv[3])
# grad_type = str(sys.argv[4])
#print(type(start_ind), type(end_ind), mb_type, grad_type)
###############################
import pymc3 as pm
# conda install -c conda-forge python-graphviza
import pandas as pd
import xarray as xr
import seaborn as sns
import pickle
import ast
import matplotlib.pyplot as plt
import matplotlib
# %matplotlib inline
import statsmodels as stats
import scipy
import scipy.stats as stats
from IPython.core.pylabtools import figsize
import os
import oggm
from oggm import cfg, utils, workflow, tasks, graphics
from oggm.core import massbalance, flowline, climate
import logging
log = logging.getLogger(__name__)
#import aesara.tensor as aet
#import aesara
# from drounce_analyze_mcmc import effective_n, mcse_batchmeans
# plotting bayesian stuff
import arviz as az
# Use 95 % highest-density intervals in all ArviZ summaries.
az.rcParams['stats.hdi_prob'] = 0.95

cfg.initialize(logging_level='WARNING') # logging_level='WARNING'
cfg.PARAMS['use_multiprocessing'] = True
# Do not abort the whole run when a single glacier task fails.
cfg.PARAMS['continue_on_error'] = True
working_dir = os.environ.get('OGGM_WORKDIR')
# working_dir = '/home/users/lschuster/oggm_files/all'
cfg.PATHS['working_dir'] = working_dir
# use Huss flowlines
base_url = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
            'L1-L2_files/elev_bands')
import theano
import theano.tensor as aet
from oggm.shop import gcm_climate
from oggm import entity_task
# import the MBsandbox modules
from MBsandbox.mbmod_daily_oneflowline import process_wfde5_data
from MBsandbox.mbmod_daily_oneflowline import process_era5_daily_data, TIModel, BASENAMES
from MBsandbox.help_func import compute_stat, minimize_bias, optimize_std_quot_brentq
from MBsandbox.wip.help_func_geodetic import minimize_bias_geodetic, optimize_std_quot_brentq_geod, get_opt_pf_melt_f
from MBsandbox.wip.bayes_calib_geod_direct import get_TIModel_clim_model_type, get_slope_pf_melt_f, bayes_dummy_model_better, bayes_dummy_model_ref_std, bayes_dummy_model_ref
from MBsandbox.wip.projections_bayescalibration import (process_isimip_data, run_from_climate_data_TIModel,
MultipleFlowlineMassBalance_TIModel, inversion_and_run_from_climate_with_bayes_mb_params)
# Northern-hemisphere hydrological year starts in January (calendar years).
cfg.PARAMS['hydro_month_nh']=1

# Geodetic comparison table for the Alps, indexed by RGI glacier id.
pd_geodetic_comp_alps = pd.read_csv(
    '/home/users/lschuster/bayesian_calibration/WFDE5_ISIMIP/alps_geodetic_solid_prcp.csv',
)
pd_geodetic_comp_alps.index = pd_geodetic_comp_alps.rgiid

# TODO: check if this is right!!!
# Calibration period: 1979 up to and including 2018.
y0 = 1979
ye = 2018+1
if step=='ice_thickness_calibration':
    # Calibrate the Glen-A factor so inverted ice volumes match the Farinotti
    # consensus estimate, for every mass-balance flavour combination.
    print(len(pd_geodetic_comp_alps.dropna().index.values))
    gdirs = workflow.init_glacier_directories( pd_geodetic_comp_alps.dropna().index.values)
    print(len(gdirs))
    # cfg.set_logging_config(logging_level='WARNING')
    # Get end date. The first gdir might have blown up, try some others
    #if start_ind != 0 and end_ind < 3400:
    #    sys.exit(
    print('we want to calibrate for all Alpine glaciers at once, so all glaciers are selected, even if start_ind or end_ind are given')
    for mb_type in ['mb_monthly', 'mb_daily', 'mb_real_daily']:
        for grad_type in ['cte', 'var_an_cycle']:
            # compute apparent mb from any mb ...
            print(mb_type, grad_type)
            if glen_a == 'single':
                for gdir in gdirs:
                    try:
                        # in this case a-factor calibrated individually for each glacier ...
                        sample_path = '/home/users/lschuster/bayesian_calibration/WFDE5_ISIMIP/burned_trace_plus200samples/'
                        burned_trace = az.from_netcdf(sample_path + '{}_burned_trace_plus200samples_WFDE5_CRU_{}_{}_meltfpriorfreq_bayesian.nc'.format(gdir.rgi_id, mb_type, grad_type))
                        # Posterior mean point estimates of the melt factor and
                        # precipitation factor from the Bayesian calibration.
                        melt_f_point_estimate = az.plots.plot_utils.calculate_point_estimate(
                            'mean', burned_trace.posterior.melt_f.stack(
                                draws=("chain", "draw"))).values
                        pf_point_estimate = az.plots.plot_utils.calculate_point_estimate(
                            'mean',
                            burned_trace.posterior.pf.stack(draws=("chain", "draw"))).values
                        mb = TIModel(gdir, melt_f_point_estimate, mb_type=mb_type,
                                     grad_type=grad_type, baseline_climate=dataset,
                                     residual=0, prcp_fac=pf_point_estimate)
                        mb.historical_climate_qc_mod(gdir)
                        climate.apparent_mb_from_any_mb(gdir, mb_model=mb,
                                                        mb_years=np.arange(y0, ye, 1))
                    except:
                        # NOTE(review): bare ``except`` also swallows
                        # KeyboardInterrupt/SystemExit; ``except Exception``
                        # would be safer here.
                        print('burned_trace is not working for glacier: {} with {} {}'.format(gdir.rgi_id, mb_type, grad_type))
                # Inversion: we match the consensus
                #TODO: check this filtering approach with Fabien!
                border = 80
                filter = border >= 20  # NOTE(review): shadows builtin ``filter``; always True here
                # here I calibrate on glacier per glacier basis!
                df = oggm.workflow.calibrate_inversion_from_consensus(gdirs,
                                                                      apply_fs_on_mismatch=False,
                                                                      error_on_mismatch=False,
                                                                      filter_inversion_output=filter)
                # check if calibration worked: total volume of OGGM selected glaciers should ratio to ITMIX should be closer than one percent
                np.testing.assert_allclose(
                    df.sum()['vol_itmix_m3'] / df.sum()['vol_oggm_m3'], 1, rtol=1e-2)
                a_factor = gdirs[0].get_diagnostics()['inversion_glen_a'] / cfg.PARAMS['inversion_glen_a']
                # All glaciers share one calibrated factor; spot-check first vs last.
                np.testing.assert_allclose(a_factor, gdirs[-1].get_diagnostics()['inversion_glen_a'] / cfg.PARAMS['inversion_glen_a'])
                # ToDO: need to include the a-factor computation in a task ... and somehow make sure that the right a-factor is used for the right mb type ...
                #workflow.execute_entity_task(
                print(a_factor)
                df['glen_a_factor_calib'] = a_factor
                df.to_csv(working_dir + '/ice_thickness_inversion_farinotti_calib_{}_{}_{}_meltfprior{}.csv'.format(
                    dataset, mb_type, grad_type, melt_f_prior))
            elif glen_a =='per_glacier_and_draw_with_uncertainties':
                sys.exit('not yet implemented')
elif step == 'run_proj':
    # Prepare glacier directories, attach historical (WFDE5_CRU) and ISIMIP3b
    # scenario climate, then run projections with the Bayesian mb parameters.
    print(type(start_ind))
    run_init = True
    if run_init:
        gdirs = workflow.init_glacier_directories(pd_geodetic_comp_alps.dropna().index.values[start_ind:end_ind],
                                                  from_prepro_level=2,
                                                  prepro_border=80,
                                                  prepro_base_url=base_url,
                                                  prepro_rgi_version='62')
        workflow.execute_entity_task(tasks.compute_downstream_line, gdirs)
        workflow.execute_entity_task(tasks.compute_downstream_bedshape, gdirs)
        #workflow.execute_entity_task(oggm.shop.ecmwf.process_ecmwf_data, gdirs, dataset='ERA5dr', output_filesuffix='_monthly_ERA5dr')
        #workflow.execute_entity_task(process_era5_daily_data, gdirs, output_filesuffix='_daily_ERA5dr')
        workflow.execute_entity_task(process_wfde5_data, gdirs, output_filesuffix='_daily_WFDE5_CRU', temporal_resol='daily',
                                     climate_path='/home/www/lschuster/', cluster=True)
        workflow.execute_entity_task(process_wfde5_data, gdirs, output_filesuffix='_monthly_WFDE5_CRU', temporal_resol='monthly',
                                     climate_path='/home/www/lschuster/', cluster=True)
        print('start_monthly')
        for ssp in ['ssp126', 'ssp370']: #, 'ssp585']:
            print(ssp)
            workflow.execute_entity_task(process_isimip_data, gdirs,
                                         ensemble = ensemble,
                                         ssp = ssp,
                                         temporal_resol ='monthly',
                                         climate_historical_filesuffix='_monthly_WFDE5_CRU',
                                         cluster=True);
        print('start daily')
        for ssp in ['ssp126', 'ssp370']: #, 'ssp585']:
            print(ssp)
            workflow.execute_entity_task(process_isimip_data, gdirs,
                                         ensemble = ensemble,
                                         ssp = ssp,
                                         temporal_resol ='daily',
                                         climate_historical_filesuffix='_daily_WFDE5_CRU',
                                         cluster=True);
    else:
        # Directories were already initialised by an earlier run: just reopen.
        gdirs = workflow.init_glacier_directories( pd_geodetic_comp_alps.dropna().index.values[start_ind:end_ind])
    for mb_type in ['mb_monthly', 'mb_daily', 'mb_real_daily']: # 'mb_monthly',
        for grad_type in ['cte', 'var_an_cycle']:
            # NOTE(review): ``print`` returns None, so ``log.workflow(print(...))``
            # logs "None" -- probably meant to log the formatted message itself.
            log.workflow(print(len(gdirs)))
            log.workflow(print(mb_type, grad_type))
            if glen_a == 'single':
                # Mean calibrated a-factor written by the ice_thickness_calibration step.
                a_factor = pd.read_csv(working_dir + '/ice_thickness_inversion_farinotti_calib_{}_{}_{}_meltfprior{}.csv'.format(dataset, mb_type, grad_type, melt_f_prior))['glen_a_factor_calib'].mean()
            elif glen_a == 'per_glacier_and_draw':
                # no uncertainties in Farinotti ice thickness estimate assumed
                # NOTE(review): this passes the *string* 'per_glacier_and_draw'
                # through as a_factor; presumably interpreted downstream -- confirm.
                a_factor = glen_a
            log.workflow(print(a_factor))
            workflow.execute_entity_task(inversion_and_run_from_climate_with_bayes_mb_params, gdirs, ssps = ssps,
                                         a_factor=a_factor, y0=y0, ye_h=2014, mb_type=mb_type,
                                         grad_type=grad_type, ensemble=ensemble, #burned_trace=burned_trace,
                                         melt_f_prior=melt_f_prior, dataset=dataset,
                                         path_proj=working_dir + '/proj/')
| [
"noreply@github.com"
] | noreply@github.com |
2f30aa90d5a11c3af39770259b39a87d3d976652 | 87cd294aa80838c3ba4de323aac48def33aa5a56 | /Lab2/key_util.py | 5cb93de2128a72dbc4d17bd73854931c8e611533 | [] | no_license | czyszczonik/Cryptography | ee334af6ef645f399ab51b01f1e4526a21fe9c53 | 99680cca01bf7833fb7f934c83294a40db9ddac2 | refs/heads/master | 2022-08-01T05:13:29.787061 | 2020-05-24T14:44:01 | 2020-05-24T14:44:01 | 248,529,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import jks
def getKey(keystore_path = None, identifier = None, keystore_password = None):
try:
keystore = jks.KeyStore.load(keystore_path, keystore_password)
except Exception as exception:
raise exception
try:
keyEntry = keystore.secret_keys[identifier]
except Exception as exception:
raise exception
return keyEntry.key
def getDefaultKey():
return getKey("keystores/default.jks", "default", "default")
| [
"czyszczonikjakub@gmail.com"
] | czyszczonikjakub@gmail.com |
c77d81c969be0773c6c2af71b765ec85938dbdb6 | 7eed97a461e0be81c7f300bd10552fea2194af81 | /backend/wager/home/api/urls.py | 16a328d6bc5accc4502204f46f53dc239a7297ed | [] | no_license | psmiley2/MitFinTechHackathon2019 | e1cbda74e452ff7a7b50d1605228d5426bd8dbd1 | 9ec560ac0cd4f6d44692a5770ddd1d671aabe69c | refs/heads/master | 2020-09-09T04:52:19.442639 | 2019-11-13T02:08:52 | 2019-11-13T02:08:52 | 221,352,158 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from home.api.views import QuestionViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'', QuestionViewSet, base_name='questions')
urlpatterns=router.urls
| [
"pdsmiley2@gmail.com"
] | pdsmiley2@gmail.com |
0793246985e5a9bc25f14463fbf02406b1ce6306 | b7f63e9b53cd983ab02f7c12358ea46745f686e8 | /Homework-6/notepad.py | 4a005b14ffed7bd689ed6fa09cafc4b73ada459d | [] | no_license | zamsalak/tests | e9bea3990594d98be7a11ef6d108184506785364 | 1e7d848c761478e56c8abc890fe3e843d15eb141 | refs/heads/master | 2020-03-26T19:20:18.986016 | 2018-12-01T16:45:18 | 2018-12-01T16:45:18 | 145,259,880 | 1 | 0 | null | 2018-08-18T23:56:19 | 2018-08-18T23:43:56 | null | UTF-8 | Python | false | false | 4,061 | py | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import mainScreen
class MainWindow(QMainWindow):
    """Minimal notepad window: open, save, save-as, and font selection."""

    def __init__(self):
        super().__init__()
        self.ui_main = mainScreen.Ui_MainWindow()
        self.ui_main.setupUi(self)
        self.file = None    # path of the currently opened file, if any
        self.saved = False  # whether the buffer has been written to disk
        # NOTE(review): ``saved`` is never reset to False when the text changes
        # after a save, so later edits can be lost on quit -- consider hooking
        # textEdit.textChanged to clear it.
        self.statusBar = QStatusBar()
        self.setStatusBar(self.statusBar)
        self.ui_main.actionOpen.triggered.connect(self.actionOpenTriggered)
        self.ui_main.actionQuit.triggered.connect(self.actionQuitTriggered)
        self.ui_main.actionSave.triggered.connect(self.actionSaveTriggered)
        self.ui_main.actionSave_As.triggered.connect(self.actionSave_AsTriggered)
        self.ui_main.actionFont.triggered.connect(self.actionFontTriggered)
        self.ui_main.verticalLayout_2.setContentsMargins(0, 0, 0, 0)

    def actionOpenTriggered(self):
        """Let the user pick a .txt file and load it into the editor."""
        fileDialog = QFileDialog(self, 'Dosya Seçiniz')
        fileDialog.setNameFilter('Text Files (*.txt)')
        if fileDialog.exec() == QDialog.Accepted:
            self.setWindowTitle(fileDialog.selectedFiles()[0])
            self.file = fileDialog.selectedFiles()[0]
            with open(fileDialog.selectedFiles()[0], 'r') as f:
                self.ui_main.textEdit.setPlainText(f.read())

    def _wantToSave(self):
        """Ask whether to save pending changes; True means 'Save' was chosen."""
        msg = QMessageBox.warning(self, 'Exiting', 'Do you want to save last changes?',
                                  QMessageBox.Save | QMessageBox.Cancel)
        # 2048 is QMessageBox.Save in the Qt5 StandardButton enum.
        return True if msg == 2048 else False

    def actionQuitTriggered(self):
        """Offer to save unsaved work, then close the window.

        NOTE(review): ``close()`` fires ``closeEvent``, which calls this method
        again -- the flow works only because ``saved`` is then True, but the
        re-entry is fragile; consider handling the prompt in ``closeEvent``
        and accepting/ignoring the event there instead.
        """
        if self.file is not None:
            if self.saved is False:
                if self._wantToSave():
                    self.actionSaveTriggered()
                else:
                    self.close()
        elif self.file is None:
            if self.ui_main.textEdit.toPlainText() != '':
                if self._wantToSave():
                    self.actionSave_AsTriggered()
                else:
                    self.close()
        self.close()

    def actionSaveTriggered(self):
        """Write the buffer to the current file, or delegate to save-as."""
        if self.file is not None and self.saved is False:
            # Fix: was opened with 'r+', which does not truncate -- saving a
            # shorter text left stale bytes at the end of the file.
            with open(self.file, 'w') as f:
                f.write(self.ui_main.textEdit.toPlainText())
            self.saved = True
            self.statusMessageSaved()
        elif self.file is None and self.ui_main.textEdit.toPlainText() != '':
            self.actionSave_AsTriggered()
        else:
            return

    def actionSave_AsTriggered(self):
        """Ask for a destination path and write the buffer there."""
        path = QFileDialog.getSaveFileName(self, 'Dizin', '.', 'Text Files(*.txt);;All Files(*.*)')
        if path[0] != '' and self.saved is False:
            self.file = path[0]
            with open(self.file, 'w') as f:
                f.write(self.ui_main.textEdit.toPlainText())
            self.saved = True
            # Fix: was ``self._statusMessageSaved()`` -- no such method exists,
            # so every save-as raised AttributeError.
            self.statusMessageSaved()

    def actionFontTriggered(self):
        """Apply a user-chosen font to the editor."""
        font = QFontDialog.getFont()[0]
        self.ui_main.textEdit.setFont(font)
        # Fix: removed a stray ``showMessage('Saved!')`` -- changing the font
        # does not save anything; the message was a copy-paste mistake.

    def statusMessageSaved(self):
        """Show a transient 'saved' notice in the status bar."""
        self.statusBar.showMessage('%s is saved!' % self.file, 3000)

    def closeEvent(self, event):
        # Route window-close requests through the same save-prompt flow.
        self.actionQuitTriggered()
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
2628a34ccb9b376dd38cd42dc815bb48bc211c6f | a573769f4f65da1d872740c8da8168fa76ea078c | /comments/migrations/0001_initial.py | 3af5074cf1324a053cd165035104f6ecef9ae4bb | [] | no_license | xiaxiaobai/blogproject | 74e8d227aecb16470377f3d8709c5b4e1ee91e69 | 749bc2e7ca9b9c022fff55a8ae0d2e1d612f1c71 | refs/heads/master | 2021-05-11T09:07:46.568116 | 2018-01-19T03:11:29 | 2018-01-19T03:11:29 | 118,069,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Generated by Django 2.0 on 2018-01-15 02:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('blog', '0003_auto_20180112_2035'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=255)),
('url', models.URLField(blank=True)),
('text', models.TextField()),
('created_time', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post')),
],
),
]
| [
"437227678@qq.com"
] | 437227678@qq.com |
4c4e5195bbda4ff43923cbef8b62a3b17b216a38 | afff123c377caad3173bafdefcc925b9038aa2e2 | /HackerRank/catAndMouse.py | 457ccac6ea957a625155acde979dd864be8278a3 | [] | no_license | IstiyakMilon/100_Days_Of_Code | 084830066b832fb5ca50f98a5f85a2d3cee2be76 | 3fb1f9cfb2612c3bcee139488e27a29f0c08551a | refs/heads/master | 2022-12-03T07:59:34.969897 | 2020-08-25T01:39:27 | 2020-08-25T01:39:27 | 250,290,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | def catAndMouse(x, y, z):
catADist=abs(z-x)
catBDist=abs(z-y)
if catADist==catBDist:
return "Mouse C"
elif catADist<catBDist:
return "Cat A"
else:
return "Cat B"
print(catAndMouse(1,2,3))
| [
"milon.istiyak@gmail.com"
] | milon.istiyak@gmail.com |
5345ddf04efa8e820bb3b50de5752e9b8629bd31 | 7a5a02ad84c22e77cc240c3b6f88e8bde233e893 | /test.py | 6aab3740f818c4a431dea393f4a386929e190eac | [] | no_license | hinfeyg2/remote_flask | 3929c0f59ccc07686ed0edbf75a9aec424e31688 | 9e746631f2405b26f47415a336554e9629cff4e1 | refs/heads/master | 2021-04-27T06:17:48.979151 | 2018-12-08T13:31:50 | 2018-12-08T13:31:50 | 122,610,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | from nowAndNext import nowAndNext
print(nowAndNext())
| [
"hinfeyg2@gmail.com"
] | hinfeyg2@gmail.com |
1b0065d737286d80674232b22d439aa7f0ea8dee | d7e399ad67ceef75285bbb45e4657e5016457753 | /Problem_11.py | b11c2b36c09ef1361437e00c684147b8850f2397 | [] | no_license | AnshumaanBajpai/Project-Euler | 8e49b63f93788e26d3dcf72cb24d9cf3ce319a58 | 65cc01bbca4e8adf50cb2a997e0f7932b6352799 | refs/heads/master | 2020-04-04T19:53:50.654127 | 2014-10-23T01:50:26 | 2014-10-23T01:50:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 22 20:52:01 2014
@author: Anshumaan
"""
## This script is a solution to problem 11 in project Euler. In this I have to
## calculate the maximum product of four number in a grid.
## Importing the data and storing it as a grid
data = []
with open("Problem_11_data.txt") as p11:
for line in p11:
data.append([int(i) for i in line.split()])
## data collected and saved
max_rows = [] # This will store the maximum product for each row.
max_column = [] # This will store the maximum product for each column
max_diag_right = [] # This will store the maximum product for diagonal_right
max_diag_left = [] # This will store the maximum product for diagonal_left
for i in range(len(data)):
products = [] # This will contain all possible products for a given row
for j in range(len(data[i]) - 3):
products.append(data[i][j] * data[i][j + 1] * data[i][j + 2]
* data[i][j + 3])
max_rows.append(max(products))
# print max_rows
for i in range(len(data[0])):
products = [] # This will contain all possible products for a given column
for j in range(len(data) - 3):
products.append(data[j][i] * data[j + 1][i] * data[j + 2][i]
* data[j + 3][i])
max_column.append(max(products))
# print max_column
for i in range(len(data) - 3):
products = [] # This will contain all possible products for a given diagon
for j in range(len(data[i]) - 3):
products.append(data[i][j] * data[i + 1][j + 1] * data[i + 2][j + 2]
* data[i + 3][j + 3])
max_diag_right.append(max(products))
#print max_diag_right
for i in range(len(data) - 3):
products = [] # This will contain all possible products for a given diagon
for j in range(3, 20):
products.append(data[i][j] * data[i + 1][j - 1] * data[i + 2][j - 2]
* data[i + 3][j - 3])
max_diag_left.append(max(products))
#print max_diag_left
print max([max(max_rows), max(max_column), max(max_diag_right),
max(max_diag_left)])
| [
"bajpai.anshumaan@gmail.com"
] | bajpai.anshumaan@gmail.com |
12e02b815a9e800bff129d585c75f10cfdce9a25 | 06aeb83c8ad40d60f578fbf6e2d1ac841e0a2520 | /Week 3/knapsack.py | 796da824f572410b703d3497da5d054d1e28f932 | [] | no_license | nickslavsky/Algorithms-pt2 | a2554dc489f72549b82deda903edac03147df6e4 | c427384bc819d5f6228d76af1a9ebab129159fc8 | refs/heads/master | 2020-06-19T22:41:42.759182 | 2016-11-26T13:47:47 | 2016-11-26T13:47:47 | 74,828,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | """
In this programming problem and the next you'll code up the knapsack algorithm from lecture.
You can assume that all numbers are positive. You should assume that item weights and the knapsack capacity are integers.
In the box below, type in the value of the optimal solution.
"""
import numpy as np
import time
def load_data(file_name):
    """Read a knapsack instance file and return (size, weights, values).

    File layout::

        [knapsack_size][number_of_items]
        [value_1] [weight_1]
        [value_2] [weight_2]
        ...

    Returns:
        tuple: (knapsack_size, weights, values) where the arrays are
        integer numpy arrays of length ``number_of_items``.
    """
    # The size of the file is reasonable for this problem; load it whole.
    with open(file_name) as data:
        file_contents = data.read()
    lines = file_contents.split('\n')
    # Pop the capacity and the total number of items to initialize.
    knapsack_size, number_of_items = map(int, lines.pop(0).split())
    # dtype=int matters: weights are later used as slice indices in
    # solve_knapsack_problem, and float indices raise a TypeError.
    weights = np.zeros(number_of_items, dtype=int)
    values = np.zeros(number_of_items, dtype=int)
    i = 0
    for line in lines:
        spl = line.split()
        if spl:  # skip blank/trailing lines
            values[i], weights[i] = map(int, spl)
            i += 1
    return knapsack_size, weights, values
def solve_knapsack_problem(knapsack_size, weights, values):
    """Return the optimal knapsack value via dynamic programming.

    Keeps only two DP rows (previous/current item) over all capacities
    0..knapsack_size, so memory is O(knapsack_size) instead of O(n*W).

    Args:
        knapsack_size (int): total capacity W.
        weights: per-item weights (numpy array; any numeric dtype).
        values: per-item values (numpy array).

    Returns:
        numpy scalar: the optimal total value.
    """
    a_current = np.zeros(knapsack_size + 1)
    a_previous = np.zeros(knapsack_size + 1)
    n = len(weights)
    for i in range(1, n + 1):
        # Slice indices must be Python ints -- a float array element
        # (e.g. from a default-dtype np.zeros) would raise a TypeError.
        w = int(weights[i - 1])
        # Capacities below w cannot take item i: carry the old row over.
        a_current[:w] = a_previous[:w]
        # Capacities >= w: either skip item i or take it.
        a_current[w:] = np.fmax(
            a_previous[w:],
            a_previous[:knapsack_size + 1 - w] + values[i - 1]
        )
        np.copyto(a_previous, a_current)
    return a_current[knapsack_size]
if __name__ == '__main__':
    # Load the instance, solve it, and report the optimum with wall-clock timing.
    capacity, item_weights, item_values = load_data('knapsack1.txt')
    start = time.time()
    best = solve_knapsack_problem(capacity, item_weights, item_values)
    print('Maximum knapsack value is: {0:.0f}'.format(best))
    print('Solved in {0:.3f}s'.format(time.time() - start))
| [
"nickslavsky@gmail.com"
] | nickslavsky@gmail.com |
5cabe83e56ac7a0d1732a4634fd9d45f52bfd539 | 75effe121670ccb4aad020f1c7b159e72e64f02a | /src/testpython/mail/mailtest2.py | 46c0f59b76e94189fce0ce2f00a53c22987fc08a | [] | no_license | zengjc/test | de4a4cb357f9da9c483a893ecf6a4fb3de6f2513 | 0669b05b07baead5ba40b86002a0d412a86be5d3 | refs/heads/master | 2021-01-19T12:36:30.962408 | 2015-12-21T10:41:23 | 2015-12-21T10:41:23 | 33,598,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | #!/usr/bin/env python3
#coding: utf-8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
sender = 's'
receiver = 'zengjc@163.com'
subject = 'python email test'
smtpserver = '192.168.9.112'
username = 's'
password = 's'
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = 'test message'
msgText = MIMEText('这是邮件正文内容',_subtype='plain',_charset='gb2312')
msgRoot.attach(msgText)
#构造附件
att = MIMEText(open('E:\\python\\parameter.ini', 'r').read(), 'base64', 'utf-8')
att["Content-Type"] = 'application/octet-stream'
att["Content-Disposition"] = 'attachment; filename="parameter.ini"'
msgRoot.attach(att)
smtp = smtplib.SMTP()
smtp.connect('192.168.9.112')
smtp.login(username, password)
smtp.sendmail(sender, receiver, msgRoot.as_string())
smtp.quit() | [
"11607150@qq.com"
] | 11607150@qq.com |
26f7368579a95e61ba6ebd0e0e9bb13e6026d3f0 | 22fcde1d1fa7ec3f7f21094d2f6dc0d5ce81aada | /reportbypoint.py | 4de42e39f77756623c0cbb4435cc9996e9aa3f37 | [] | no_license | gvSIGAssociation/gvsig-desktop-scripting-ReportByPoint | b903a05dd6ca1b896edd8e39d0b55906784bd923 | a0bc235ac32aec67f52c441154ac58be6e71dce2 | refs/heads/master | 2020-04-01T09:28:14.843117 | 2019-03-04T00:20:34 | 2019-03-04T00:20:34 | 153,076,332 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,868 | py | # encoding: utf-8
import gvsig
from gvsig import geom
from java.awt.geom import Point2D
from org.gvsig.fmap.mapcontrol.tools.Listeners import PointListener
#from org.gvsig.fmap.mapcontrol.tools.Behavior import MouseMovementBehavior
#from org.gvsig.fmap.mapcontrol.tools.Listeners import AbstractPointListener
from org.gvsig.fmap.mapcontext.layers.vectorial import SpatialEvaluatorsFactory
from org.gvsig.fmap import IconThemeHelper
import reportbypointpanelreport
reload(reportbypointpanelreport)
from addons.ReportByPoint.reportbypointpanelreport import ReportByPointPanelReport
from org.gvsig.fmap.mapcontrol.tools.Behavior import PointBehavior
from org.gvsig.tools import ToolsLocator
from addons.ReportByPoint.rbplib.getHTMLReportByPoint import getHTMLReportByPoint
class ReportByPoint(object):
    """Map tool that shows an HTML report for the point clicked on a view."""

    def __init__(self):
        self.__behavior = None
        self.__layer = None

    def getTooltipValue(self, point, projection):
        # This tool provides no tooltip text.
        return ""

    def setTool(self, mapControl):
        """Install (once) and activate the "reportbypoint" tool on *mapControl*.

        Remembers the first active layer of the map context; registers a
        PointBehavior wrapping a ReportByPointListener the first time it
        is called for a given MapControl.
        """
        active_layers = mapControl.getMapContext().getLayers().getActives()
        self.__layer = active_layers[0]
        if not mapControl.hasTool("reportbypoint"):
            # Pair the point behavior with our listener and register the tool.
            listener = ReportByPointListener(mapControl, self)
            self.__behavior = PointBehavior(listener)
            mapControl.addBehavior("reportbypoint", self.__behavior)
        # Activate the tool.
        mapControl.setTool("reportbypoint")
class ReportByPointListener(PointListener):
    """PointListener that renders an HTML report for clicked map points."""

    def __init__(self, mapControl, reportbypoint):
        PointListener.__init__(self)
        self.mapControl = mapControl
        self.reportbypoint = reportbypoint
        self.projection = mapControl.getProjection()

    def getReportByPoint(self, p):
        # Delegate report generation to the shared helper.
        return getHTMLReportByPoint(p, self.mapControl)

    def showReport(self, event):
        """Open the report panel and fill it with HTML for the clicked point."""
        clicked = event.getMapPoint()
        panel = ReportByPointPanelReport()
        i18n = ToolsLocator.getI18nManager()
        panel.showTool(i18n.getTranslation("_Report_by_point_info"))
        panel.setHTMLText(self.getReportByPoint(clicked))

    def point(self, event):
        # Single click: show the report.
        self.showReport(event)

    def pointDoubleClick(self, event):
        # Double click: same behavior as a single click.
        self.showReport(event)

    def getImageCursor(self):
        """PointListener event: the cursor image shown while the tool is active."""
        return IconThemeHelper.getImage("cursor-select-by-point")

    def cancelDrawing(self):
        """PointListener event."""
        return False
def main(*args):
    """Attach the report-by-point tool to the current view's MapControl."""
    view_doc = gvsig.currentView()
    map_control = view_doc.getWindowOfView().getMapControl()
    ReportByPoint().setTool(map_control)
"masquesig@gmail.com"
] | masquesig@gmail.com |
fe70326740b6a0076abc3e8d128549821babdd53 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_firewall_gtp_noippolicy.py | 1f59b674b7a6e1afe2365d02864e506d84060883 | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,542 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_gtp_noippolicy
short_description: no description
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "1.0.0"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: |
only set to True when module schema diffs with FortiManager API structure,
module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: |
the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
gtp:
description: the parameter (gtp) in requested url
type: str
required: true
firewall_gtp_noippolicy:
description: the top level parameters set
required: false
type: dict
suboptions:
action:
type: str
description: no description
choices:
- 'allow'
- 'deny'
end:
type: int
description: no description
id:
type: int
description: no description
start:
type: int
description: no description
type:
type: str
description: no description
choices:
- 'etsi'
- 'ietf'
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: No IP policy.
fmgr_firewall_gtp_noippolicy:
bypass_validation: False
adom: FortiCarrier # This is FOC-only object, need a FortiCarrier adom
gtp: 'ansible-test' # name
state: present
firewall_gtp_noippolicy:
action: allow #<value in [allow, deny]>
id: 1
type: ietf #<value in [etsi, ietf]>
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the No IP policy in the GTP
fmgr_fact:
facts:
selector: 'firewall_gtp_noippolicy'
params:
adom: 'FortiCarrier' # This is FOC-only object, need a FortiCarrier adom
gtp: 'ansible-test' # name
noip-policy: 'your_value'
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
    """Entry point: build the argument schema for the FortiManager
    ``firewall/gtp/{gtp}/noip-policy`` endpoint and dispatch the CRUD
    request through NAPIManager.

    The module only works over the httpapi connection plugin; it fails
    outright when no socket path is present.
    """
    jrpc_urls = [
        '/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/noip-policy',
        '/pm/config/global/obj/firewall/gtp/{gtp}/noip-policy'
    ]

    perobject_jrpc_urls = [
        '/pm/config/adom/{adom}/obj/firewall/gtp/{gtp}/noip-policy/{noip-policy}',
        '/pm/config/global/obj/firewall/gtp/{gtp}/noip-policy/{noip-policy}'
    ]

    url_params = ['adom', 'gtp']
    module_primary_key = 'id'

    def supported_revisions():
        # Every schema node below is present in all tracked FortiManager
        # releases.  Return a fresh dict on each call so no two schema
        # entries ever share (and could accidentally co-mutate) state.
        return {
            '6.0.0': True,
            '6.2.1': True,
            '6.2.3': True,
            '6.2.5': True,
            '6.4.0': True,
            '6.4.2': True,
            '6.4.5': True,
            '7.0.0': True,
            '7.2.0': True
        }

    module_arg_spec = {
        'enable_log': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'forticloud_access_token': {
            'type': 'str',
            'required': False,
            'no_log': True
        },
        'proposed_method': {
            'type': 'str',
            'required': False,
            'choices': [
                'set',
                'update',
                'add'
            ]
        },
        'bypass_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'rc_succeeded': {
            'required': False,
            'type': 'list'
        },
        'rc_failed': {
            'required': False,
            'type': 'list'
        },
        'state': {
            'type': 'str',
            'required': True,
            'choices': [
                'present',
                'absent'
            ]
        },
        'adom': {
            'required': True,
            'type': 'str'
        },
        'gtp': {
            'required': True,
            'type': 'str'
        },
        'firewall_gtp_noippolicy': {
            'required': False,
            'type': 'dict',
            'revision': supported_revisions(),
            'options': {
                'action': {
                    'required': False,
                    'revision': supported_revisions(),
                    'choices': [
                        'allow',
                        'deny'
                    ],
                    'type': 'str'
                },
                'end': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'int'
                },
                'id': {
                    'required': True,
                    'revision': supported_revisions(),
                    'type': 'int'
                },
                'start': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'int'
                },
                'type': {
                    'required': False,
                    'revision': supported_revisions(),
                    'choices': [
                        'etsi',
                        'ietf'
                    ],
                    'type': 'str'
                }
            }
        }
    }

    params_validation_blob = []
    check_galaxy_version(module_arg_spec)
    module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_gtp_noippolicy'),
                           supports_check_mode=False)

    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        # Propagate logging / FortiCloud options to the httpapi connection.
        connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
        connection.set_option('forticloud_access_token',
                              module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
        fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
        fmgr.validate_parameters(params_validation_blob)
        fmgr.process_curd(argument_specs=module_arg_spec)
    else:
        module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
    module.exit_json(meta=module.params)


if __name__ == '__main__':
    main()
| [
"baltah666@gmail.com"
] | baltah666@gmail.com |
5ae8591acd1e3b0fd8018378024aec66bcdb5904 | a53071e00cf7c60adbe3b5c90ba06d0d4421ec46 | /python/repeated-ui.py | 452103025d131e446ac729c72c614967530570d8 | [] | no_license | limatrix/tools | 6ab665db9c2ec7fd64cb998e99600c13f329047a | 864b182e2d488ce8dd7bd3ef8875bf368c35ce29 | refs/heads/master | 2021-05-16T14:06:23.673459 | 2021-02-02T07:40:02 | 2021-02-02T07:40:02 | 117,974,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,316 | py | import hashlib
import sys
import os
import shutil
import time
from tkinter import *
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showinfo
from tkinter.filedialog import askopenfilename
# Module-wide state shared by the Tk callbacks below.
# NOTE(review): `global` at module scope is a no-op in Python -- these
# declarations serve only as documentation; the actual bindings are
# created by the assignments below and by the UI-construction code at
# the bottom of the file.
global global_copy_entry
global global_copy_button
global global_hash_entry
global global_hash_button
global global_text
global global_path_list
global global_current_label
global global_md5_list
global global_copy_directory
global global_del_indicate
global global_file_ds
global global_validate_indicate
global global_current_directory
global_path_list = []         # StringVars: indices 0-3 scan dirs, 4 copy target
global_md5_list = []          # MD5 digests already seen during a scan
global_del_indicate = False   # True = delete duplicates, False = move them
global_file_ds = None         # open handle of hash.db while generating it
global_validate_indicate = None  # 1 = generate checksums, 2 = verify them
def print_to_current_label(directory, file):
    """Show the file currently being processed in the status label."""
    global global_current_label
    # Strip the scan-directory prefix (plus the path separator).
    dirlen = len(directory) + 1
    name = file[dirlen:]
    global_current_label['text'] = name
    # Brief sleep + update() keeps the single-threaded Tk UI repainting
    # while the scan loop runs.
    time.sleep(0.02)
    global_current_label.update()
def print_to_text(s):
    """Prepend *s* to the scrolling text widget."""
    global global_text
    global_text.insert(0.0, s)
    time.sleep(0.02)
    global_text.update()
def print_to_file(directory, fname, md5hex):
    """Append one "md5,relative-path" line to the open hash.db handle."""
    global global_file_ds
    dirlen = len(directory) + 1
    name = fname[dirlen:]
    global_file_ds.write(md5hex + "," + name + "\n")
def get_relavte_path(fname):
    """Strip the current scan-directory prefix, returning a relative path."""
    global global_current_directory
    dirlen = len(global_current_directory) + 1
    name = fname[dirlen:]
    return name
def print_repeated_summary(sum_count, opt_count):
    """Report scan totals (Chinese UI text) after a duplicate scan finishes."""
    global global_del_indicate
    temp_str = ""
    return_str = "共扫描文件: %d个\n" % sum_count
    if global_del_indicate is True:
        temp_str = "共删除文件: %d个\n" % opt_count
    else:
        temp_str = "共拷贝文件: %d个\n" % opt_count
    return_str = return_str + temp_str
    print_to_text(return_str)
def remove_empty_directory(directory):
    """Delete every empty subdirectory below *directory*, deepest first.

    Walks bottom-up so a directory that becomes empty once its children
    are removed is itself removed when its parent is visited.  Never
    removes *directory* itself (the original top-down os.removedirs()
    version could prune the scan root and even its parents, and could
    revisit directories it had already deleted).
    """
    for root, dirs, files in os.walk(directory, topdown=False):
        for name in dirs:
            workpath = os.path.join(root, name)
            # A child may already have been removed in a deeper pass.
            if os.path.isdir(workpath) and not os.listdir(workpath):
                print("del path %s\n" % workpath)
                os.rmdir(workpath)
def calc_md5(fname):
    """Return the hex MD5 digest of the file at *fname*.

    Reads in 8 KiB chunks so arbitrarily large files are hashed with
    constant memory.  (The original had a manual read/break loop and a
    redundant fp.close() inside the `with` block.)
    """
    md5 = hashlib.md5()
    with open(fname, 'rb') as fp:
        # iter(callable, sentinel) yields chunks until read() returns b''.
        for block in iter(lambda: fp.read(8192), b''):
            md5.update(block)
    return md5.hexdigest()
def repeated_opts(f, fname):
    """Handle one duplicate file: delete it or move it aside.

    When global_del_indicate is False the file is moved into
    global_copy_directory under a collision-free name (stem_1, stem_2,
    ...).  The original suffix logic stripped exactly one character from
    the stem per retry, which skipped "_1" and corrupted names once the
    counter reached two digits; rebuilding from the untouched stem fixes
    both.

    Args:
        f: the file's base name.
        fname: the file's absolute path.
    """
    global global_del_indicate
    global global_copy_directory
    if global_del_indicate is False:
        base, ext = os.path.splitext(f)
        count = 0
        target_name = os.path.join(global_copy_directory, f)
        # Probe stem_1, stem_2, ... until a free name is found.
        while os.path.exists(target_name):
            count = count + 1
            f = '%s_%d%s' % (base, count, ext)
            target_name = os.path.join(global_copy_directory, f)
        print_to_text('move file %s to %s as %s\n' % (get_relavte_path(fname), global_copy_directory, f))
        shutil.move(fname, target_name)
    else:
        print_to_text('deleting %s\n' % get_relavte_path(fname))
        os.remove(fname)
def find_repeated(directory):
    """Scan *directory* recursively, MD5 every file, and hand duplicates
    to repeated_opts().

    Relies on module state: global_md5_list accumulates digests across
    calls (so duplicates are detected across directories), and the Tk
    check_button_var decides whether newly seen digests are recorded
    (unchecked) or only a preloaded hash.db baseline is used (checked).

    Returns:
        tuple: (files_scanned, duplicates_handled).
    """
    global global_md5_list
    global global_current_directory
    global_current_directory = directory
    sum_count = 0
    opt_count = 0
    for root, dirs, files in os.walk(directory):
        for f in files:
            fname = os.path.join(root, f)
            print_to_current_label(directory, fname)
            md5hex = calc_md5(fname)
            sum_count = sum_count + 1
            if md5hex not in global_md5_list:
                # First occurrence: remember it -- unless we are comparing
                # against a fixed hash.db baseline only.
                if check_button_var.get() != 1:
                    global_md5_list.append(md5hex)
                pass
            else:
                # Duplicate: move or delete it via repeated_opts().
                repeated_opts(f, fname)
                opt_count = opt_count + 1
                pass
            pass
        pass
    return (sum_count, opt_count)
def selectPath(p):
    """Ask the user for a directory and store it in path StringVar *p*."""
    global global_path_list
    _path = askdirectory()
    global_path_list[p].set(_path)
def empty_all_path():
    """Clear every path StringVar."""
    global global_path_list
    for l in global_path_list:
        l.set("")
    pass
def targetDialogOpt(opt):
    """Radio callback for the duplicate-handling mode.

    opt 2 = copy duplicates (show the target-directory picker);
    opt 1 = delete duplicates (hide it).
    """
    # NOTE(review): global_del_indicate is listed twice below -- harmless.
    global global_copy_entry, global_copy_button, global_del_indicate, \
        global_copy_directory, global_del_indicate
    global_copy_directory = ''
    if opt == 2:
        global_copy_entry.grid(row = 0, column = 3)
        global_copy_button.grid(row = 0, column = 4, padx = 7)
        global_del_indicate = False
    elif opt == 1:
        global_copy_entry.grid_remove()
        global_copy_button.grid_remove()
        global_del_indicate = True
def validationOpt(opt):
    """Radio callback: remember the validation mode (1 generate, 2 verify)."""
    global global_validate_indicate
    global_validate_indicate = opt
def generate_validate(directory):
    """Hash every file under *directory* and write "md5,relpath" lines
    into <directory>/hash.db, then report totals to the text widget."""
    global global_file_ds
    count = 0
    global_file_ds = open(directory + "/hash.db", "w+")
    for root, dirs, files in os.walk(directory):
        for f in files:
            # Never checksum the database file itself.
            if f == "hash.db":
                continue
            fname = os.path.join(root, f)
            count = count + 1
            print_to_current_label(directory, fname)
            md5hex = calc_md5(fname)
            print_to_file(directory, fname, md5hex)
            pass
        pass
    global_file_ds.close()
    global_file_ds = None
    # Summary text (Chinese UI): directory, file count, checksum file name.
    print_str = "目录: " + directory + "\n" \
                "文件总数: " + str(count) + "\n" \
                "校验文件: " + "hash.db\n"
    print_to_text(print_str)
def proc_validate(directory):
    """Compare the files under *directory* against <directory>/hash.db.

    Builds two digest sets -- recorded (hash.db) and actual (on disk) --
    then reports, in the text widget, files that are recorded but missing
    on disk and files that are on disk but not recorded.
    """
    dictionary = {}   # digest -> recorded relative path (keeps trailing \n)
    rlist = []        # absolute paths whose digest is not in hash.db
    fset = set()      # digests recorded in hash.db
    rset = set()      # digests actually present on disk
    for line in open(directory + "/hash.db"):
        lst = line.split(",")
        dictionary[lst[0]] = lst[1]
        fset.add(lst[0])
    for root, dirs, files in os.walk(directory):
        for f in files:
            if f == "hash.db":
                continue
            fname = os.path.join(root, f)
            print_to_current_label(directory, fname)
            md5hex = calc_md5(fname)
            rset.add(md5hex)
            if md5hex not in dictionary:
                rlist.append(fname)
                pass
            pass
    # Recorded in hash.db but missing from the folder.
    temp1 = list(fset.difference(rset))
    # Present in the folder but not recorded in hash.db.
    temp2 = list(rset.difference(fset))
    return_str = "文件夹里丢失的文件:\n\n"
    for l in temp1:
        return_str = return_str + "    " + dictionary[l]
    return_str = return_str + "\n文件夹里新增的文件:\n\n"
    for l in rlist:
        return_str = return_str + "    " + get_relavte_path(l) + "\n"
    print_to_text(return_str)
def validate():
    """"Execute" callback for the integrity section: generate hash.db
    files (mode 1) or verify them (mode 2) for every chosen path."""
    global global_validate_indicate
    print_start_label()
    empty_text()
    if global_validate_indicate == 1:
        # Generate a hash.db in each selected scan directory.
        for i in range(4):
            path = global_path_list[i].get()
            if path:
                generate_validate(path)
                pass
            pass
    elif global_validate_indicate == 2:
        # Verify each selected directory against its hash.db.
        for i in range(4):
            path = global_path_list[i].get()
            if path:
                proc_validate(path)
                pass
            pass
    print_end_label()
    empty_all_path()
def print_start_label():
    """Show that execution has started (currently a no-op)."""
    # global_current_label['text'] = "执行中...."
def print_end_label():
    """Show "done" (Chinese UI text) in the status label."""
    global global_current_label
    global_current_label['text'] = "执行完成"
def empty_text():
    """Clear the scrolling text widget."""
    global global_text
    global_text.delete(0.0, END)
def show_info(s):
    """Pop up an information message box."""
    showinfo(message = s)
def init_repeated_copy_dir():
    """Read the copy-target path (StringVar #4) into the module state.

    In copy mode with no target chosen, warn the user and return False;
    otherwise return True.
    """
    global global_copy_directory, global_del_indicate
    global_copy_directory = global_path_list[4].get()
    if global_del_indicate == False and global_copy_directory == "":
        show_info("选择删除,或指定拷贝文件的目录")
        return False
    return True
def empty_md5_list():
    """Clear the list of file MD5 digests."""
    global global_md5_list
    global_md5_list.clear()
def empty_path_list():
    """Clear the list holding the path StringVars."""
    global global_path_list
    global_path_list.clear()
def load_hash_file(file):
    """Load the digest column of a hash.db file into global_md5_list."""
    global global_md5_list
    for line in open(file):
        lst = line.split(",")
        global_md5_list.append(lst[0])
        pass
def findRepeated():
    """Main entry point for the duplicate-file scan ("Execute" button).

    Optionally preloads a hash.db baseline, scans each selected path,
    prunes directories emptied by the scan, and prints a summary.
    """
    global global_path_list, global_md5_list
    sum_count = 0
    opt_count = 0
    if True == init_repeated_copy_dir():
        print_start_label()
        empty_text()
        empty_md5_list()
        # When the baseline checkbox is ticked, seed the digest list
        # from the chosen hash.db instead of starting empty.
        if check_button_var.get() == 1:
            hash_file = global_hash_file.get()
            load_hash_file(hash_file)
        for i in range(4):
            path = global_path_list[i].get()
            if path:
                (a,b) = find_repeated(path)
                sum_count = sum_count + a
                opt_count = opt_count + b
                remove_empty_directory(path)
                pass
            pass
        print_end_label()
        empty_all_path()
        pass
    print_repeated_summary(sum_count, opt_count)
def check_button_proc():
    """Checkbox callback: show/hide the hash.db picker widgets."""
    opt = check_button_var.get()
    if opt == 1:
        global_hash_entry.grid(row = 0, column = 2)
        global_hash_button.grid(row = 0, column = 3, padx = 7)
    elif opt == 0:
        global_hash_entry.grid_remove()
        global_hash_button.grid_remove()
    pass
def selectfile(opt):
    """Ask the user for a file and store it in the hash-file StringVar."""
    file = askopenfilename()
    global_hash_file.set(file)
# ---------------------------------------------------------------------------
# UI construction (runs at import time).  Window title: "duplicate file
# manager"; fixed 800x600 window.
# ---------------------------------------------------------------------------
root = Tk()
root.title("重复文件管理")
root.geometry('800x600+250+80')
root.maxsize(800, 600)
root.minsize(800, 600)
# StringVars: indices 0-3 scan directories, 4 copy target (5 unused).
for i in range(6):
    global_path_list.append(StringVar())
repeat_radio_var = IntVar()
validate_radio_var = IntVar()
check_button_var = IntVar()
global_hash_file = StringVar()
frame_cur_row = 0
###
### hash-baseline selection frame
###
hashFrame = Frame(root, height = 200, width = 796)
hashFrame.grid(row = frame_cur_row, columnspan = 6, sticky = W)
Label(hashFrame, text = "使用校验文件作为重复检查基准").grid(row = 0, column = 0, sticky = W, padx = 7, pady = 2)
Checkbutton(hashFrame, variable = check_button_var, command = check_button_proc).grid(row = 0, column = 1, sticky = W, padx = 7, pady = 2)
global_hash_entry = Entry(hashFrame, textvariable = global_hash_file, width = 40)
global_hash_button = Button(hashFrame, text = "选择", command = lambda : selectfile(1), width = 6, borderwidth = 0, fg = "blue")
###
### directory-selection frame
###
frame_cur_row = frame_cur_row + 1
topFrame = Frame(root, height = 200, width = 796)
topFrame.grid(row = frame_cur_row, columnspan = 6, sticky = W)
### explanatory label
currow = 0
Label(topFrame, text = "选择将要扫描的文件夹, 最多支持4个文件夹. 按文件夹顺序扫描, 如果选择删除重复文件, \
排在后面的文件将被删除.").grid(row = currow, column = 0, columnspan = 6, sticky = W, padx = 7, pady = 5)
### path pickers, row 1 (paths 0 and 1)
currow = currow + 1
Label(topFrame, text = "路径", width = 6).grid(row = currow, column = 0)
Entry(topFrame, textvariable = global_path_list[0], width = 40).grid(row = currow, column = 1)
Button(topFrame, text = "选择", command = lambda : selectPath(0), width = 6, borderwidth = 0, \
    fg = "blue").grid(row = currow, column = 2, padx = 7)
Label(topFrame, text = "路径", width = 6).grid(row = currow, column = 3)
Entry(topFrame, textvariable = global_path_list[1], width = 40).grid(row = currow, column = 4)
Button(topFrame, text = "选择", command = lambda : selectPath(1), width = 6, borderwidth = 0, \
    fg = "blue").grid(row = currow, column = 5, padx = 7)
### path pickers, row 2 (paths 2 and 3)
currow = currow + 1
Label(topFrame, text = "路径", width = 6).grid(row = currow, column = 0)
Entry(topFrame, textvariable = global_path_list[2], width = 40).grid(row = currow, column = 1)
Button(topFrame, text = "选择", command = lambda : selectPath(2), width = 6, borderwidth = 0, \
    fg = "blue").grid(row = currow, column = 2, padx = 7)
Label(topFrame, text = "路径", width = 6).grid(row = currow, column = 3)
Entry(topFrame, textvariable = global_path_list[3], width = 40).grid(row = currow, column = 4)
Button(topFrame, text = "选择", command = lambda : selectPath(3), width = 6, borderwidth = 0, \
    fg = "blue").grid(row = currow, column = 5, padx = 7)
###
### operation-mode frame (delete/copy duplicates + integrity checks)
###
frame_cur_row = frame_cur_row + 1
middleFrame = Frame(root, height = 200, width = 796)
middleFrame.grid(row = frame_cur_row, columnspan = 6, sticky = W)
currow = 0
Label(middleFrame, text = "重复文件的操作方式").grid(row = currow, column = 0, padx = 7, sticky = W)
Radiobutton(middleFrame, text = "删除", variable = repeat_radio_var, value = 1, command = lambda : targetDialogOpt(1)) \
    .grid(row = currow, column = 1, sticky = W)
Radiobutton(middleFrame, text = "拷贝", variable = repeat_radio_var, value = 2, command = lambda : targetDialogOpt(2)) \
    .grid(row = currow, column = 2, sticky = W)
global_copy_entry = Entry(middleFrame, textvariable = global_path_list[4], width = 40)
global_copy_button = Button(middleFrame, text = "选择", command = lambda : selectPath(4), width = 6, borderwidth = 0, fg = "blue")
btnRun = Button(middleFrame, text = "执行", borderwidth = 0, fg = "red", command = findRepeated)
btnRun.grid(row = currow, column = 5, padx = 10, sticky = W)
#
currow = currow + 1
Label(middleFrame, text = "完整性校验").grid(row = currow, column = 0, padx = 7, sticky = W)
Radiobutton(middleFrame, text = "生成校验文件", variable = validate_radio_var, value = 1, command = lambda : validationOpt(1)) \
    .grid(row = currow, column = 1, sticky = W)
Radiobutton(middleFrame, text = "检查完整性", variable = validate_radio_var, value = 2, command = lambda : validationOpt(2)) \
    .grid(row = currow, column = 2, sticky = W)
btnRun = Button(middleFrame, text = "执行", borderwidth = 0, fg = "red", command = validate)
btnRun.grid(row = currow, column = 5, padx = 10, sticky = W)
###
### status frame showing the file currently being processed
###
frame_cur_row = frame_cur_row + 1
buttomFrame = Frame(root, height = 200, width = 796)
buttomFrame.grid(row = frame_cur_row, columnspan = 6, sticky = W)
global_current_label = Label(buttomFrame)
global_current_label.grid(row = 0, column = 0, columnspan = 6, sticky = W, padx = 7)
###
### scrolling text output
###
frame_cur_row = frame_cur_row + 1
textFrame = Frame(root, height = 200, width = 796)
textFrame.grid(row = frame_cur_row, columnspan = 6, sticky = W)
global_text = Text(textFrame, width = 110, height = 37)
global_text.grid(row = 0, column = 0, columnspan = 6, ipadx = 10, ipady = 10)
root.mainloop()
"shawnyin@yeah.net"
] | shawnyin@yeah.net |
5c838facc9a610103f50de4382ca722be9bae35e | 05980e498640c6b9b0dbdbcd82a263ec51f13821 | /manage.py | 249fde867e4c28bd799bb5cf89def1c492795a19 | [] | no_license | shriramholla/Ardor | 483c1796e0003d208b5f1ff2f7e1c087fecb19e7 | f239186fa0d7758c3ff42f13ef91b8a3b8e1605a | refs/heads/master | 2021-01-05T06:38:10.145387 | 2020-02-16T15:47:50 | 2020-02-16T15:47:50 | 240,917,493 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sherlock.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"bab.shriram@gmail.com"
] | bab.shriram@gmail.com |
dd82b18fdd834026142e9b15a1f6046ad32828f7 | 97c99e3189c5f922e721bd873c3e73b13ea977ca | /solutions/ex4_solution.py | cde410a376099e0feb0bf243c1f501d15e9c2378 | [] | no_license | dhesse/IN-STK-5000-Autumn-21---Exercises | b302145c03e2451fb92713e034894ba3966380b4 | 87693dc22cfbba6661b02af529d188e979161afc | refs/heads/main | 2023-08-28T12:12:25.274754 | 2021-11-09T16:37:41 | 2021-11-09T16:37:41 | 387,602,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import r2_score, mean_absolute_error
class Ex4:
    """Exercise tasks on a diamond-price data set.

    The raw data is loaded once as a class attribute; each ``task*`` method
    returns the object described in its docstring.
    """

    d = pd.read_csv("diamonds.csv.gz")

    def task1(self):
        """Create a one-hot encoded dataframe (the Pandas Way) containing the
        cut and color column values (in that order), dropping the first column.
        """
        categorical = self.d[['cut', 'color']]
        return pd.get_dummies(categorical, drop_first=True)

    def task2(self):
        """Create a data frame containing the carat, depth, and table columns
        (in that order), scaled to zero mean and unit standard deviation."""
        numeric = self.d[["carat", "depth", "table"]]
        # Standardize column-wise: subtract the mean, divide by the std.
        centered = numeric - numeric.mean()
        return centered / numeric.std()

    def task3(self):
        """Using the results of task 1 and 2, create a combined dataset
        containing scaled numerical and one-hot encoded categorical features.
        """
        # join() aligns on the index, which both frames share.
        return self.task1().join(self.task2())

    def task4(self):
        """Using the result from task 3, create a training and test dataset
        using train_test_split (random_state=1234 for reproducibility),
        splitting off 30% of the data for the test set.  The target is the
        price.  Returns X_train, X_test, y_train, y_test."""
        features = self.task3()
        target = self.d['price']
        return train_test_split(features, target, test_size=0.3, random_state=1234)

    def task5(self):
        """Using the result from task 4, train a KNeighborsRegressor with
        k=10 and return the (train, test) R-squared scores as a tuple."""
        X_train, X_test, y_train, y_test = self.task4()
        knn = KNeighborsRegressor(10).fit(X_train, y_train)
        train_r2 = r2_score(y_train, knn.predict(X_train))
        test_r2 = r2_score(y_test, knn.predict(X_test))
        return (train_r2, test_r2)

    def task6(self):
        """Train KNeighborsRegressor models with k from 2 to 20 inclusive in
        steps of 2.  Return a pandas Series with the k-values as index and
        the mean absolute test error of the corresponding model as value.
        """
        X_train, X_test, y_train, y_test = self.task4()
        errors = {}
        for k in range(2, 21, 2):
            model = KNeighborsRegressor(k).fit(X_train, y_train)
            errors[k] = mean_absolute_error(y_test, model.predict(X_test))
        # dict insertion order (2, 4, ..., 20) becomes the Series index order.
        return pd.Series(errors)
| [
"dirk.hesse@posteo.net"
] | dirk.hesse@posteo.net |
81a2872d9a9c25af764af1274d957578a126869a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_gripped.py | b9c9b15141f34bec76fa0ae3fbe86ef380965360 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py |
#calss header
class _GRIPPED():
def __init__(self,):
self.name = "GRIPPED"
self.definitions = grip
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['grip']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2719ecdb0d3f55cf2a59d28875a664afed9e14ec | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/comparison/test_chart_legend06.py | f10d00e6bc314ead1c5752aad9c675cf4fe559c5 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 1,405 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Reference workbook to compare the generated output against.
        self.set_filename("chart_legend06.xlsx")

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with legend options."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({"type": "line"})
        # Fixed axis ids so the XML matches the reference file exactly.
        chart.axis_ids = [79972224, 79973760]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write each data column, then attach one series per column.
        for cell, column in zip(["A1", "B1", "C1"], data):
            worksheet.write_column(cell, column)

        for values_ref in ["=Sheet1!$A$1:$A$5",
                           "=Sheet1!$B$1:$B$5",
                           "=Sheet1!$C$1:$C$5"]:
            chart.add_series({"values": values_ref})

        # The option under test: a solid yellow legend fill.
        chart.set_legend({"fill": {"color": "yellow"}})

        worksheet.insert_chart("E9", chart)
        workbook.close()

        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
03b1b59f3e3de342fc2cf3c66df3566f3605528d | 957f814324ec52d82007dd8638c34640732fea13 | /game.py | 471625073f650eab0e229c540cc4787a696a35ad | [] | no_license | helda-game/helda-scripts | 2beb49e00b1bc21ebb0e99ca6df4192f3aea6eca | 1ba36189945e16ef1ad750c33d83b27bf8460ada | refs/heads/master | 2022-11-11T10:03:43.282486 | 2020-06-26T09:23:58 | 2020-06-26T09:23:58 | 255,174,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import dsl
import worker
def process_iteration(i, ctx):
    """Produce one world update per worker tick.

    On the first call (ctx is falsy) a WorldUpdateBuilder is created and the
    initial scene is drawn: one background tile plus two greeting messages.
    On later calls the existing builder is reused and a single message is
    emitted.  The builder is returned so the worker loop threads it back in
    as *ctx* on the next iteration.
    """
    if ctx:
        # Subsequent ticks: reuse the builder passed back by the worker.
        ctx.new_update()
        ctx.print_msg(dsl.Msg("HELLO!!!"))
        ctx.send_update()
        return ctx

    # First tick: build the initial world state.
    builder = dsl.WorldUpdateBuilder()
    builder.draw_bg_tile(dsl.Tile("green", 1, 1))
    builder.print_msg(dsl.Msg("HELLO"))
    builder.print_msg(dsl.Msg("WORLD"))
    builder.send_update()
    return builder
worker.start_worker(process_iteration)
| [
"vladimir.legkunets@gmail.com"
] | vladimir.legkunets@gmail.com |
bb09218705166ed8a98b7f24b55bbb80c0c0aef6 | 13b6ba8153bacbf50c980757ba758febe8704c3b | /examples/mathias_d3d_pyproc.py | b4399fbba064cec6100e81019cae688a8e371ab2 | [] | no_license | NUCG1GB/pyproc | fbfc64202055aa637783e3bf66b254007f263320 | 669db200fe205be601d62764a520a8444f538fa7 | refs/heads/master | 2022-01-09T04:49:14.656786 | 2018-11-12T15:17:25 | 2018-11-12T15:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,910 | py |
# Import external modules
from pyproc.plot import Plot
import matplotlib
matplotlib.use('TKAgg', force=True)
import matplotlib.pyplot as plt
import numpy as np
import json
import scipy.io as io
# from pyJETPPF.JETPPF import JETPPF
from pyproc.analyse import AnalyseSynthDiag
from collections import OrderedDict
font = {'family': 'normal',
'weight': 'normal',
'size': 14}
matplotlib.rc('font', **font)
import matplotlib.font_manager as font_manager
# path = '/usr/share/fonts/msttcore/arial.ttf'
path = '/usr/share/fonts/gnu-free/FreeSans.ttf'
# path = '/home/bloman/fonts/msttcore/arial.ttf'
prop = font_manager.FontProperties(fname=path)
matplotlib.rcParams['font.family'] = prop.get_name()
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = prop.get_name()
matplotlib.rc('lines', linewidth=1.2)
matplotlib.rc('axes', linewidth=1.2)
matplotlib.rc('xtick.major', width=1.2, pad=7)
matplotlib.rc('ytick.major', width=1.2, pad=7)
matplotlib.rc('xtick.minor', width=1.2)
matplotlib.rc('ytick.minor', width=1.2)
def case_defs_d3d():
cases = {
'1': {
'sim_color': 'b', 'sim_zo': 1,
'case': 'mgroth_cmg_catalog_edge2d_d3d_160299_apr2618_seq#2',
},
}
return cases
if __name__=='__main__':
workdir = '/work/bloman/pyproc/'
cases = case_defs_d3d()
savefig=True
# setup plot figures
left = 0.2 # the left side of the subplots of the figure
right = 0.85 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.93 # the top of the subplots of the figure
wspace = 0.25 # the amount of width reserved for blank space between subplots
hspace = 0.15 # the amount of height reserved for white space between subplots
Hlines_dict = OrderedDict([
('1215.2', ['2', '1']),
('6561.9', ['3', '2']),
('4339.9', ['5', '2']),
('4101.2',['6', '2']),
('3969.5', ['7', '2'])
])
carbon_lines_dict = OrderedDict([
('2', {'5143.3': ['3p', '3s']}),
('3', {'4650.1': ['3p', '3s']}),
('4', {'1549.1': ['2p', '2s']})
])
spec_line_dict = {
'1': # HYDROGEN
{'1': Hlines_dict},
'6': carbon_lines_dict
}
fig1, ax1 = plt.subplots(nrows=len(Hlines_dict), ncols=1, figsize=(6, 12), sharex=True)
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
fig2, ax2 = plt.subplots(nrows=len(carbon_lines_dict), ncols=1, figsize=(6, 12), sharex=True)
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
fig3, ax3 = plt.subplots(nrows=1, ncols=1, figsize=(8, 8), sharex=True)
plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
for casekey, case in cases.items():
plot_dict = {
'spec_line_dict':spec_line_dict,
'prof_Hemiss_defs': {'diag': 'divspred',
'lines': spec_line_dict['1']['1'],
'excrec': True,
'axs': ax1,
# 'ylim': [[0, 5e21], [0, 2.5e20], [0, 3.5e17]],
# 'xlim': [2.15, 2.90],
'coord': 'R',# 'R' 'Z'
'color': case['sim_color'],
'zorder': 10,
'write_csv': False},
'prof_impemiss_defs': {'diag': 'divspred',
'lines': spec_line_dict,
'excrec': True,
'axs': ax2,
# 'ylim': [[0, 8e17], [0, 2e18]],# [0, 8e18]],
# 'xlim': [2.55, 2.8],
'coord': 'R',# 'R' 'Z'
'color': [case['sim_color']],
'zorder': case['sim_zo'],
'write_csv': False},
# 'prof_Prad_defs': {'diag': ['bolo1_hr'], # must be a list!!!
# 'axs': ax3,
# 'coord': 'angle', # 'angle' 'R' 'Z'
# 'color': 'b',
# 'zorder': 10,
# 'write_csv': False},
'2d_defs': {'lines': spec_line_dict,
'diagLOS': ['divspred'],
'Rrng': [1.0, 2.0],
'Zrng': [-1.5, -0.5],
'save': False},
}
o = Plot(workdir, case['case'], plot_dict=plot_dict)
# Print out results dictionary tree
# Plot.pprint_json(o.res_dict['mds1_hr']['1']['los_int'])
plt.show() | [
"bart.lomanowski@ccfe.ac.uk"
] | bart.lomanowski@ccfe.ac.uk |
243500863b711bdfcb3260554a5080051a1aab75 | dde65a7864e72c19d1e3f7e6df74e4bacc6fc185 | /examples/conopt_ex.py | 9299572ebfe60f48eb6f10b4ccf5dcc88589d612 | [
"MIT"
] | permissive | BieglersGroup/dae_pyomo | 17c07f6434d876223bba42864767d111b581699a | e12906da66d4c3d29aa2da42d067d2649a432b96 | refs/heads/master | 2020-03-08T16:20:08.655794 | 2019-03-18T18:28:14 | 2019-03-18T18:28:14 | 128,236,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from pyomo.environ import *
from pyomo.opt import SolverFactory, ProblemFormat
"""Example taken from the sipopt manual
please check
https://github.com/coin-or/Ipopt/blob/master/Ipopt/contrib/sIPOPT/examples/redhess_ampl/red_hess.run"""
__author__ = 'David Thierry' #: @2018
def main():
#: Declare Model
m = ConcreteModel()
m.i = Set(initialize=[1, 2, 3])
init_vals = {1:25E+00, 2:0.0, 3:0.0}
#: Variables
m.x = Var(m.i, initialize=init_vals)
#: Objective
m.oF = Objective(expr=(m.x[1] - 1.0)**2 + exp(m.x[2] - 2.0)**2 + (m.x[3] - 3.0)**2, sense=minimize)
#: Constraints
m.c1 = Constraint(expr=m.x[1] + 2 * m.x[2] + 3 * m.x[3] == 0.0)
#: ipopt suffixes REQUIRED FOR K_AUG!
m.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
m.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)
m.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)
m.ipopt_zL_in = Suffix(direction=Suffix.EXPORT)
m.ipopt_zU_in = Suffix(direction=Suffix.EXPORT)
#: sipopt suffix
ipopt = SolverFactory('ipopt')
conopt = SolverFactory('conopt')
m.write(filename='mynl.nl', format=ProblemFormat.nl)
conopt.options['outlev'] = 3
conopt.solve(m, tee=True)
#: works with Pyomo 5.5.1 (CPython 2.7.15 on Linux 4.15.0-42-generic)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
fa327bc177500506e6306a6f31e506773ee546f0 | 270b2bfe807c58021bb3417155789adbc6554bd3 | /measurements.py | 856542697c7ebfcb7255a5c1d47e5c1d8e63b110 | [] | no_license | trannguyenle95/ipalm_dataset | a46e2096217cccacd410ce510b9da9d0f62d9c35 | 53b5846271429337f8c23d08dec793748ad302b9 | refs/heads/master | 2023-02-19T01:52:25.803916 | 2021-01-24T17:20:21 | 2021-01-24T17:20:21 | 316,262,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | import json
from json import JSONEncoder
import os
####################################### subclass JSONEncoder
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that delegates to an object's reprJSON() hook.

    Any object exposing a ``reprJSON()`` method is serialized as whatever
    that method returns; everything else falls back to the base encoder
    (which raises TypeError for unsupported types).
    """

    def default(self, obj):
        repr_hook = getattr(obj, 'reprJSON', None)
        if repr_hook is not None:
            return repr_hook()
        return json.JSONEncoder.default(self, obj)
########################################
class Measurements:
    """A single named measurement value attached to an object record."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def reprJSON(self):
        # Serialization hook consumed by ComplexEncoder.
        return {'name': self.name, 'value': self.value}
#######################################
class ObjectMeasurements(object):
    """A JSON-file-backed record of measurements for a single object.

    The backing file at *location* holds a list of records, each shaped like
    ``{"object_name": ..., "object_id": ..., "measurements": [...]}``.
    Records are looked up positionally: object_id N maps to list index N-1.
    """

    def __init__(self,location,object_id,object_name):
        self.measurements = []
        self.object_name = object_name
        self.object_id = object_id
        # Expand "~" so callers may pass home-relative paths.
        self.location = os.path.expanduser(location)
        self.load(self.location)

    def load(self , location):
        """Load the DB file if it exists; otherwise start empty.

        Always returns True (the return value carries no failure signal).
        """
        if os.path.exists(location):
            self._load()
        else:
            self.db = []
        return True

    def _load(self):
        # NOTE(review): the file handle opened here is never closed
        # explicitly; a `with` block would be safer.
        self.db = json.load(open(self.location , "r"))
        # object_id is 1-based; an id beyond the current DB length yields
        # an empty measurement list.
        if int(self.object_id) <= len(self.db):
            self.measurements = self.db[int(self.object_id)-1]["measurements"]
        else:
            self.measurements = []

    def dumpdb(self):
        """Write the whole DB back to disk; True on success, False otherwise."""
        try:
            json.dump(self.db, open(self.location, "w+"), indent=4, cls=ComplexEncoder)
            return True
        except:
            # NOTE(review): bare except swallows *every* error, including
            # KeyboardInterrupt; narrowing to (OSError, TypeError,
            # ValueError) would be safer.
            return False

    def get(self , key):
        # NOTE(review): self.db is a list, so *key* is really a positional
        # index; an out-of-range index raises IndexError, which is NOT the
        # KeyError caught here — confirm intended behavior.
        try:
            return self.db[key]
        except KeyError:
            print("No Value Can Be Found for " + str(key))
            return False

    def delete(self , key):
        # NOTE(review): for a list, `key in self.db` tests membership of the
        # *value* and `del self.db[key]` deletes by *index* — the two uses of
        # `key` are inconsistent; verify against callers.
        if not key in self.db:
            return False
        del self.db[key]
        self.dumpdb()
        return True

    def add_meas(self, key, value):
        """Append a new Measurements(key, value) item (in memory only)."""
        # else:
        meas_item = Measurements(key, value)
        self.measurements.append(meas_item)

    def update(self):
        """Append this object's record to the DB, persist it, and reload."""
        self.db.append(self.reprJSON())
        self.dumpdb()
        self._load()

    def modify(self,key,value):
        """Set measurement *key* to *value* (updating or appending), then persist.

        NOTE(review): after _load() the measurements are plain dicts, which is
        what the `any(...)`/`next(...)` lookups assume; freshly added
        Measurements objects (see add_meas) would not match — confirm the
        intended call order.
        """
        if any(dict_item['name'] == key for dict_item in self.measurements):
            print("Key exists. New value is updated")
            next(dict_item for dict_item in self.measurements if dict_item["name"] == key)["value"] = value
        else:
            meas_item = Measurements(key, value)
            self.measurements.append(meas_item)
            print("New value is added to the objects")
        json.dump(self.db, open(self.location, "w+"), indent=4, cls=ComplexEncoder)
        self._load()

    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)

    def reprJSON(self):
        # Serialization hook consumed by ComplexEncoder.
        return dict(object_name=self.object_name,object_id=self.object_id,measurements=self.measurements)
| [
"tran.nguyenle@aalto.fi"
] | tran.nguyenle@aalto.fi |
02ac12584cbb4463982131352015d609488a5dac | 84aa025fe01b7d855520ed4faac237f32bf94f00 | /SnakeTutorial.py | 2b3fde69dbb1fa480bea339f1f35ffe11dc32999 | [] | no_license | vincentktieu101/Snake | 5c570e0af067cb0ed292c1a1d077c204d36e952d | 4c96e690aee4eeed84069125061644777e52e84e | refs/heads/master | 2023-04-01T00:30:57.157861 | 2021-04-05T06:59:50 | 2021-04-05T06:59:50 | 354,747,281 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,683 | py | import pygame
import random
win = pygame.display.set_mode((600,600))
pygame.display.set_caption("Snake")
class background():
    """Draws the 15x15 board grid (40-pixel cells) as white cell outlines."""

    def draw(self, win):
        for col in range(15):
            for row in range(15):
                # Width-1 rect outlines one grid cell.
                pygame.draw.rect(win, (255, 255, 255), (col * 40, row * 40, 40, 40), 1)
class player():
    """The snake's head: position, look, travel direction and tail length.

    Facing flags are -1, 0 or 1 along each axis; at most one axis is
    non-zero at a time.
    """

    def __init__(self, x, y, radius, color, horizontalFacing, verticalFacing, tailLength):
        self.x, self.y = x, y
        self.radius, self.color = radius, color
        self.verticalFacing = verticalFacing
        self.horizontalFacing = horizontalFacing
        self.tailLength = tailLength

    def draw(self, win):
        """Render the head as a filled circle."""
        pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)
class part():
    """One tail segment; keeps a 40x40 hitbox centred on its position."""

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.hitbox = (x - 20, y - 20, 40, 40)

    def draw(self, win):
        """Refresh the hitbox (the segment may have been moved) and render."""
        self.hitbox = (self.x - 20, self.y - 20, 40, 40)
        pygame.draw.circle(win, (0, 255, 0), (self.x, self.y), 18)
class food():
    """A food pellet rendered as a light-blue circle at (x, y)."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def draw(self, win):
        pygame.draw.circle(win, (0, 225, 255), (self.x, self.y), 18)
def redrawGameWindow():
    """Clear the window, repaint every game element, then flip the display.

    Draw order matters: head first, then the grid over it, then the tail
    segments and the food (all module-level globals).
    """
    win.fill((0, 0, 0))
    snake.draw(win)
    grid.draw(win)
    for segment in tail:
        segment.draw(win)
    snakeFood.draw(win)
    pygame.display.update()
# --- Initial game state: head, grid, three tail segments, one food pellet.
snake = player(260, 300, 18, (0, 255, 0), 1, 0, 3)
grid = background()
tail = [part(220, 300), part(180, 300), part(140, 300)]
# Food spawns at the centre of a random 40px grid cell.
snakeFood = food(random.randint(0, 14) * 40 + 20, random.randint(0, 14) * 40 + 20)
hungry = False
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    # Steering: a key is ignored when it would reverse the current axis
    # of travel (e.g. LEFT while already moving right).
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT] and not(snake.horizontalFacing == 1):
        snake.horizontalFacing = -1
        snake.verticalFacing = 0
    if keys[pygame.K_RIGHT] and not(snake.horizontalFacing == -1):
        snake.horizontalFacing = 1
        snake.verticalFacing = 0
    if keys[pygame.K_UP] and not(snake.verticalFacing == 1):
        snake.verticalFacing = -1
        snake.horizontalFacing = 0
    if keys[pygame.K_DOWN] and not(snake.verticalFacing == -1):
        snake.verticalFacing = 1
        snake.horizontalFacing = 0
    # Self-collision: hitting any tail segment resets the tail to length 3.
    for tailPart in tail:
        if tailPart.x == snake.x:
            if tailPart.y == snake.y:
                snake.tailLength = 3
                for i in range(3, len(tail)):
                    tail.pop(3)
    # Grow the tail list lazily when a pellet has been eaten; the
    # placeholder 0 is overwritten by the shift below before it is drawn.
    if snake.tailLength > len(tail):
        tail.append(0)
    # Shift segments back by one and place a new segment at the head.
    for i in range(snake.tailLength - 1, 0, -1):
        tail[i] = tail[i-1]
    tail[0] = part(snake.x, snake.y)
    snake.verticalVel = 40 * snake.verticalFacing
    snake.horizontalVel = 40 * snake.horizontalFacing
    # If the next head position would land inside the first tail segment
    # (a reversal slipping through the key checks), flip direction instead.
    if (tail[1].hitbox[0] < snake.x + snake.horizontalVel and tail[1].hitbox[0] + tail[1].hitbox[2] > snake.x + snake.horizontalVel and tail[1].hitbox[1] < snake.y + snake.verticalVel and tail[1].hitbox[1] + tail[1].hitbox[3] > snake.y + snake.verticalVel):
        snake.verticalFacing = -snake.verticalFacing
        snake.horizontalFacing = -snake.horizontalFacing
        snake.verticalVel = 40 * snake.verticalFacing
        snake.horizontalVel = 40 * snake.horizontalFacing
        print('fixed')
    # Move the head, wrapping around the 600px window edges.
    if snake.x + snake.horizontalVel < 0:
        snake.x = 580
    elif snake.x + snake.horizontalVel > 600:
        snake.x = 20
    else:
        snake.x += snake.horizontalVel
    if snake.y + snake.verticalVel < 0:
        snake.y = 580
    elif snake.y + snake.verticalVel > 600:
        snake.y = 20
    else:
        snake.y += snake.verticalVel
    if not(hungry):
        # Respawn the pellet, rerolling until it is clear of the snake.
        hungry = True
        checkFood = 0
        while checkFood == 0:
            checkFood = 1
            snakeFood.x = random.randint(0, 14) * 40 + 20
            snakeFood.y = random.randint(0, 14) * 40 + 20
            for tailPart in tail:
                if (tailPart.hitbox[0] < snakeFood.x and tailPart.hitbox[0] + tailPart.hitbox[2] > snakeFood.x and tailPart.hitbox[1] < snakeFood.y and tailPart.hitbox[1] + tailPart.hitbox[3] > snakeFood.y) or (snakeFood.x == snake.x and snakeFood.y == snake.y) or (checkFood == 0):
                    checkFood = 0
                    #print('finding food...')
    else:
        # Pellet present: eating it grows the tail by one segment.
        if snakeFood.x == snake.x:
            if snakeFood.y == snake.y:
                hungry = False
                snake.tailLength += 1
    # ~10 frames per second.
    pygame.time.delay(100)
    redrawGameWindow()
pygame.quit()
"vincentktieu101@gmail.com"
] | vincentktieu101@gmail.com |
ff439495d07d91a037f9e08654e86a577d88c06e | f0136ec80c551d266ec6052deb17e4de93089640 | /1-99/Q62.py | e06f5c2545648946ebe84ea193f4d4703adca6ea | [] | no_license | ZakHu7/LeetCode | 57145b8c511cded4b368d49daa407a547e54281e | 33bcea558595b94169d120f345958067396a2b35 | refs/heads/master | 2023-03-04T18:39:50.726662 | 2021-02-13T23:27:23 | 2021-02-13T23:27:23 | 280,045,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | class Solution:
def uniquePaths(self, m: int, n: int) -> int:
row = [1] * n
for r in range(1, m):
for c in range(1, n):
row[c] = row[c] + row[c-1]
return row[-1]
[1,1,1,1,1,1,1]
[1,2,3,4,5,6,7]
[1,3,6,10,15,21,28] | [
"zakhu7@hotmail.com"
] | zakhu7@hotmail.com |
856626e6572731bfab5c566ea0f58bc4cbb4fe27 | bf09b75da4a99c84623ee62cc25445f0e1fc2995 | /ex19sd.py | 52c8a33a354689655e0a089e23d7d80f63fa5a32 | [] | no_license | surakhchin/Learn-Python-the-Hard-Way | 312a1ac3ef71d64bb367888622639264272885aa | 88abc56d6fa6e5352ac7bc88933066fe81af83a1 | refs/heads/master | 2021-01-10T03:08:53.354176 | 2016-04-01T02:01:17 | 2016-04-01T02:01:17 | 53,166,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | def one_in_10_function(a,b,c):
print "%r, %r, %r" % (a,b,c)
one_in_10_function(1,2,3)
one_in_10_function(int(raw_input()),int(raw_input()),int(raw_input()))
one_in_10_function(15+16,30+30,50+23)
A = 33
B = 55
C = 11
one_in_10_function(A,B,C)
one_in_10_function("a","b","c")
one_in_10_function(A+10,B+10,C+10) | [
"urakhchin@gmail.com"
] | urakhchin@gmail.com |
9fa6d5ec08c54464f527fe58e8ce3f94274faf1e | f06c0693a5f6e3336b90b51059530f3c4f5009e1 | /Main/models.py | bf04d31607a0b05a2b436e6459e7cd68b00df6c9 | [
"MIT"
] | permissive | ahmadalwareh/SurveysBuilder | 183f70a8fb5f11f74dc77fdc758c5b5003018b2f | 0c80e0e6ef9559f631cc6ac5e7c4f8e331a2c2ef | refs/heads/master | 2023-01-08T12:36:14.618052 | 2023-01-03T18:24:39 | 2023-01-03T18:24:39 | 270,373,248 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,982 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from Main.managers import CustomUserManager
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from OnionOpinion import settings
class Roles(models.Model):
    """A user role (referenced by Users and RolesPermissions)."""
    r_name = models.CharField(max_length=100)

    def __str__(self):
        return self.r_name

    class Meta:
        managed = True
        db_table = 'Roles'
class Permissions(models.Model):
    """A named permission; linked to roles via RolesPermissions."""
    p_name = models.CharField(max_length=100, blank=True, null=True)
    description = models.CharField(max_length=255, blank=True, null=True)
    # relation = models.ManyToManyField(Roles, through='RolesPermissions')

    def __str__(self):
        return self.p_name

    class Meta:
        managed = True
        db_table = 'Permissions'
class RolesPermissions(models.Model):
    """Join table assigning Permissions rows to Roles rows."""
    role = models.ForeignKey(Roles, models.DO_NOTHING)
    permission = models.ForeignKey(Permissions, models.DO_NOTHING)

    def __str__(self):
        return str(self.role) + ": " + str(self.permission)

    class Meta:
        managed = True
        db_table = 'Roles_Permissions'
class Countries(models.Model):
    """A country, referenced by Users.country."""
    c_name = models.CharField(max_length=50)

    def __str__(self):
        return self.c_name

    class Meta:
        managed = True
        db_table = 'Countries'
class Users(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by email instead of username."""
    email = models.EmailField(_('email address'), unique=True)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField(default=True)
    date_joined = models.DateTimeField(default=timezone.now)

    # Email is the login identifier; no extra fields are required at
    # createsuperuser time.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    objects = CustomUserManager()

    first_name = models.CharField(max_length=50, blank=True, null=True)
    last_name = models.CharField(max_length=50, blank=True, null=True)
    mobile = models.CharField(max_length=14, blank=True, null=True)
    GENDER_MALE = 'Male'
    GENDER_FEMALE = 'Female'
    GENDER_CHOICES = [(GENDER_MALE, 'Male'), (GENDER_FEMALE, 'Female')]
    gender = models.CharField(max_length=10, blank=True, null=True, choices=GENDER_CHOICES)
    birth_date = models.DateField(default='1970-01-01')
    image_path = models.ImageField(max_length=255, blank=True, null=True)
    # NOTE(review): string defaults '1'/'4' are primary-key values for the
    # default Countries/Roles rows — confirm those fixture rows exist.
    country = models.ForeignKey(Countries, models.DO_NOTHING, default='1')
    role = models.ForeignKey(Roles, models.DO_NOTHING, default='4')

    def __str__(self):
        return self.email

    class Meta:
        managed = True
        db_table = 'Users'
class Messages(models.Model):
    """A contact-form style message addressed to a user."""
    sender_email = models.EmailField(max_length=100)
    sender_name = models.CharField(max_length=100)
    send_date = models.DateTimeField(default=timezone.now)
    msg_subject = models.CharField(max_length=100)
    msg_text = models.CharField(max_length=512)
    msg_status = models.CharField(max_length=10)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, models.DO_NOTHING)

    def __str__(self):
        return self.msg_subject

    class Meta:
        managed = True
        db_table = 'Messages'
class Surveys(models.Model):
    """A survey owned by a user; questions link back via Questions.survey."""
    title = models.CharField(max_length=100)
    s_name = models.CharField(max_length=100)
    creation_date = models.DateTimeField(default=timezone.now)
    s_status = models.CharField(max_length=20)
    is_private = models.BooleanField()
    is_encrypted = models.BooleanField()
    user = models.ForeignKey(settings.AUTH_USER_MODEL, models.DO_NOTHING)

    def __str__(self):
        return self.s_name

    class Meta:
        managed = True
        db_table = 'Surveys'
class Questions(models.Model):
    """A single question belonging to a survey."""
    question_body = models.CharField(max_length=255)
    question_type = models.CharField(max_length=20)
    survey = models.ForeignKey('Surveys', models.DO_NOTHING)

    def __str__(self):
        return self.question_type

    class Meta:
        managed = True
        db_table = 'Questions'
class Answers(models.Model):
    """A possible (or given) answer to a question."""
    answer = models.CharField(max_length=512)
    answer_text = models.CharField(max_length=512, blank=True, null=True)
    question = models.ForeignKey('Questions', models.DO_NOTHING)

    class Meta:
        managed = True
        db_table = 'Answers'
class UsersAnswers(models.Model):
    """Join table recording which user selected which answer."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, models.DO_NOTHING)
    answer = models.ForeignKey(Answers, models.DO_NOTHING)

    class Meta:
        managed = True
        db_table = 'Users_Answers'
| [
"Asm205#Ahm258@hj"
] | Asm205#Ahm258@hj |
a1ce7c14cc2c074e142ff0787a47cfc6ace7c996 | 926be6cac667168db041b1820adc6437f8fc58d3 | /examples/01_Planar_Single_Delam.py | 1ac9f3e1c72441625815bd39f5317fb08d6e8851 | [
"Apache-2.0"
] | permissive | idealab-isu/de-la-mo | 951b3dc7d0533ff6bf5267d9a3a6a5c19ff98606 | ddd2293a8d6ecca1b92386dbecfa0b2631dff1df | refs/heads/master | 2020-03-29T17:55:59.136921 | 2019-07-18T05:26:22 | 2019-07-18T05:26:22 | 150,186,180 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,075 | py | import numpy as np
import delamo.CADwrap
from delamo.api import DelamoModeler
from delamo.api import Layer
from delamo.api import bond_layers
from delamo.api import SimpleCoordSys
from delamo import process
import os
# Front matter
# ------------
# Read ACIS license key
acis_license = delamo.CADwrap.read_license_key(filename="license.dat")
# Initialize the DeLaMo model
DM=DelamoModeler.Initialize(globals(),
pointtolerancefactor=100.0,
normaltolerance=100e-4,
license_key=acis_license)
# This script then generates both a CAD file and a Python script.
# The Python script can be run from Abaqus. It includes the
# initialization script referenced above, and also opens
# the CAD file and builds the model.
# The name of the script file to generate and
# the name of the CAD file to write are returned
# by process.output_filenames()
# The first parameter to output_filenames
# should always match the name of the original script
# with the ".py" stripped
# In manually generated scripts, always specify phase
# to be "ORIGINAL"
(script_to_generate,
cad_file_path_from_script,
layer_boundary_template) = process.output_filenames("01_Planar_Single_Delam",phase="ORIGINAL")
# When writing a DeLaMo script, you start by creating a
# finite element initialization script. This is a
# Python script for ABAQUS that defines your various parameters
# -- material properties, etc. as Python variables.
# In this case they are stored in the "abqparams_CFRP.py" file
DM.abaqus_init_script("abqparams_CFRP.py",globals())
# The above call automatically inserts wrapped copies of variables
# defined in those scripts into the global variable space. Then you
# can reference those variables in this script
# (you can have as many init scripts as you like)
# The Delamo model contains generates several sets of instructions
# for different phases of the finite element modeling process:
# DM.initinstrs (initialization)
# DM.assemblyinstrs (model assembly)
# DM.bcinstrs (boundary conditions)
# DM.meshinstrs (meshing)
# All methods called from those variables will go generally be executed
# in the assemblyinstrs pool unless otherwise overridden. You can
# use e.g. DM.meshinstrs.rewrapobj() to get a reference to
# one of these variables that will execute in an alternate context.
#
# For example,
LaminateAssemblyMeshing=DM.meshinstrs.rewrapobj(LaminateAssembly)
# Creates a reference to the LaminateAssembly, for which method calls
# execute in the meshing context
# Basic parameters
# Set layer thickness we are planning on using
thickness = 0.199
# Create a NURBS mold surface
# over which the lamina will be laid
# In this case it is flat
mold = delamo.CADwrap.NURBSd()
mold.degree_u = 3
mold.degree_v = 3
mold.knotvector_u = [ 0, 0, 0, 0, 1, 2, 3, 3, 3, 3 ]
mold.knotvector_v = [ 0, 0, 0, 0, 1, 2, 3, 3, 3, 3 ]
# Read in control points for NURBS surface from a file
mold.read_ctrlpts("data/CP_Planar1.txt")
# Define a coordinate system
# This example defines +x direction along 0 deg. fibers,
# +y direction across 0 deg fibers, equivalent to
# the default (when coordsys is not specified)
coordsys=SimpleCoordSys((1.0,0.0,0.0),(0.0,1.0,0.0))
# Create 1st layer by moving the distance specified by thickness
# in the OFFSET_DIRECTION
layer1 = Layer.CreateFromMold(DM,mold,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_1",LaminaSection,0,coordsys=coordsys)
# Once any breaks, etc. of a given layer are complete, it must be
# finalized.
layer1.Finalize(DM)
# The MeshSimple method is a shortcut over the underlying ABAQUS routines
# It loops over each part in the layer and calls setElementType() with
# the specified MeshElemTypes, setMeshControls() with the given shape
# and technique, and seedPart() with the given mesh size, deviation factor,
# and minsizefactor. and refines the mesh near any given refined_edges
# Note that ABAQUS constants must be referenced as part of abqC
# rather than used directly
layer1.MeshSimple(MeshElemTypes,meshsize,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# Create and add point marker for fixed faced boundary condition
# There is a surface at y=-25 mm from z= 0...0.2 mm
# This point identifies it
FixedPoint=[-20.0,-25.0,0.1]
# Define a fixed boundary condition based on that point.
# EncastreBC is an ABAQUS function that was found by
# using the ABAQUS/CAE interface and then looking at the
# replay (.rpy) file.
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer1.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 2nd layer
layer2 = Layer.CreateFromLayer(DM,layer1.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_2", LaminaSection,-45,coordsys=coordsys)
layer2.Finalize(DM)
layer2.MeshSimple(MeshElemTypes,meshsize/1.8,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# Bond layers 1 and 2. With no other parameters, the layers are attached
# with a TIE boundary condition
bond_layers(DM,layer1, layer2)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer2.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 3rd layer
layer3 = Layer.CreateFromLayer(DM,layer2.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_3",LaminaSection,45,coordsys=coordsys)
layer3.Finalize(DM)
layer3.MeshSimple(MeshElemTypes,meshsize/1.8,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
# The line below performs a bonding operation with a delamination
# and a contact zone inside the delamination surrounded by a
# cohesive zone into which the delamination may grow
bond_layers(DM,layer2, layer3, defaultBC=delamo.CADwrap.BC_COHESIVE,
CohesiveInteraction=CohesiveInteraction,
ContactInteraction=ContactInteraction,
delaminationlist= [ "data/Delamination1_3D.csv" ])
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer3.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 4th layer
layer4 = Layer.CreateFromLayer(DM,layer3.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_4",LaminaSection,90,coordsys=coordsys)
layer4.Finalize(DM)
layer4.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer3, layer4)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer4.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 5th layer over the layer 4 or the stiffener contour, if present
# ... for we just tell it to follow the layer 4 contour, which
# the stiffener automagically expanded
layer5 = Layer.CreateFromLayer(DM,layer4.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_5",LaminaSection,90,coordsys=coordsys)
layer5.Finalize(DM)
layer5.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer4, layer5)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer5.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 6th layer
layer6 = Layer.CreateFromLayer(DM,layer5.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_6",LaminaSection,45,coordsys=coordsys)
layer6.Finalize(DM)
layer6.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer5, layer6)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer6.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 7th layer
layer7 = Layer.CreateFromLayer(DM,layer6.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_7",LaminaSection,-45,coordsys=coordsys)
layer7.Finalize(DM)
layer7.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer6, layer7)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer7.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Create 8th layer
layer8 = Layer.CreateFromLayer(DM,layer7.gk_layer,delamo.CADwrap.OFFSET_DIRECTION,thickness,"Layer_8",LaminaSection,0,coordsys=coordsys)
layer8.Finalize(DM)
layer8.MeshSimple(MeshElemTypes,meshsize/2.0,abqC.HEX_DOMINATED,abqC.SYSTEM_ASSIGN)
bond_layers(DM,layer7, layer8)
# Update and add point marker for fixed faced boundary condition
FixedPoint[2]+=thickness
FEModel.EncastreBC(name="FixedFace_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer8.singlepart.GetInstanceFaceRegion(FixedPoint,DM.pointtolerance))
# Can define a "Surface" that is visible in the Abaqus output database
# This is a direct ABAQUS call on the part object
# within layer1 (assumes layer1 is not split due to fiber/matrix breakage)
layer1.singlepart.fe_part.Surface(name="ForceSurface",
side1Faces=layer1.singlepart.GetPartFace((24.0,24.0,thickness*0),DM.pointtolerance))
ForceVector=[ 0.0, 0.0, -5e-2 ] # Units of MPa
# Call ABAQUS SurfaceTraction method
# Again, this came from looking at ABAQUS replay (.rpy) output
# Observe again that all ABAQUS symbolic constants need the "abqC"
# prefix.
FEModel.SurfaceTraction(name="SurfaceTraction_%d" % (DM.get_unique()),
createStepName=ApplyForceStep.name,
region=layer1.singlepart.GetInstanceFaceRegionSurface((24.0,24.0,thickness*0.0),DM.pointtolerance),
distributionType=abqC.UNIFORM,
field='',
localCsys=None,
traction=abqC.GENERAL,
follower=abqC.OFF,
resultant=abqC.ON,
magnitude=np.linalg.norm(ForceVector),
directionVector=((0.0,0.0,0.0),tuple(ForceVector/np.linalg.norm(ForceVector))),
amplitude=abqC.UNSET)
# You can have the job auto-start when the Python script is run
#DM.RunJob(BendingJob)
# Finalization generates the output script and CAD model.
DM.Finalize(script_to_generate,cad_file_path_from_script)
| [
"sdh4@iastate.edu"
] | sdh4@iastate.edu |
f33f6ef2969b3ddaec26b539817a4f5bfea0761d | 020f4fadab70612804622d3f13a52ebb33828be6 | /yatube/urls.py | e6d9a928b378814de88343b2fb36a2356ef95351 | [] | no_license | myelemisearth/hw05_final | 4507d804f58bf65eceee3b230fd2c195159881d5 | 47e1395f7c2974d2cb68b274a3a46b1408414885 | refs/heads/master | 2023-01-22T21:11:24.167642 | 2020-11-18T18:54:31 | 2020-11-18T18:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from django.conf import settings
from django.conf.urls import handler404, handler500
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.flatpages import views
from django.urls import include, path
# Dotted-path overrides for Django's project-level error views; Django imports
# these strings lazily when rendering 404/500 responses.
handler404 = "posts.views.page_not_found"
handler500 = "posts.views.server_error"
# URL table: flatpage routes for the static informational pages, the admin
# site, auth (project-specific `users` app plus django.contrib.auth built-ins),
# contact/terms flatpages, and finally the posts app mounted at the site root.
urlpatterns = [
    path('about-author/',
         views.flatpage,
         {'url': '/about-author/'},
         name='about-author'),
    path('about-spec/',
         views.flatpage,
         {'url': '/about-spec/'},
         name='about-spec'),
    path('about-us/',
         views.flatpage,
         {'url': '/about-us/'},
         name='about-us'),
    path('admin/',
         admin.site.urls),
    path('auth/',
         include('users.urls')),
    path('auth/',
         include('django.contrib.auth.urls')),
    path('contacts/',
         views.flatpage,
         {'url': '/contacts/'},
         name='contacts'),
    path('terms/',
         views.flatpage,
         {'url': '/terms/'},
         name='terms'),
    path('',
         include('posts.urls')),
]
# Development-only convenience: let Django itself serve uploaded media and
# static assets when DEBUG is on (a real web server handles these in prod).
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL,
                          document_root=settings.STATIC_ROOT)
| [
"morozov_a@protei.ru"
] | morozov_a@protei.ru |
9617a92049c9afde1670efeec8bd8ae6a362ffe4 | 54ad8e8c587531a02271e5aa9f9928aafdb76e27 | /sesje/2015-03-04/homebrew_02.py | 7677b52ca000470e837bd7b79b7fcbb34e0b74ee | [] | no_license | Karaluszyca/warsztaty_2015 | c4f76f08573202310d8db37f5486073ae11dad28 | fad4ceb1e92f4322624c10e24814413a2dbd3021 | refs/heads/master | 2020-12-30T23:09:26.988515 | 2015-07-18T08:56:24 | 2015-07-18T08:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # matematyka
## kilka "zewnętrznych" rzeczy
from math import sin, cos
import turtle
## tu zaczyna sie nasz kod do poprawienia
## start of our code to improve
# Sample x positions: three groups of four consecutive multiples of ten.
sample_xs = [-100, -90, -80, -70, 10, 20, 30, 40, 70, 80, 90, 100]
dex = []
for idx, x in enumerate(sample_xs):
    # The original alternated two formulas; preserve that exactly:
    # even positions use x*sin(x) + cos(x), odd positions sin(x) + x*cos(x).
    if idx % 2 == 0:
        y = x * sin(x) + cos(x)
    else:
        y = sin(x) + x * cos(x)
    dex.append((x, y))
## end of our code to improve
turtle.delay(0)
tt = turtle.Turtle()
tt.speed(0)
tt.penup()
tt.goto(dex[0])
tt.pendown()
for point in dex:
tt.goto(point)
input('Enter/Ctrl+C to close')
| [
"soutys"
] | soutys |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.