blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91560eee0d52a0f1cb3f860505d7313e30f96db9 | 6f9b15325e4bbe177e0e82f2c6d77f149fc59b65 | /pump_script.py | 79d334df43a36b90461a8be697a08716fee13a63 | [
"MIT"
] | permissive | hulsed/FFERMAT | c411d19ea0c6e98b96ebd267bdfb83470688ef80 | 5b180e637a2507a022e4a2c42c0b98cd44b6f8d6 | refs/heads/master | 2020-04-10T23:54:47.382002 | 2019-10-04T16:49:47 | 2019-10-04T16:49:47 | 161,368,580 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | # -*- coding: utf-8 -*-
"""
File name: pump_script.py
Author: Daniel Hulse
Created: October 2019
Description: A simple example of I/O using faultprop.py and the pump model in ex_pump.py
"""
#Using the model that was set up, we can now perform a few different operations
#First, import the fault propogation library as well as the model
import faultprop as fp
import ex_pump as mdl
#Before seeing how faults propogate, it's useful to see how the model performs
#in the nominal state to check to see that the model has been defined correctly.
# Some things worth checking:
# -are all functions on the graph?
# -are the functions connected with the correct flows?
# -do any faults occur in the nominal state?
# -do all the flow states proceed as desired over time?
endresults, resgraph, flowhist, ghist=fp.runnominal(mdl, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'Nominal')
#We might further query the faults to see what happens to the various states
endresults, resgraph, flowhist, ghist=fp.proponefault(mdl, 'Move Water', 'short', time=10, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'short', time=10)
t=fp.printresult('Move Water', 'short', 10, endresults)
print(t)
#in addition to these visualizations, we can also look at the final results
#to see which specific faults were caused, as well as the flow states
#print(endresults)
#we can also look at other faults
endresults, resgraph, flowhist, ghist=fp.proponefault(mdl, 'Export Water', 'block', time=10, track={'Wat_1','Wat_2', 'EE_1', 'Sig_1'})
fp.showgraph(resgraph)
fp.plotflowhist(flowhist, 'blockage', time=10)
t=fp.printresult('Export Water', 'block', 10, endresults)
print(t)
#you can save to a csv this with:
#t.write('tab.ecsv', overwrite=True)
#finally, to get the results of all of the scenarios, we can go through the list
#note that this will propogate faults based on the times vector put in the model,
# e.g. times=[0,3,15,55] will propogate the faults at the begining, end, and at
# t=15 and t=15
resultsdict, resultstab=fp.proplist(mdl)
print(resultstab)
# resultstab.write('tab.ecsv', overwrite=True) | [
"hulsed@oregonstate.edu"
] | hulsed@oregonstate.edu |
49e4ce94174f630a74b92c620d81985a5f6ed000 | 472411e2fef7b01f240e6a7246ce52b15cd37294 | /brake/backends/dummybe.py | 9280d09644ac778530bbb3533ca52c7f8008a794 | [
"BSD-3-Clause"
] | permissive | SilentCircle/django-brake | 8f3357c7d4f3aa514ee3fdf89e6f9e476758baab | ed752d5219d1ad05760f40a647ec34f210cd0961 | refs/heads/master | 2021-01-16T20:44:51.716817 | 2015-07-01T05:56:09 | 2015-07-01T05:56:09 | 6,924,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | import random
from cachebe import CacheBackend
class DummyBackend(CacheBackend):
    """
    A dummy rate-limiting backend that disables rate-limiting,
    for testing.
    """
    def get_ip(self, request):
        """Return a random fake client "IP" so that every request gets its
        own rate-limit key and therefore never accumulates hits."""
        # 10 ** 21 equals the original 10e20, but as an int:
        # random.randrange() no longer accepts float arguments
        # (deprecated in Python 3.10, removed in Python 3.12).
        return str(random.randrange(10 ** 21))
    def limit(self, func_name, request,
              ip=True, field=None, count=5, period=None):
        """Return limit data about any keys relevant for request."""
        # Always report that no limits were exceeded.
        return []
| [
"stavros@korokithakis.net"
] | stavros@korokithakis.net |
ee89e4d12c58034f1d5306a8bba9cd9e34395fc1 | 81139ce962b55f699b32acb8fb6e7e340bcf5a73 | /checkout/signals.py | 4f3997b21b8f0de665df2158488581315bc98174 | [] | no_license | Paul-Glanville/full-stack-ci-mini-project | a40132e429c881c3add2b6d5a7d47a3caa6b5b45 | eb53831fb01f1506d40a188d9d1c1eef4079ab51 | refs/heads/master | 2023-06-26T20:38:29.452454 | 2021-08-06T17:40:41 | 2021-08-06T17:40:41 | 380,587,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .models import OrderLineItem
@receiver(post_save, sender=OrderLineItem)
def update_on_save(sender, instance, created, **kwargs):
    """Recalculate the parent order's total whenever a line item is
    created or updated."""
    order = instance.order
    order.update_total()
@receiver(post_delete, sender=OrderLineItem)
def update_on_delete(sender, instance, **kwargs):
    """Recalculate the parent order's total whenever a line item is
    deleted."""
    order = instance.order
    order.update_total()
| [
"glanville49@googlemail.com"
] | glanville49@googlemail.com |
2b245b6ffdf88f4f85b306283077ccb69f097f21 | be71b41657883acc5e02c2ad988c2f2cf064cbcc | /baseDjango/wsgi.py | d58cf9cad79d45433d8c9662a6563ddaedcd119d | [] | no_license | SaltyCoco/baseDjango | 603c15e8945feab0f1d1a02dc6133ee0dbfaff8a | 81ec9b6d876f9f8524ddc23be24d4df30e643bea | refs/heads/master | 2023-05-03T19:17:53.952993 | 2021-06-01T17:16:44 | 2021-06-01T17:16:44 | 372,886,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for baseDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'baseDjango.settings')
# Module-level WSGI callable that application servers (gunicorn, uWSGI, ...)
# look up by the name ``application``.
application = get_wsgi_application()
| [
"ryan.j.schulte@gmail.com"
] | ryan.j.schulte@gmail.com |
87a9e86353db3c608e1daaeff1ac61734e6ce8a8 | 8b4ae6987529bb8d48d7d8470776f3ca99a17e60 | /personal/urls.py | a590295ac8bbe0fcdbb27b901571ffdca297ff4c | [] | no_license | Adesola-samuel/cyberloan | 0124241d3edc3d9cf3ac1c718db46525e4e9374d | 89cbf31f255c91ede8a6bf8f7a1d615b285824e7 | refs/heads/master | 2023-02-10T05:28:54.183872 | 2021-01-10T21:18:30 | 2021-01-10T21:18:30 | 328,499,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.urls import path
from . import views
# URL namespace used when reversing these routes, e.g. 'profile:update'.
app_name = 'profile'
urlpatterns = [
    path('update', views.update, name='update'),
    path('profile/<int:id>/', views.profile, name='user-profile'),
    path('portfolio-detail/<int:id>/', views.portfolio_detail, name='portfolio-detail'),
    path('ID-Card', views.card, name='card'),
] | [
"adesolapastorsamuel@gmail.com"
] | adesolapastorsamuel@gmail.com |
5a8f100ef380c3b09160c5a8e0ecaf752d8e43e3 | da607092db8e52b38ba1714c8e9989812eb4dbc7 | /newapp/urls.py | 8234d792944d74cff4989b26e34b3eb3b7333f33 | [] | no_license | eashwerks/project_exel | 4782b8a30f23a09f346ce63863bfdc24e855b5aa | 53a5c51bbd9f234ca3b3e6ece8bbf433cf2ead7a | refs/heads/master | 2020-04-08T11:30:32.817681 | 2018-11-27T10:32:47 | 2018-11-27T10:32:47 | 159,308,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from django.conf.urls.static import static
from django.urls import path, include, re_path
from rest_framework.routers import DefaultRouter
from project_exel import settings
from . import views
# DRF router: auto-generates the CRUD routes for registered viewsets.
router = DefaultRouter()
router.register(r'employees', views.EmployeeModelViewSet, basename='employees')
# router.register(r'occurrences', views.OccurrenceModelViewSet, basename='occurrences')
urlpatterns = [
    path('rest-auth/', include('rest_auth.urls')),
    # Custom logout view overriding the rest_auth default.
    path('rest-auth/logout/', views.LogoutViewEx.as_view(), name='rest_logout'),
    # path('rest-auth/test_auth/', views.EmployeeModelViewSet.i, name='test_auth', ),
    path('upload/', views.FileUploadView.as_view(), name='upload'),
    path('', views.HomeTemplateView.as_view(), name='home'),
] + static(settings.STATIC_URL)
# Append the router-generated viewset routes to the explicit ones above.
urlpatterns += router.urls
| [
"eashwerks@gmail.com"
] | eashwerks@gmail.com |
c6b360f08562aaddf5900e08cd01d476537105f1 | 0edb94d9de7222d31ac8350a8cc330179f69ef60 | /urls.py | 5b7e936fb82222a7f0a25bc6333ac4cee7b25143 | [] | no_license | ondrejsika/django-1.6-blank-project | 0f503fd661ec38fd3a9977d2e8fb4772d1c6da80 | 51b59c0a3102d8601c0490d2ee0e3b65afee0b33 | refs/heads/master | 2016-09-11T00:06:38.522221 | 2015-04-12T18:54:28 | 2015-04-12T18:54:28 | 26,711,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Register every app's admin.py with the admin site (required pre-Django 1.7).
admin.autodiscover()
# NOTE: patterns() is the pre-Django 1.8 URLconf style and was removed in 1.10.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'twistedexample.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
)
| [
"ondrej@ondrejsika.com"
] | ondrej@ondrejsika.com |
d259ae82743f9dd80d1891b6d940a00ed317e4c1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03815/s988732380.py | cb8e305d533c05ff2909fab480bd1c9ceab07cc4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | x = int(input())
# Two consecutive moves always advance 6 + 5 = 11 in total, so first count
# the complete 11-blocks (two moves each), then at most two extra moves
# cover the remainder: one move suffices for a remainder of 1..6, two moves
# are needed for 7..10.
full_cycles, remainder = divmod(x, 11)
moves = 2 * full_cycles
if remainder:
    moves += 1 if remainder <= 6 else 2
print(moves)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1fb93c149f16a39467aeae55188a00beff837de3 | 5a6a7b6750a330fdf141c21191a8208bd6e7092b | /es setting/joinData/dataCollecttor.py | f4fd6f4f3986d5586e78462f4fe43387f2dd2cba | [] | no_license | gjp-git/esdata | 6957c64f38a4d5ad72381ccf987afe1526ad0329 | 7171c87b4a93159d13eac4fa16be71a5244fedb1 | refs/heads/master | 2020-04-13T09:49:06.160201 | 2019-01-21T07:00:17 | 2019-01-21T07:00:17 | 163,121,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,834 | py | # -*- coding: utf-8 -*-
import os
import json
import time
def matchLine(target, lines, index):
    """Scan ``lines`` forward from ``index`` for the first line whose
    timestamp prefix matches ``target``.

    A line's prefix is its first 11 characters after removing all dots
    (matching the ``timeprefix`` format built by the caller). Assumes the
    lines are sorted by that prefix in ascending order: the scan aborts
    with -1 as soon as a prefix greater than ``target`` is seen.

    :param target: 11-character timestamp prefix to look for.
    :param lines: list of CSV lines, sorted by timestamp prefix.
    :param index: position to start scanning from.
    :return: index of the first matching line, or -1 if no match exists
        at or after ``index``.
    """
    # Original code had an unreachable trailing ``else: return -1`` and a
    # duplicated bounds/order check before the loop; this single loop is
    # behaviorally identical.
    while index < len(lines):
        timeprefix = lines[index].replace('.', '')[:11]
        if target == timeprefix:
            return index
        if target < timeprefix:
            # Lines are sorted, so the target can no longer appear.
            return -1
        index += 1
    return -1
if __name__ == "__main__":
#id = 0
dir = "D:\\cicv\\BBB new data"
fileList = os.listdir(dir)
#print fileList
for file in fileList:
city = file.split('_')[0]
#result data file
resultFile = open(file+"_data.txt","w")
result = []
thisLine = {}
thisLine['city'] = city
fieldList = ['can','gps','imu','road','obj','image','webp','image_marked','pcl1','pcl1_image','webp_marked']
fieldPaths = [
"\\can\\can.csv",
"\\imu\\gpsfix.csv",
"\\imu\\imudata.csv",
"\\me\\ivsensormeroad.csv",
"\\me\\ivsensormeobj.csv",
"\\imageList.csv",
"\\webpList.csv",
"\\imageMarkedList.csv",
"\\pcl1.csv",
"\\pcl1_image.csv",
"\\imageMarkedWebpList.csv"
]
#open files
#map(lambda x: exec(compile(fieldList[x]+'File = open(dir+"\\"+file+"'+fieldPaths[x]+'","r")', '<string>', 'exec')),[x for x in range(len(fieldList))])
for x in range(len(fieldList)):
cmd = fieldList[x]+'File = open("'+dir+'\\\\'+file+fieldPaths[x]+'","r")'
exec(cmd)
#ingore first line
canFile.readline()
gpsFile.readline()
imuFile.readline()
roadFile.readline()
objFile.readline()
#read files
for x in fieldList:
exec(x+"Lines = "+x+"File.readlines()")
#init line index
for x in fieldList:
exec(x+"LineIndex = 0")
for canLine in canLines:
canLine = canLine[:-1]
canFields = canLine.split(',')
timeprefix = canFields[0][:11]
#print timeprefix
thisLine["timestamp"] = timeprefix
thisLine["time"] = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(int(timeprefix[:-1])))
canData = {}
canData['siVehicleSpeed'] = canFields[1]
canData['siSteeringAngle'] = canFields[2]
canData['siSteeringVelocity'] = canFields[3]
canData['siBrakePressure'] = canFields[-5]
canData['siAccelerationPedal'] = canFields[-1]
tmp = matchLine(timeprefix,gpsLines,gpsLineIndex)
#print tmp
if tmp == -1:
continue
else:
gpsData = {}
gpsLineIndex = tmp
gpsFields = gpsLines[gpsLineIndex][:-1].split(',')
gpsData['latitude'] = gpsFields[6]
gpsData['longitude'] = gpsFields[7]
gpsData['altitude'] = gpsFields[8]
location = {}
location['lat'] = gpsFields[6]
location['lon'] = gpsFields[7]
gpsData['location'] = location
gpsLineIndex+=1
tmp = matchLine(timeprefix,imuLines,imuLineIndex)
if tmp == -1:
continue
else:
imuData = {}
imuLineIndex = tmp
imuFields = imuLines[imuLineIndex][:-1].split(',')
imuData['orientation_x'] = imuFields[4]
imuData['orientation_y'] = imuFields[5]
imuData['orientation_z'] = imuFields[6]
imuData['angular_velocity_x'] = imuFields[17]
imuData['angular_velocity_y'] = imuFields[18]
imuData['angular_velocity_z'] = imuFields[19]
imuData['linear_acceleration_x'] = imuFields[29]
imuData['linear_acceleration_y'] = imuFields[30]
imuData['linear_acceleration_z'] = imuFields[31]
imuLineIndex += 1
tmp = matchLine(timeprefix,roadLines,roadLineIndex)
if tmp == -1:
continue
else:
roadData = {}
roadLineIndex = tmp
roadFields = roadLines[roadLineIndex][:-1].split(',')
roadData['leftType'] = roadFields[1]
roadData['righType'] = roadFields[2]
roadData['curvature'] = roadFields[5]
roadData['heading'] = roadFields[6]
roadData['leftOffset'] = roadFields[7]
roadData['rightOffset'] = roadFields[8]
roadLineIndex += 1
tmp = matchLine(timeprefix,objLines,objLineIndex)
if tmp == -1:
continue
else:
objData = []
objLineIndex = tmp
objFields = objLines[objLineIndex][:-1].split(',')
num = len(objFields)//9
if objFields[-1]=='1' and len(objFields)%9==2:
for i in range(num):
obj = {}
obj['id'] = objFields[i*9+1]
obj['x'] = objFields[i*9+2]
obj['y'] = objFields[i*9+3]
obj['relspeed'] = objFields[i*9+4]
obj['width'] = objFields[i*9+5]
obj['length'] = objFields[i*9+6]
obj['height'] = objFields[i*9+7]
obj['classification'] = objFields[i*9+9]
objData.append(obj)
else:
print timeprefix+" obj failed"
objLineIndex += 1
tmp = matchLine(timeprefix,imageLines,imageLineIndex)
if tmp == -1:
continue
else:
imageData = ''
imageLineIndex = tmp
imageFields = imageLines[imageLineIndex][:-1].split(',')
imageData = imageFields[1]
imageLineIndex += 1
tmp = matchLine(timeprefix,webpLines,webpLineIndex)
if tmp == -1:
continue
else:
webpData = ''
webpLineIndex = tmp
webpFields = webpLines[webpLineIndex][:-1].split(',')
webpData = webpFields[1]
webpLineIndex += 1
tmp = matchLine(timeprefix,image_markedLines,image_markedLineIndex)
if tmp == -1:
continue
else:
image_markedData = ''
image_markedLineIndex = tmp
image_markedFields = image_markedLines[image_markedLineIndex][:-1].split(',')
image_markedData = image_markedFields[1]
image_markedLineIndex += 1
tmp = matchLine(timeprefix,webp_markedLines,webp_markedLineIndex)
if tmp == -1:
continue
else:
webp_markedData = ''
webp_markedLineIndex = tmp
webp_markedFields = webp_markedLines[webp_markedLineIndex][:-1].split(',')
webp_markedData = webp_markedFields[1]
webp_markedLineIndex += 1
tmp = matchLine(timeprefix,pcl1Lines,pcl1LineIndex)
if tmp == -1:
continue
else:
pcl1Data = ''
pcl1LineIndex = tmp
pcl1Fields = pcl1Lines[pcl1LineIndex][:-1].split(',')
pcl1Data = pcl1Fields[1]
pcl1LineIndex += 1
tmp = matchLine(timeprefix,pcl1_imageLines,pcl1_imageLineIndex)
if tmp == -1:
continue
else:
pcl1_imageData = ''
pcl1_imageLineIndex = tmp
pcl1_imageFields = pcl1_imageLines[pcl1_imageLineIndex][:-1].split(',')
pcl1_imageData = pcl1_imageFields[1]
pcl1_imageLineIndex += 1
#value thisline
for x in fieldList:
exec("thisLine['"+x+"'] = "+x+"Data")
result.append(thisLine)
#json_str = json.dumps(thisLine)
#id += 1
#resultFile.write('{"index":{"_id":"'+str(id)+'"}}\n')
#resultFile.write(json_str+'\n')
thisLine = {}
thisLine['city'] = city
#if len(result) >=300:
# print timeprefix
# break
rs = {"result":result}
json_str = json.dumps(rs)
resultFile.write(json_str)
#close files
for x in fieldList:
exec(x+"File.close()")
resultFile.close()
| [
"1435842616@qq.com"
] | 1435842616@qq.com |
9c4fb21102c3b1103255d3567f24a7281093eb0f | daa5f181ae982030a26777ca74080946cb29b8c5 | /rottencherries_v3/account/views.py | d431779bf209504c12739d5c1c7075b1d43b4888 | [] | no_license | hannibal1296/Project | a4e6f2fedee106559d705c3a955fb5b7d3c77731 | 3884dc380713684ec0c80b39a51aca9b401cabd0 | refs/heads/master | 2021-05-14T08:01:36.150077 | 2018-03-07T05:29:30 | 2018-03-07T05:29:30 | 116,282,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,054 | py | from django.shortcuts import render, redirect
from .models import *
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from mainpage import views as mainpage_view
from django.contrib.auth.decorators import login_required
from .forms import UserForm, StudentForm
from django import forms
import pdb
from django.contrib.auth.forms import UserCreationForm
prev_url = None
def goto_login(request):
    """Remember the page the user came from, then show the login form.

    NOTE(review): ``prev_url`` is a module-level global, so it is shared by
    all requests served by this process -- concurrent users can overwrite
    each other's saved URL. Consider storing it in the session instead.
    """
    global prev_url
    prev_url = request.META['HTTP_REFERER']  # saved so make_login can redirect back
    return render(request, 'account/logintemplate.html')
def make_login(request):
    """Authenticate the submitted credentials and log the user in.

    On success, redirects back to the URL saved by ``goto_login``; on
    failure, re-renders the login form with an error flag.

    NOTE(review): if this view is hit without ``goto_login`` running first,
    ``prev_url`` is None and ``redirect(ppreve_url)`` will fail -- confirm
    the intended flow always passes through ``goto_login``.
    """
    global prev_url
    # Consume the saved referrer (reset the global before redirecting).
    ppreve_url = prev_url
    prev_url = None
    user_id = request.POST.get('login_id_box', None)
    user_pw = request.POST.get('login_pw_box', None)
    user = authenticate(username=user_id, password=user_pw)
    if user is not None:
        login(request, user)
        return redirect(ppreve_url)
    else:
        # Bad credentials: show the form again with an error indicator.
        context = {'is_incorrect': True}
        return render(request, 'account/logintemplate.html', context)
def make_signup(request):
    """Create a User plus its Student profile from the signup form POST.

    Re-renders the signup template with a specific error flag when the
    department/major is missing, the username is taken, or the student id
    already exists; otherwise creates both records, logs the user in and
    redirects to the main page.
    """
    user_id = request.POST.get('username_box', None)
    pw = request.POST.get('pw_box', None)
    email = request.POST.get('email_box', None)
    sid = request.POST.get('sid_box', None)
    dept = request.POST.get('dept_box', None)
    major = request.POST.get('major_box', None)
    year = request.POST.get('year_box', None)
    dept_all = mainpage_view.get_all_dept()
    major_all = mainpage_view.get_all_major()
    if dept == "0" or major == "0":  # department or major was not selected
        return render(request, 'account/signuptemplate.html',
                      {'empty_slot': True, 'dept_list': dept_all, 'major_list': major_all})
    user = User.objects.filter(username=user_id)
    if len(user) == 1:  # the username already exists
        return render(request, 'account/signuptemplate.html',
                      {'already_id': True, 'dept_list': dept_all, 'major_list': major_all})
    stu = Student.objects.filter(s_id=sid)
    if len(stu) == 1:  # the student id (s_id) already exists
        return render(request, 'account/signuptemplate.html',
                      {'already_sid': True, 'dept_list': dept_all, 'major_list': major_all})
    # Look up the chosen major/department rows by name; assumes both names
    # exist in the database (index [0] below) -- TODO confirm.
    m_obj = Major.objects.filter(m_name=major)
    d_obj = Department.objects.filter(d_name=dept)
    user = User.objects.create_user(username=user_id, password=pw, email=email)
    Student(user=user, s_id=sid, s_d=d_obj[0], s_m=m_obj[0], username=user_id, s_email=email, since_y=int(year)).save()
    login(request, user)
    return redirect('/mainpage/')
@login_required
def make_logout(request):
    """Log the current user out and return them to the page they were on."""
    logout(request)
    return redirect(request.META['HTTP_REFERER'])
def signup(request):
    """Form-based signup view (alternative to ``make_signup``).

    GET: render empty UserForm/StudentForm. POST: validate both forms,
    create the User and attach it to the Student profile, then redirect to
    the URL previously saved in the module-global ``prev_url``.

    NOTE(review): if ``prev_url`` was never set before a valid POST,
    ``redirect(pprev_url)`` receives None -- confirm the intended flow.
    An invalid POST falls through and re-renders the bound forms.
    """
    global prev_url
    if request.method == "POST":
        # user_form = UserCreationForm(request.POST)
        user_form = UserForm(request.POST)
        student_form = StudentForm(request.POST)
        if user_form.is_valid() and student_form.is_valid():
            user = User.objects.create_user(username=user_form.cleaned_data['username'],
                                            password=user_form.cleaned_data['password'],
                                            email=user_form.cleaned_data['email'])
            # commit=False so the new User can be attached before saving.
            student = student_form.save(commit=False)
            student.user = user
            # First 4 digits of the student id are treated as the entry year.
            year = request.POST.get('s_id', None)
            if year:
                year = int(year[0:4])
                student.since_y = year
            student.save()
            # Consume the saved referrer and go back there.
            pprev_url = prev_url
            prev_url = None
            return redirect(pprev_url)
    else:
        user_form = UserForm()
        student_form = StudentForm()
    dept_list = mainpage_view.get_all_dept()
    major_list = mainpage_view.get_all_major()
    return render(request, 'account/signup.html',
                  {'user_form': user_form, 'student_form': student_form, 'dept_list': dept_list,
                   'major_list': major_list})
| [
"show1296@gmail.com"
] | show1296@gmail.com |
94b4e8d8f567da5caa24b59eddc1e618e7006a22 | 12c41119156dd3783c3801e07f5f973289f26bb0 | /aliyun-python-sdk-dbs/aliyunsdkdbs/request/v20190306/ModifyBackupPlanNameRequest.py | ba55c010034c829610d62b54c02abad12cceb8cd | [
"Apache-2.0"
] | permissive | toywei/aliyun-openapi-python-sdk | bfe0893da38af9b222ce072fd7587d5b6cdce204 | ce8f683e3201fca8c473512267f50a34f71e31d3 | refs/heads/master | 2020-08-07T23:42:00.053692 | 2019-10-08T08:50:21 | 2019-10-08T08:50:21 | 213,626,962 | 1 | 0 | NOASSERTION | 2019-10-08T11:43:15 | 2019-10-08T11:43:15 | null | UTF-8 | Python | false | false | 1,697 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyBackupPlanNameRequest(RpcRequest):
	"""RPC request for the DBS (2019-03-06) ``ModifyBackupPlanName`` action:
	renames an existing backup plan. Each query parameter is exposed via the
	getter/setter pairs below, following the aliyun-python-sdk convention."""
	def __init__(self):
		RpcRequest.__init__(self, 'Dbs', '2019-03-06', 'ModifyBackupPlanName','cbs')
	def get_BackupPlanName(self):
		# New display name for the backup plan.
		return self.get_query_params().get('BackupPlanName')
	def set_BackupPlanName(self,BackupPlanName):
		self.add_query_param('BackupPlanName',BackupPlanName)
	def get_ClientToken(self):
		# Caller-supplied idempotency token.
		return self.get_query_params().get('ClientToken')
	def set_ClientToken(self,ClientToken):
		self.add_query_param('ClientToken',ClientToken)
	def get_BackupPlanId(self):
		# Identifier of the backup plan to rename.
		return self.get_query_params().get('BackupPlanId')
	def set_BackupPlanId(self,BackupPlanId):
		self.add_query_param('BackupPlanId',BackupPlanId)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
1db358528dfe7eb150bfcf52b137cce3df1bb254 | 027dd49b92ee92c8faa5ea05bce95d28efd2268d | /Documents/django/crudView/crudapp/migrations/0001_initial.py | 03b4d87a72cb9fdab2c706d0b7c2ab583aa93a89 | [] | no_license | arunkumar27-ank-tech/RestAPIcrud | 0ac06a4f0b6cf3373eb76b815e3cd6c5748610d5 | 387c5fad78f4b72cfbbe47d06e79c1a15038ad69 | refs/heads/master | 2023-06-13T21:44:08.157685 | 2021-07-06T14:11:12 | 2021-07-06T14:11:12 | 383,477,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.1.5 on 2021-07-06 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Todo table with a
    # title and a completed flag (defaulting to not completed).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('completed', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"arunkumar834428@gmail.com"
] | arunkumar834428@gmail.com |
246e721355400a47a1dc99c1418a351878281f59 | 67a62a0ea5c5daed433e659dceb8dc1bb83a5dbb | /game_interface.py | 08e132e3fc9f92242d14842bf310675736b05cd3 | [] | no_license | hengkan/heng | fa742ef52028cccac5a08ab18672c241b765ca54 | 44eb8db238fbae3e4b2b1d9adbea3fff270792de | refs/heads/master | 2020-03-19T18:39:40.346388 | 2018-06-10T15:08:05 | 2018-06-10T15:08:05 | 136,818,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,886 | py | """
The module used to play our games.
Please fill in the TODO's (i.e. importing games and strategies as needed)
Note: You do not have to run python_ta on this file.
You may import your games from A1 (i.e. Chopsticks). However, the minimax
strategy cannot be used on Chopsticks unless you account for infinite loops.
(You do not have to worry about this for the assignment: only do it for
your own curiousity!)
"""
# TODO: import the modules needed to make game_interface run.
from strategy import *
from typing import Any, Callable
from subtract_square_game import SubtractSquareGame
from stonehenge import *
# Maps the single-letter key a user types at the prompt to the game class
# that will be instantiated ('s' = Subtract Square, 'h' = Stonehenge).
playable_games = {'s': SubtractSquareGame,
                  'h': StonehengeGame}
# Maps the strategy code a user types to the move-selection function:
# 'i' = interactive (human), 'ro' = rough-outcome heuristic,
# 'mr' = recursive minimax, 'mi' = iterative minimax.
usable_strategies = {'i': interactive_strategy,
                     'ro': rough_outcome_strategy,
                     'mr': recursive_minimax,
                     'mi': iterative_minimax}
class GameInterface:
    """
    A game interface for a two-player, sequential move, zero-sum,
    perfect-information game.
    """
    def __init__(self, game: Any, p1_strategy: Callable,
                 p2_strategy: Callable[[Any], Any]) -> None:
        """
        Initialize this GameInterface: prompt for who moves first,
        instantiate the game with that choice, and record each player's
        move-selection strategy.

        :param game: The game class to be played (called here with a bool
            saying whether Player 1 moves first).
        :param p1_strategy: Strategy function for Player 1; takes the game
            and returns a move.
        :param p2_strategy: Strategy function for Player 2; same contract.
        """
        first_player = input("Type y if player 1 is to make the first move: ")
        is_p1_turn = False
        if first_player.lower() == 'y':
            is_p1_turn = True
        self.game = game(is_p1_turn)
        self.p1_strategy = p1_strategy
        self.p2_strategy = p2_strategy
    def play(self) -> None:
        """
        Play the game interactively until it is over, then announce the
        winner (or a tie). Each turn prints the available moves, asks the
        current player's strategy for a move until a valid one is returned,
        applies it, and prints the resulting state.
        """
        current_state = self.game.current_state
        print(self.game.get_instructions())
        print(current_state)
        # Pick moves until the game is over
        while not self.game.is_over(current_state):
            move_to_make = None
            # Print out all of the valid moves
            possible_moves = current_state.get_possible_moves()
            print("The current available moves are:")
            for move in possible_moves:
                print(move)
            # Keep asking the current player's strategy until it produces
            # a legal move (None is never valid, so this runs at least once).
            while not current_state.is_valid_move(move_to_make):
                current_strategy = self.p2_strategy
                if current_state.get_current_player_name() == 'p1':
                    current_strategy = self.p1_strategy
                move_to_make = current_strategy(self.game)
            # Apply the move
            current_player_name = current_state.get_current_player_name()
            new_game_state = current_state.make_move(move_to_make)
            self.game.current_state = new_game_state
            current_state = self.game.current_state
            print("{} made the move {}. The game's state is now:".format(
                current_player_name, move_to_make))
            print(current_state)
        # Print out the winner of the game
        if self.game.is_winner("p1"):
            print("Player 1 is the winner!")
        elif self.game.is_winner("p2"):
            print("Player 2 is the winner!")
        else:
            print("It's a tie!")
if __name__ == '__main__':
    # Build human-readable "'key': Name" menus from the two tables above
    # (a key mapped to None is shown as "'key': None").
    games = ", ".join(["'{}': {}".format(key, playable_games[key].__name__) if
                       playable_games[key] is not None else
                       "'{}': None".format(key) for key in playable_games])
    strategies = ", ".join(["'{}': {}".format(key,
                                              usable_strategies[key].__name__)
                            if usable_strategies[key] is not None else
                            "'{}': None".format(key)
                            for key in usable_strategies])
    # Re-prompt until the user picks a known game and two known strategies.
    chosen_game = ''
    while chosen_game not in playable_games.keys():
        chosen_game = input(
            "Select the game you want to play ({}): ".format(games))
    p1 = ''
    p2 = ''
    while p1 not in usable_strategies.keys():
        p1 = input("Select the strategy for Player 1 ({}): ".format(strategies))
    while p2 not in usable_strategies.keys():
        p2 = input("Select the strategy for Player 2 ({}): ".format(strategies))
    GameInterface(playable_games[chosen_game], usable_strategies[p1],
                  usable_strategies[p2]).play()
| [
"noreply@github.com"
] | hengkan.noreply@github.com |
0527bee5e87be348d59d9a2dceebb0b42f5a6ea2 | c2be395eac600d0d853de03cd67070bd8391038f | /ofm_request_reverse_rd/__manifest__.py | 39a1213f2bec2e986a4933fa317933ec0a2efee5 | [] | no_license | amendoncabh/salary_emp | 960cfdb4df48df70ab361886039c790840a5e8d2 | 2ac2dd9461271153cb2ee406bf70a29f614c25f1 | refs/heads/master | 2022-03-30T22:35:10.704092 | 2020-01-05T16:23:20 | 2020-01-05T16:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Trinity Roots :: OFM Request Approve Reverse RD",
"summary": "For updating related project modules",
"version": "8.0.1.0.0",
"category": "Uncategorized",
"description": """
MODULE
======
* This module MUST be depended by related project module.
* If this module is updated, All related module will be updated too.
""",
"website": "http://www.trinityroots.co.th/",
"author": "Trinity Roots",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
# any module necessary for this one to work correctly
'depends': [
'base',
'web_notify',
'pos_customize',
'ofm_inventory_ext',
'tr_core_update',
],
# always loaded
'data': [
'security/request_reverse_rd_security.xml',
'security/ir.model.access.csv',
'views/ofm_request_reverse_view.xml',
'views/stock_view.xml',
'wizard/reason_reject_wizard_view.xml',
'wizard/reason_approve_wizard_view.xml',
],
}
| [
"loveprakon@gmail.com"
] | loveprakon@gmail.com |
58810b1fc046396db89749b69a23a37c30c942cb | 8204ba94c61602331fc4942e3cc53cbafe8d11b3 | /excel-word-pdf/py-pdf.py | 800d627689b0c3bfa4acb41970fe71899926d17a | [] | no_license | mkhira2/automate-the-boring-stuff | 73ee0bb5d68e13d8445d4a80f93e784c46895217 | b3cf67e2309a3469581ab3cc4dd451d0132b09d4 | refs/heads/master | 2021-06-12T00:43:42.176697 | 2019-10-02T16:24:37 | 2019-10-02T16:24:37 | 136,773,031 | 0 | 1 | null | 2019-10-02T16:24:38 | 2018-06-10T02:25:44 | Python | UTF-8 | Python | false | false | 1,140 | py | import PyPDF2, os
# download example pdfs from:
# http://autbor.com/meetingminutes1.pdf
# http://autbor.com/meetingminutes2.pdf
# rb = read binary, wb = write binary
# necessary for pdf files
os.chdir('c:\\users\\mkhir\\documents')
pdfFile = open('meetingminutes1.pdf', 'rb')
reader = PyPDF2.PdfFileReader(pdfFile)
print(reader.numPages) # print number of pages
page = reader.getPage(0)
print(page.extractText()) # print text from page 1
for pageNum in range(reader.numPages):
print(reader.getPage(pageNum).extractText()) # print all pdf text
# combining pdf's
pdf1File = open('meetingminutes1.pdf', 'rb')
pdf2File = open('meetingminutes2.pdf', 'rb')
reader1 = PyPDF2.PdfFileReader(pdf1File)
reader2 = PyPDF2.PdfFileReader(pdf2File)
writer = PyPDF2.PdfFileWriter()
for pageNum in range(reader1.numPages):
page = reader1.getPage(pageNum)
writer.addPage(page)
for pageNum in range(reader2.numPages):
page = reader2.getPage(pageNum)
writer.addPage(page)
outputFile = open('combinedminutes.pdf', 'wb')
writer.write(outputFile) # writes new pdf to hard drive
outputFile.close()
pdf1File.close()
pdf2File.closer()
| [
"mkhira2@gmail.com"
] | mkhira2@gmail.com |
d776bc09bc587e3731a54a38fe8a68057100c76e | a25056c5a5a8e4209bca9ee8dc1fbdd21148dfae | /test_cramers_rule.py | 986da3fa44c63a03fd4d6d7070967721502ab105 | [] | no_license | ElFanzo/Cramer-s-rule | 9606d8dab152e3f7ed9300159fb63a098e1f0279 | 1d7e211bedf02e9e84394199e306925e57bde312 | refs/heads/master | 2020-07-29T14:55:18.895332 | 2020-03-04T17:00:22 | 2020-03-04T17:00:22 | 209,852,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,424 | py | from unittest import main, TestCase
from cramers_rule import arr_remove, arr_replace_column, get_det, solve
class TestSolve(TestCase):
    """Tests for cramers_rule.solve, which solves A*x = b via Cramer's rule.

    Based on the expected values below, solve() returns the solution as a
    tuple and raises ValueError for empty, mismatched, non-square, or
    singular (zero-determinant) systems.
    """
    def test_correct_array(self):
        """Valid 3x3, 2x2, 4x4 and 1x1 systems produce the exact solution tuple."""
        arr_a = [
            [2, 5, 4],
            [1, 3, 2],
            [2, 10, 9],
        ]
        arr_b = [30, 150, 110]
        self.assertTupleEqual(solve(arr_a, arr_b), (-152, 270, -254))
        arr_a = [
            [1, -1],
            [3, 2],
        ]
        arr_b = [7, 16]
        self.assertTupleEqual(solve(arr_a, arr_b), (6, -1))
        arr_a = [
            [2, -4, 1, -5],
            [4, -7, -1, -8],
            [10, -18, 2, -23],
            [2, -3, 1, -1],
        ]
        arr_b = [2, -5, 3, 0]
        self.assertTupleEqual(solve(arr_a, arr_b), (1, 2, 3, -1))
        # Degenerate 1x1 system: 2*x = 14.
        arr_a = [2]
        arr_b = [14]
        self.assertTupleEqual(solve(arr_a, arr_b), (7,))
    def test_empty_array(self):
        """Empty coefficient and constant lists are rejected."""
        with self.assertRaises(ValueError):
            solve([], [])
    def test_wrong_size_array(self):
        """Row counts of A and b must agree (and rows of A must be uniform)."""
        arr_a = [2]
        arr_b = [14, 4]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
        arr_a = [
            [1, 4],
            [5, 10],
        ]
        arr_b = [6, 5, 0]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
        arr_a = [
            [1, 4],
            [5, 10],
            [4, 6, 8],
        ]
        arr_b = [6, 5, 0]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
    def test_non_quadratic_array(self):
        """Non-square coefficient matrices are rejected."""
        arr_a = [
            [1, 5],
            [13, 60],
            [14, 0],
        ]
        arr_b = [15, 17]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
        arr_a = [4, 6, 8]
        arr_b = [6, 5, 0]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
    def test_array_with_zero_determinant(self):
        """A singular matrix (det == 0) has no unique solution -> ValueError."""
        arr_a = [
            [1, -2],
            [-2, 4],
        ]
        arr_b = [6, 0]
        with self.assertRaises(ValueError):
            solve(arr_a, arr_b)
class TestArrReplaceColumn(TestCase):
    """Tests for arr_replace_column(a, b, idx).

    Per the expected values, it returns a copy of matrix ``a`` with column
    ``idx`` replaced by vector ``b`` (the numerator matrix of Cramer's rule).
    Negative indices follow Python indexing; out-of-range raises IndexError.
    """
    def setUp(self) -> None:
        # Shared 3x3 system reused by most tests.
        self.arr_a = [
            [2, 5, 4],
            [1, 3, 2],
            [2, 10, 9],
        ]
        self.arr_b = [30, 150, 110]
    def test_array(self):
        """Each column index 0..2 is replaced by arr_b in turn."""
        self.assertListEqual(
            arr_replace_column(self.arr_a, self.arr_b, 0),
            [
                [30, 5, 4],
                [150, 3, 2],
                [110, 10, 9],
            ]
        )
        self.assertListEqual(
            arr_replace_column(self.arr_a, self.arr_b, 1),
            [
                [2, 30, 4],
                [1, 150, 2],
                [2, 110, 9],
            ]
        )
        self.assertListEqual(
            arr_replace_column(self.arr_a, self.arr_b, 2),
            [
                [2, 5, 30],
                [1, 3, 150],
                [2, 10, 110],
            ]
        )
    def test_single_array(self):
        """A 1x1 matrix degenerates to a flat single-element list."""
        arr_a = [2]
        arr_b = [3]
        self.assertListEqual(arr_replace_column(arr_a, arr_b, 0), [3])
    def test_negative_index(self):
        """Index -1 addresses the last column, like normal Python indexing."""
        self.assertListEqual(
            arr_replace_column(self.arr_a, self.arr_b, -1),
            [
                [2, 5, 30],
                [1, 3, 150],
                [2, 10, 110],
            ]
        )
    def test_wrong_index(self):
        """A column index beyond the matrix width raises IndexError."""
        with self.assertRaises(IndexError):
            arr_replace_column(self.arr_a, self.arr_b, 3)
class TestArrRemove(TestCase):
    """Tests for arr_remove(a, idx).

    Per the expected values, it returns the minor of matrix ``a``: the first
    row and column ``idx`` are removed (as used in cofactor expansion of the
    determinant). A 2x2 input collapses to a flat single-element list.
    """
    def setUp(self) -> None:
        # Shared 3x3 matrix reused by most tests.
        self.arr = [
            [2, 5, 4],
            [1, 3, 2],
            [2, 10, 9],
        ]
    def test_array(self):
        """Removing row 0 and each column 0..2 yields the expected 2x2 minor."""
        self.assertListEqual(
            arr_remove(self.arr, 0),
            [
                [3, 2],
                [10, 9],
            ]
        )
        self.assertListEqual(
            arr_remove(self.arr, 1),
            [
                [1, 2],
                [2, 9],
            ]
        )
        self.assertListEqual(
            arr_remove(self.arr, 2),
            [
                [1, 3],
                [2, 10],
            ]
        )
    def test_tetra_array(self):
        """A 2x2 matrix reduces to the single remaining element (flat list)."""
        arr = [
            [3, 4],
            [6, 7]
        ]
        self.assertListEqual(arr_remove(arr, 0), [7])
        self.assertListEqual(arr_remove(arr, 1), [6])
    def test_negative_index(self):
        """Index -1 removes the last column."""
        self.assertListEqual(
            arr_remove(self.arr, -1),
            [
                [1, 3],
                [2, 10],
            ]
        )
    def test_wrong_index(self):
        """A column index beyond the matrix width raises IndexError."""
        with self.assertRaises(IndexError):
            arr_remove(self.arr, 3)
class TestGetDet(TestCase):
    """Determinant computation (cramers_rule.get_det) for square matrices."""
    def test_array(self):
        """2x2, 3x3 and 4x4 matrices produce the expected determinants."""
        cases = [
            ([[2, 5, 4],
              [1, 3, 2],
              [2, 10, 9]], 5),
            ([[1, 1],
              [3, 2]], -1),
            ([[2, -4, 1, -5],
              [4, -7, -1, -8],
              [10, -18, 2, -23],
              [2, -3, 1, -1]], 24),
        ]
        for matrix, expected in cases:
            self.assertEqual(get_det(matrix), expected)
    def test_single_array(self):
        """The determinant of a 1x1 matrix is its only element."""
        self.assertEqual(get_det([2]), 2)
    def test_array_with_zero_determinant(self):
        """Linearly dependent rows yield a zero determinant."""
        singular = [
            [1, -2],
            [-2, 4],
        ]
        self.assertEqual(get_det(singular), 0)
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
    main()
| [
"fan.hakimov2010@yandex.ru"
] | fan.hakimov2010@yandex.ru |
2ea601fe51a05f0441db552e2eccc67851d668d1 | 1c28a4d2a451b6220b14f486132b17a64308af10 | /venv/Scripts/pip3.7-script.py | 34ea88bd371865854c1428579324623e0b82f03c | [] | no_license | color-me/HOKA-django | a0c53fe08397ec3908a77849a500c8fe2f23316f | 6a42f5717feaa3e723728b1a6d2f2c956cee47cf | refs/heads/master | 2021-05-22T13:34:49.215552 | 2020-06-12T07:45:58 | 2020-06-12T07:45:58 | 252,945,612 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!D:\PycharmProjects\first\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
# NOTE: auto-generated setuptools console-script wrapper; do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] looks like plain "pip3.7".
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| [
"agent.chensf@gmail.com"
] | agent.chensf@gmail.com |
088a53bea4709075e3b76018e76868d0ff7b892b | 9b42089df76ec33388fbece8d309cd8f0cd96c5a | /todoapp/wsgi.py | 81fb5453e0e952a6eb46f3b1037c57f7689e4961 | [
"Unlicense"
] | permissive | YuliyaZhdanova/module12 | 5d953046152ea550ca8a3122af2d68914a5d25ff | 65c2e4469532d08a9bff736f86ade6b2f1e724ed | refs/heads/master | 2021-06-25T09:24:53.206503 | 2019-07-24T18:49:59 | 2019-07-24T18:49:59 | 198,691,657 | 0 | 0 | Unlicense | 2021-06-10T21:45:43 | 2019-07-24T18:43:54 | Python | UTF-8 | Python | false | false | 391 | py | """
WSGI config for todoapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todoapp.settings')
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"k4ty@mail.ru"
] | k4ty@mail.ru |
2aa8324aee23f64603e3406c3de9441e9cb98c51 | 4b4544e5860bf2776ef578ba8e91dd34a9cf2b80 | /nodejs/patches/pkgsrc/lang/nodejs/patches/patch-deps_cares_cares.gyp | ba1548a9de0c9d65a856346f95ff4d5904181d81 | [
"CC0-1.0"
] | permissive | nabla-containers/rumprun-packages | 1e00e5cf0b6995f1772e8dff6b20d7d064ac71cf | 687c6dab278ff3dba68b914e1ed0511eb5525551 | refs/heads/solo5 | 2021-07-08T10:42:24.436007 | 2019-02-21T22:39:36 | 2019-02-21T22:43:57 | 137,268,640 | 1 | 4 | NOASSERTION | 2019-02-20T02:29:18 | 2018-06-13T20:44:12 | Makefile | UTF-8 | Python | false | false | 647 | gyp | $NetBSD: patch-deps_cares_cares.gyp,v 1.1 2013/05/22 15:17:07 mspo Exp $
Add support for NetBSD.
--- deps/cares/cares.gyp.orig 2013-03-14 10:55:24.000000000 +0900
+++ deps/cares/cares.gyp 2013-03-14 10:55:47.000000000 +0900
@@ -140,6 +140,10 @@
'include_dirs': [ 'config/freebsd' ],
'sources': [ 'config/freebsd/ares_config.h' ]
}],
+ [ 'OS=="netbsd"', {
+ 'include_dirs': [ 'config/netbsd' ],
+ 'sources': [ 'config/netbsd/ares_config.h' ]
+ }],
[ 'OS=="openbsd"', {
'include_dirs': [ 'config/openbsd' ],
'sources': [ 'config/openbsd/ares_config.h' ]
| [
"dahalls@gmail.com"
] | dahalls@gmail.com |
f1c2b26a52b2931804652f2426eac16b54d10b88 | 27c38ecfc6d13728bd716ac9fb908683525d22cd | /variablesEvaluation/barras.py | 56c53019e601e00d6c0a3d2db0304b9a95b655c3 | [] | no_license | ccsc-research/trusted-system | 23a5aee3a13298b60a030280a956c3b3edcb7d4d | 256a103f0858ee1223b37142301d46bf352aefdd | refs/heads/main | 2023-03-21T17:42:18.760442 | 2021-03-18T15:26:00 | 2021-03-18T15:26:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,134 | py | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.markers
import sys
import numpy as np
import statistics
# Grouped bar chart comparing accuracy/precision/recall/F1 (with stdev error
# bars) for three systems. Input: three CSVs (argv[1..3]) whose columns are
# [accuracy, precision, recall, f1]; argv[4] names the dataset for the output
# file names. Labels are in Portuguese.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['figure.figsize'] = (12,10)
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams['font.size'] = 36
#df = pd.read_csv(sys.argv[1], header=1, sep=';')
#print(df)
dataset = str(sys.argv[4])
entrada1=pd.read_csv(sys.argv[1])
entrada2=pd.read_csv(sys.argv[2])
entrada3=pd.read_csv(sys.argv[3])
#entrada4=pd.read_csv(sys.argv[4])
#entrada5=pd.read_csv(sys.argv[5])
# Per-metric column series for each input file.
acuracias = []
precisoes = []
recalls = []
fscores = []
# Means (am/pm/rm/fm) and stdevs (ad/ppd/rd/fd) per system.
am =[]
ad =[]
pm =[]
ppd =[]
rm =[]
rd =[]
fm =[]
fd = []
acuracias.append(entrada1.iloc[:,0])
acuracias.append(entrada2.iloc[:,0])
acuracias.append(entrada3.iloc[:,0])
#acuracias.append(entrada4.iloc[:,0])
#acuracias.append(entrada5.iloc[:,0])
precisoes.append(entrada1.iloc[:,1])
precisoes.append(entrada2.iloc[:,1])
precisoes.append(entrada3.iloc[:,1])
#precisoes.append(entrada4.iloc[:,1])
#precisoes.append(entrada5.iloc[:,1])
recalls.append(entrada1.iloc[:,2])
recalls.append(entrada2.iloc[:,2])
recalls.append(entrada3.iloc[:,2])
#recalls.append(entrada4.iloc[:,2])
#recalls.append(entrada5.iloc[:,2])
fscores.append(entrada1.iloc[:,3])
fscores.append(entrada2.iloc[:,3])
fscores.append(entrada3.iloc[:,3])
#fscores.append(entrada4.iloc[:,3])
#fscores.append(entrada5.iloc[:,3])
# Aggregate each metric per system: mean first, then standard deviation.
for i in range(0, 3, 1):
    am.append(statistics.mean(acuracias[i]))
    pm.append(statistics.mean(precisoes[i]))
    rm.append(statistics.mean(recalls[i]))
    fm.append(statistics.mean(fscores[i]))
for i in range(0, 3, 1):
    ad.append(statistics.stdev(acuracias[i]))
    ppd.append(statistics.stdev(precisoes[i]))
    rd.append(statistics.stdev(recalls[i]))
    fd.append(statistics.stdev(fscores[i]))
# Four side-by-side bars per system, offset by barwidth.
barwidth=0.20
r1 = np.arange(len(am))
r2 = [x + barwidth for x in r1]
r3 = [x + barwidth for x in r2]
r4 = [x + barwidth for x in r3]
f = plt.figure()
print(am, ad)
#plt.bar(r1, am, color='r', width=barwidth, label='Accuracy', yerr=ad)
#plt.bar(r2, pm, color='blue', width=barwidth, label='Precision', yerr=ppd)
#plt.bar(r3, rm, color='g', width=barwidth, label='Recall', yerr=rd)
#plt.bar(r4, fm, color='orange', width=barwidth, label='F1-Score', yerr=fd)
plt.bar(r1, am, color='r', width=barwidth, label='Acurácia', yerr=ad)
plt.bar(r2, pm, color='blue', width=barwidth, label='Precisão', yerr=ppd)
plt.bar(r3, rm, color='g', width=barwidth, label='Recall', yerr=rd)
plt.bar(r4, fm, color='orange', width=barwidth, label='F1-Score', yerr=fd)
plt.xlabel('Sistemas')
plt.xticks([r + barwidth for r in range(3)], ['TRUSTED\nc/ HAC', 'FIXO\nc/ HAC', ' TRUSTED\nc/ ARF'])
plt.ylabel('')
plt.grid(True, axis='y')
#f.legend(loc='upper center', bbox_to_anchor=(0.5, 1.01),
#          ncol=2, fancybox=True, shadow=True)
plt.legend(loc='lower center', ncol=2)#, fontsize=30)
plt.ylim(0,1.1)
# Save the figure in three formats; assumes an "eps/" subdirectory exists.
f.savefig("compFinal{}_Port.pdf".format(dataset), bbox_inches='tight')
f.savefig("compFinal{}_Port.png".format(dataset), bbox_inches='tight')
f.savefig("eps/compFinal{}_Port.eps".format(dataset), bbox_inches='tight')
#plt.show()
#plt.show() | [
"31410440+BrunoSchwengber@users.noreply.github.com"
] | 31410440+BrunoSchwengber@users.noreply.github.com |
bc8c33b8c7c2217ef583a00773343483d531e800 | c7ffc57fa1155ac82b717d50baa5ffe26c92bc99 | /override.py | 6a9ed2662881f9bb153a96a3a6482855628a6507 | [] | no_license | QueenieCplusplus/Backend_Script3 | cdfc3994d79a96ed8577b507bdf1a4aeb59f0df9 | bb89999a57a261a42c05e81556b46ded0a97dc8c | refs/heads/master | 2020-11-24T19:08:42.484494 | 2020-11-09T01:06:31 | 2020-11-09T01:06:31 | 228,305,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # 2020, 1/14, AM 10:15, by Queenie Chen
# override
# Demonstrates method overriding: B.mmm overrides A.mmm.
class A:
    """Base class holding a counter ``i`` (default 0)."""
    def __init__(self, i=0):  # FIX: was "def__init__" (SyntaxError)
        self.i = i
    def mmm(self):
        # Base behaviour: scale the counter by 100.
        self.i *= 100
class B(A):
    """Subclass whose overridden mmm scales by 1000 instead of 100."""
    def __init__(self, j=0):  # FIX: was "def__init__" (SyntaxError)
        super().__init__(6)  # base counter always starts at 6
        self.j = j
    def mmm(self):
        self.i *= 1000
def main():
    """Show that b.mmm() dispatches to the override: prints 6000 then 0."""
    a = A()
    b = B()
    a.mmm()  # FIX: was a.mmm(9) -> TypeError (mmm takes no extra argument)
    b.mmm()  # B.mmm runs: 6 * 1000 == 6000
    print(b.i)
    print(b.j)
main()
| [
"noreply@github.com"
] | QueenieCplusplus.noreply@github.com |
14be5d11b7f486101bd0a54389493bdede54e2c1 | dbfb911ddb166f0522a7286ecef715708acdf2c5 | /src/constants.py | 3b3296ce8e292b2c19adb9f83282ff43297c6938 | [] | no_license | gabrielle2801/P3_MacGyver | cc4fae540cf3dc2e58eca33c8b64f2b767a8a6bc | d5042f1cd73b34c4863ba66c94cd50e99af5309c | refs/heads/master | 2021-02-12T17:17:38.856180 | 2020-03-07T13:23:03 | 2020-03-07T13:23:03 | 244,611,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | MAX_NUMBER_ITEM = 3
# Items the player must collect before facing the guard.
WEAPONS_LIST = ("needle", "plastic_tube", "ether")
# Sprite-sheet dimensions in pixels (17 x 15 tiles of 44 px).
SPRITES_LINES = 17 * 44
SPRITES_COLUMMS = 15 * 44  # NOTE: "COLUMMS" typo kept; renaming would break importers
sprite_size = 44  # tile edge in pixels (lowercase name kept for compatibility)
# Maze dimensions in tiles.
X_SIZE = 15
Y_SIZE = 15
| [
"gabrielleazadian@gmail.com"
] | gabrielleazadian@gmail.com |
9feb6015066f24f3eadcdf5ce0d2aad92b5b7785 | 8f34f7b1b586f852abe24c0af8ad9caf2d986148 | /app/core/migrations/0004_recipe.py | edff153df904646ba316e5c4c53776be66a5fdc2 | [
"MIT"
] | permissive | johan-uribe21/recipe-app-api | 1e65f7cfa0d0fdaa4d5e2bbbffe1e4ee20834db9 | 14e642e2e09a9b3974e989567756ae31843c1721 | refs/heads/master | 2020-07-07T06:17:37.933317 | 2019-10-29T13:49:04 | 2019-10-29T13:49:04 | 203,275,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | # Generated by Django 2.1.11 on 2019-08-28 23:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Recipe model.

    Recipe links M2M to Ingredient and Tag and FK (CASCADE) to the user
    model. Do not edit applied migrations by hand.
    """
    dependencies = [
        ('core', '0003_ingredient'),
    ]
    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('time_minutes', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('link', models.CharField(blank=True, max_length=255)),
                ('ingredients', models.ManyToManyField(to='core.Ingredient')),
                ('tags', models.ManyToManyField(to='core.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"johanandresuribe@gmail.com"
] | johanandresuribe@gmail.com |
ed240b64758a709dbdc34b22204a6fef55cf355e | b51ac97fc0dcb19c401f92a48e8657de9a2b86db | /tccli/services/tke/tke_client.py | a2c1e960dff0206080bec664527d89c408e09bbe | [
"Apache-2.0"
] | permissive | tarnover/tencentcloud-cli | 657b97d7b07997cff98456c111847d97324d9372 | 5b0537913a33884a20d7663405a8aa1c2276b41a | refs/heads/master | 2020-05-03T12:54:37.091798 | 2019-04-05T16:23:21 | 2019-04-05T16:23:21 | 178,639,639 | 0 | 0 | Apache-2.0 | 2019-04-05T05:36:09 | 2019-03-31T03:49:37 | Python | UTF-8 | Python | false | false | 12,592 | py | # -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.tke.v20180525 import tke_client as tke_client_v20180525
from tencentcloud.tke.v20180525 import models as models_v20180525
from tccli.services.tke import v20180525
from tccli.services.tke.v20180525 import help as v20180525_help
def doAddExistedInstances(argv, arglist):
    """CLI handler for the TKE ``AddExistedInstances`` API action.

    Parses global options and action flags from ``argv``, builds the SDK
    request, calls the API, and prints the JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("AddExistedInstances", g_param[OptionsDefine.Version])
        return
    # Action parameters collected from "--Flag" CLI arguments.
    param = {
        "ClusterId": Utils.try_to_json(argv, "--ClusterId"),
        "InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
        "InstanceAdvancedSettings": Utils.try_to_json(argv, "--InstanceAdvancedSettings"),
        "EnhancedService": Utils.try_to_json(argv, "--EnhancedService"),
        "LoginSettings": Utils.try_to_json(argv, "--LoginSettings"),
        "SecurityGroupIds": Utils.try_to_json(argv, "--SecurityGroupIds"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/models modules for the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.AddExistedInstancesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.AddExistedInstances(model)
    result = rsp.to_json_string()
    jsonobj = None
    try:
        jsonobj = json.loads(result)
    except TypeError as e:
        # Older Python 3 json.loads rejects bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeClusters(argv, arglist):
    """CLI handler for the TKE ``DescribeClusters`` API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeClusters", g_param[OptionsDefine.Version])
        return
    # Action parameters collected from "--Flag" CLI arguments.
    param = {
        "ClusterIds": Utils.try_to_json(argv, "--ClusterIds"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Filters": Utils.try_to_json(argv, "--Filters"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeClustersRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeClusters(model)
    result = rsp.to_json_string()
    jsonobj = None
    try:
        jsonobj = json.loads(result)
    except TypeError as e:
        # Older Python 3 json.loads rejects bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteClusterInstances(argv, arglist):
    """CLI handler for the TKE ``DeleteClusterInstances`` API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteClusterInstances", g_param[OptionsDefine.Version])
        return
    # Action parameters collected from "--Flag" CLI arguments.
    param = {
        "ClusterId": Utils.try_to_json(argv, "--ClusterId"),
        "InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
        "InstanceDeleteMode": Utils.try_to_json(argv, "--InstanceDeleteMode"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteClusterInstancesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DeleteClusterInstances(model)
    result = rsp.to_json_string()
    jsonobj = None
    try:
        jsonobj = json.loads(result)
    except TypeError as e:
        # Older Python 3 json.loads rejects bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeClusterInstances(argv, arglist):
    """CLI handler for the TKE ``DescribeClusterInstances`` API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeClusterInstances", g_param[OptionsDefine.Version])
        return
    # Action parameters collected from "--Flag" CLI arguments.
    param = {
        "ClusterId": Utils.try_to_json(argv, "--ClusterId"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.TkeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeClusterInstancesRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeClusterInstances(model)
    result = rsp.to_json_string()
    jsonobj = None
    try:
        jsonobj = json.loads(result)
    except TypeError as e:
        # Older Python 3 json.loads rejects bytes; decode first.
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# API-version tag -> SDK client module.
CLIENT_MAP = {
    "v20180525": tke_client_v20180525,
}
# API-version tag -> SDK request/response models module.
MODELS_MAP = {
    "v20180525": models_v20180525,
}
# Action name -> handler function defined above.
ACTION_MAP = {
    "AddExistedInstances": doAddExistedInstances,
    "DescribeClusters": doDescribeClusters,
    "DeleteClusterInstances": doDeleteClusterInstances,
    "DescribeClusterInstances": doDescribeClusterInstances,
}
AVAILABLE_VERSION_LIST = [
    v20180525.version,
]
# Normalized version key ("v20180525") -> help metadata for that version.
AVAILABLE_VERSIONS = {
    'v' + v20180525.version.replace('-', ''): {"help": v20180525_help.INFO,"desc": v20180525_help.DESC},
}
def tke_action(argv, arglist):
    """Top-level handler for ``tccli tke``: prints service help or an error."""
    if "help" in argv:
        versions = sorted(AVAILABLE_VERSIONS.keys())
        opt_v = "--" + OptionsDefine.Version
        # Default to the newest available version unless --version is given.
        version = versions[-1]
        if opt_v in argv:
            version = 'v' + argv[opt_v].replace('-', '')
        if version not in versions:
            print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
            return
        # Build the per-action summary list for the service help template.
        action_str = ""
        docs = AVAILABLE_VERSIONS[version]["help"]
        desc = AVAILABLE_VERSIONS[version]["desc"]
        for action, info in docs.items():
            action_str += " %s\n" % action
            action_str += Utils.split_str(" ", info["desc"], 120)
        helpstr = HelpTemplate.SERVICE % {"name": "tke", "desc": desc, "actions": action_str}
        print(helpstr)
    else:
        print(ErrorMsg.FEW_ARG)
def version_merge():
    """Merge help metadata across all API versions.

    Returns a dict mapping each action name to its handler ("cb") and the
    union of its parameter names ("params"), preserving first-seen order.
    """
    merged = {}
    for version in AVAILABLE_VERSIONS:
        version_help = AVAILABLE_VERSIONS[version]["help"]
        for action_name in version_help:
            entry = merged.setdefault(
                action_name, {"cb": ACTION_MAP[action_name], "params": []}
            )
            for param_doc in version_help[action_name]["params"]:
                if param_doc["name"] not in entry["params"]:
                    entry["params"].append(param_doc["name"])
    return merged
def register_arg(command):
    """Register the ``tke`` command and one sub-command per API action."""
    cmd = NiceCommand("tke", tke_action)
    command.reg_cmd(cmd)
    cmd.reg_opt("help", "bool")
    cmd.reg_opt(OptionsDefine.Version, "string")
    # One sub-command per action, merged across all API versions.
    help_merge = version_merge()
    for actionName, action in help_merge.items():
        c = NiceCommand(actionName, action["cb"])
        cmd.reg_cmd(c)
        c.reg_opt("help", "bool")
        # Action-specific "--Param" flags.
        for param in action["params"]:
            c.reg_opt("--" + param, "string")
        # Global options (region, output, ...) valid on every action.
        for opt in OptionsDefine.ACTION_GLOBAL_OPT:
            stropt = "--" + opt
            c.reg_opt(stropt, "string")
def parse_global_arg(argv):
    """Resolve global options from argv with profile-file fallbacks.

    Precedence: explicit CLI flag > credential/config profile file. Raises
    if credentials, region, or a usable API version cannot be resolved.
    """
    params = {}
    # Start from the raw CLI values (None when a flag is absent).
    for opt in OptionsDefine.ACTION_GLOBAL_OPT:
        stropt = "--" + opt
        if stropt in argv:
            params[opt] = argv[stropt]
        else:
            params[opt] = None
    if params[OptionsDefine.Version]:
        params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
    config_handle = Configure()
    profile = config_handle.profile
    if ("--" + OptionsDefine.Profile) in argv:
        profile = argv[("--" + OptionsDefine.Profile)]
    # Load the profile's configure and credential files if they exist.
    is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
    is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
    config = {}
    cred = {}
    if is_conexist:
        config = config_handle._load_json_msg(conf_path)
    if is_creexist:
        cred = config_handle._load_json_msg(cred_path)
    # Fill missing values: secrets from the credential file (mandatory),
    # everything else from the config file (region mandatory).
    for param in params.keys():
        if param == OptionsDefine.Version:
            continue
        if params[param] is None:
            if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
                if param in cred:
                    params[param] = cred[param]
                else:
                    raise Exception("%s is invalid" % param)
            else:
                if param in config:
                    params[param] = config[param]
                elif param == OptionsDefine.Region:
                    raise Exception("%s is invalid" % OptionsDefine.Region)
    # Version/endpoint default to the service section of the config file.
    try:
        if params[OptionsDefine.Version] is None:
            version = config["tke"][OptionsDefine.Version]
            params[OptionsDefine.Version] = "v" + version.replace('-', '')
        if params[OptionsDefine.Endpoint] is None:
            params[OptionsDefine.Endpoint] = config["tke"][OptionsDefine.Endpoint]
    except Exception as err:
        raise Exception("config file:%s error, %s" % (conf_path, str(err)))
    versions = sorted(AVAILABLE_VERSIONS.keys())
    if params[OptionsDefine.Version] not in versions:
        raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
    return params
def show_help(action, version):
    """Print the formatted help text for one action of one API version."""
    version_info = AVAILABLE_VERSIONS[version]
    action_docs = version_info["help"][action]
    # Assemble the per-parameter documentation lines.
    parts = []
    for param in action_docs["params"]:
        parts.append(" %s\n" % ("--" + param["name"]))
        parts.append(Utils.split_str(" ", param["desc"], 120))
    print(HelpTemplate.ACTION % {
        "name": action,
        "service": "tke",
        "desc": version_info["desc"],
        "params": "".join(parts),
    })
def get_actions_info():
    """Return the help metadata for the configured (or newest) API version."""
    config = Configure()
    new_version = max(AVAILABLE_VERSIONS.keys())
    version = new_version
    try:
        profile = config._load_json_msg(os.path.join(config.cli_path, "default.configure"))
        version = profile["tke"]["version"]
        version = "v" + version.replace('-', '')
    except Exception:
        # Deliberate best-effort: any problem reading the profile falls back
        # to the newest built-in version.
        pass
    if version not in AVAILABLE_VERSIONS.keys():
        version = new_version
    return AVAILABLE_VERSIONS[version]["help"]
| [
"tencentcloudapi@tencent.com"
] | tencentcloudapi@tencent.com |
3b42e213c226cfb3959f04faaf51a1e274f60e8a | 2e09400c3166eb00c87139941a18ae2e6c413d12 | /__manifest__.py | 5ce43cab388c0bbc592df5b936b7c2c4d83d426b | [] | no_license | claudm/dw_sale | 87d204e30c2ea230ea5dba4ce90516e488c6b269 | 701f108b5b27a418a9193a539629af723ca02323 | refs/heads/master | 2023-03-16T11:53:42.884976 | 2018-10-30T17:11:13 | 2018-10-30T17:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
{
'name': "DW Sale",
'license': 'AGPL-3', # TODO: Verificar se a licensa desejada será essa
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Everton Silva",
# 'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/odoo/addons/base/module/module_data.xml
# for the full list
'category': 'Vendas',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'sale',],
# always loaded
'data': [
'data/cron.xml',
'views/views.xml',
# 'security/ir.model.access.csv',
'report/dw_report_views.xml',
# 'report/dw_report.xml',
# 'report/dw_report_templates.xml',
# 'views/templates.xml',
],
# only loaded in demonstration mode
'demo': [
# 'demo/demo.xml',
],
'installable': True,
'application': True,
} | [
"evertonalves92@gmail.com"
] | evertonalves92@gmail.com |
7a885c6b840ef4f4c81ca95bc7f26f7114261289 | a0d90c0c4c1dde93b656d06c2d1bebd526fae7db | /text_classifier2.py | 9ba93dbeee3448c4e3c4b505df8f5ab974dea7f2 | [] | no_license | vic-ayres/ArticleProject | a1be834d920fe65cebf8c9b473ada2860c5ca0ab | 706d69730843fabcfa71b4c93a225b0138634da2 | refs/heads/master | 2020-06-10T04:35:32.457031 | 2019-06-24T22:28:52 | 2019-06-24T22:28:52 | 193,583,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | import numpy as np
import load_data as load
import spacy_tokenizer as st
import word_embeddings as emb
from gensim.models import KeyedVectors
from confusion_matrix import ConfusionMatrix
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, accuracy_score
import sys
# Load in the data from annotated articles
articles, labels = load.load_data()
print("Gathered vaild Data...")
print()
# Tokenizes each article and saves them in "docs"
# Remove puncutations, numbers, and whitespace
docs = []
for a in articles:
    doc = st.tokenize(a)
    docs.append(doc)
# Each tokenized document is associated with the
# average of each word vector in the document
# (pre-trained vectors loaded from "vectors.kv", memory-mapped read-only).
word_embedding_list = []
word_vectors = KeyedVectors.load('vectors.kv', mmap='r')
for d in docs:
    vector = emb.word_embedding(word_vectors, d)
    word_embedding_list.append(vector)
# Debug output: type and number of document embeddings.
print(type(word_embedding_list))
print(len(word_embedding_list))
print()
# Splits the annotated data into training, developing, and testing data
# (90/10, then the 90% split again 90/10 into train/dev).
# Stratifies them based on the labels
x, x_test, y, y_test = train_test_split(np.array(word_embedding_list), labels,
                                        test_size=0.1,
                                        stratify = labels)
x_train, x_dev, y_train, y_dev = train_test_split(x,y,
                                        test_size=0.1,
                                        stratify = y)
print("Data split to train, develop, and test...")
print()
# Oversamples the training data using SMOTE (currently disabled)
'''smote = SMOTE(sampling_strategy = 'minority')
xtr, ytr = smote.fit_resample(x_train, y_train)
print('Oversamples data using SMOTE...')'''
# Create and train a multinomial logistic regression classifier
# (original comment said "Naive Bayes", but LogisticRegression is used).
clf = LogisticRegression(solver='lbfgs',
                         multi_class='multinomial')
clf.fit(x_train, y_train)
print("Classifier trained...")
print()
# Evaluate the predictive accuracy on the dev split
predicted = clf.predict(x_dev)
print("Mean Accuracy: " + str(np.mean(predicted == y_dev)))
print()
# Prints out the confusion matrix
# NOTE: this rebinds "labels" (previously the per-article label list).
labels = ['fp_ic', 'fp_terr', 'fp_other', 'dom', 'notapp']
cm = ConfusionMatrix(labels, y_dev, predicted)
cm.print_matrix()
print("Actual: " + str(cm.count_actual()))
print("Predicted: "+ str(cm.count_predicted()))
# Prints the classification report
print(classification_report(y_dev, predicted, target_names=labels))
| [
"victoriaayresibarra@gmail.com"
] | victoriaayresibarra@gmail.com |
ff8c5303dc7f579180a40c27ff0d8d164a3e8dca | 922e43e0db6abc3b8852b1c4b1f88b1118c724b4 | /ecommerce/backend/views.py | 260bfb550773181bffce41503e9fb6e74cad7620 | [] | no_license | Lamduy0807/EcommerceBackend | fb862cf856a27da24d8a6787f8dd3446dd78c057 | 1559e45d5cd084b503facf0adc3291d27ff84d54 | refs/heads/main | 2023-09-02T18:56:32.362435 | 2021-10-20T14:16:37 | 2021-10-20T14:16:37 | 413,791,163 | 0 | 0 | null | 2021-10-05T11:38:08 | 2021-10-05T11:34:17 | null | UTF-8 | Python | false | false | 1,333 | py | from django.contrib.auth.models import Permission
from django.shortcuts import render
from rest_framework import serializers, viewsets, permissions, generics
from rest_framework.response import Response
from rest_framework import status
from .models import *
from .serializers import *
from rest_framework.parsers import MultiPartParser
from rest_framework.decorators import action
# Create your views here.
class ProductViewSet(viewsets.ModelViewSet):
    # CRUD API for products; only rows with IsActive=True are exposed.
    queryset = Product.objects.filter(IsActive=True)
    serializer_class = ProductSerializer
class CategoryViewSet(viewsets.ModelViewSet):
    # CRUD API for product categories.
    queryset = ProductCategory.objects.all()
    serializer_class = ProductCategorySerializer
class ProductImageViewSet(viewsets.ModelViewSet):
    # CRUD API for product images.
    queryset = ProductImage.objects.all()
    serializer_class = ProductImageSerializer
class RatingViewSet(viewsets.ModelViewSet):
    # CRUD API for product ratings.
    queryset = Rating.objects.all()
    serializer_class = RatingSerializer
class LoveListViewSet(viewsets.ModelViewSet):
    # CRUD API for users' wish/love lists.
    queryset = LoveList.objects.all()
    serializer_class = LoveListSerializer
class TagViewSet(viewsets.ModelViewSet):
    # CRUD API for ingredient tags.
    queryset = IngredientsTag.objects.all()
    serializer_class = TagSerializer
class IngredientsViewSet(viewsets.ModelViewSet):
    # CRUD API for ingredients.
    queryset = Ingredients.objects.all()
    serializer_class = IngredientSerializer
"19521421@gm.uit.edu.vn"
] | 19521421@gm.uit.edu.vn |
d02c7802cac3338856b1ad9f56d833cb3b40e76f | 3cce7f1d06238ad48978955b8e8d456eef1fdc12 | /trainModel.py | 3e4b9abe28dbf342f5963a2c20bc6258c3feada2 | [
"MIT"
] | permissive | simon27-0/TA2_2021_Kelompok12 | ba44a4aa3913bf60116c343a970e9fa0fd2f7f7d | f35d7a1eccf24c9d050185fefb0cedc7e724a2e3 | refs/heads/main | 2023-07-01T10:40:48.952942 | 2021-08-09T05:31:41 | 2021-08-09T05:31:41 | 378,032,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,316 | py | # Code ini untuk train model yang sudah ada pada dataset
# USAGE
# import the necessary packages
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
    help="path to input dataset")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
    help="path to output loss/accuracy plot")
ap.add_argument("-m", "--model", type=str,
    default="mask_detector.model",
    help="path to output face mask detector model")
args = vars(ap.parse_args())
# initialize the initial learning rate, number of epochs to train for,
# and batch size
INIT_LR = 1e-4
EPOCHS = 20
BS = 32
# grab the list of images in our dataset directory, then initialize
# the list of data (i.e., images) and class images
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
data = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
    # extract the class label from the filename
    # (assumes a dataset/<label>/<image> directory layout -- the label is
    # the parent directory name)
    label = imagePath.split(os.path.sep)[-2]
    # load the input image (224x224) and preprocess it
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)
    # mobilenet_v2.preprocess_input scales pixels for MobileNetV2
    image = preprocess_input(image)
    # update the data and labels lists, respectively
    data.append(image)
    labels.append(label)
# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
# perform one-hot encoding on the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
# NOTE(review): test_size=0.20 actually gives an 80/20 split, not 75/25
# as the comment above says -- confirm which is intended.
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.20, stratify=labels, random_state=42)
# construct the training image generator for data augmentation
aug = ImageDataGenerator(
    rotation_range=20,
    zoom_range=0.15,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest")
# load the MobileNetV2 network, ensuring the head FC layer sets are
# left off
baseModel = MobileNetV2(weights="imagenet", include_top=False,
    input_tensor=Input(shape=(224, 224, 3)))
# construct the head of the model that will be placed on top of the
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
# two output classes: with mask / without mask
headModel = Dense(2, activation="softmax")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
    layer.trainable = False
# compile our model
print("[INFO] compiling model...")
# ``lr``/``decay`` are the legacy Keras optimizer argument names
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])
# train the head of the network
print("[INFO] training head...")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS)
# make predictions on the testing set
print("[INFO] evaluating network...")
predIdxs = model.predict(testX, batch_size=BS)
# for each image in the testing set we need to find the index of the
# label with corresponding largest predicted probability
predIdxs = np.argmax(predIdxs, axis=1)
# show a nicely formatted classification report
print(classification_report(testY.argmax(axis=1), predIdxs,
    target_names=lb.classes_))
# serialize the model to disk
print("[INFO] saving mask detector model...")
model.save(args["model"], save_format="h5")
# plot the training loss and accuracy
N = EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["accuracy"], label="train_accuracy")
plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_accuracy")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
| [
"noreply@github.com"
] | simon27-0.noreply@github.com |
86e6a118b49dcff4397daafa09388751e0ee8c56 | 6ff29687d43202757565521fc2a223a616a3e2b6 | /else/天马杯--AI+z智能质检/code/run_c3.py | f94be65e07053fa528c493f6721db87a0afc3d62 | [] | no_license | xiaoyusmd/data-science-competition | e0964398749988e31094b6fc66871cb33fe0a230 | faf7aad6a90d843b61e9ec8a0d41702b17c53a51 | refs/heads/main | 2023-05-31T16:14:36.174908 | 2021-06-20T04:22:13 | 2021-06-20T04:22:13 | 413,094,887 | 1 | 1 | null | 2021-10-03T14:04:57 | 2021-10-03T14:04:57 | null | UTF-8 | Python | false | false | 10,671 | py | """
This script provides an exmaple to wrap UER-py for C3 (multiple choice dataset).
"""
import argparse
import json
import torch
import random
import torch.nn as nn
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils.tokenizer import *
from uer.layers.embeddings import *
from uer.encoders.bert_encoder import *
from uer.encoders.rnn_encoder import *
from uer.encoders.birnn_encoder import *
from uer.encoders.cnn_encoder import *
from uer.encoders.attn_encoder import *
from uer.encoders.gpt_encoder import *
from uer.encoders.mixed_encoder import *
from uer.utils.optimizers import *
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_saver import save_model
from run_classifier import build_optimizer, load_or_initialize_parameters, train_model, batch_loader, evaluate
class MultipleChoice(nn.Module):
    """Multiple-choice model: scores every candidate answer with a shared
    encoder and produces one logit per choice."""
    def __init__(self, args):
        super(MultipleChoice, self).__init__()
        # Embedding/encoder classes are resolved by name from the config,
        # e.g. args.encoder == "bert" -> BertEncoder.
        self.embedding = globals()[args.embedding.capitalize() + "Embedding"](args, len(args.tokenizer.vocab))
        self.encoder = globals()[args.encoder.capitalize() + "Encoder"](args)
        self.dropout = nn.Dropout(args.dropout)
        # One scalar score per choice, read from the first token position.
        self.output_layer = nn.Linear(args.hidden_size, 1)
    def forward(self, src, tgt, seg, soft_tgt=None):
        """
        Args:
            src: [batch_size x choices_num x seq_length]
            tgt: [batch_size]
            seg: [batch_size x choices_num x seq_length]
        Returns:
            (loss, logits) where logits is [batch_size x choices_num];
            loss is None when tgt is None (inference).
        """
        choices_num = src.shape[1]
        # Fold choices into the batch dimension so the encoder sees
        # [batch_size * choices_num, seq_length].
        src = src.view(-1, src.size(-1))
        seg = seg.view(-1, seg.size(-1))
        # Embedding.
        emb = self.embedding(src, seg)
        # Encoder.
        output = self.encoder(emb, seg)
        output = self.dropout(output)
        # Score each sequence from its position-0 ([CLS]) representation.
        logits = self.output_layer(output[:, 0, :])
        reshaped_logits = logits.view(-1, choices_num)
        if tgt is not None:
            # Cross-entropy over the choices (LogSoftmax + NLLLoss).
            loss = nn.NLLLoss()(nn.LogSoftmax(dim=-1)(reshaped_logits), tgt.view(-1))
            return loss, reshaped_logits
        else:
            return None, reshaped_logits
def read_dataset(args, path):
    """Read a C3-format JSON file and build (src, tgt, seg) examples.

    Each candidate answer is packed with the question and the passage as
    ``[CLS] choice [SEP] question [SEP] passage [SEP]``, truncated/padded
    to ``args.seq_length``.

    Args:
        args: namespace providing ``tokenizer``, ``max_choices_num`` and
            ``seq_length``.
        path: path to the JSON dataset file.

    Returns:
        List of ``(src, tgt, seg)`` tuples where ``src``/``seg`` hold
        ``max_choices_num`` token/segment id lists and ``tgt`` is the
        gold-choice index (0 when no answer is present, e.g. test data).
    """
    with open(path, mode="r", encoding="utf-8") as f:
        data = json.load(f)
    # Index of the gold answer inside each flattened example:
    # [passage, question, choice_0 ... choice_{max_choices_num-1}, answer]
    answer_idx = 2 + args.max_choices_num
    examples = []
    for i in range(len(data)):
        for j in range(len(data[i][1])):
            example = ['\n'.join(data[i][0]).lower(), data[i][1][j]["question"].lower()]
            for k in range(len(data[i][1][j]["choice"])):
                example += [data[i][1][j]["choice"][k].lower()]
            # Pad the candidate list with a dummy option.
            for k in range(len(data[i][1][j]["choice"]), args.max_choices_num):
                example += ["No Answer"]
            example += [data[i][1][j].get("answer", "").lower()]
            examples += [example]
    dataset = []
    for i, example in enumerate(examples):
        tgt = 0
        for k in range(args.max_choices_num):
            # BUG FIX: the answer used to be looked up at hard-coded index
            # 6, which is only correct when max_choices_num == 4.
            if example[2 + k] == example[answer_idx]:
                tgt = k
        dataset.append(([], tgt, []))
        for k in range(args.max_choices_num):
            src_a = args.tokenizer.convert_tokens_to_ids([CLS_TOKEN] + args.tokenizer.tokenize(example[k + 2]) + [SEP_TOKEN])
            src_b = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[1]) + [SEP_TOKEN])
            src_c = args.tokenizer.convert_tokens_to_ids(args.tokenizer.tokenize(example[0]) + [SEP_TOKEN])
            src = src_a + src_b + src_c
            # Segment 1 covers choice+question, segment 2 covers the passage.
            seg = [1] * (len(src_a) + len(src_b)) + [2] * len(src_c)
            if len(src) > args.seq_length:
                src = src[:args.seq_length]
                seg = seg[:args.seq_length]
            while len(src) < args.seq_length:
                src.append(0)
                seg.append(0)
            dataset[-1][0].append(src)
            dataset[-1][2].append(seg)
    return dataset
def main():
    """CLI entry point: parse arguments, fine-tune the multiple-choice
    model on the train set, keep the checkpoint with the best dev score,
    and optionally evaluate on the test set."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Path options.
    parser.add_argument("--pretrained_model_path", default=None, type=str,
                        help="Path of the pretrained model.")
    parser.add_argument("--output_model_path", default="./models/multichoice_model.bin", type=str,
                        help="Path of the output model.")
    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    parser.add_argument("--train_path", type=str, required=True,
                        help="Path of the trainset.")
    parser.add_argument("--dev_path", type=str, required=True,
                        help="Path of the devset.")
    parser.add_argument("--test_path", type=str,
                        help="Path of the testset.")
    parser.add_argument("--config_path", default="./models/bert_base_config.json", type=str,
                        help="Path of the config file.")
    # Model options.
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=512,
                        help="Sequence length.")
    parser.add_argument("--embedding", choices=["bert", "word"], default="bert",
                        help="Emebdding type.")
    parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
                                              "cnn", "gatedcnn", "attn", "synt", \
                                              "rcnn", "crnn", "gpt", "bilstm"], \
                                              default="bert", help="Encoder type.")
    parser.add_argument("--bidirectional", action="store_true", help="Specific to recurrent model.")
    parser.add_argument("--factorized_embedding_parameterization", action="store_true", help="Factorized embedding parameterization.")
    parser.add_argument("--parameter_sharing", action="store_true", help="Parameter sharing.")
    parser.add_argument("--max_choices_num", default=4, type=int,
                        help="The maximum number of cadicate answer, shorter than this will be padded.")
    # Tokenizer options.
    parser.add_argument("--tokenizer", choices=["bert", "char", "space"], default="bert",
                        help="Specify the tokenizer."
                             "Original Google BERT uses bert tokenizer on Chinese corpus."
                             "Char tokenizer segments sentences into characters."
                             "Space tokenizer segments sentences into words according to space."
                        )
    # Optimizer options.
    parser.add_argument("--learning_rate", type=float, default=2e-5,
                        help="Learning rate.")
    parser.add_argument("--warmup", type=float, default=0.1,
                        help="Warm up value.")
    parser.add_argument("--fp16", action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit.")
    parser.add_argument("--fp16_opt_level", choices=["O0", "O1", "O2", "O3" ], default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    # Training options.
    parser.add_argument("--dropout", type=float, default=0.2,
                        help="Dropout.")
    parser.add_argument("--epochs_num", type=int, default=8,
                        help="Number of epochs.")
    parser.add_argument("--report_steps", type=int, default=100,
                        help="Specific steps to print prompt.")
    parser.add_argument("--seed", type=int, default=7,
                        help="Random seed.")
    args = parser.parse_args()
    # The classification head has one logit per candidate choice.
    args.labels_num = args.max_choices_num
    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)
    set_seed(args.seed)
    # Build tokenizer.
    # The concrete class is resolved by name, e.g. "bert" -> BertTokenizer.
    args.tokenizer = globals()[args.tokenizer.capitalize() + "Tokenizer"](args)
    # Build multiple choice model.
    model = MultipleChoice(args)
    # Load or initialize parameters.
    load_or_initialize_parameters(args, model)
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(args.device)
    # Training phase.
    trainset = read_dataset(args, args.train_path)
    random.shuffle(trainset)
    instances_num = len(trainset)
    batch_size = args.batch_size
    # Stack the per-example (src, tgt, seg) lists into whole-dataset tensors.
    src = torch.LongTensor([example[0] for example in trainset])
    tgt = torch.LongTensor([example[1] for example in trainset])
    seg = torch.LongTensor([example[2] for example in trainset])
    args.train_steps = int(instances_num * args.epochs_num / batch_size) + 1
    print("Batch size: ", batch_size)
    print("The number of training instances:", instances_num)
    optimizer, scheduler = build_optimizer(args, model)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level = args.fp16_opt_level)
        args.amp = amp
    if torch.cuda.device_count() > 1:
        print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    args.model = model
    total_loss, result, best_result = 0., 0., 0.
    print("Start training.")
    for epoch in range(1, args.epochs_num+1):
        model.train()
        for i, (src_batch, tgt_batch, seg_batch, _) in enumerate(batch_loader(batch_size, src, tgt, seg)):
            loss = train_model(args, model, optimizer, scheduler, src_batch, tgt_batch, seg_batch)
            total_loss += loss.item()
            if (i + 1) % args.report_steps == 0:
                print("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(epoch, i+1, total_loss / args.report_steps))
                total_loss = 0.
        # Save only the checkpoint with the best dev result[0] so far.
        result = evaluate(args, read_dataset(args, args.dev_path))
        if result[0] > best_result:
            best_result = result[0]
            save_model(model, args.output_model_path)
    # Evaluation phase.
    if args.test_path is not None:
        print("Test set evaluation.")
        # Reload the best checkpoint; DataParallel wraps the model in .module.
        if torch.cuda.device_count() > 1:
            model.module.load_state_dict(torch.load(args.output_model_path))
        else:
            model.load_state_dict(torch.load(args.output_model_path))
        evaluate(args, read_dataset(args, args.test_path))
if __name__ == "__main__":
main()
| [
"39004676+DLLXW@users.noreply.github.com"
] | 39004676+DLLXW@users.noreply.github.com |
21acfdfebbfa573c6f03151a2582739a309134ee | d6f7d4f71b4ce5204a17e3ef7a21f495e45a44ce | /Unique Integers in Sorted List.py | f81e2a91f47e1b9c7b83c7903663a189c5d4b05f | [] | no_license | zoskar/Binary_search | de43168105688ded7b58e11963bdcce57f83b8af | 532e67b074c478a5af0f23a644e79043e89ed0c5 | refs/heads/master | 2023-06-04T08:13:03.897436 | 2021-06-20T12:24:29 | 2021-06-20T12:24:29 | 356,622,345 | 0 | 0 | null | 2021-06-20T12:24:29 | 2021-04-10T15:29:43 | Python | UTF-8 | Python | false | false | 74 | py | class Solution:
def solve(self, nums):
return len(set(nums))
| [
"zoskar00@gmail.com"
] | zoskar00@gmail.com |
263dbf5122189f82d269deadb89ecd37c4e4936c | 76003819bb5dd0fea7c6cc685d8263354bd94681 | /dask-gateway-server/dask_gateway_server/options.py | 15b413843a528c8e966258a3f4adce0b899c7fb0 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | ydataai/dask-gateway | e9eada4056b059301ee869d25df63c900079193d | 75ff9453c97ae42a909ed9921e6eae99eea8bc75 | refs/heads/main | 2023-06-09T22:35:52.116925 | 2021-06-28T14:59:48 | 2021-06-28T14:59:48 | 380,277,835 | 2 | 0 | BSD-3-Clause | 2021-06-28T14:59:49 | 2021-06-25T15:19:59 | Python | UTF-8 | Python | false | false | 11,048 | py | import copy
import inspect
import textwrap
from collections import OrderedDict
from collections.abc import Sequence
from .utils import FrozenAttrDict
__all__ = ("Options", "String", "Bool", "Integer", "Float", "Select", "Mapping")
class Options(object):
    """A declarative specification of exposed cluster options.
    Parameters
    ----------
    *fields : Field
        Zero or more configurable fields.
    handler : callable, optional
        A callable with the signature ``handler(options)`` or
        ``handler(options, user)``, where ``options`` is the validated dict of
        user options, and ``user`` is a ``User`` model for that user. Should
        return a dict of configuration overrides to forward to the cluster
        manager. If not provided, the default will return the options
        unchanged.
    Example
    -------
    Here we expose options for users to configure
    :data:`c.Backend.worker_cores` and
    :data:`c.Backend.worker_memory`. We set bounds on each resource to
    prevent users from requesting too large of a worker. The handler is used to
    convert the user specified memory from GiB to bytes (as expected by
    :data:`c.Backend.worker_memory`).
    .. code-block:: python
        from dask_gateway_server.options import Options, Integer, Float
        def options_handler(options):
            return {
                "worker_cores": options.worker_cores,
                "worker_memory": int(options.worker_memory * 2 ** 30)
            }
        c.Backend.DaskGateway.cluster_options = Options(
            Integer("worker_cores", default=1, min=1, max=4, label="Worker Cores"),
            Float("worker_memory", default=1, min=1, max=8, label="Worker Memory (GiB)"),
            handler=options_handler,
        )
    """
    def __init__(self, *fields, handler=None):
        for f in fields:
            if not isinstance(f, Field):
                # Fixed error-message typo: "must by" -> "must be".
                raise TypeError(
                    "All fields must be instances of ``Field``, got %r"
                    % type(f).__name__
                )
        self.fields = fields
        self.handler = handler
    @property
    def handler(self):
        return self._handler
    @handler.setter
    def handler(self, handler):
        # Normalize the handler so it always accepts (options, user):
        # a callable with exactly one parameter (and no *args) is wrapped,
        # making both supported signatures behave the same downstream.
        if handler is None:
            self._handler = None
        else:
            sig = inspect.signature(handler)
            if len(sig.parameters) == 1 and not any(
                a.kind == inspect.Parameter.VAR_POSITIONAL
                for a in sig.parameters.values()
            ):
                self._handler = lambda options, user: handler(options)
            else:
                self._handler = handler
    def get_specification(self):
        """Return the JSON-serializable specification for all fields."""
        return [f.json_spec() for f in self.fields]
    def parse_options(self, request):
        """Validate a user-supplied options dict, filling in defaults."""
        if not isinstance(request, dict):
            raise TypeError("options must be a dict, got %r" % type(request).__name__)
        # Check for extra fields
        extra = set(request).difference(f.field for f in self.fields)
        if extra:
            raise ValueError("Unknown fields %r" % sorted(extra))
        # Validate options
        return {
            f.field: f.validate(request.get(f.field, f.get_default()))
            for f in self.fields
        }
    def transform_options(self, options):
        """Map validated options onto their targets, applying transforms."""
        return {
            f.target: f.transform(options.get(f.field, f.get_default()))
            for f in self.fields
        }
    def get_configuration(self, options, user):
        """Produce the final configuration overrides for the backend."""
        options = self.transform_options(options)
        if self.handler is None:
            return options
        return self.handler(FrozenAttrDict(options), user)
_field_doc_template = """\
{description}
Parameters
----------
field : str
The field name to use. Must be a valid Python variable name. This will
be the keyword users use to set this field programmatically (e.g.
``"worker_cores"``).
{params}
label : str, optional
A human readable label that will be used in GUI representations (e.g.
``"Worker Cores"``). If not provided, ``field`` will be used.
target : str, optional
The target parameter to set in the processed options dict. Must be a
valid Python variable name. If not provided, ``field`` will be used.
"""
def field_doc(description, params):
    """Build a class decorator that installs a numpydoc-style docstring.

    ``description`` and the dedented ``params`` block are substituted into
    ``_field_doc_template`` and assigned to the decorated class's __doc__.
    """
    cleaned_params = textwrap.dedent(params.strip("\n")).strip("\n")
    def decorate(cls):
        cls.__doc__ = _field_doc_template.format(
            description=description,
            params=cleaned_params,
        )
        return cls
    return decorate
class Field(object):
    """Base class for a single user-configurable option field."""
    def __init__(self, field, default=None, target=None, label=None):
        self.field = field
        # Validate the default
        self.validate(default)
        self.default = default
        # ``target`` and ``label`` fall back to the field name when unset.
        self.target = target or field
        self.label = label or field
    def json_spec(self):
        # Full JSON description of this field, used by the options API.
        return {
            "field": self.field,
            "label": self.label,
            "default": self.default,
            "spec": self.json_type_spec(),
        }
    def json_type_spec(self):
        # Type-specific part of the JSON spec; subclasses must override.
        raise NotImplementedError
    def get_default(self):
        return self.default
    def validate(self, x):
        """Check that x is valid, and do any normalization.
        The output of this method must be serializable as json."""
        raise NotImplementedError
    def transform(self, x):
        """Transform a valid x into the desired output type.
        This may return any Python object."""
        return x
@field_doc(
    description="A string field.",
    params="""
    default : str, optional
        The default value. Default is the empty string (``""``).
    """,
)
class String(Field):
    def __init__(self, field, default="", label=None, target=None):
        super().__init__(field, default=default, label=label, target=target)
    def validate(self, x):
        # Guard-clause form: accept strings, reject everything else.
        if isinstance(x, str):
            return x
        raise TypeError("%s must be a string, got %r" % (self.field, x))
    def json_type_spec(self):
        return dict(type="string")
@field_doc(
    description="A boolean field.",
    params="""
    default : bool, optional
        The default value. Default is False.
    """,
)
class Bool(Field):
    def __init__(self, field, default=False, label=None, target=None):
        super().__init__(field, default=default, label=label, target=target)
    def validate(self, x):
        # Only genuine bools pass (plain ints are rejected).
        if isinstance(x, bool):
            return x
        raise TypeError("%s must be a bool, got %r" % (self.field, x))
    def json_type_spec(self):
        return dict(type="bool")
class Number(Field):
    # Shared base for numeric fields with optional inclusive [min, max]
    # bounds; subclasses (Integer/Float) add the type check.
    def __init__(self, field, default=0, min=None, max=None, label=None, target=None):
        # Temporarily set to allow `validate` to work
        # (validate reads self.min/self.max, so both must exist before the
        # bound values themselves are validated just below).
        self.min = self.max = None
        if min is not None:
            self.min = self.validate(min)
        if max is not None:
            self.max = self.validate(max)
        super().__init__(field, default=default, label=label, target=target)
    def validate(self, x):
        # Bounds are inclusive; a None bound means unbounded on that side.
        if self.min is not None and x < self.min:
            raise ValueError("%s must be >= %f, got %s" % (self.field, self.min, x))
        if self.max is not None and x > self.max:
            raise ValueError("%s must be <= %f, got %s" % (self.field, self.max, x))
        return x
@field_doc(
    description="An integer field, with optional bounds.",
    params="""
    default : int, optional
        The default value. Default is 0.
    min : int, optional
        The minimum valid value (inclusive). Unbounded if not set.
    max : int, optional
        The maximum valid value (inclusive). Unbounded if not set.
    """,
)
class Integer(Number):
    def validate(self, x):
        # Type-check first, then delegate range checking to Number.
        if isinstance(x, int):
            return super().validate(x)
        raise TypeError("%s must be an integer, got %r" % (self.field, x))
    def json_type_spec(self):
        return dict(type="int", min=self.min, max=self.max)
@field_doc(
    description="A float field, with optional bounds.",
    params="""
    default : float, optional
        The default value. Default is 0.
    min : float, optional
        The minimum valid value (inclusive). Unbounded if not set.
    max : float, optional
        The maximum valid value (inclusive). Unbounded if not set.
    """,
)
class Float(Number):
    def validate(self, x):
        # Promote ints so callers may pass either numeric type.
        if isinstance(x, int):
            x = float(x)
        if isinstance(x, float):
            return super().validate(x)
        raise TypeError("%s must be a float, got %r" % (self.field, x))
    def json_type_spec(self):
        return dict(type="float", min=self.min, max=self.max)
@field_doc(
    description="A select field, allowing users to select between a few choices.",
    params="""
    options : list
        A list of valid options. Elements may be a tuple of ``(key, value)``,
        or just ``key`` (in which case the value is the same as the key).
        Values may be any Python object, keys must be strings.
    default : str, optional
        The key for the default option. Defaults to the first listed option.
    """,
)
class Select(Field):
    def __init__(self, field, options, default=None, label=None, target=None):
        if not isinstance(options, Sequence):
            raise TypeError("options must be a sequence")
        if not len(options):
            raise ValueError("There must be at least one option")
        # Normalize to an ordered key -> value mapping; bare keys map to
        # themselves.
        options_map = OrderedDict()
        for entry in options:
            if isinstance(entry, tuple):
                key, value = entry
            else:
                key = value = entry
            if not isinstance(key, str):
                raise TypeError("Select keys must be strings, got %r" % key)
            options_map[key] = value
        if default is None:
            default = next(iter(options_map))
        self.options = options_map
        super().__init__(field, default=default, label=label, target=target)
    def validate(self, x):
        # The type check must come first so non-strings raise TypeError,
        # not ValueError.
        if not isinstance(x, str):
            raise TypeError("%s must be a string, got %r" % (self.field, x))
        if x not in self.options:
            raise ValueError("%r is not a valid option for %s" % (x, self.field))
        return x
    def transform(self, x):
        # Validation returns the key unchanged; map it to its value.
        return self.options[self.validate(x)]
    def json_type_spec(self):
        return dict(type="select", options=list(self.options))
@field_doc(
    description="A mapping field.",
    params="""
    default : dict, optional
        The default value. Default is an empty dict (``{}``).
    """,
)
class Mapping(Field):
    def __init__(self, field, default=None, label=None, target=None):
        # Fresh empty dict when no default is supplied.
        if default is None:
            default = {}
        super().__init__(field, default=default, label=label, target=target)
    def validate(self, x):
        if isinstance(x, dict):
            return x
        raise TypeError("%s must be a mapping, got %r" % (self.field, x))
    def get_default(self):
        # Deep-copy so callers cannot mutate the shared default in place.
        return copy.deepcopy(self.default)
    def json_type_spec(self):
        return dict(type="mapping")
| [
"noreply@github.com"
] | ydataai.noreply@github.com |
b0a67ae46e2cc09f784f219659fe6a52ae000b70 | 6f82a98751f27d07a947f3a22b8523b2d0b9c0db | /oneclickvitals/migrations/0024_auto_20150427_1920.py | 0100d64d0e2980848e64db385b51246639b7b7fd | [] | no_license | SirishaDumpala/MedicalProject | 6078bcc3098750e4afcf4af42002cb5e424099b7 | 83fec120bdf41e673f7672576a481d334e4d4289 | refs/heads/master | 2021-01-19T06:53:14.091093 | 2015-04-28T18:41:44 | 2015-04-28T18:41:44 | 31,844,156 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes LabResults.test_date and
    # LabTest.test_date optional (nullable and blank-allowed).
    dependencies = [
        ('oneclickvitals', '0023_auto_20150427_1605'),
    ]
    operations = [
        migrations.AlterField(
            model_name='labresults',
            name='test_date',
            field=models.DateField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='labtest',
            name='test_date',
            field=models.DateField(null=True, blank=True),
            preserve_default=True,
        ),
    ]
| [
"birlangisiri@gmail.com"
] | birlangisiri@gmail.com |
def2133f683035964fdbf030fa9a9bec0085cb22 | f1fcaf58e53792db786bf6ffb87f67b815ed600e | /Chapter8.py | 4e1e60f78cbb1e010b37949f78d483331693bc96 | [] | no_license | stephenosullivan/effective-python3 | 8e414d0aa64eb2a599ba661056809830b6e4a39f | c933b3f80021f9ba3d1f0ad608f563a106d89bd8 | refs/heads/master | 2021-01-13T07:39:56.418989 | 2015-10-04T01:27:26 | 2015-10-04T01:27:26 | 39,714,317 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | __author__ = 'stephenosullivan'
class Item55:
    """
    Use repr Strings for debugging output
    """
    def __init__(self):
        # str vs repr: repr adds quotes and is eval()-able.
        a = "string"
        print(a)
        print(repr(a))
        print(eval(repr(a)))
        # '%r' applies repr() formatting, like '%s' applies str().
        print('%r' % a)
        # Opaque defines no __repr__, so it prints the default <...> form.
        a = Opaque(5,4)
        print(a)
        # BetterClass defines __repr__, so its print is human-readable.
        b = BetterClass(6,7)
        print(b)
        # __dict__ is a quick way to inspect any object's attributes.
        print(a.__dict__)
class Opaque:
    """Demo class without __repr__; instances print the default object form."""
    def __init__(self, x, y):
        self.x, self.y = x, y
class BetterClass:
    """Like Opaque, but with a __repr__ so instances print usefully."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        # %r (rather than %s) keeps the output eval()-able for any field
        # type, e.g. BetterClass('a', 'b') instead of BetterClass(a, b).
        # Output is unchanged for the integer fields used in this file.
        return "BetterClass(%r, %r)" % (self.x, self.y)
class Item56:
    """Test everything with unittest."""
    def __init__(self):
        # No setup needed; the actual tests live in UtilsTestCase below.
        pass
def to_str(data):
    """Return *data* as a str, decoding UTF-8 when bytes are given."""
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, str):
        return data
    raise TypeError('Must supply string or bytes, ' 'found: %r' % data)
from unittest import TestCase, main
class UtilsTestCase(TestCase):
    """Unit tests for to_str: bytes input, str input, and invalid input."""
    def test_to_str_bytes(self):
        self.assertEqual('hello', to_str(b'hello'))
    def test_to_str_str(self):
        self.assertEqual('hello', to_str('hello'))
    def test_to_str_bad(self):
        self.assertRaises(TypeError, to_str, object())
if __name__ == "__main__":
sol = Item55()
main()
| [
"osullisg@gmail.com"
] | osullisg@gmail.com |
1ef2e9efc80c13efd70ec5dc3809ee3bea477113 | 8cb45cfc28e207dc56bee330e4f670e3f17e1e9d | /uofa-homepage.py | 86c94941587174e5d37e4b212863967f9db12546 | [] | no_license | llDanieLll/url_headers_parse | e643207c4451b4bcf53ffcd102976378876ea910 | 2d4f9facddab8806155e46dd5a22264dc57f7018 | refs/heads/master | 2020-03-07T11:24:18.783326 | 2018-03-30T17:27:15 | 2018-03-30T17:27:15 | 127,454,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import urllib.request
import urllib.parse
try:
url = 'https://www.ualberta.ca/'
values = {}
headers = {}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
headers['User-Agent']='Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'
req = urllib.request.Request(url,data=data)
resp = urllib.request.urlopen(req)
respData = resp.read()
saveFile = open('u_ahomepage.txt','w')
saveFile.write(str(respData))
saveFile.close
except Exception as e:
print (str(e))
| [
"noreply@github.com"
] | llDanieLll.noreply@github.com |
4344b251328ece82d57f22c21563a169e723a2c2 | f94e54d3085cd07a6f4972f2111574ad95fe4d89 | /utils/iotools.py | 406433fab0cdf5f54f662d8821bdadfae2017c15 | [] | no_license | banskt/statsfpl | b4e67ca4ed09a8cdc927ec4cb4ad570d891ad395 | b442208fa4d07e3a097445c75a4fd2f8098440ff | refs/heads/master | 2021-06-30T01:54:05.461439 | 2020-09-07T09:41:04 | 2020-09-07T09:41:04 | 143,441,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | import numpy as np
import collections
import csv
from utils.containers import FixtureInfo
def get_fixtures(filename, scores):
    """Parse a fixtures CSV into per-gameweek fixture lookups.

    Each row is expected to be: team name followed by 38 gameweek entries
    shaped like ``"OPP (H)"`` / ``"OPP (A)"`` -- TODO confirm against the
    actual data file.

    Returns a list of 38 dicts mapping team -> FixtureInfo(gegen, athome,
    prob), where prob comes from ``scores[team][opponent][location]``.
    """
    fixtures = [{} for x in range(38)]
    with open(filename) as csvfile:
        instream = csv.reader(csvfile, delimiter = ',')
        for row in instream:
            team = row[0].strip()
            teamscores = scores[team]
            for gw in range(1, 39):
                opp = row[gw].split()[0].strip()
                # Second token is the venue marker, e.g. "(H)"; char [1]
                # picks out 'H' or 'A'.
                loc = row[gw].split()[1].strip()[1]
                athome = False
                if loc == 'H':
                    athome = True
                score = teamscores[opp][loc]
                fixtures[gw - 1][team] = FixtureInfo(gegen = opp, athome = athome, prob = score)
    return fixtures
def convert_scores_mat(sdict, teams, nanval = 0.5):
    """Turn the nested score dict into jointly min-max-normalised
    home/away matrices.

    Entry [i, j] is team i's score against team j; both matrices share a
    single [0, 1] scale and the (meaningless) diagonal is set to nanval.
    """
    home = np.array([[sdict[t1][t2]['H'] for t2 in teams] for t1 in teams], dtype=float)
    away = np.array([[sdict[t1][t2]['A'] for t2 in teams] for t1 in teams], dtype=float)
    lo = min(np.min(home), np.min(away))
    hi = max(np.max(home), np.max(away))
    span = hi - lo
    home = (home - lo) / span
    away = (away - lo) / span
    np.fill_diagonal(home, nanval)
    np.fill_diagonal(away, nanval)
    return home, away
def get_scores(filename, nanval = 0.5):
    """Build pairwise matchup scores from a team-strength CSV.

    The CSV has a header row (skipped below), then one row per team: the
    team name followed by numeric strength columns indexed by the
    constants below -- TODO confirm column order against the data file.

    Returns (teams, scores) where scores[team][opp]['H'/'A'] holds the
    normalised score for ``team`` hosting / visiting ``opp``.
    """
    scores = {}
    points = {}
    teams = list()
    # Column indices into each team's numeric row.
    ATTACKH = 0
    ATTACKA = 1
    DEFENDH = 2
    DEFENDA = 3
    with open(filename) as csvfile:
        instream = csv.reader(csvfile, delimiter = ',')
        # Skip the header row.
        next(instream, None)
        for row in instream:
            team = row[0].strip()
            teams.append(team)
            points[team] = [float(x.strip()) for x in row[1:]]
    for team in teams:
        scores[team] = {}
        for opp in teams:
            scores[team][opp] = {}
            if opp == team:
                scores[team][opp]['H'] = 0
                scores[team][opp]['A'] = 0
            else:
                # Raw score: own defence strength minus the opponent's
                # attack strength at the relevant venue.
                scores[team][opp]['H'] = points[team][DEFENDH] - points[opp][ATTACKA]
                scores[team][opp]['A'] = points[team][DEFENDA] - points[opp][ATTACKH]
    home, away = convert_scores_mat(scores, teams, nanval = nanval)
    # Write the normalised matrix values back into the nested dict form.
    for i, team in enumerate(teams):
        for j, opp in enumerate(teams):
            scores[team][opp]['H'] = home[i, j]
            scores[team][opp]['A'] = away[i, j]
    return teams, scores
| [
"bnrj.saikat@gmail.com"
] | bnrj.saikat@gmail.com |
b7c52076b044cbf724a5c615bcde432d581148bf | 0bd17d41590d8946df411db0af0e8e814854dcce | /pycmbs/diagnostic/__init__.py | 80c3801fcdea8281f7a9be5a73a8d7f856407424 | [
"MIT"
] | permissive | wk1984/pycmbs | 39b36c62b62aa7b9ca9fcd7f2fc9104838524c19 | 853bcd1995e0b89b92aa94d30f967f6ecdd16bcc | refs/heads/master | 2020-04-08T14:44:06.822629 | 2015-08-04T17:45:37 | 2015-08-04T17:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from diagnostic_basic import *
from regional_analysis import RegionalAnalysis
| [
"a.loew@gmx.net"
] | a.loew@gmx.net |
decc14ec6c9e00b0fbed6e000b45d1b1efb74fa2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2335/60705/241811.py | ab11ecca95033a28315b69c996ee7f5b73163e7e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | x = int(input())
y = int(input())
count = 0
s = {x}
while not s.__contains__(y):
s2 = set()
for i in s:
s2.add(2*i)
s2.add(i - 1)
for j in s2:
s.add(j)
count += 1
print(count) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
9697269b3760a99d42530da45367f35e3567521c | c597ef67cef06a2073e32ab0cf0f58634a6857ef | /main.py | 6fae7d67aa4880eb5c7996517728d507f1d52d79 | [] | no_license | EdwinMichaelLab/a_dockerized_flask_app | 7b523f4657503a3d7f0a6001c315bea6c3a025a7 | 48f2193e60384b9a2bc882bac1f5ac9f5e4f1887 | refs/heads/main | 2023-08-13T18:40:36.949013 | 2021-10-08T02:18:39 | 2021-10-08T02:18:39 | 414,640,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | '''
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World'
'''
from flask import Flask, request
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def myapp():
    """Landing page: tell the caller how to invoke the /add endpoint."""
    return "To use this app: {}/add?x=value&y=value".format(request.base_url)
@app.route('/add')
def add():
    """Add the ``x`` and ``y`` query parameters and return a JSON result.

    Responds with status ``REQUEST_DENIED`` when a parameter is missing,
    and with a plain-text message when a parameter is not numeric.
    """
    # Checking that both parameters have been supplied
    for param in ['x', 'y']:
        if param not in request.args:
            result = {
                'type': '%s value is missing' % param,
                'content': '',
                'status': 'REQUEST_DENIED'
            }
            return jsonify(result)
    # Make sure they are numbers too.  Catch only the conversion errors
    # float() can raise -- the previous bare except hid unrelated bugs.
    try:
        x = float(request.args['x'])
        y = float(request.args['y'])
    except (TypeError, ValueError):
        return "x and y should be numbers"
    result = {
        'type': 'result',
        'content': x+y,
        'status': 'REQUEST_OK'
    }
    return jsonify(result)
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True) | [
"sookim@usf.edu"
] | sookim@usf.edu |
3eacd279773774d50e98c01956d5ca188f7c8b09 | c1d3bbeef64bcd8565f96dcad616c9db38d03b6f | /preprocess/prep_ae.py | 3c43a19cf49cb9bc697f9b14156f760c3b869092 | [] | no_license | amymok1318/SentiLARE | 8ddbba5ca14adf8a87f385b6c6072511d6df0e08 | 5f1243788fb872e56b5e259939b932346b378419 | refs/heads/master | 2023-04-20T07:31:46.438255 | 2021-04-28T04:36:18 | 2021-04-28T04:36:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,687 | py | # Pre-processing codes for aspect term extraction
# This code is modified based on
# https://github.com/howardhsu/BERT-for-RRC-ABSA/blob/master/pytorch-pretrained-bert/preprocessing/prep_ae.py
import nltk
import numpy as np
import json
from collections import defaultdict
import xml.etree.ElementTree as ET
import random
random.seed(1337)
np.random.seed(1337)
from aspect_utils import process_text, load_sentinet
valid_split=150
polar_idx={'positive': 0, 'negative': 1, 'neutral': 2, 'conflict': 3}
idx_polar={0: 'positive', 1: 'negative', 2: 'neutral'}
sentinet, gloss_embedding, gloss_embedding_norm = load_sentinet('SentiWordNet_3.0.0.txt', 'gloss_embedding.npy')
def parse_SemEval14(fn):
    """Parse a SemEval-2014 ABSA XML file into a BIO-labelled corpus.

    For sentences carrying aspect-term annotations, each token is
    labelled 0 (outside), 1 (aspect begin) or 2 (aspect inside), and
    POS/sentiment features are attached via process_text().
    """
    root=ET.parse(fn).getroot()
    corpus = []
    opin_cnt=[0]*len(polar_idx)  # per-polarity aspect counts (statistics only)
    sent_cnt = 0
    aspect_cnt = 0
    id_list, sentence_list, term_list = [], [], []
    for sent in root.iter("sentence"):
        text=[]
        opins=set()
        sent_cnt += 1
        # Collect (term, char_from, char_to, polarity_idx) tuples, skipping
        # zero-width spans and "NULL" aspect terms.
        for opin in sent.iter('aspectTerm'):
            if int(opin.attrib['from'] )!=int(opin.attrib['to'] ) and opin.attrib['term']!="NULL":
                opins.add((opin.attrib['term'], int(opin.attrib['from']), int(opin.attrib['to']), polar_idx[opin.attrib['polarity'] ] ) )
        aspect_cnt += len(opins)
        # Rebuild the sentence text, inserting spaces around aspect-term
        # boundaries and around / * - = so the tokenizer splits there.
        for ix, c in enumerate(sent.find('text').text ):
            for opin in opins:
                if (c=='/' or c=='*' or c=='-' or c=='=') and len(text)>0 and text[-1]!=' ':
                    text.append(' ')
                if ix==int(opin[1] ) and len(text)>0 and text[-1]!=' ':
                    text.append(' ')
                elif ix==int(opin[2] ) and len(text)>0 and text[-1]!=' ' and c!=' ':
                    text.append(' ')
            text.append(c)
            if (c=='/' or c=='*' or c=='-' or c=='=') and text[-1]!=' ':
                text.append(' ')
        text="".join(text)
        tokens=nltk.word_tokenize(text)
        lb=[0]*len(tokens)  # one label per token: 0=O, 1=B, 2=I
        for opin in opins:
            opin_cnt[opin[3]]+=1
            # Walk the ORIGINAL character offsets while advancing a cursor
            # (token_idx, pt) through the tokenized text so that character
            # spans can be mapped onto token labels.
            token_idx, pt, tag_on=0, 0, False
            for ix, c in enumerate(sent.find('text').text):
                if pt>=len(tokens[token_idx]):
                    pt=0
                    token_idx+=1
                if token_idx>=len(tokens):
                    break
                if ix==opin[1]: #from: first token of the aspect span
                    assert pt==0 and c!=' '
                    lb[token_idx]=1
                    tag_on=True
                elif ix==opin[2]: #to: one past the last span character
                    assert pt==0
                    tag_on=False
                elif tag_on and pt==0 and c!=' ':
                    lb[token_idx]=2
                # Advance the intra-token cursor; chr(160) is a non-breaking
                # space, and ``/'' are the tokenizer's rewritten quote marks.
                if c==' ' or ord(c)==160:
                    pass
                elif tokens[token_idx][pt:pt+2]=='``' or tokens[token_idx][pt:pt+2]=="''":
                    pt+=2
                else:
                    pt+=1
            # NOTE(review): this append runs once per aspect term, so a
            # sentence with k aspects is recorded k times (with the same
            # mutable `lb` list) -- confirm the duplication is intentional.
            if len(opins) > 0:
                id_list.append(sent.attrib['id'])
                sentence_list.append(text)
                term_list.append(lb)
    clean_id_list, text_list, text_list_split, pos_list, senti_list, label_list = process_text(id_list, sentence_list,
                                                                                               term_list,
                                                                                               sentinet,
                                                                                               gloss_embedding,
                                                                                               gloss_embedding_norm,
                                                                                               'discrete')
    for idx in range(len(text_list)):
        assert len(text_list_split[idx]) == len(label_list[idx])
        corpus.append({"id": clean_id_list[idx], "sentence": text_list[idx], "labels": label_list[idx],
                       "pos_sent": pos_list[idx], "senti_sent": senti_list[idx], "sentence_split": text_list_split[idx]})
    print('number of sentences with each label: ', opin_cnt)
    print('number of training samples: ',len(corpus))
    print('number of total sentences: ', sent_cnt)
    print('number of total aspects: ', aspect_cnt)
    return corpus
# Build train/dev/test JSON files for both SemEval-14 domains.  The last
# `valid_split` training sentences of each domain are held out as dev data.
for domain, train_xml, test_xml in (
        ('laptop',
         '../raw_data/aspect_data/semeval14/train/Laptop_Train_v2.xml',
         '../raw_data/aspect_data/semeval14/test/Laptops_Test_Gold.xml'),
        ('restaurant',
         '../raw_data/aspect_data/semeval14/train/Restaurants_Train_v2.xml',
         '../raw_data/aspect_data/semeval14/test/Restaurants_Test_Gold.xml'),
):
    train_corpus = parse_SemEval14(train_xml)
    with open("../preprocessed_data_ae/%s/train_newpos.json" % domain, "w") as fw:
        json.dump({rec["id"]: rec for rec in train_corpus[:-valid_split]}, fw, sort_keys=True, indent=4)
    with open("../preprocessed_data_ae/%s/dev_newpos.json" % domain, "w") as fw:
        json.dump({rec["id"]: rec for rec in train_corpus[-valid_split:]}, fw, sort_keys=True, indent=4)
    test_corpus = parse_SemEval14(test_xml)
    with open("../preprocessed_data_ae/%s/test_newpos.json" % domain, "w") as fw:
        json.dump({rec["id"]: rec for rec in test_corpus}, fw, sort_keys=True, indent=4)
| [
"noreply@github.com"
] | amymok1318.noreply@github.com |
345593e9bcdc9ac1039f7722b7abe6402d3352ae | 188b2f0a0a9dbdf6261feb59442c0fe8d03daa6c | /users/permission.py | 394f2d771aefb8578da8999120472b2d47138c47 | [] | no_license | andre1201/work | 966eca5901eb22a1d816b9f5bff0c03690c39b93 | dbf656c612021cc074ef652b28f3a87e9a6481be | refs/heads/master | 2021-01-10T14:30:34.595668 | 2015-11-10T08:34:16 | 2015-11-10T08:34:16 | 43,693,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | from rest_framework.permissions import BasePermission
from django.contrib.auth.models import User
class IsAdminUser(BasePermission):
    """DRF permission that allows access only to staff (admin) users."""

    def has_permission(self, request, view):
        # Grant access only when the request carries a user with staff
        # privileges; bool() keeps the return value a plain boolean
        # instead of leaking the user object.  (Dead commented-out code
        # that duplicated this check has been removed.)
        return bool(request.user and request.user.is_staff)
| [
"shukin_am@crvtu.local"
] | shukin_am@crvtu.local |
d6e6e537527df94454a1ffa739957e917b26d616 | 8ce656578e04369cea75c81b529b977fb1d58d94 | /bank_guarantee/helpers/copy_request.py | 6d2267189010e53aa1b00484d02d30b070321647 | [] | no_license | JJvzd/django_exp | f9a08c40a6a7535777a8b5005daafe581d8fe1dc | b1df4681e67aad49a1ce6426682df66b81465cb6 | refs/heads/master | 2023-05-31T13:21:24.178394 | 2021-06-22T10:19:43 | 2021-06-22T10:19:43 | 379,227,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,328 | py | import os
import requests
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.functional import cached_property
from sentry_sdk import capture_exception
from accounting_report.fields import QuarterData
from accounting_report.models import Quarter
from bank_guarantee.models import RequestStatus, Request
from base_request.models import RequestTender
from clients.models import Client, Agent
from common.helpers import generate_password
from questionnaire.models import (
PassportDetails, Profile, LicensesSRO, KindOfActivity, BankAccount,
ProfilePartnerLegalEntities
)
from users.models import Role, User
class CopyRequest:
    """Copy a bank-guarantee request from a remote tenderhelp instance.

    Pulls the request, the client questionnaire (profile) and the
    accounting report over the remote REST API and recreates them
    locally as a draft request assigned to a stub agent/manager.

    Fixes vs. the previous revision:
    * the "no agent / no user / no manager" guards constructed a
      BaseException but never raised it -- they now raise;
    * save_persons() tracked the incoming ``id`` (None for new rows)
      instead of the saved object's id, so freshly created persons were
      immediately deleted by the cleanup query;
    * create_user() used a mutable default argument.
    """
    from_url = r'http://tenderhelp.ru/'
    token = os.environ.get('TOKEN_FOR_COPY_REQUEST')

    def __init__(self, from_request_id, from_url=None, token=None):
        # `from_request_id` is the request id on the remote instance;
        # url/token override the class-level defaults when provided.
        self.from_request_id = from_request_id
        if from_url:
            self.from_url = from_url
        if token:
            self.token = token

    def get_data(self, url, params=None):
        """GET ``url`` (relative to ``from_url``) and return parsed JSON."""
        if params is None:
            params = {}
        result = requests.get(
            self.from_url + url,
            params=params,
            headers={'Authorization': 'Token %s' % self.token},
            verify=False
        )
        try:
            return result.json()
        except Exception as error:
            # Report malformed responses to Sentry before propagating.
            capture_exception(error)
            raise

    def get_request_data(self):
        """Fetch the remote request payload."""
        url = r'api/requests/bank_guarantee/%s/' % str(self.from_request_id)
        result = self.get_data(url)
        return result.get('request')

    def get_profile_data(self):
        """Fetch the remote client questionnaire (profile) payload."""
        url = r'api/requests/bank_guarantee/%s/profile/' % str(self.from_request_id)
        result = self.get_data(url)
        return result.get('profile')

    def get_accountint_report_data(self, client_id):
        """Fetch the remote accounting-report quarters.

        (Name keeps the historical "accountint" typo for compatibility.)
        """
        url = r'api/accounting_report/%s/' % client_id
        result = self.get_data(url)
        return result.get('quarters')

    @cached_property
    def agent(self):
        """Local stub agent the copied request will be attached to."""
        agent = Agent.objects.filter(inn=5010050218).first()
        if agent is None:
            agent = Agent.objects.first()
        if agent is None:
            # Previously the exception was built but never raised.
            raise RuntimeError('Нету созданных агентов')
        return agent

    @cached_property
    def agent_user(self):
        """First user of the stub agent."""
        user = self.agent.user_set.first()
        if user is None:
            raise RuntimeError('У агента инн: %s нету пользователей' % self.agent.inn)
        return user

    @cached_property
    def manager(self):
        """Any user with the MANAGER role."""
        user = User.objects.filter(roles__name=Role.MANAGER).first()
        if user is None:
            raise RuntimeError('В системе нет менеджеров, создайте!')
        return user

    @cached_property
    def manager_fio(self):
        """Full name of the chosen manager."""
        return self.manager.full_name

    def save_bank_accounts(self, profile, bank_accounts):
        """Upsert the profile's bank accounts; delete rows not in the payload."""
        bank_accounts_save = []
        for bank_account_data in bank_accounts:
            bank_account_id = bank_account_data.get('id')
            bank_account_data.pop('profile', None)
            if bank_account_id:
                bank_account = BankAccount.objects.filter(id=bank_account_id).first()
                self.update_from_dict(bank_account, bank_account_data)
            else:
                bank_account = profile.bankaccount_set.create()
                self.update_from_dict(bank_account, bank_account_data)
            bank_accounts_save.append(bank_account.id)
        BankAccount.objects.filter(profile=profile).exclude(
            id__in=bank_accounts_save
        ).delete()

    def save_activities(self, profile, activities):
        """Upsert the profile's kinds of activity; delete the rest."""
        activities_save = []
        for activity_data in activities:
            activity_id = activity_data.get('id')
            activity_data.pop('profile', None)
            if activity_id:
                activity = KindOfActivity.objects.filter(id=activity_id).first()
                self.update_from_dict(activity, activity_data)
            else:
                activity = profile.kindofactivity_set.create()
                self.update_from_dict(activity, activity_data)
            activities_save.append(activity.id)
        KindOfActivity.objects.filter(profile=profile).exclude(
            id__in=activities_save
        ).delete()

    def save_licenses(self, profile, licenses):
        """Upsert the profile's SRO licenses; delete the rest."""
        licenses_sro_save = []
        for license_sro_data in licenses:
            license_sro_id = license_sro_data.pop('id', None)
            license_sro_data.pop('profile', None)
            if license_sro_id:
                license_sro = LicensesSRO.objects.filter(id=license_sro_id).first()
                self.update_from_dict(license_sro, license_sro_data)
            else:
                license_sro = profile.licensessro_set.create()
                self.update_from_dict(license_sro, license_sro_data)
            licenses_sro_save.append(license_sro.id)
        LicensesSRO.objects.filter(profile=profile).exclude(
            id__in=licenses_sro_save
        ).delete()

    def person_empty(self, person):
        """True when the person payload has no name and no personal INN."""
        return all([not value for key, value in person.items() if
                    key in ['first_name', 'last_name', 'middle_name', 'fiz_inn']])

    def update_from_dict(self, obj, data):
        """Assign matching keys of ``data`` onto ``obj`` and save it.

        Empty values for exact DateField columns are normalised to None;
        the 'id' key is never overwritten.
        """
        if data:
            for key, value in data.items():
                if hasattr(obj, key):
                    if obj._meta.get_field(key).__class__ is models.DateField:
                        if not value:
                            value = None
                    if key not in ['id']:
                        setattr(obj, key, value)
            obj.save()

    def save_passport(self, passport_data):
        """Create or update a PassportDetails row from the payload."""
        passport_id = passport_data.pop('id', None)
        if not passport_id:
            passport = PassportDetails.objects.create()
        else:
            passport = PassportDetails.objects.filter(id=passport_id).first()
        self.update_from_dict(passport, passport_data)
        return passport

    def save_persons(self, profile: Profile, persons):
        """Upsert individual participants (excluding the general director)."""
        persons_save = []
        for person_data in persons:
            if not self.person_empty(person_data):
                if person_data['resident'] is None:
                    person_data['resident'] = False
                passport_data = person_data.pop('passport', {})
                passport = self.save_passport(passport_data)
                person_data.update({'passport': passport})
                # The general-director document does not apply to a
                # regular participant.
                person_data.pop('document_gen_dir', {})
                person_data.pop('profile', None)
                person_id = person_data.pop('id', None)
                if person_id:
                    person = profile.profilepartnerindividual_set.filter(
                        id=person_id
                    ).first()
                else:
                    person = profile.profilepartnerindividual_set.create()
                self.update_from_dict(person, person_data)
                # BUG FIX: track the saved object's id.  Appending the raw
                # person_id (None for new rows) made the cleanup below
                # delete just-created persons.
                persons_save.append(person.id)
        profile.profilepartnerindividual_set.exclude(
            Q(id__in=persons_save) | Q(is_general_director=True) | Q(is_booker=True)
        ).delete()

    @staticmethod
    def persons_without_general_director(persons):
        """Filter out the general director from a list of person payloads."""
        return [p for p in persons if not p['is_general_director']]

    def save_general_director(self, profile, general_director):
        """Upsert the general director and drop any duplicate directors."""
        if general_director:
            general_director_id = general_director.pop('id', None)
            passport_data = general_director.pop('passport', {})
            passport = self.save_passport(passport_data)
            general_director.update({'passport': passport})
            general_director.update({'profile': profile})
            document_gen_dir_data = general_director.pop('document_gen_dir', {})
            gen_dir = None
            if general_director_id:
                gen_dir = profile.profilepartnerindividual_set.filter(
                    id=general_director_id
                ).first()
            if not gen_dir:
                gen_dir = profile.profilepartnerindividual_set.create(
                    is_general_director=True
                )
            self.update_from_dict(gen_dir, general_director)
            self.update_from_dict(gen_dir.document_gen_dir, document_gen_dir_data)
            profile.profilepartnerindividual_set.filter(
                is_general_director=True
            ).exclude(id=gen_dir.id).delete()

    def save_legal_sharehoders(self, profile, legal_shareholders):
        """Upsert legal-entity shareholders; delete the rest.

        (Name keeps the historical "sharehoders" typo for compatibility.)
        """
        legal_shareholders_save = []
        for legal_shareholder_data in legal_shareholders:
            legal_shareholder_id = legal_shareholder_data.get('id')
            legal_shareholder_data.pop('profile', None)
            if legal_shareholder_data.get('passport'):
                passport_data = legal_shareholder_data.get('passport')
                if passport_data.get('id'):
                    passport = PassportDetails.objects.filter(
                        id=passport_data.pop('id')
                    ).first()
                else:
                    passport = PassportDetails.objects.create()
                self.update_from_dict(passport, passport_data)
                legal_shareholder_data.update({'passport': passport})
            if legal_shareholder_id:
                legal_shareholder = ProfilePartnerLegalEntities.objects.filter(
                    id=legal_shareholder_id
                ).first()
                self.update_from_dict(legal_shareholder, legal_shareholder_data)
            else:
                legal_shareholder = profile.profilepartnerlegalentities_set.create()
                self.update_from_dict(legal_shareholder, legal_shareholder_data)
            legal_shareholders_save.append(legal_shareholder.id)
        ProfilePartnerLegalEntities.objects.filter(profile=profile).exclude(
            id__in=legal_shareholders_save
        ).delete()

    @classmethod
    def clear_id(cls, data):
        """Recursively strip integer 'id'/'profile' keys from the payload."""
        if isinstance(data, list):
            for d in data:
                cls.clear_id(d)
        if isinstance(data, dict):
            temp = list(data.keys())
            for key in temp:
                if key in ['id', 'profile'] and isinstance(data[key], int):
                    del data[key]
                else:
                    cls.clear_id(data[key])

    def update_profile(self, client):
        """Overwrite the local client's profile with the remote payload."""
        profile = client.profile
        profile_data = self.get_profile_data()
        self.clear_id(profile_data)
        # Re-attach local ids so the existing general director is updated
        # in place instead of being recreated.
        if profile.general_director:
            profile_data['general_director']['id'] = profile.general_director.id
            profile_data['general_director']['passport']['id'] = profile.general_director.passport.id  # noqa
            profile_data['general_director']['document_gen_dir']['id'] = profile.general_director.document_gen_dir.id  # noqa
        self.save_bank_accounts(
            profile=profile,
            bank_accounts=profile_data.pop('bank_accounts') or []
        )
        self.save_activities(
            profile=profile,
            activities=profile_data.pop('activities') or []
        )
        self.save_licenses(
            profile=profile,
            licenses=profile_data.pop('licenses_sro') or []
        )
        self.save_general_director(
            profile=profile,
            general_director=profile_data.pop('general_director') or {}
        )
        self.save_persons(
            profile=profile,
            persons=self.persons_without_general_director(
                profile_data.pop('persons') or []
            )
        )
        self.save_legal_sharehoders(
            profile=profile,
            legal_shareholders=profile_data.pop('legal_shareholders') or []
        )
        profile_data.pop('booker', None)
        self.update_from_dict(client.profile, profile_data)

    def update_accounting_report(self, new_client, old_client_id):
        """Mirror the remote client's accounting quarters onto the new client."""
        data = self.get_accountint_report_data(old_client_id)
        for d in data:
            quarter, create = Quarter.objects.update_or_create(
                quarter=d['quarter'],
                year=d['year'],
                client=new_client,
                defaults={'data': QuarterData(d['data']), 'no_data': d['no_data']}
            )

    def get_client(self, data):
        """Create/update the local client from the remote client payload."""
        inn = data['inn']
        kpp = data['kpp']
        data['agent_company'] = self.agent
        data['agent_user'] = self.agent_user
        data['manager'] = self.manager
        # NOTE(review): 'managet_fio' (sic) is set here and deleted again
        # below -- it looks like a leftover; confirm before removing.
        data['managet_fio'] = self.manager_fio
        client_id = data['id']
        # Keys that either identify the remote row or are recomputed locally.
        fields_for_delete = [
            'id',
            'inn',
            'kpp',
            'profile',
            'agent_user_id',
            'agent_company_id',
            'agent_company_inn',
            'agent_company_short_name',
            'email',
            'last_login',
            'managet_fio',
            'phone',
        ]
        for field in fields_for_delete:
            del data[field]
        client, create = Client.objects.update_or_create(inn=inn, kpp=kpp, defaults=data)
        self.update_profile(client)
        self.update_accounting_report(client, client_id)
        if create:
            self.create_user(client)
        return client

    def create_user(self, client, roles=None):
        """Create a login (``<inn>@test.ru``) for a newly copied client."""
        # Avoid a mutable default argument; default to the CLIENT role.
        if roles is None:
            roles = [Role.CLIENT]
        email = '%s@test.ru' % client.inn
        password = generate_password()
        user = User.objects.create_user(email, password=password)
        user.client = client
        user.roles.set(Role.objects.filter(name__in=roles))
        user.save()

    @staticmethod
    def get_tender(data):
        """Create a local RequestTender from the remote tender payload."""
        # Read-only/derived fields cannot be passed to the constructor.
        fields_for_delete = [
            'read_only_fields',
            'procuring_amount',
            'placement',
            'get_federal_law_display'
        ]
        for field in fields_for_delete:
            del data[field]
        return RequestTender.objects.create(**data)

    @staticmethod
    def get_status():
        """Status for the copied request: always DRAFT."""
        return RequestStatus.objects.get(code=RequestStatus.CODE_DRAFT)

    def copy_request(self):
        """Entry point: fetch the remote request and recreate it locally."""
        data = self.get_request_data()
        # Remote-only / workflow fields that must not be copied.
        fields_for_delete = [
            'id',
            'bank',
            'offer',
            'offer_additional_fields',
            'assigned',
            'additional_status',
            'rating',
            'base_request',
            'decision_maker',
            'request_number',
            'request_number_in_bank',
            'is_signed',
            'status_changed_date',
            'sent_to_bank_date',
            'created_date',
            'updated_date',
            'agent_user_id',
        ]
        for field in fields_for_delete:
            data.pop(field, None)
        data['banks_commissions'] = '{}'
        data['client'] = self.get_client(data['client'])
        data['tender'] = self.get_tender(data['tender'])
        data['status'] = self.get_status()
        data['agent'] = self.agent
        data['agent_user'] = self.agent.user_set.first()
        # Interval bounds arrive as 'YYYY-MM-DD' strings.
        data['interval_to'] = timezone.datetime(*[
            int(i) for i in data['interval_to'].split('-')
        ])
        data['interval_from'] = timezone.datetime(*[
            int(i) for i in data['interval_from'].split('-')
        ])
        Request.objects.create(**data)
| [
"javad@MacBook-Pro-Namig.local"
] | javad@MacBook-Pro-Namig.local |
1d24db42c4a9e7491a0048d011a432ca391c9bca | 89414e420826da539f1b4934f5eb98f1ff57c131 | /scrape_mars1.py | 1b06e13ce91da8991fe4949be7f6d440ff4b6c86 | [] | no_license | shuyijun2020/web-scraping-challenge | 11bf48048264d2678b08ce73265516bb07ebb136 | e863e8521fb1d8282e221bba7ed228ab30e104c4 | refs/heads/master | 2022-11-26T23:16:09.567979 | 2020-08-06T03:15:06 | 2020-08-06T03:15:06 | 283,396,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | from splinter import Browser
from bs4 import BeautifulSoup as bs
import time
def init_browser():
    """Return a visible (non-headless) splinter Chrome browser.

    @NOTE: Replace the path with your actual path to the chromedriver.
    """
    driver_config = {"executable_path": "chromedriver.exe"}
    return Browser("chrome", headless=False, **driver_config)
def scrape_info():
    """Scrape the NASA Mars news page and return the raw tag lists."""
    browser = init_browser()
    browser.visit('https://mars.nasa.gov/news/')
    soup = bs(browser.html, 'html.parser')
    titles = soup.find_all('div', class_="content_title")
    teasers = soup.find_all('div', class_="article_teaser_body")
    # Close the browser after scraping
    browser.quit()
    # Return results
    return {
        "news_title": titles,
        "news_p": teasers,
    }
| [
"64697542+shuyijun2020@users.noreply.github.com"
] | 64697542+shuyijun2020@users.noreply.github.com |
ad9cfaa7af4750b0980f01e8a34a73266f8458bb | 71a6969fbec7f1a4b2886108cbf8ced578cadb97 | /memory_vs_trial_type.py | 6079c6fa99abff0684dd567c8b99f6b72acde068 | [] | no_license | gadietz/SURF_analysis | 796662492ccf1fb2658b4cc4bf28b8bfe771aede | c3eba9d081fe92474ec2896dce064fa5cc737382 | refs/heads/master | 2022-11-23T18:11:51.715942 | 2020-07-24T20:30:47 | 2020-07-24T20:30:47 | 274,964,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,274 | py | #Data Processing
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.stats.anova import AnovaRM
# Load the memory-test and control-task data.
memory = pd.read_csv('data_memory.csv')
control = pd.read_csv('data_control.csv')
# remove bad subjects - found previously
excludedSubjects = [5, 21, 38, 72]  # pids of bad subjects
# Vectorized boolean filtering replaces the previous O(rows*subjects)
# index-collection loops and the label-based .drop() calls; it keeps
# exactly the rows whose pid is not excluded.
memory = memory[~memory['pid'].isin(excludedSubjects)]
control = control[~control['pid'].isin(excludedSubjects)]
pid_list = memory.pid.unique().tolist()
# Downstream code indexes rows and columns positionally, so convert the
# frames to plain nested lists.
memory = memory.values.tolist()
control = control.values.tolist()
# Return the (congruency, go/nogo) trial-type pair recorded in the control
# task for the given participant id and picture id.
def find_curr_trial (mem_pid, mem_picid):
    # NOTE(review): the scan starts at index 1, so the first row of
    # `control` can never match -- confirm this is intentional (it looks
    # like a leftover header-skip from a raw-file reader).
    for i in range (1, len(control)):
        if control[i][0] == mem_pid:  # column 0: participant id
            if control[i][2] == mem_picid:  # column 2: picture id
                return (control[i][5], control[i][6])  # columns 5/6: congruency, go/nogo
    return('-','-') #sentinel pair when no matching control row is found
# Determines the ratio of "1" responses to total responses in that category.
def accuracy(response_list):
    """Return the fraction of entries in ``response_list`` equal to 1.

    Returns 0.0 for an empty list instead of raising ZeroDivisionError,
    which protects callers when a condition has no trials.
    """
    if not response_list:
        return 0.0
    return sum(1 for response in response_list if response == 1) / len(response_list)
# Find accuracy for the individual subject.
def subject_accuracy(curr_ID):
    """Compute one subject's memory accuracy per study-phase trial type.

    Returns accuracies in the order
    [congruent/go, congruent/nogo, incongruent/go, incongruent/nogo].
    """
    buckets = {
        ("congruent", "go"): [],
        ("congruent", "nogo"): [],
        ("incongruent", "go"): [],
        ("incongruent", "nogo"): [],
    }
    # Bucket every "old"-item memory response by the conditions of the
    # matching study-phase (control) trial.
    for row in memory:
        if row[0] == curr_ID and row[5] == "old":
            congruency, go_nogo = find_curr_trial(row[0], row[2])
            if congruency != '-':
                key = ("congruent" if congruency == "congruent" else "incongruent",
                       "go" if go_nogo == "go" else "nogo")
                buckets[key].append(row[10])
    return [accuracy(buckets[("congruent", "go")]),
            accuracy(buckets[("congruent", "nogo")]),
            accuracy(buckets[("incongruent", "go")]),
            accuracy(buckets[("incongruent", "nogo")])]
#Responses will be contained in this two dimensional list so that it is in the
# order of congruent_go, congruent_nogo, incongruent_go, incongruent_nogo lists
all_accuracies = [[],[],[],[]]
ANOVA_list = []
#get accuracies for each participant and then put into all_accuracies
# (ANOVA_list rows are long-format: pid, response, congruency, accuracy)
for ID in pid_list:
    individ_acc = subject_accuracy(ID)
    ANOVA_list.append([ID, 'go', 'congruent', individ_acc[0]])
    ANOVA_list.append([ID, 'nogo', 'congruent', individ_acc[1]])
    ANOVA_list.append([ID, 'go', 'incongruent', individ_acc[2]])
    ANOVA_list.append([ID, 'nogo', 'incongruent', individ_acc[3]])
    for i in range(4):
        all_accuracies[i].append(individ_acc[i])
#ANOVA: repeated-measures over response x congruency, per subject
data = pd.DataFrame(ANOVA_list, columns = ['pid', 'response', 'congruency', 'SbjACC'])
gpResult = data.groupby(['response','congruency']).SbjACC.mean().reset_index()
print(gpResult)
curr_ANOVA = AnovaRM(data, 'SbjACC', 'pid', within = ['response', 'congruency'])
curr_ANOVA = curr_ANOVA.fit()
print(curr_ANOVA)
#Overall Analysis: mean accuracy per condition across subjects
all_accuracies_average = []
for i in range(len(all_accuracies)):
    all_accuracies_average.append(sum(all_accuracies[i])/len(all_accuracies[i]))
print(all_accuracies_average)
# Reorder to match the bar labels below: cong/go, incong/go, cong/nogo, incong/nogo.
all_accuracies_average = [all_accuracies_average[0], all_accuracies_average[2], all_accuracies_average[1], all_accuracies_average[3]]
#plot the relationship
labels = (' congruent\ngo ', ' incongruent\ngo ', ' congruent\nnogo ', ' incongruent\nnogo ')
y_pos = np.arange(len(labels))
fig = plt.figure(1)
# NOTE(review): the triple-quoted block below is a disabled duplicate of
# the plot (default colors, no y-limit); delete it if no longer needed.
'''
plt.bar(y_pos, all_accuracies_average)
plt.xticks(y_pos, labels)
plt.ylabel("Memory Accuracy")
plt.xlabel("Trial Type")
plt.title("Memory Accuracy vs. Trial Type")
for i in range(len(all_accuracies_average)):
    plt.text(x=y_pos[i] - 0.12, y=all_accuracies_average[i] + 0.01, \
            s=round(all_accuracies_average[i], 3), size=10)
plt.show()
plt.figure(2)
'''
plt.bar(y_pos, all_accuracies_average, color="gold")
plt.xticks(y_pos, labels)
plt.ylabel("Memory Accuracy")
plt.xlabel("Trial Type")
plt.title("Memory Accuracy vs. Trial Type")
plt.ylim(.4, .7)
# Annotate each bar with its rounded value.
for i in range(len(all_accuracies_average)):
    plt.text(x=y_pos[i] - 0.12, y=all_accuracies_average[i] + 0.01, \
            s=round(all_accuracies_average[i], 3), size=10)
plt.show()
fig.savefig("memory_vs_trial_type.png")
| [
"noreply@github.com"
] | gadietz.noreply@github.com |
788ef0052aafc50928a425010a71836954b38794 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/ChainedComparison9.py | b9dfa664cb719771acc687073432f02f50687c0f | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 24 | py | 0 < x and True and x < 2 | [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
a30badd10e968213b68d1cab709d7f6258ff4478 | 921c29354a9065a4f76f816c2b2ec68457f66aef | /todo/tests/test_task.py | e4752471e00904681378c98b7d75e47dcc6c54c8 | [] | no_license | AmrAnwar/ToDoList | 520fa0529090183832dfd8c274fb3e7dad4d7a3b | de5e9e9887dee857e6169184aa9c7b74f31d32c4 | refs/heads/master | 2020-04-11T15:51:39.869491 | 2018-12-15T17:20:11 | 2018-12-15T17:20:11 | 161,905,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | from .test_init import InitTest
class TestList(InitTest):
    """Integration tests for the task detail view.

    Fixtures (``self.task`` and the ``guest``/``anwar`` users) are
    presumably created in ``InitTest.setUp`` -- see ``test_init``.
    """
    def setUp(self):
        # Delegates entirely to InitTest.setUp; kept as an explicit hook
        # for future per-class fixtures.
        super(TestList, self).setUp()
    def test_get_task(self):
        """Anonymous and 'anwar' receive 404; 'guest' receives 200."""
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 404)
        self.client.login(username="anwar", password="password")
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 404)
        self.client.login(username="guest", password="password")
        res = self.client.get(self.task.get_absolute_url())
        self.assertEqual(res.status_code, 200)
    def test_update(self):
        """POST a new title as 'guest'.

        NOTE(review): the POST result is never asserted, so this test can
        only fail by raising -- add status/content assertions.
        """
        self.client.login(username="guest", password="password")
        data = {
            "title": "test-title"
        }
        self.client.post(self.task.get_absolute_url(), data=data)
| [
"amranwar945@gmail.com"
] | amranwar945@gmail.com |
d432534c8bdaa3ee9066132eed7708eab9d2652a | 3346669e5b8bfb5318179819ec46dbe2011d1619 | /DQN/Deep_Q_Network.py | 79b0897187c4e52ab0dab3b2d3a705adfed701f5 | [] | no_license | alidzhikov/reinforcement-learning | 1d9c5560a66ca7b22e1eb5bc23c6ef2b26e4539b | 83db4325a8d0f034e977d5359d7e825baac7ed74 | refs/heads/main | 2023-07-01T13:14:23.245919 | 2021-08-07T06:59:12 | 2021-08-07T06:59:12 | 357,237,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 20:03:15 2021
@author: Mustafa
"""
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from model import QNetwork
# Environment and agent setup for the discrete LunarLander-v2 task.
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
# NOTE(review): `vf` is never used afterwards -- it looks like a leftover
# sanity check that the QNetwork constructor works; confirm and remove.
vf = QNetwork(env.observation_space.shape[0], env.action_space.n)
from dqn_agent import Agent
agent = Agent(state_size=env.observation_space.shape[0], action_size=env.action_space.n, seed=0)
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
    """Deep Q-Learning.
    
    Params
    ======
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon

    Returns
    =======
        list of per-episode scores (undiscounted returns)
    """
    scores = []                        # list containing scores from each episode
    scores_window = deque(maxlen=100)  # last 100 scores
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes+1):
        state = env.reset()
        score = 0
        # Roll out one episode, storing each transition in the agent's
        # replay buffer via agent.step().
        for t in range(max_t):
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)        # save most recent score
        scores.append(score)               # save most recent score
        eps = max(eps_end, eps_decay*eps)  # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        # Stop early once the 100-episode moving average reaches 200 and
        # checkpoint the local Q-network weights.
        if np.mean(scores_window)>=200.0:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
            torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
            break
    return scores
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
rewards = 0
for i in range(5):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
rewards += reward
if done:
break
print(rewards)
rewards = 0
env.close() | [
"m.alidzhikov2630@gmail.com"
] | m.alidzhikov2630@gmail.com |
866b8f99d6eac8ae5e5950e1f6719735879f0ae8 | 42931eaf6eefe529cfd88bcdfe1d8c7bf9836132 | /ProgrammerProfile/migrations/0003_auto_20170328_1747.py | 75a103d46860bc39e82460a7a659ca684e1f1f59 | [] | no_license | pythongiant/Programming-Hub | 283eeafda84dff3fc71037a258e8b623e797e9e0 | de87e020ce2616e35e2545900db1097dccf22f72 | refs/heads/master | 2021-01-23T06:01:43.364433 | 2018-03-06T10:57:45 | 2018-03-06T10:57:45 | 86,331,204 | 2 | 2 | null | 2017-04-27T11:19:41 | 2017-03-27T12:12:55 | JavaScript | UTF-8 | Python | false | false | 884 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-28 12:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Description, Name and age fields to the
    ProgrammerProfile ``person`` model (with throwaway defaults so the
    columns can be added to existing rows)."""

    dependencies = [
        ('ProgrammerProfile', '0002_auto_20170328_1745'),
    ]

    operations = [
        migrations.AddField(
            model_name='person',
            name='Description',
            field=models.TextField(default='lol'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='person',
            name='Name',
            field=models.CharField(default='lol', max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='person',
            name='age',
            field=models.IntegerField(default=2),
            preserve_default=False,
        ),
    ]
| [
"srihari.unnikrishnan@gmail.com"
] | srihari.unnikrishnan@gmail.com |
db8b10220ef515e9136d2d50773d63d16206705d | 65ca31a4d4d8f337b624d8e21b0e84596c7bf604 | /Level 5 - Expanding Nebula Solution.py | 1600eba937ec10e908fbe627cdc6c434f119bf99 | [] | no_license | jonyeh48/myGoogleFoobarChallenges | ae9b9cb61496d386e7a5d7abdbf975dbc1a3feac | 8aadfe6b7b951c9373a73bfa819a175ba9083dd5 | refs/heads/main | 2023-02-16T13:00:24.002190 | 2021-01-18T00:06:18 | 2021-01-18T00:06:18 | 327,396,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | #This was extremely hard. Cellular Automaton is no joke. Solution heavily influenced by Jinzhou Zhang's (lotabout)'s solution. https://gist.github.com/lotabout/891621ae8a01d8e512afd4b5254516b4
def create(c1,c2,bit_length):
    """Apply one automaton step to two adjacent bitmask columns.

    ``c1``/``c2`` are (bit_length+1)-bit column masks; an output bit is
    set exactly when precisely one of its four predecessor bits is set.
    """
    top = c1 & ~(1 << bit_length)
    bottom = c2 & ~(1 << bit_length)
    top_shifted = c1 >> 1
    bottom_shifted = c2 >> 1
    only_top = top & ~bottom & ~top_shifted & ~bottom_shifted
    only_bottom = ~top & bottom & ~top_shifted & ~bottom_shifted
    only_top_shifted = ~top & ~bottom & top_shifted & ~bottom_shifted
    only_bottom_shifted = ~top & ~bottom & ~top_shifted & bottom_shifted
    return only_top | only_bottom | only_top_shifted | only_bottom_shifted
from collections import defaultdict
def construct_map(n, nums):
    """Build the preimage table for the DP in ``solution``.

    Returns a ``defaultdict(set)`` mapping ``(row_code, left_column)`` to the
    set of right columns such that ``create(left, right, n) == row_code``,
    restricted to the row codes that actually occur in ``nums``.
    """
    preimages = defaultdict(set)
    wanted = set(nums)
    # Enumerate every pair of (n+1)-bit predecessor columns.
    for left in range(1 << (n + 1)):
        for right in range(1 << (n + 1)):
            produced = create(left, right, n)
            if produced in wanted:
                preimages[(produced, left)].add(right)
    return preimages
import numpy as np
def solution(g):
    """Count the distinct predecessor states of the boolean grid ``g``.

    The grid is transposed so the DP iterates along its longer dimension;
    each transposed row is packed into an integer bitmask, and the number of
    compatible predecessor column patterns is accumulated row by row via the
    preimage table from ``construct_map``.
    """
    grid = list(np.transpose(g))
    width = len(grid[0])
    # Pack each transposed row into an integer bitmask (bit i = cell i).
    row_codes = []
    for row in grid:
        code = 0
        for bit, cell in enumerate(row):
            if cell:
                code |= 1 << bit
        row_codes.append(code)
    preimages = construct_map(width, row_codes)
    # counts[c] = number of predecessor prefixes ending in column pattern c;
    # initially every (width+1)-bit pattern is reachable exactly once.
    counts = {c: 1 for c in range(1 << (width + 1))}
    for code in row_codes:
        nxt = defaultdict(int)
        for left, ways in counts.items():
            for right in preimages[(code, left)]:
                nxt[right] += ways
        counts = nxt
    return sum(counts.values())
| [
"noreply@github.com"
] | jonyeh48.noreply@github.com |
22902253ca00137cc6feea542ec4cd16598ac16c | 8dadbb206f2c5cd2506165afb98e3c7dc1ec591a | /python_program-105.py | f3c67a5afd20badea992594518aec1c24ebea9aa | [] | no_license | Bansi-Parmar/Python_Basic_Programs | fe3c541e37be9660027e2d02fc8f468cc0d60f2e | 5fe29f81e83f07e8897261e773e751bfef7810da | refs/heads/master | 2023-05-12T07:06:56.514433 | 2021-05-31T17:38:33 | 2021-05-31T17:38:33 | 372,475,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | ## 105. To find the reverse number of a given positive number.
# 105. Print the reverse of a given positive number.
print('\n\t reverse number')
print('\t.................\n')
numero = int(input("Enter Number :- "))
# Reversing the decimal string is equivalent to reversing the digits.
print('Reverse Number is :- ', ''.join(reversed(str(numero))))
| [
"bansiparmar3999@gmail.com"
] | bansiparmar3999@gmail.com |
8de9d49675be983416774ae4bf4609d2d1d95145 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/142/usersdata/227/62295/submittedfiles/av2_p3_civil.py | 280c0440154f4d960bd1fc3ba353a60f8deb5e93 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | # -*- coding: utf-8 -*-
def media(a):
    """Return the arithmetic mean of the values in ``a``.

    Raises ZeroDivisionError for an empty sequence (same as the original).
    """
    total = 0
    for valor in a:
        total = total + valor
    return total / len(a)
#ESCREVA AS DEMAIS FUNÇÕES
def somaA(x, y):
    """Return the covariance numerator sum((x[i]-mean(x)) * (y[i]-mean(y))).

    This is the numerator of the Pearson correlation coefficient computed
    by the script below.
    """
    mx = media(x)
    my = media(y)
    soma = 0
    for i in range(0, len(x), 1):
        # Fix: the original closed a parenthesis too early and computed
        # (x[i]-mx)*y[i] - my instead of the product of both deviations.
        soma = soma + (x[i] - mx) * (y[i] - my)
    return soma
def entradaLista(n):
    """Read ``n`` floats from the user, one per prompt, into a list."""
    valores = []
    for _ in range(n):
        valores.append(float(input('Digite um valor: ')))
    return valores
def somaD(a):
    """Return the sum of squared deviations from the mean of ``a``.

    The original script called an undefined ``somaD``/``somad``; this helper
    supplies the missing variance term of the Pearson correlation.
    """
    m = media(a)
    soma = 0
    for i in range(0, len(a), 1):
        soma = soma + (a[i] - m) ** 2
    return soma

n = int(input('Digite o tamanho da lista: '))
x = entradaLista(n)
y = entradaLista(n)
# Pearson correlation: cov(x,y) / sqrt(var(x) * var(y)).
# Fixes vs. original: balanced parentheses, 'somad' typo -> somaD, and
# '**(0,5)' (a tuple, a TypeError) -> '** 0.5' (square root).
p = somaA(x, y) / ((somaD(x) * somaD(y)) ** 0.5)
print('%.4f' % p)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8e0e53a5764b7b9a3db6808cd0c63699d8c4d210 | e33325d9a49c5190c0305ffa8c2dee0566b3dae9 | /Switch_Atoms.py | fe7d18c6a2115b60927e14e69bd0e9b2270b6373 | [] | no_license | rtrottie/VTST-Tools | 3820006e3173c068f3fa915896c492c5e1a4a058 | b3e467c656442900e2704b68c28055c158d7f538 | refs/heads/master | 2021-07-21T15:14:48.169104 | 2021-02-23T17:33:21 | 2021-02-23T17:33:21 | 37,675,855 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | #!/usr/bin/env python
# Switches specified atoms from the run. Updates INCAR afterwards with values from the cfg.py. Atoms are switched
# pairwise so when called with 1 2 3 4, 1 and 2 switch, and 3 and 4 switch
# TODO: Stop this from messing with the MAGMOM
# Usage: Remove_Atoms.py Previous_Dir [This_Dir] Atom_#s
__author__ = 'ryan'
import sys
from Substitute_Atoms import *
if len(sys.argv) < 3:
    raise Exception('Not Enough Arguments Provided\n need: Previous_Dir [This_Dir] Atom_#s')
prev_NEB_dir = sys.argv[1]
try:
    # Optimistic parse: if This_Dir is omitted, every argument after the
    # previous directory is an atom number and the run dir is the cwd.
    # NOTE(review): 'os' is expected to come from Substitute_Atoms' star
    # import -- confirm.
    this_NEB_dir = os.getcwd()
    atom_nums = [int(x) for x in sys.argv[2:]]
except ValueError:
    # sys.argv[2] was not an integer, so it names the run directory and the
    # atom numbers start at argv[3].  (Was a bare 'except:', which would
    # also have hidden unrelated failures.)
    this_NEB_dir = sys.argv[2]
    atom_nums = [int(x) for x in sys.argv[3:]]
switch_atom_arbitrary(prev_NEB_dir, this_NEB_dir, atom_nums)
| [
"themeisme@gmail.com"
] | themeisme@gmail.com |
a9e2684649859d6b87e451d62c77a2a7bc594f57 | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/Url/url/operations/_path_items_operations.py | 8514033f930cc55c1503dfc44161367f746472ce | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,257 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.tracing.decorator import distributed_trace
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PathItemsOperations(object):
    """PathItemsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~url.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _get_operation(
        self,
        url_template,  # type: str
        path_item_string_path,  # type: str
        local_string_path,  # type: str
        path_item_string_query,  # type: Optional[str]
        local_string_query,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Shared implementation of the four GET operations in this group.

        The generated operations differ only in their URL template (which
        encodes the combination of null/non-null query values the test
        service expects), so request construction, dispatch and error
        handling live here once.

        :param url_template: the ``metadata['url']`` of the public operation.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Fill the three path placeholders common to every operation.
        path_format_arguments = {
            'pathItemStringPath': self._serialize.url("path_item_string_path", path_item_string_path, 'str'),
            'globalStringPath': self._serialize.url("self._config.global_string_path", self._config.global_string_path, 'str'),
            'localStringPath': self._serialize.url("local_string_path", local_string_path, 'str'),
        }
        url = self._client.format_url(url_template, **path_format_arguments)

        # Optional query parameters are omitted entirely when None.
        query_parameters = {}  # type: Dict[str, Any]
        if path_item_string_query is not None:
            query_parameters['pathItemStringQuery'] = self._serialize.query("path_item_string_query", path_item_string_query, 'str')
        if self._config.global_string_query is not None:
            query_parameters['globalStringQuery'] = self._serialize.query("self._config.global_string_query", self._config.global_string_query, 'str')
        if local_string_query is not None:
            query_parameters['localStringQuery'] = self._serialize.query("local_string_query", local_string_query, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.Error, response)
            raise HttpResponseError(response=response, model=error)

        if cls:
            return cls(pipeline_response, None, {})

    @distributed_trace
    def get_all_with_values(
        self,
        path_item_string_path,  # type: str
        local_string_path,  # type: str
        path_item_string_query=None,  # type: Optional[str]
        local_string_query=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
        localStringPath='localStringPath', globalStringQuery='globalStringQuery',
        pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.

        :param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
        :type path_item_string_path: str
        :param local_string_path: should contain value 'localStringPath'.
        :type local_string_path: str
        :param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
         parameter.
        :type path_item_string_query: str
        :param local_string_query: should contain value 'localStringQuery'.
        :type local_string_query: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return self._get_operation(
            self.get_all_with_values.metadata['url'],
            path_item_string_path,
            local_string_path,
            path_item_string_query,
            local_string_query,
            **kwargs
        )
    get_all_with_values.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/pathItemStringQuery/localStringQuery'}  # type: ignore

    @distributed_trace
    def get_global_query_null(
        self,
        path_item_string_path,  # type: str
        local_string_path,  # type: str
        path_item_string_query=None,  # type: Optional[str]
        local_string_query=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
        localStringPath='localStringPath', globalStringQuery=null,
        pathItemStringQuery='pathItemStringQuery', localStringQuery='localStringQuery'.

        :param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
        :type path_item_string_path: str
        :param local_string_path: should contain value 'localStringPath'.
        :type local_string_path: str
        :param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
         parameter.
        :type path_item_string_query: str
        :param local_string_query: should contain value 'localStringQuery'.
        :type local_string_query: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return self._get_operation(
            self.get_global_query_null.metadata['url'],
            path_item_string_path,
            local_string_path,
            path_item_string_query,
            local_string_query,
            **kwargs
        )
    get_global_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/localStringQuery'}  # type: ignore

    @distributed_trace
    def get_global_and_local_query_null(
        self,
        path_item_string_path,  # type: str
        local_string_path,  # type: str
        path_item_string_query=None,  # type: Optional[str]
        local_string_query=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """send globalStringPath=globalStringPath, pathItemStringPath='pathItemStringPath',
        localStringPath='localStringPath', globalStringQuery=null,
        pathItemStringQuery='pathItemStringQuery', localStringQuery=null.

        :param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
        :type path_item_string_path: str
        :param local_string_path: should contain value 'localStringPath'.
        :type local_string_path: str
        :param path_item_string_query: A string value 'pathItemStringQuery' that appears as a query
         parameter.
        :type path_item_string_query: str
        :param local_string_query: should contain null value.
        :type local_string_query: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return self._get_operation(
            self.get_global_and_local_query_null.metadata['url'],
            path_item_string_path,
            local_string_path,
            path_item_string_query,
            local_string_query,
            **kwargs
        )
    get_global_and_local_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/null/pathItemStringQuery/null'}  # type: ignore

    @distributed_trace
    def get_local_path_item_query_null(
        self,
        path_item_string_path,  # type: str
        local_string_path,  # type: str
        path_item_string_query=None,  # type: Optional[str]
        local_string_query=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """send globalStringPath='globalStringPath', pathItemStringPath='pathItemStringPath',
        localStringPath='localStringPath', globalStringQuery='globalStringQuery',
        pathItemStringQuery=null, localStringQuery=null.

        :param path_item_string_path: A string value 'pathItemStringPath' that appears in the path.
        :type path_item_string_path: str
        :param local_string_path: should contain value 'localStringPath'.
        :type local_string_path: str
        :param path_item_string_query: should contain value null.
        :type path_item_string_query: str
        :param local_string_query: should contain value null.
        :type local_string_query: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        return self._get_operation(
            self.get_local_path_item_query_null.metadata['url'],
            path_item_string_path,
            local_string_path,
            path_item_string_query,
            local_string_query,
            **kwargs
        )
    get_local_path_item_query_null.metadata = {'url': '/pathitem/nullable/globalStringPath/{globalStringPath}/pathItemStringPath/{pathItemStringPath}/localStringPath/{localStringPath}/globalStringQuery/null/null'}  # type: ignore
| [
"varad.meru@gmail.com"
] | varad.meru@gmail.com |
b1809dd3f4b79f8398dbf2558718ef516e945463 | d5bb9f9e4cabf8422c63b4668f1545ccc5c9d8f6 | /tools/filecnv.py | b0c3c6a1cd97ac5ca47214aa2421a4820857fc85 | [] | no_license | mohebifar/forcebalance | 3823caf0ce971221e5f9b5f837fa5ceaae2a00bd | d830166f2954eb35d669977a522c32a1372f7973 | refs/heads/master | 2021-07-08T18:10:58.161099 | 2017-09-23T00:12:39 | 2017-09-23T00:12:39 | 106,040,943 | 3 | 0 | null | 2017-10-06T19:07:58 | 2017-10-06T19:07:58 | null | UTF-8 | Python | false | false | 330 | py | #!/home/leeping/local/bin/python
from forcebalance.molecule import Molecule
from sys import argv
def main():
    """Convert a molecule file: read argv[1], optionally merge quantum data
    from a template file (argv[3]), and write the result to argv[2].

    Usage: filecnv.py <input> <output> [qdata_template]
    """
    M = Molecule(argv[1])
    # Optional third argument: file with quantum data to merge in.
    tempfnm = argv[3] if len(argv) >= 4 else None
    # NOTE(review): idiomatic form would be 'is not None'.
    if tempfnm != None:
        M.add_quantum(tempfnm)
    # Python 2 print statement: show which data fields were parsed.
    print M.Data.keys()
    M.write(argv[2])
if __name__ == "__main__":
    main()
| [
"leeping@e93b7d5b-8bb4-0410-82c6-a20ae092c748"
] | leeping@e93b7d5b-8bb4-0410-82c6-a20ae092c748 |
bec0d07a9a0cf235e71abbecd3a26b45a53a69b9 | c3120adea9862e6791a6d7b6ea99ceef4d1db1cc | /program/Deep/unet2/test.py | d57b572f8a972a3ceb7a5a3d9af5ec37a7effdaa | [] | no_license | Millasta/SkyEye | 634dc57514b93b61d7b4b66a4534c874de09a699 | ea5abef39c99d40e41472950cea7cf5619b46213 | refs/heads/master | 2022-12-07T06:38:32.164100 | 2020-01-16T15:49:12 | 2020-01-16T15:49:12 | 228,794,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | from program.Deep.unet2.model_v2 import *
from program.Deep.unet2.data import *
from program.Deep.unet2.weighted_categorical_crossentropy import *
import numpy as np
import cv2
import os
import warnings
# Silence library warning chatter and restrict CUDA to the first GPU.
warnings.filterwarnings("ignore")
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def image_normalized(file_path):
    """Read one image, resize it to the network input size and rescale it.

    :param file_path: path to a single image file (assumed readable by
        OpenCV -- ``cv2.imread`` returns None otherwise, which would fail
        on the ``.shape`` access below; confirm upstream).
    :return: tuple ``(batch, original_size)`` where ``batch`` is a float
        array of shape (1, 256, 256, channels) scaled to [0, 1] and
        ``original_size`` is the source image's (width, height).
    """
    img = cv2.imread(file_path)
    original_size = (img.shape[1], img.shape[0])
    resized = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)
    batch = np.asarray([resized / 255.])
    return batch, original_size
if __name__ == '__main__':
    # Directory of images waiting to be predicted.
    test_path = "data\\test"
    # Directory where the predicted images are written.
    save_path = "data\\predict"
    dp = data_preprocess(test_path=test_path,save_path=save_path,flag_multi_class=True,num_classes=3)
    # Load the trained model; the custom loss must be re-registered so it
    # can be deserialized ([1,5,10] are presumably per-class loss weights
    # -- confirm against training code).
    model = load_model('./model/model.hdf5', custom_objects={'loss': weighted_categorical_crossentropy([1,5,10])})
    for name in os.listdir(test_path):
        image_path = os.path.join(test_path,name)
        # Normalize to the network input and keep the original size so the
        # prediction can be resized back.
        x,img_size = image_normalized(image_path)
        results = model.predict(x)
        # Save the first (only) batch element under the source file's stem.
        dp.saveResult([results[0]],img_size,name.split('.')[0])
| [
"valentinm1997@gmail.com"
] | valentinm1997@gmail.com |
e7f4f24803a27a38a46f361243a674a5236a571a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03274/s010264861.py | c2235d0bdb18733aa448f6ca2a63b3cad841e71a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import collections
# Python 2 script (raw_input / print statement).  Looks like AtCoder's
# "Candles" problem: choose k of n positions on a line, starting from 0,
# minimizing total walking distance -- TODO confirm against the task.
n,k = map(int, raw_input().split(' '))
xis = map(int, raw_input().split(' '))
# Split positions by sign: ais = non-negatives (ascending), bis = absolute
# values of the negatives, nearest-to-origin first.
ais = [xi for xi in xis if xi >= 0]
bis = [-xi for xi in xis if xi < 0][::-1]
# Base cases: walk one direction only (infinity when that side lacks k).
m = ais[k -1] if k-1 < len(ais) else +float('inf')
m = min(m, bis[k -1] if k-1 < len(bis) else +float('inf'))
# Mixed case: take i+1 on the positive side and k-(i+1) on the negative
# side; the leg walked first is traversed twice (out and back).
for i in range(len(ais)):
    if i + 1 == k: break
    if 0 <= k - (i+1) -1 < len(bis):
        m = min(m, 2*ais[i] + bis[k - (i+1) -1])
        m = min(m, ais[i] + 2*bis[k - (i+1) -1])
print m | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
89c21fb20a8613232a8f3d0a3ae35002b51efa11 | b1e77b3e652325db8ca3904c779ce758897c1dc8 | /introduction_to_python/conditionals/booleans.py | 239cc76b880c97b38f1eda181d7cb87409fd40af | [] | no_license | DebbyMurphy/debbys-python-projects | 330241c8047af903b0be585447557e7b46448371 | 5429a6575ea12b8526d16d9e1d1f7500fd8b07f2 | refs/heads/master | 2022-11-06T15:46:00.901194 | 2020-07-01T07:17:19 | 2020-07-01T07:17:19 | 276,279,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # Booleans
# Truth-table demo for the boolean operators; each table enumerates the
# four (is_raining, is_cold) combinations in the order FF, FT, TF, TT.
print("AND Comparisons:")
for is_raining in (False, True):
    for is_cold in (False, True):
        print(is_raining and is_cold, not is_cold, is_raining and not is_cold)
print("OR Comparisons:")
for is_raining in (False, True):
    for is_cold in (False, True):
        print(is_raining or is_cold, is_raining or not is_cold)
print()
# A single combined comparison stored in a variable.
is_raining = False
is_cold = True
is_raining_and_is_cold = is_raining and is_cold
print(is_raining_and_is_cold)
"debby.m.jones@gmail.com"
] | debby.m.jones@gmail.com |
e61d98ff2befe6683e0a0492845c7ee6b7229d88 | 58399ec14cef82b023fb9eb83188cd3f4f5f8c20 | /loop.py | 02220070f04c09572a8c005479add22e810049f3 | [] | no_license | Venky9791/Venky_Geekexample | 0baf4262c05cfa1db4e2b2dfa57f05a2297b11da | 02d0389949d7add55a115ee9c02e064688706a9e | refs/heads/master | 2020-06-26T19:08:01.335717 | 2019-08-04T22:22:45 | 2019-08-04T22:22:45 | 199,725,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py |
def emi_calculator(p, r, t):
    """Return the equated monthly installment (EMI) of a loan.

    :param p: principal amount
    :param r: annual interest rate in percent
    :param t: tenure in years
    :return: monthly payment P*R*(1+R)^N / ((1+R)^N - 1) with monthly
             rate R and N monthly payments
    """
    r = r / (12 * 100)  # annual percentage -> monthly fraction
    t = t * 12          # years -> number of monthly payments
    # Fix: the original closed a parenthesis too early, effectively
    # computing p*r - 1 instead of the amortization formula.
    emi = (p * r * pow(1 + r, t)) / (pow(1 + r, t) - 1)
    return emi
print("Geeks",end=" ")
print("For Geeks")
a = [1,2,3,4]
# NOTE(review): sep= only matters with several arguments per call; with one
# argument per print it is a no-op, kept for output compatibility.
for item in a:
    print(item,sep='@')
principal = 34500
rate = 4.5
time = 13
emi = emi_calculator(principal,rate,time)
print("The EMI is " ,emi)
# Fix: str.split('') raises "ValueError: empty separator"; split() splits
# on whitespace, matching the prompt asking for two values.
x,y = input("Enter two values ").split()
print("Numnber of X" , x)
print("Number of Y",y)
x = list(map(int,input("Enter Multiple values").split()))
print("List of Students" , x)
x,y = [int(x) for x in input("Enter two Values").split()]
print("First Number is {} and Second Number is {}".format(x,y))
| [
"bharthivenky76@gmail.com"
] | bharthivenky76@gmail.com |
cfb9d650e62d8e31c21c44cca0bdf58358225e04 | cd92483d2b32ddb20656c520eccd7ac0e0b113bb | /tm1637.py | cafbbf96acf07e1813fdc4ba24e55a4ba4c09697 | [] | no_license | Aleksandr277/Micropython | 7755602fa13ec7f8461e4a20346148d8e13d9a2e | d8b2e4869e9d018873476063be80846c39bedc97 | refs/heads/master | 2021-07-25T22:23:29.932680 | 2017-11-06T20:30:15 | 2017-11-06T20:30:15 | 109,744,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,585 | py | # MicroPython TM1637 quad 7-segment LED display driver
# from machine import Pin # old version
from pyb import Pin
from time import sleep_us
# TM1637 command bytes (presumably per the TM1637 datasheet -- confirm).
_CMD_SET1 = const(64) # 0x40 data set
_CMD_SET2 = const(192) # 0xC0 address command set
_CMD_SET3 = const(128) # 0x80 data control command set
# Seven-segment patterns for 0-9, a-f, blank (index 16), dash (index 17).
_SEGMENTS = [63,6,91,79,102,109,125,7,127,111,119,124,57,94,121,113,0,64]
class TM1637(object):
    """Library for the quad 7-segment LED display modules based on the TM1637
    LED driver.

    The bus is driven open-drain style: pins are switched between input
    (line released, pulled high externally) and output-low -- presumably
    because the TM1637 protocol requires it; confirm against the wiring.
    """
    def __init__(self, clk, dio, brightness=7):
        # clk/dio are pin objects supporting .init(mode) and call syntax.
        self.clk = clk
        self.dio = dio

        if not 0 <= brightness <= 7:
            raise ValueError("Brightness out of range")
        self._brightness = brightness

        # Release both lines, then latch the output level at 0 so that
        # switching a pin to OUT pulls the line low.
        self.clk.init(Pin.IN)
        self.dio.init(Pin.IN)
        self.clk(0)
        self.dio(0)

    def _start(self):
        # Start condition: pull DIO low (CLK is assumed released/high).
        self.dio.init(Pin.OUT)
        sleep_us(50)

    def _stop(self):
        # Stop condition: DIO low, release CLK, then release DIO.
        self.dio.init(Pin.OUT)
        sleep_us(50)
        self.clk.init(Pin.IN)
        sleep_us(50)
        self.dio.init(Pin.IN)
        sleep_us(50)

    def _write_comm1(self):
        # Send the "data set" command (0x40).
        self._start()
        self._write_byte(_CMD_SET1)
        self._stop()

    def _write_comm3(self):
        # Send the "display control" command with the current brightness;
        # the +7 offset sets the display-on bit range of the command.
        self._start()
        self._write_byte(_CMD_SET3 + self._brightness + 7)
        self._stop()

    def _write_byte(self, b):
        # send each bit, LSB first: CLK low, put the bit on DIO
        # (IN = released/high = 1, OUT = driven low = 0), CLK high.
        for i in range(8):
            self.clk.init(Pin.OUT)
            sleep_us(50)
            self.dio.init(Pin.IN if b & 1 else Pin.OUT)
            sleep_us(50)
            self.clk.init(Pin.IN)
            sleep_us(50)
            b >>= 1
        # Extra clock cycle for the chip's ACK slot.
        self.clk.init(Pin.OUT)
        sleep_us(50)
        self.clk.init(Pin.IN)
        sleep_us(50)
        self.clk.init(Pin.OUT)
        sleep_us(50)

    def brightness(self, val=None):
        """Get (no argument) or set (0-7) the display brightness."""
        if val is None:
            return self._brightness
        if not 0 <= val <= 7:
            raise ValueError("Brightness out of range")
        self._brightness = val
        self._write_comm1()
        self._write_comm3()

    def write(self, segments, pos=0):
        """Display up to 4 segments moving right from a given position.
        The MSB in the 2nd segment controls the colon between the 2nd
        and 3rd segments."""
        if not 0 <= pos <= 3:
            raise ValueError("Position out of range")
        self._write_comm1()
        # write COMM2 + first digit address, then the segment bytes
        self._start()
        self._write_byte(_CMD_SET2 + pos)
        for seg in segments:
            self._write_byte(seg)
        self._stop()
        self._write_comm3()

    def encode_digit(self, digit):
        """Convert an integer 0-15 to its 7-segment pattern (low nibble only)."""
        return _SEGMENTS[digit & 0x0f]

    def encode_string(self, string):
        """Convert an up to 4 character length string containing 0-9, a-f,
        space, dash to an array of segments; extra characters are ignored,
        missing positions stay blank (0)."""
        segments = bytearray(4)
        for i in range(0, min(4, len(string))):
            segments[i] = self.encode_char(string[i])
        return segments

    def encode_char(self, char):
        """Convert a character 0-9, a-f/A-F, space or dash to a segment."""
        o = ord(char)
        # space
        if o == 32:
            return _SEGMENTS[16]
        # dash
        if o == 45:
            return _SEGMENTS[17]
        # uppercase A-F (segment table indices 10-15)
        if o >= 65 and o <= 70:
            return _SEGMENTS[o-55]
        # lowercase a-f
        if o >= 97 and o <= 102:
            return _SEGMENTS[o-87]
        # 0-9
        if o >= 48 and o <= 57:
            return _SEGMENTS[o-48]
        raise ValueError("Character out of range")

    def hex(self, val):
        """Display a hex value 0x0000 through 0xffff, right aligned."""
        string = '{:04x}'.format(val & 0xffff)
        self.write(self.encode_string(string))

    def number(self, num):
        """Display a numeric value -999 through 9999, right aligned."""
        # limit to range -999 to 9999 (clamped, not rejected)
        num = max(-999, min(num, 9999))
        string = '{0: >4d}'.format(num)
        self.write(self.encode_string(string))

    def numbers(self, num1, num2, colon=True):
        """Display two numeric values -9 through 99, with leading zeros
        and separated by a colon."""
        num1 = max(-9, min(num1, 99))
        num2 = max(-9, min(num2, 99))
        segments = self.encode_string('{0:0>2d}{1:0>2d}'.format(num1, num2))
        # colon on: MSB of the second digit drives the colon LEDs
        if colon:
            segments[1] |= 0x80
        self.write(segments)
| [
"noreply@github.com"
] | Aleksandr277.noreply@github.com |
c77fe9366e51374c8ecebcf429288fd9af1bc905 | e506683acbdd23c6308ae720e5acc70dc2f27df5 | /stage_statistics_generic_inherit/models/stage_history.py | 63a17d9ffc42ded2a3899d682a172ed1c10a5f9c | [] | no_license | oycl/odoo | 9b017b8fecf5812fe104351e42228f308522a583 | c6bb7c85039eb16ef811c0de08f43fe0530f137a | refs/heads/master | 2020-07-04T17:26:38.329035 | 2019-08-13T21:44:08 | 2019-08-13T21:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date
from odoo import api, fields, models
class TechnicalSupportOrder(models.Model):
    # Extend technical support orders so changes to the fields below are
    # recorded by the generic stage-history tracker.
    _inherit = "technical_support.order"
    # Fields whose value changes are logged in the stage history.
    tracking_fields = ['state', 'user_id']
class Part(models.Model):
    # Extend part orders so changes to the fields below are recorded by the
    # generic stage-history tracker.
    _inherit = "part.order"
    # Fields whose value changes are logged in the stage history.
    tracking_fields = ['state', 'user_id']
class RegulatoryTechnicalFileRegistry(models.Model):
    # Extend the regulatory technical file registry so changes to the fields
    # below are recorded by the generic stage-history tracker.
    _inherit = "regulatory.technical.file.registry"
    # Fields whose value changes are logged in the stage history.
    tracking_fields = ['state', 'user_id']
| [
"r.tejada@electronicamedica.com"
] | r.tejada@electronicamedica.com |
537e3611939a3398300c03731edf4b5724974f7c | 7971b6f96d6fae6716c77e893b128d0df0ee5a6f | /assignment8_4.py | 1b9b87424e9fd984a5744631c124323487522796 | [] | no_license | cacavelli/python_projects | c48768d0cc8a97ee6930354046137ef40ff1cb8d | 60c0453200b9ade66839ddcc13914d41a785ac78 | refs/heads/master | 2021-02-09T00:51:39.144799 | 2020-04-14T19:23:25 | 2020-04-14T19:23:25 | 244,218,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | """
Este código foi a resposta de uma atividade do curso de Python da Universidade de Michigan.
O arquivo romeo.txt está na pasta para que o código possa ser testado.
Open the file romeo.txt and read it line by line.
For each line, split the line into a list of words using the split() method.
The program should build a list of words. For each word on each line check to see if the word is already in the list and if not append it to the list.
When the program completes, sort and print the resulting words in alphabetical order.
[Desired output]
['Arise', 'But', 'It', 'Juliet', 'Who', 'already', 'and', 'breaks', 'east', 'envious',
'fair', 'grief', 'is', 'kill', 'light', 'moon', 'pale', 'sick', 'soft', 'sun', 'the', 'through', 'what',
'window', 'with', 'yonder']
"""
fname = input("Enter file name: ")
romeo = list()
# 'with' guarantees the file handle is closed (the original leaked it).
with open(fname) as fh:
    for line in fh:
        # Collect distinct words in first-seen order; list membership is
        # O(n) per check but romeo.txt is tiny.
        for word in line.rstrip().split():
            if word not in romeo:
                romeo.append(word)
print(sorted(romeo))
"noreply@github.com"
] | cacavelli.noreply@github.com |
990c514d0438c13979250bc1440ae99ceb870238 | c191ff2917c46483dabeb4d5bef23e305954cd88 | /Project 1/Part2/clickomania.py | c382494a9352112e306a2b06c1f82582980d406f | [] | no_license | ElvishElvis/Ve593-JI-SJTU-AI-technique | 76cbcbd49d0220cff9c49fc5b583400147060364 | 47c990bce3c7f64de6f0b0c32a78cd822236b1c4 | refs/heads/master | 2020-04-10T18:15:39.303897 | 2019-01-17T01:43:55 | 2019-01-17T01:43:55 | 161,198,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,943 | py |
class Clickomania:
    """A Clickomania / SameGame board.

    The board is a flat list `state` of length row*column; cell (r, c) is
    stored at index r*column + c.  A value of 0 marks an erased block, any
    other value is a colour code in 1..K.
    """
    def __init__(self, N, M, K, state):
        self.row = N        # number of rows
        self.column = M     # number of columns
        self.color = K      # number of colours
        self.score = 0      # accumulated score
        self.state = state  # flat list of cell colours (0 = erased)
    #This make a copy from the state
    def clone(self):
        # NOTE(review): the `state` list is shared between the clone and the
        # original (no copy is made), so erasing blocks through one object
        # mutates the other -- confirm whether list(self.state) was intended.
        NewClickomania = Clickomania(self.row, self.column, self.color, self.state)
        NewClickomania.score = self.score
        return NewClickomania
    #This update the gragh
    def succ(self, position):
        """Erase the group containing `position` (if it has >= 2 blocks),
        apply gravity, shift empty columns left, and return the new board."""
        nextstate = self.clone()
        Erasable = [position]
        self.CanErase(position, self.state[position], Erasable)
        if len(Erasable) >= 2:
            for i in Erasable:
                nextstate.state[i] = 0
            # scoring rule: (n-1)^2 points for erasing a group of n blocks
            self.score += (len(Erasable)-1)**2
        # Gravity: let blocks fall down into erased cells, column by column.
        for x in list(range(0, self.column)):
            for y in list(range(1, self.row)):
                if (self.state[x+y*self.column] == 0):
                    for z in list(range(y, 0, -1)):
                        self.state[x+z*self.column] = self.state[x+(z-1)*self.column]
                    self.state[x] = 0
        # Collect the indices of completely empty columns.
        Column=[]
        for i in list(range(0, self.column)):
            flag = 0
            for j in list(range(0, self.row)):
                if (self.state[i+j*self.column] != 0):
                    flag = 1
                    break
            if (flag == 0):
                Column.append(i)
        # Shift the remaining columns left over each empty column.
        while (Column != []):
            k = Column.pop(0)
            if (k != self.column-1):
                for r in list(range(0, self.row)):
                    for s in list(range(1, self.column-k)):
                        if ((k+r*self.column+s) % self.column != self.column-1):
                            self.state[k+r*self.column+s] = self.state[k+r*self.column+s+1]
                for t in list(range(0, self.row)):
                    self.state[self.column-1+t*self.column] = 0
            # the remaining empty columns moved one step left as well
            for m in range(len(Column)):
                Column[m] -= 1
        return nextstate
    def successors(self):
        """Return the boards resulting from clicking every position."""
        succs = []
        for i in list(range(0, self.column*self.row)):
            succs.append(self.succ(i))
        return succs
    #This determine whether a state reach the end state
    def isEnd(self):
        AllErased = True
        adjacent = []
        for i in list(range(0, self.row*self.column)):
            if (self.state[i] != 0):
                AllErased = False
                if (self.CanErase(i, self.state[i], adjacent) == []):
                    return True
        if AllErased == True:
            return True
        return False
    #This is a function that can determine whether a block can be erased
    def CanErase(self, place, k, adjacent):
        """Flood-fill from `place`, accumulating into `adjacent` the indices
        of all connected blocks of colour `k`; returns `adjacent`."""
        if place>self.column:
            if ( place >= self.column and (place-self.column not in adjacent) and self.state[place-self.column] == k):
                adjacent.append(place-self.column)
                self.CanErase(place-self.column, k, adjacent)
        if place<self.row*self.column:
            if ( place % self.column != self.column-1 and (place+1 not in adjacent) and self.state[place+1] == k):
                adjacent.append(place+1)
                self.CanErase(place+1, k, adjacent)
        if place<(self.row-1)*self.column:
            if ( place < self.column*(self.row-1) and (place+self.column not in adjacent) and self.state[place+self.column] == k):
                adjacent.append(place+self.column)
                self.CanErase(place+self.column, k, adjacent)
        if place>0:
            if ( place % self.column != 0 and (place-1 not in adjacent) and self.state[place-1] == k ):
                adjacent.append(place-1)
                self.CanErase(place-1, k, adjacent)
        return adjacent
    #This reduce the penalty to the final score
    def Cost(self, state):
        """Penalty for the blocks left on the board: (remaining-1)**2."""
        NotErased = 0   # bug fix: was a bare `NotErased` -> NameError when called
        for i in list(range(0, self.row*self.column)):
            if (state.state[i] != 0):   # bug fix: `state.value` does not exist
                NotErased += 1
        return (NotErased-1)**2
| [
"noreply@github.com"
] | ElvishElvis.noreply@github.com |
1ac2f22455ca09da9f263824044202716a86b122 | 15b0ef49a2cbc6fb71e472f91f6ad656a2bb2e01 | /Other.py | e5fe4e618f12e1d196381856c0694b3994f09c15 | [] | no_license | diexel64/News_Scraper | e148b870ade1ebcbd98ab973533fd4b83dbe62aa | c92addec3c417dfdc623c9fe4282c0a0824d6365 | refs/heads/master | 2020-12-02T20:34:22.961493 | 2019-12-31T16:13:54 | 2019-12-31T16:13:54 | 231,113,378 | 0 | 0 | null | 2019-12-31T16:11:53 | 2019-12-31T15:53:31 | null | UTF-8 | Python | false | false | 5,134 | py | import requests, datetime, time, os
from bs4 import BeautifulSoup
import openpyxl, re, pprint
import pandas as pd
from pandas import ExcelWriter
#lst = []
# Folder containing this script; the Other.xlsx workbook lives next to it.
destFolder = os.path.dirname(os.path.abspath(__file__))
# Default HTTP headers for sites that reject requests without a User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0',
    'From': 'monsieur@domain.com'
}
class OtherScraper:
    """Scrapes opinion-column listings from several sites and appends the
    articles (title, author, link, date) to the 'Other' sheet of Other.xlsx,
    de-duplicated by title.

    The four public get* methods were copy-pasted variants of the same
    routine; they now delegate to the shared _scrape() helper.
    """
    def __init__(self):
        self.url = ''
    def _scrape(self, url, author, container_class, get_title, get_link, use_headers=False):
        """Shared scraping routine.

        url             -- listing page to fetch
        author          -- author name stored with every row
        container_class -- CSS class of the <div> that holds the <article> tags
        get_title       -- callable(article) -> title text
        get_link        -- callable(article) -> href string
        use_headers     -- send the module-level `headers` dict (some sites
                           reject requests without a User-Agent)
        """
        page = requests.get(url, headers=headers if use_headers else None)
        soup = BeautifulSoup(page.content, 'html.parser')
        results = soup.find('div', class_=container_class)
        articles = results.find_all('article')
        df0 = pd.ExcelFile(destFolder + '\\Other.xlsx').parse("Other")
        for post in articles:
            link = get_link(post).split('//')
            title = get_title(post)
            if None in (title, link):
                continue
            new_row = {'title': title, 'author': author,
                       'link': 'https://' + link[1],
                       'date': datetime.datetime.now().strftime("%d/%m/%Y")}
            # DataFrame.append() was removed in pandas 2.0; concat is the replacement
            df0 = pd.concat([df0, pd.DataFrame([new_row])], ignore_index=True)
        # NOTE(review): ExcelWriter.save() is close() in newer pandas -- confirm version.
        writer = ExcelWriter(destFolder + '\\Other.xlsx')
        df0 = df0.drop_duplicates(subset='title', keep='first')
        df0.to_excel(writer, sheet_name='Other', index=False)
        writer.save()
    def getInaki(self, url):
        """Scrape Iñaki Gabilondo's column listing on elpais.com."""
        self._scrape(url, 'Iñaki Gabilondo', 'bloque__interior',
                     lambda post: post.find('h2', class_='articulo-titulo').text.strip(),
                     lambda post: post.find('a', class_='enlace').get('href'))
    def getPiketty(self, url):
        """Scrape Thomas Piketty's blog listing on lemonde.fr."""
        self._scrape(url, 'Thomas Piketty', 'content-area',
                     lambda post: post.find('h3', class_='entry-title').text.strip(),
                     lambda post: post.find('h3', class_='entry-title').find('a').get('href'))
    def getLacalle(self, url):
        """Scrape Daniel Lacalle's article listing (needs a User-Agent)."""
        self._scrape(url, 'Daniel Lacalle', 'content-area',
                     lambda post: post.find('h1', class_='entry-title').text.strip(),
                     lambda post: post.find('h1', class_='entry-title').find('a').get('href'),
                     use_headers=True)
    def getRallo(self, url):
        """Scrape Juan Ramón Rallo's article listing (needs a User-Agent)."""
        self._scrape(url, 'Juan Ramón Rallo', 'content-area',
                     lambda post: post.find('h1', class_='entry-title').text.strip(),
                     lambda post: post.find('h1', class_='entry-title').find('a').get('href'),
                     use_headers=True)
    def CreateExcel(self):
        """Create Other.xlsx with a single sheet named 'Other' if missing."""
        if not os.path.isfile(destFolder + '\\Other.xlsx'):
            wb = openpyxl.Workbook()
            sheet = wb.active   # get_sheet_by_name() was removed in openpyxl 3.x
            sheet.title = "Other"
            wb.save(destFolder + '\\Other.xlsx')
#OtherScraper().getInaki('https://elpais.com/agr/la_voz_de_inaki/a')
#OtherScraper().getPiketty('https://www.lemonde.fr/blog/piketty/')'''
#OtherScraper().getLacalle('https://www.dlacalle.com/') | [
"noreply@github.com"
] | diexel64.noreply@github.com |
75687bab192a3f68f275a053b3ee4aa69bc1955b | 523fb785bda41e33546c929a5c2de6c93f98b434 | /专题学习/树/BinaryTreePathDivideConquer.py | 63bc4f925a2a8db053b249b643773310f578e34c | [] | no_license | lizhe960118/TowardOffer | afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e | a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd | refs/heads/master | 2020-04-27T10:33:21.452707 | 2019-05-02T10:47:01 | 2019-05-02T10:47:01 | 174,259,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | """
Definition of TreeNode:
"""
class TreeNode:
    """A binary-tree node: a value plus left/right child links."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
class Solution:
    """
    @param root: the root of the binary tree
    @return: all root-to-leaf paths
    """
    def binaryTreePaths(self, root):
        # Divide: gather the paths of both subtrees (left first, then right);
        # conquer: prefix the current value onto each of them.
        if root is None:
            return []
        child_paths = self.binaryTreePaths(root.left) + self.binaryTreePaths(root.right)
        if not child_paths:
            # Leaf node: the only path is this value by itself.
            return [str(root.val)]
        return ['{}->{}'.format(root.val, path) for path in child_paths]
if __name__ == '__main__':
    # Build the sample tree:
    #       1
    #      / \
    #     2   3
    #    / \
    #   4   5
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    print(Solution().binaryTreePaths(root))
"2957308424@qq.com"
] | 2957308424@qq.com |
2dc148a31732191d413c4a03bd9a8984c6e4ea01 | 28f5df5450a3091bd83c43d1eb7c5af9e6ca4a9e | /day2/tests/test_task2.py | d97858145e27deaf5c4fd8ce8106100e8a7268bc | [] | no_license | meriliis/advent-of-code-2019 | 10cc553ecd4a53977c7cccfe4733bc445b583e1f | 94baaf4eef2d20dc9a56ae25b2623699c6b0d0d5 | refs/heads/master | 2020-09-22T10:23:07.769376 | 2019-12-09T15:21:31 | 2019-12-09T15:21:31 | 225,154,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | import pytest
import sys, os
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/.."))
from day2.task2 import find_inputs_for_output
class TestFindInputsForOutput:
    """Tests for day2.task2.find_inputs_for_output."""
    def test_finds_correct_inputs(self):
        # Program [1, 0, 0, 0, 99] yields output 4 for noun=2, verb=2.
        program = [1, 0, 0, 0, 99]
        target_output = 4
        found = find_inputs_for_output(program, target_output, noun_range=(0, 3), verb_range=(0, 3))
        assert found == (2, 2)
"meri@MacBook-Pro.local"
] | meri@MacBook-Pro.local |
1eb95df50b7fce6a96b9c2a92aed8338bb8782fb | 2237771ca104604ed17f4ddf59cddea0e4802460 | /models 1.py | 3298cf2da87593c523a64c0ae9c66833017e6120 | [] | no_license | shubham727-ai/Project-Assignment | 8ba8a82ee34db2f2535e16615896d3486b6e36e7 | f40092a5c60e81de0b726708302b3d7d664efa74 | refs/heads/main | 2023-02-16T10:06:31.189562 | 2021-01-13T11:18:02 | 2021-01-13T11:18:02 | 329,280,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | from django.db import models
# Create your models here.
# makemigrations - create changes and store them in a migration file
# migrate        - apply the pending changes created by makemigrations
# (these two lines were bare prose in the module body -- a SyntaxError)
class index(models.Model):
    """Simple profile model: personal data plus skills and a description.

    NOTE(review): the class name breaks the PascalCase convention but is
    kept because migrations/admin may already reference it.
    """
    # Fixes: models.Model capitalisation, models.models.X -> models.X,
    # removed the undefined gettext call _(""), and gave `skills` the
    # max_length that CharField requires.
    name = models.CharField(max_length=50)
    surname = models.CharField(max_length=50)
    age = models.CharField(max_length=50)
    skills = models.CharField(max_length=50)
    desc = models.TextField(max_length=50)
| [
"noreply@github.com"
] | shubham727-ai.noreply@github.com |
b00ff6d542ee367cb1dcf0375a7039877f16c8ce | c100e6474e4a10bd658a24b5d4ba673928a0bac6 | /cafemanagement.py | 3caeafdae70635133f0106a3ff33249ae1da684a | [] | no_license | akthunder/Hotel_management_gui | 56f16690c800a9caa7338e3f43f9a3a839edc424 | 5b32e2c0847e602dca43245333e66bfe32d46104 | refs/heads/master | 2022-10-04T08:21:22.118594 | 2020-06-01T10:42:22 | 2020-06-01T10:42:22 | 268,493,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,791 | py | from tkinter import *
import random
import time;
import sqlite3
# Main application window.
win=Tk()
win.geometry("1400x700")
win.title("This is cafe management system")
# f1: banner frame across the top with the application title.
f1=Frame(win, width=1400 ,height=100, bd=14, relief="raise")
f1.pack(side=TOP)
label=Label(f1, text="Cafe Management Systems", font=("arial","40","bold"),fg="black")
label.place(x=250,y=0)
# f2: left half (order entry); f3: right half (the bill display).
f2=Frame(win, width=900 ,height=600, relief="raise")
f2.pack(side=LEFT)
f3=Frame(win, width=470 ,height=600, bd=8, relief="raise")
f3.pack(side=RIGHT)
# ON FRAME F2 CODING......................
# f4: drinks, f5: cakes, f6: price info, f7: totals.
f4=Frame(f2, width=450, height=450 , bd=10, relief="raise")
f4.place(x=0,y=0)
f5=Frame(f2, width=450, height=450 , bd=10, relief="raise")
f5.place(x=450,y=0)
f6=Frame(f2, width=450, height=150 , bd=8, relief="raise")
f6.place(x=0,y=450)
f7=Frame(f2, width=450, height=150 , bd=8, relief="raise")
f7.place(x=450,y=450)
# Tk variables backing the order-entry widgets (quantities and prices).
vale_var=IntVar()
capp_var=IntVar()
african_var=IntVar()
american_var=IntVar()
iced_var=IntVar()
coff_var=IntVar()
red_var=IntVar()
black_var=IntVar()
lagos_var=IntVar()
queen_var=IntVar()
cod_var=StringVar()
cok_var=StringVar()
serv_var=StringVar()
paid_var=StringVar()
sub_var=IntVar()
tot_var=IntVar()
# here are the labels ....................................
# Drink labels and quantity entries on frame f4.
label_list=["vale coffee","Cappuccino","African coffee","American coffee","Iced cappuccino"]
f4_labelposition=30
for i in label_list:
    f4_label=Label(f4,text=i, font=("ariel","20","bold"))
    f4_label.place(x=10,y=f4_labelposition)
    f4_labelposition+=70
vale_entry=Entry(f4, width="10",font="bold",bd="10",textvariable=vale_var,justify="left")
vale_entry.place(x=315,y=30)
capp_entry=Entry(f4,width="10",font="bold",bd="10",textvariable=capp_var,justify="left")
capp_entry.place(x=315,y=100)
african_entry=Entry(f4,width="10",font="bold",bd="10",textvariable=african_var,justify="left")
african_entry.place(x=315,y=170)
american_entry=Entry(f4,width="10",font="bold",bd="10",textvariable=american_var,justify="left")
american_entry.place(x=315,y=240)
iced_entry=Entry(f4,width="10",font="bold",bd="10",textvariable=iced_var,justify="left")
iced_entry.place(x=315,y=310)
# Cake labels and quantity entries on frame f5.
list2=["Coffee cake","Red valvet cake","Black forest cake","Lagos chocolae cake","Queen chocolate cake"]
f5_labelposition=30
for j in list2:
    f5_label=Label(f5,text=j, font=("ariel","20","bold"))
    f5_label.place(x=10,y=f5_labelposition)
    f5_labelposition+=70
coff_entry=Entry(f5,width="10",font="bold",bd="10",textvariable=coff_var,justify="left")
coff_entry.place(x=315,y=30)
red_entry=Entry(f5,width="10",font="bold",bd="10",textvariable=red_var,justify="left")
red_entry.place(x=315,y=100)
black_entry=Entry(f5,width="10",font="bold",bd="10",textvariable=black_var,justify="left")
black_entry.place(x=315,y=170)
lagos_entry=Entry(f5,width="10",font="bold",bd="10",textvariable=lagos_var,justify="left")
lagos_entry.place(x=315,y=240)
queen_entry=Entry(f5,width="10",font="bold",bd="10",textvariable=queen_var,justify="left")
queen_entry.place(x=315,y=310)
# f6 Label are here................
f6_label=["Cost of Drinks","Cost of cakes","Service charge"]
f6_labelposition=5
for i in f6_label:
    labels=Label(f6, text=i, font=("ariel","15","bold"))
    labels.place(x=10,y=f6_labelposition)
    f6_labelposition+=40
cod_entry=Entry(f6, width="10",font="bold",bd="10",textvariable=cod_var,justify="left")
cod_entry.place(x=315,y=5)
cok_entry=Entry(f6, width="10",font="bold",bd="10",textvariable=cok_var,justify="left")
cok_entry.place(x=315,y=45)
serv_entry=Entry(f6, width="10",font="bold",bd="10",textvariable=serv_var,justify="left")
serv_entry.place(x=315,y=85)
# Here are the cost declaration for the drinks and cakes..........
# Static price texts shown in f6: drinks 30/piece, cakes 50/piece,
# 1% service charge + 2% paid tax (3% total, used in total()).
cod_var.set("30/piece")
cok_var.set("50/piece")
serv_var.set("1%")
paid_var.set("2%")
# Totals labels and read-out entries on frame f7.
f7_label=["Paid Tax","Sub Total","Total Cost"]
f7_labelposition=5
for k in f7_label:
    f7_labels=Label(f7, text=k, font=("ariel","15","bold"))
    f7_labels.place(x=10,y=f7_labelposition)
    f7_labelposition+=40
paid_entry=Entry(f7,width="10",font="bold",bd="10",textvariable=paid_var,justify="left")
paid_entry.place(x=315,y=5)
sub_entry=Entry(f7,width="10",font="bold",bd="10",textvariable=sub_var,justify="left")
sub_entry.place(x=315,y=45)
tot_entry=Entry(f7,width="10",font="bold",bd="10",textvariable=tot_var,justify="left")
tot_entry.place(x=315,y=85)
# Bill frame (f3): repeat the item labels down the right-hand side.
f3_yspace=10
for i in label_list:
    f3_labels=Label(f3, text=i, font=("ariel","20","bold"))
    f3_labels.place(x=5,y=f3_yspace)
    f3_yspace+=50
f3_space=260
for i in list2:
    f3_label=Label(f3, text=i, font=("ariel","20","bold"))
    f3_label.place(x=5,y=f3_space)
    f3_space+=50
# All the value holding variables for the f3's entry boxes.............
# These mirror the ordered quantities; total() copies the values across.
f3_vale_var=IntVar()
f3_capp_var=IntVar()
f3_african_var=IntVar()
f3_american_var=IntVar()
f3_iced_var=IntVar()
f3_coff_var=IntVar()
f3_red_var=IntVar()
f3_black_var=IntVar()
f3_lagos_var=IntVar()
f3_queen_var=IntVar()
# All the entry boxes of frame f3...............
f3_vale=Entry(f3,width="10",font="bold",textvariable=f3_vale_var, bd="5")
f3_vale.place(x=315,y=10)
f3_capp=Entry(f3, width="10",font="bold",textvariable=f3_capp_var, bd="5")
f3_capp.place(x=315,y=60)
f3_african=Entry(f3,width="10",font="bold",textvariable=f3_african_var, bd="5")
f3_african.place(x=315,y=110)
f3_american=Entry(f3,width="10",font="bold",textvariable=f3_american_var, bd="5")
f3_american.place(x=315,y=160)
f3_iced=Entry(f3, width="10",font="bold",textvariable=f3_iced_var,bd="5")
f3_iced.place(x=315,y=210)
f3_coff=Entry(f3, width="10",font="bold",textvariable=f3_coff_var, bd="5")
f3_coff.place(x=315,y=260)
f3_red=Entry(f3,width="10",font="bold",textvariable=f3_red_var, bd="5")
f3_red.place(x=315,y=310)
f3_black=Entry(f3,width="10",font="bold",textvariable=f3_black_var, bd="5")
f3_black.place(x=315,y=360)
f3_lagos=Entry(f3,width="10",font="bold",textvariable=f3_lagos_var, bd="5")
f3_lagos.place(x=315,y=410)
f3_queen=Entry(f3,width="10",font="bold",textvariable=f3_queen_var, bd="5")
f3_queen.place(x=315,y=460)
# here are the entered values...............
def total():
    """Compute the bill: read the ordered quantities, mirror them into the
    bill frame (f3), set the sub total and the total (sub total plus a 3%
    charge: 1% service + 2% paid tax), and log the order to sqlite."""
    print("Total function is called")
    vale_value=vale_var.get()
    capp_value=capp_var.get()
    african_value=african_var.get()
    american_value=american_var.get()
    coff_value=coff_var.get()
    iced_value=iced_var.get()
    red_value=red_var.get()
    black_value=black_var.get()
    lagos_value=lagos_var.get()
    queen_value=queen_var.get()
    # Frame f3's value ................after call total function.............
    f3_vale_var.set(vale_value)
    f3_capp_var.set(capp_value)
    f3_african_var.set(african_value)
    f3_american_var.set(american_value)
    f3_iced_var.set(iced_value)
    f3_coff_var.set(coff_value)
    f3_red_var.set(red_value)
    f3_black_var.set(black_value)
    f3_lagos_var.set(lagos_value)
    f3_queen_var.set(queen_value)
    # Drinks are 30 apiece, cakes 50 apiece.  A zero quantity contributes
    # zero, so the original 40-line `if x: y=x*30 else: y=x` cascade reduces
    # to two multiplications with identical results.
    drinks_cost = 30 * (vale_value + capp_value + african_value + american_value + iced_value)
    cakes_cost = 50 * (coff_value + red_value + black_value + lagos_value + queen_value)
    subtotal = drinks_cost + cakes_cost
    sub_var.set(subtotal)
    # 3% = 1% service charge + 2% paid tax (see the f6 price labels)
    serviceNpaid_charge=(subtotal*3)/100
    tot_var.set(serviceNpaid_charge+subtotal)
    # database connection ............
    con = sqlite3.connect("akthunder.db")
    cursor = con.cursor()
    cursor.execute('insert into info3(vale,capp,african,american,iced,coff,red,black,lagos,queen) values(?,?,?,?,?,?,?,?,?,?)',(vale_value,capp_value,african_value,american_value,iced_value,coff_value,red_value,black_value,lagos_value,queen_value))
    con.commit()
    con.close()
def clear():
    """Reset every order quantity, the bill mirror, and the totals to zero."""
    for var in (vale_var, capp_var, african_var, american_var, iced_var,
                coff_var, red_var, black_var, lagos_var, queen_var,
                sub_var, tot_var,
                f3_vale_var, f3_capp_var, f3_african_var, f3_american_var,
                f3_iced_var, f3_coff_var, f3_red_var, f3_black_var,
                f3_lagos_var, f3_queen_var):
        var.set(0)
def quit():
    # Terminate the whole application process (note: shadows the builtin quit()).
    exit(0)
# Action buttons at the bottom of the bill frame: compute, reset, exit.
tot_button=Button(f3, width="10" ,bd="5",fg="green", text="Total",font=("ariel","15","bold"), command=lambda:total())
tot_button.place(x=5,y=500)
clear_button=Button(f3, width="10" ,bd="5",fg="brown", text="Clear",font=("ariel","15","bold"), command=lambda:clear())
clear_button.place(x=150,y=500)
quit_button=Button(f3, width="10" ,bd="5",fg="red", text="Quit",font=("ariel","15","bold"), command=lambda:quit())
quit_button.place(x=300,y=500)
# Start the Tk event loop; blocks until the window is closed.
win.mainloop()
"noreply@github.com"
] | akthunder.noreply@github.com |
a2dcadde1e6487627bc4b4681ce85f833c374bb5 | dcefcd767067736bb5d343e01ded03fe67b6c0a5 | /gazon.py | 9b749bb4e45e33a10f0a3dbb69bdbfe5191cd381 | [] | no_license | bekk-studio/Astek-etude-tondeuse-autonome | 2c0b09f695723554653a75ab1026b17374a44242 | 59699ecfaa8949bb00073615e0e54ff3567af88b | refs/heads/master | 2020-04-12T20:11:24.306179 | 2018-12-21T15:49:15 | 2018-12-21T15:49:15 | 162,728,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,330 | py | import numpy as np
import sys
from six import StringIO, b
from gym import utils
from gym.envs.toy_text import discrete
# Action codes accepted by gazonEnv.step().
FORWARD = 0
TURNRIGHT = 1
TURNLEFT = 2
"""orientation
0
3 1
2
"""
# Built-in lawn layouts, selectable by gazonEnv(map_name=...).
# Tiles: S = start, P = flat ground, B = sloped ground, G appears in every
# map but is not mentioned in the class legend -- presumably a goal tile;
# TODO confirm.
MAPS = {
    "4x4": [
        "SPPP",
        "PPPP",
        "PPPP",
        "PPBG"
    ],
    "8x8": [
        "SPPPPPPP",
        "PPPPPPPP",
        "PPPPPPPP",
        "PPPPPPPP",
        "PPPPPBPP",
        "PPPPPPPP",
        "PPPPPPPP",
        "PPPPPPPG"
    ],
    "8x16": [
        "SPPPPPPPPPPPPPPP",
        "PPPPPPPPPPPPPPPP",
        "PPPPPPPPPPPPPPPP",
        "PPPPPPPPPPPPPPPP",
        "PPPPPBPPPPPPPPPP",
        "PPPPPPPPPPPPPPPP",
        "PPPPPPPPPPPPPPPP",
        "PPPPPPPPPPPPPPPG"
    ]
}
class gazonEnv(discrete.DiscreteEnv):
    """
    Gazon à tondre (lawn-mowing environment)

    Tiles: S = starting point (safe), P = flat ground, B = sloped ground,
    X = forbidden ground.

    The state is (position, orientation), where position = row*ncol + col
    and orientation is 0=up, 1=right, 2=down, 3=left.  The episode ends
    when the whole lawn has been mowed.
    """
    metadata = {'render.modes': ['human', 'ansi']}
    def __init__(self, desc=None, map_name="4x4",is_slippery=False, orientation=0):
        if desc is None and map_name is None:
            raise ValueError('Must provide either desc or map_name')
        elif desc is None:
            desc = MAPS[map_name]
        self.desc = desc = np.asarray(desc,dtype='c')
        self.nrow, self.ncol = nrow, ncol = desc.shape
        # Starting cell is the (unique) 'S' tile.
        row, col = np.where(np.array(self.desc == b'S'))[0][0], np.where(np.array(self.desc == b'S'))[1][0]
        position = row*ncol + col
        self.state = position, orientation
        self.nA = 3                     # FORWARD, TURNRIGHT, TURNLEFT
        self.nO = 4                     # four orientations
        self.nS = nrow * ncol * self.nO
        self.startstate = self.state
        #isd = np.array(desc == b'S').astype('float64').ravel()
        #isd /= isd.sum()
        # 'S' starts mowed; forbidden 'X' tiles count as mowed so the
        # episode can finish without visiting them.
        self.mowed = np.array(desc == b'S').astype(int) + np.array(desc == b'X').astype(int)
        #P = {s : {a : [] for a in range(nA)} for s in range(nS)}
        # NOTE(review): discrete.DiscreteEnv.__init__ is never called; this
        # subclass overrides reset()/step() instead.  (An unreachable
        # super().__init__(nS, nA, P, isd) call that sat after step()'s
        # return statement was removed.)
    def reset(self):
        """Put the mower back on the start tile and un-mow the lawn."""
        self.state = self.startstate
        self.mowed = np.array(self.desc == b'S').astype(int) + np.array(self.desc == b'X').astype(int)
        return self.state
    def step(self, action, forward_reward=-1, turn_reward=-1, mowing_reward=2 ):
        """Apply one action and return (next_state, reward, done)."""
        reward = 0
        position, orientation = self.state
        if action==0: # forward
            # position = row*ncol + col, so divide/modulo by ncol.
            # (Bug fix: the original used nrow here, which is wrong on the
            # non-square "8x16" map.)
            row, col = position//self.ncol, position%self.ncol
            if orientation == 0:
                row = max(row-1,0)
            elif orientation == 1:
                col = min(col+1,self.ncol-1)
            elif orientation == 2:
                row = min(row+1,self.nrow-1)
            elif orientation == 3:
                col = max(col-1,0)
            position = row*self.ncol + col
            reward += forward_reward
        elif action==1: # right
            orientation = (orientation + 1) % 4
            reward += turn_reward
        elif action==2: # left
            orientation = (orientation - 1) % 4
            reward += turn_reward
        next_state = position, orientation
        self.state = next_state
        position, orientation = self.state
        # Same nrow -> ncol fix as above.
        row, col = position//self.ncol, position%self.ncol
        if self.mowed[row][col] == 0:
            reward += mowing_reward
            self.mowed[row][col] = 1
        done = False
        if np.sum(self.mowed) == self.nrow * self.ncol:
            done = True
        return next_state, reward, done
    """def _render(self, mode='human', close=False):
        if close:
            return
        outfile = StringIO() if mode == 'ansi' else sys.stdout
        row, col = self.s // self.ncol, self.s % self.ncol
        desc = self.desc.tolist()
        desc = [[c.decode('utf-8') for c in line] for line in desc]
        desc[row][col] = utils.colorize(desc[row][col], "red", highlight=True)
        if self.lastaction is not None:
            outfile.write("  ({})\n".format(["Left","Down","Right","Up"][self.lastaction]))
        else:
            outfile.write("\n")
        outfile.write("\n".join(''.join(line) for line in desc)+"\n")
        if mode != 'human':
            return outfile"""
| [
"noreply@github.com"
] | bekk-studio.noreply@github.com |
ea211242d31bbd76d12c68ffe5aaa4ac19432641 | 4b1c693a6a18ac3856f2f855c15bb2c9bed90410 | /quat2/parser.py | f6e920ac6288b91cc8c5b0de252430903530acda | [] | no_license | mouboo/quat2 | 73cb06315400dd725bfd34dc13634f61366f8597 | f5d0519d0279aff8d0e5069db6211200a1e8263f | refs/heads/master | 2020-09-23T11:07:27.077622 | 2020-01-05T13:37:53 | 2020-01-05T13:37:53 | 225,485,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,694 | py | # parser.py
import itertools
import actions
### Words
# Canonical verbs mapped to their accepted synonyms; parse() treats either
# the key or any synonym as the same command.
verbdict = {'go' : ['walk', 'move'],
            'take' : ['get','grab'],
            'wear' : ['put on'],
            'look' : ['examine']
            }
# Recognised prepositions (kept in the command but carry no meaning yet).
preps = ['in','to','on','at']
def parse(phrase, player):
    """Takes a user input string and executes a command.

    Builds the vocabulary from the player's inventory and current room,
    normalises the phrase, classifies each word (verb/noun/preposition/exit)
    and dispatches to the matching action.
    """
    #Make a list of all known synonyms for all available actions,items, and rooms
    verbs = []
    for v in verbdict:
        verbs.append(v)
        verbs += verbdict[v]
    inventory = []
    for i in player.inventory:
        inventory.append(i.name)
        inventory.extend(i.synonyms)
    room_items = []
    for r in player.current_room.items:
        room_items.append(r.name)
        room_items.extend(r.synonyms)
    exits = []
    for e in player.current_room.exits.values():
        exits.extend(e)
    known_words = verbs + preps + inventory + room_items + exits
    #Convert phrase to lower case
    phrase = phrase.lower()
    #Split string on whitespace into list of words
    raw_p = [word for word in phrase.split()]
    #Check if compound verb and nouns (e.g. "put on"), replace in list
    for i in range(0,len(raw_p)-1):
        for j in range(0,len(raw_p)-1-i):
            compound = raw_p[i:len(raw_p)-j]
            c = ' '.join(compound)
            if c in known_words:
                raw_p.insert(i,c)
                for k in range(1,len(compound)+1):
                    del raw_p[i+1]
    #Remove unknown words
    cmd = [word for word in raw_p if word in known_words]
    # Analyze grammar: v=verb, n=noun, p=preposition, e=exit
    grammar = []
    for w in cmd:
        if w in verbs:
            grammar.append('v')
        elif w in inventory or w in room_items:
            grammar.append('n')
        elif w in preps:
            grammar.append('p')
        elif w in exits:
            grammar.append('e')
        else:
            print('Error: {} did not match a grammatic category'.format(w))
    print("Command components before binding: {}".format(cmd))
    print("Grammar: {}".format(grammar))
    #Sanity checks: exactly one verb, and it determines the function.
    # Bug fix: the old test `if not grammar.count('v')` fired on *zero*
    # verbs while printing "multiple verbs"; the intended rule is one verb.
    if grammar.count('v') != 1:
        print("Error: expected exactly one verb")
        return
    if not grammar[0] == 'v':
        print("Error: verb not first")
        return
    #Go
    if cmd[0] == 'go' or cmd[0] in verbdict['go']:
        if 'e' in grammar:
            e_pos = grammar.index('e')
            print("Exit: {}".format(cmd[e_pos]))
        else:
            print("Error: no exit found")
            for x in player.current_room.exits:
                print(x)
        print("Current room: {}".format(player.current_room.name))
    #Look
    if cmd[0] == 'look' or cmd[0] in verbdict['look']:
        #TODO: deal with prepositions
        if len(cmd) == 1:
            player.look(player.current_room)
        elif len(cmd) == 2 and grammar[1] == 'n':
            # Collect every item (in the room or carried) matching the noun.
            matches = []
            for m in player.current_room.items + player.inventory:
                if cmd[1] == m.name or cmd[1] in m.synonyms:
                    matches.append(m)
            if len(matches) == 0:
                print("Don't know how to look at {}".format(cmd[1]))
            elif len(matches) > 1:
                print("Which {}?".format(cmd[1]))
            elif len(matches) == 1:
                player.look(matches[0])
| [
"peterkvillegard@gmail.com"
] | peterkvillegard@gmail.com |
deb209eab8d82b4f9f28f02ec254c0b89224d034 | a51e4a1299cd5e2f5a5205f49a9c82a3e9e6939d | /1.2 Data Analysis with Pandas/2.2 Pandas DataFrame.py | 309155cdb4684bcc14464a71252ad21a022aa1aa | [] | no_license | Deviloxide/Introduction-to-Machine-Learning | 0309861a63e91766eadf4be64d57dbe18efe359a | 9599066dd747e2f4deb1b87b10ba7abd7c3ed314 | refs/heads/main | 2023-05-26T00:14:14.764233 | 2021-06-09T15:36:55 | 2021-06-09T15:36:55 | 373,716,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import numpy as np
import pandas as pd
# The coding exercise for this chapter involves creating various pandas
# DataFrame objects.

# First create a DataFrame from a Python dictionary with key-value pairs
# 'c1':[0, 1, 2, 3] and 'c2':[5, 6, 7, 8], in that order.  The index
# comes from the list of row labels ['r1', 'r2', 'r3', 'r4'].
df = pd.DataFrame({'c1': [0, 1, 2, 3], 'c2': [5, 6, 7, 8]},
                  index=['r1', 'r2', 'r3', 'r4'])
print(df)

# Next, a single-row DataFrame: a list of lists for the data, with the
# column labels set manually to ['c1', 'c2'] and row label ['r5'].
row_df = pd.DataFrame([[9, 9]], columns=['c1', 'c2'], index=['r5'])
print(row_df)

# Append row_df to the end of df and drop row 'r2'.
# Note: DataFrame.append() was removed in pandas 2.0 -- pd.concat() is the
# supported way to stack DataFrames and produces the identical result.
df_app = pd.concat([df, row_df])
df_drop = df_app.drop(labels='r2')
print(df_app)
print(df_drop)
"noreply@github.com"
] | Deviloxide.noreply@github.com |
7027469100609fa2a1519d5ef522ba4867d97b0b | e6b800c3ff19fade9ac64f0042377c5e1ba98e6c | /tutorial/urls.py | cc7e263f17f24d673884c620a468d7c2a8ca2fc9 | [] | no_license | GHOWTAN/Django_REST_framework_Tutorial | 1151ce4c365d41475d27f40828081663b9587531 | 25c0357c2e700cf3ed6e2b73f490930526dda5f5 | refs/heads/master | 2021-05-05T23:09:20.271403 | 2018-01-25T21:45:07 | 2018-01-25T21:45:07 | 116,521,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | from django.conf.urls import url, include
from rest_framework import routers
from quickstart import views
# DRF DefaultRouter: auto-generates the list/detail URL patterns (and an API
# root view) for every registered viewset under the given URL prefix.
router = routers.DefaultRouter()
router.register(r'owners', views.OwnerViewSet)
router.register(r'products', views.ProductViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# NOTE(review): django.conf.urls.url() was deprecated in Django 2.0 and
# removed in 4.0 — migrate to django.urls.re_path (or path) when upgrading.
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
"paradisetan798@gmail.com"
] | paradisetan798@gmail.com |
80154383ca6b13dd06a95d0e165de2d67e2c7647 | 57a2a0db80453940e2a6816764581b44169177ca | /Stage_3/Task12_Recursion_Backtracking_Divide&Conquer/eight_queen_puzzle.py | bb0f89b12c174fe0dd6d5d2e22aee3c3365cc7bf | [
"MIT"
] | permissive | Pyabecedarian/Algorithms-and-Data-Structures-using-Python | cf9c7a2ecce3b3f9398509e30741279dd51ed241 | 08642357df60d48cb185b5487150204b42764260 | refs/heads/master | 2020-12-14T10:53:29.862940 | 2020-02-29T05:36:40 | 2020-02-29T05:36:40 | 234,718,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,914 | py | """
Backtracking: The Eight Queen Puzzle
Backtracking is an algorithmic paradigm that tries different solutions until it finds a solution that `works`.
Problems which are typically solved using backtracking technique have following property in common:
> these problem can be solved by trying every possible configurations, and
> each configuration is tried only once.
The Eight-Queen puzzle or more general the N-Queen puzzle is the problem of placing N chess queens on an
N x N chessboard such that no two queens attack each other. `Attack` here means the next queen should
not be placed in the block that is of the same row, column and diagonal.
The queen attack area on 4x4 chessboard
* * * _
* X * *
* * * _
_ * _ *
(X: the Queen; *: attack area)
Idea:
The rule of the game tells that each row on the chessboard can place only ONE queen, the same as column.
So we can try to place a queen on each row one at a time. Let's define the representation of the queen's
position.
We can represent the coordinate of the queen in a two dimensional matrix:
[ 0, 1, 0, 0 ]
[ 0, 0, 0, 1 ]
[ 1, 0, 0, 0 ]
[ 0, 0, 1, 0 ]
Or we can also represent the places in a tuple, where the index of the each value is the row of the
queen and the value itself is which column the queen is placed. For example, s[0] = 1 means the queen
on the 1st row places at column 2 (Note: the starting index is 0 in python).
s[i] = col
Representation:
The first queen on the first row can be placed at any column in the beginning since there is no other
queens on the board. But starts with the second queen, there will be some areas that cause `conflict`
with the previously added queens. So we need a function that defines the conflict.
Conflicts:
The blocks are of the same rows and columns with the blocks that all the queens have been placed
are not legal. If `nextX` is the next column the queen is going to be placed, such that
s[i] - nextX != 0, for all i
Again, the coordinate of the next queen is not allowed to be on the diagonal blocks of all previous queens,
otherwise will cause a conflict. This can be expressed by the distance of the rows between every two queens
must not be the same as the distance of their columns.
| s[i] - nextX | != next_i - i
Thus we have defined the conflict condition, see the code in `def conflict()` function.
Procedure:
We could first place a queen (row 1) at column 1, then find the legal blocks for the next queen.
Once we get the next legal blocks, we choose one of the blocks and find the next legal blocks for the third
queen. Do the procedure until we find that:
> 1. we have placed 8 queens, therefore the solution has found;
> 2. we have no legal block for the next queen, thus we may think that the block of last queen
is wrong, so we backtrack to the last queen and choose another legal block followed by repeat
the procedure for the next queen.
Implementation:
See the procedure above, we need another function that can produce the legal blocks for the next queen.
Given the previous queens' positions, how could we produce all next legal blocks? Since we place the
queens row by row, so the next queen's column must be in range of [0, n], while not meet the conflict
condition.
See the code in `def next_positions()` function.
To let the algorithm follow the procedure where we have to backtrack the position when there is no
legal blocks for the next queen, we may think that we need a Stack to preserve the current queen's
position. If there is not next legal block, we pop the current queen's position and try another block.
This lead to a recursive call of the main function naturally.
Recursion:
First let's deal with the base case. Assume all the queens before the last queen has placed properly,
our main function will do as the same as what `next_positions()` did.
Next we need to move the general case towards the base case in order to complete the recursion.
We need the state `s` containing all the previous queens' positions increases to length n - 1, we can
add s with the position produces by last invocation. Therefore we need to change the return type to a
tuple of length 1 other than a int.
See the code in `def queen()` function.
Optimization:
We can see that in queen(), the for loop and conflict() invocation are the same, so we change the code
to a simpler version, see the final code in `def queen_final()` function.
"""
def conflict(next_x: int, s: tuple) -> bool:
    """Return True when column *next_x* for the next row clashes with a queen in *s*.

    A clash is a shared column (column distance 0) or a shared diagonal
    (column distance equal to row distance).  ``s[i]`` is the column of the
    queen already placed on row ``i``; the next queen goes on row ``len(s)``.
    """
    next_row = len(s)
    return any(abs(col - next_x) in (0, next_row - row)
               for row, col in enumerate(s))
def next_positions(n: int = 8, s: tuple = ()) -> tuple:
    """Yield every column of an *n*-wide board where the next queen may legally
    be placed, given the columns of the already-placed queens in *s*."""
    yield from (col for col in range(n) if not conflict(col, s))
def queen(n: int = 8, s: tuple = ()):
    """Recursively yield all completions of partial placement *s* on an n-queen board.

    Each yielded value is a tuple of column indices, one per remaining row, so
    ``queen(n)`` yields every full solution of the n-queen puzzle.
    """
    on_last_row = len(s) == n - 1
    for col in range(n):
        if conflict(col, s):
            continue
        if on_last_row:             # base case: a single-queen completion
            yield (col,)
        else:                       # recurse on the extended partial placement
            for tail in queen(n, s + (col,)):
                yield (col,) + tail
def queen_final(n: int = 8, s: tuple = ()):
    """Final version of the N-queen solver: yield every completion of *s*.

    Behaves exactly like `queen`, written with the conflict test hoisted into
    a single filtered loop.
    """
    for col in (c for c in range(n) if not conflict(c, s)):
        if len(s) + 1 == n:         # placing the last queen finishes a solution
            yield (col,)
        else:
            yield from ((col,) + rest for rest in queen_final(n, s + (col,)))
def print_queens(solution: tuple):
    """Draw a solution as a tab-separated board, one row per queen ('X')."""
    size = len(solution)
    print()
    for col in solution:
        cells = ['.'] * size
        cells[col] = 'X'
        print(''.join(cell + '\t' for cell in cells))
if __name__ == '__main__':
    # Demo on a 4x4 board: legal columns for the 4th row given queens (1, 3, 0).
    print(list(next_positions(4, (1, 3, 0))))
    print(list(queen(4, (1, 3, 0))))  # base case of the recursion
    print(list(queen(4)))
    print(list(queen_final(4)))
    # The classic 8x8 board has 92 solutions; show the first three boards.
    print(len(list(queen_final(8))))
    sols = queen_final(8)
    for _ in range(3):
        print_queens(next(sols))
| [
"42270798+Pyabecedarian@users.noreply.github.com"
] | 42270798+Pyabecedarian@users.noreply.github.com |
bcbcad4aff1e9d1403cd1fac2ef49fb0335964ca | f3c60bcaec68ed7025cae6c3b41065f1767e7f53 | /1_Simulations/2_Atmosphere/fit_psf.py | 670df5190be3773796e7c5bf6f11344e91654bb2 | [] | no_license | maxime-rey/M2_Internship | c5adb1836596a42c8b7723a9fbfe9c5a5c326edd | e4006553b4d843a3a368f69e7f7b668687c336db | refs/heads/master | 2022-01-20T21:54:14.010367 | 2019-07-28T13:21:33 | 2019-07-28T13:21:33 | 194,552,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,753 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Time-stamp: <2018-06-04 15:34:50 ycopin>
"""
3D-spectrography PSF analysis.
"""
from __future__ import division, print_function
from astropy.table import Table
import numpy as N
import matplotlib.pyplot as P
import iminuit
from iminuit.frontends import console #Frontend
import psfModels
__author__ = "Yannick Copin <y.copin@ipnl.in2p3.fr>"
class MetaSlices(object):
    """Collection of per-star metaslice cubes read from a FITS table.

    Each table row holds a 'data' cube of shape (nw, ny, nx), a matching
    'var' cube (actually standard errors — see `extract_metaslice`) and the
    wavelength vector 'wave' shared by all rows.
    """

    def __init__(self, filename):
        """
        Read metaslices from FITS table into an Astropy Table.

        :param str filename: FITS file readable by `astropy.table.Table.read`
        :raises IndexError: if rows disagree on cube shape or wavelengths
        """
        self.filename = filename            #: Input filename
        self.tab = Table.read(filename)     #: Astropy table
        self.ns = len(self.tab)             #: Nb of rows/stars
        self.waves = self.tab[0]['wave']    #: Wavelengthes [Å]
        self.nw, self.ny, self.nx = inshape = self.tab[0]['data'].shape
        # Sanity checks: every row must share the first row's cube shape and
        # wavelength vector.  (N.all replaces N.alltrue, an alias that was
        # deprecated and removed in NumPy 2.0.)
        if not N.all([row['data'].shape == inshape for row in self.tab]):
            raise IndexError("Data shape is not constant among rows.")
        if not N.all([row['var'].shape == inshape for row in self.tab]):
            raise IndexError("Variance shape is not constant among rows.")
        if not N.allclose(self.tab[:]['wave'], self.waves):
            raise IndexError("Wavelengths are not constant among rows.")

    def __str__(self):
        """One-line summary: filename, row count, cube shape, wavelength range."""
        s = "{}: {} rows, shape=({}, {}, {}) in {:.0f}--{:.0f} Å".format(
            self.filename, self.ns,
            self.nw, self.ny, self.nx, self.waves[0], self.waves[-1])
        return s

    def extract_metaslice(self, index, lbda):
        """
        Extract flux and error at given index and wavelength.

        :param int index: table row (star) index
        :param float lbda: wavelength [Å], must match one of `self.waves`
        :return: (flux, error) 2D masked arrays with invalid values masked
        :raises IndexError: if `lbda` is not one of the tabulated wavelengths

        .. Warning:: `var` column is actually std error.
        """
        row = self.tab[index]
        iwave = N.argwhere(N.isclose(self.waves, lbda))
        if len(iwave) != 1:
            raise IndexError("Wavelength {} not found.".format(lbda))
        f = N.ma.masked_invalid(row['data'][iwave[0]])   # (1, ny, nx)
        df = N.ma.masked_invalid(row['var'][iwave[0]])
        return f[0], df[0]
def create_coords(shape, starts=0, steps=1, sparse=False):
    """
    Create coordinate arrays.

    Build ...,y,x-coordinate arrays for a given input shape `(..., ny, nx)`.
    Each of the ndim coordinate arrays will have input shape (kept sparse
    when `sparse=True`), and the output list is squeezed.  `starts='auto'`
    centers *all* coordinates on 0.

    >>> create_coords((3,), starts='auto')
    array([-1., 0., 1.])
    >>> N.array(create_coords((3, 2),
    ...                       starts=(1, 0), steps=(3, 2), sparse=False))
    array([[[ 1., 1.],
            [ 4., 4.],
            [ 7., 7.]],
           [[ 0., 2.],
            [ 0., 2.],
            [ 0., 2.]]])
    >>> create_coords((3, 2), starts=(1, 0), steps=(3, 2), sparse=True)
    [array([[ 1.],
            [ 4.],
            [ 7.]]), array([[ 0., 2.]])]
    """
    ndim = len(shape)
    step_per_axis = N.broadcast_to(steps, (ndim,))        # (ndim,)
    if starts == 'auto':
        # Symmetric axes: first sample at -(n-1)*step/2 centers each on 0.
        start_per_axis = [(1 - size) * step / 2.
                          for size, step in zip(shape, step_per_axis)]
    else:
        start_per_axis = N.broadcast_to(starts, (ndim,))  # (ndim,)
    axes = [N.arange(size, dtype=float) * step + start
            for size, step, start in zip(shape, step_per_axis, start_per_axis)]
    if ndim == 1:
        return N.meshgrid(axes[0])[0]
    # meshgrid expects x-fastest ordering, hence the double reversal.
    return N.meshgrid(*axes[::-1], sparse=sparse)[::-1]
class Kolmogorov_PSF(object):
    """Elliptical Kolmogorov-turbulence PSF model sampled on a spatial grid.

    The radial profile is the Hankel-transform Kolmogorov PSF from
    `psfModels`, distorted into an ellipse (`cy2`, `cxy`), scaled by a peak
    amplitude and offset by a flat background.  Parameters are adjusted by
    chi2 minimization with iminuit (see `fit`; the kwargs used target the
    iminuit 1.x API).
    """

    def __init__(self, shape=(15, 15), scale=0.43, lbda=5e-7):
        """
        :param shape: spatial shape (ny, nx)
        :param float scale: spatial scale [arcsec/spx]
        :param float lbda: reference wavelength [Å]
            (NOTE(review): the default 5e-7 looks like meters while callers
            pass Å — confirm the intended unit)
        """
        self.ny, self.nx = shape
        self.scale = float(scale)   #: Spatial scale [arcsec/spx]
        #: Spatial cartesian coordinates [arcsec], centered on the FoV
        self.y, self.x = create_coords(
            shape, starts='auto', steps=self.scale, sparse=True)
        self.lbda = float(lbda)     #: Reference wavelength [Å]

        # PSF parameters (neutral defaults; see initial_guess/fit)
        self.ampl = 1    #: Peak amplitude
        self.x0 = 0      #: PSF x-center [arcsec]
        self.y0 = 0      #: PSF y-center [arcsec]
        self.cy2 = 1     #: Elliptical radius y2-coefficient
        self.cxy = 0     #: Elliptical radius xy-coefficient
        #: Reference Fried radius [m] at ref. wavelength
        self.r0 = 0.1
        self.expo = 5/3  #: Kolmogorov index
        self.bkgnd = 0   #: Background level

        self.flux = None   #: Observed PSF
        self.dflux = None  #: Associated variance

    def get_seeing(self, lbda=None):
        """
        FWHM seeing [arcsec].

        :param lbda: wavelength [Å], default to ref. wavelength
        """
        if lbda is None:
            lbda = self.lbda
        return psfModels.seeing_fwhm(
            lbda * 1e-10, r0ref=self.r0, lref=self.lbda * 1e-10, expo=self.expo)

    def __str__(self):
        """Header line plus one line per parameter, with derived seeing."""
        s = "PSF({}×{}) @{:.0f} Å:".format(self.ny, self.nx, self.lbda)
        # .items() replaces the Python-2-only dict.iteritems()
        for key, val in self.get_params().items():
            s += "\n  {:>4s} = {}".format(key, val)
            if key == 'r0':
                s += ", seeing = {:.2f}'' = {:.2f}'' @ 5000 Å".format(
                    self.get_seeing(), self.get_seeing(5000.))
            if key == 'expo':
                s += " = {:.2f} / 3".format(self.expo * 3)
        if self.flux is not None:
            s += "\n  data: {} points, max={}".format(
                self.flux.count(), self.flux.max())
        return s

    def estimate_r0(self, seeing, lbda):
        """
        Estimate Fried radius [m] at reference wavelength from FWHM seeing [arcsec]
        at wavelength lbda [Å].

        :param seeing: FWHM seeing [arcsec]
        :param lbda: wavelength [Å]
        """
        r0 = psfModels.r0_from_seeing(
            seeing, lbda * 1e-10, expo=self.expo)           # At lbda
        return psfModels.friedParamater(self.lbda * 1e-10,  # At self.lbda
                                        r0ref=r0, lref=lbda * 1e-10, expo=self.expo)

    def get_params(self):
        """Return the free parameters as an ordered name -> value mapping."""
        from collections import OrderedDict

        return OrderedDict((('ampl', self.ampl),
                            ('x0', self.x0),
                            ('y0', self.y0),
                            ('cy2', self.cy2),
                            ('cxy', self.cxy),
                            ('r0', self.r0),
                            ('expo', self.expo),
                            ('bkgnd', self.bkgnd)))

    def set_params(self, **kwargs):
        """Set known parameters (cast to float); raise on unknown names."""
        # .items() replaces the Python-2-only dict.iteritems()
        for key, val in kwargs.items():
            if not hasattr(self, key):
                raise AttributeError("Unknown parameter '{}'.".format(key))
            setattr(self, key, float(val))

    def get_extent(self):
        """
        Return FoV extent (left, right, bottom, top) [arcsec].
        """
        return (self.x[0, 0], self.x[0, -1],
                self.y[0, 0], self.y[-1, 0])  # L, R, B, T

    def radius_elliptical(self, **kwargs):
        """
        Elliptical radius [arcsec] of each grid point around (x0, y0).
        Any parameter may be overridden through keyword arguments.
        """
        dx = self.x - kwargs.get('x0', self.x0)
        dy = self.y - kwargs.get('y0', self.y0)
        cy2 = kwargs.get('cy2', self.cy2)
        cxy = kwargs.get('cxy', self.cxy)
        return N.abs(dx**2 + cy2 * dy**2 + cxy * dx * dy) ** 0.5  # [arcsec]

    def radial_profile(self, r, **kwargs):
        """
        PSF profile at radii r [arcsec].

        :param r: (generalized) radii [arcsec]
        """
        r0 = kwargs.get('r0', self.r0)
        expo = kwargs.get('expo', self.expo)
        # Spline interpolator over the Hankel-transform profile
        spl = psfModels.psf_Kolmogorov_Hankel_interp(
            N.max(r), self.lbda * 1e-10, r0=r0, expo=expo)
        return spl(r)  # Interpolated values

    def psf(self):
        """Model PSF on the grid: ampl * profile(r_elliptical) + bkgnd."""
        r = self.radius_elliptical()   # Elliptical radius
        p = self.radial_profile(r)     # Peak-normalized PSF
        return self.ampl * p + self.bkgnd

    def set_data(self, flux, dflux, flux_scale=1e-12):
        """Attach observed flux and error maps, rescaled by 1/flux_scale."""
        assert N.shape(flux) == (self.ny, self.nx)
        self.flux = N.ma.MaskedArray(flux) / flux_scale
        self.dflux = N.ma.MaskedArray(dflux) / flux_scale

    def initial_guess(self):
        """Initialize parameters from the attached data (background from the
        1st quintile, center from the flux barycenter)."""
        if self.flux is None:
            raise NotImplementedError()
        bkgnd = N.percentile(self.flux.filled(0), 20)  # 1st quintile
        ampl = self.flux.max() - bkgnd
        weights = (self.flux - bkgnd).filled(0)
        x0 = N.average(self.x.squeeze(), weights=weights.sum(axis=0))
        y0 = N.average(self.y.squeeze(), weights=weights.sum(axis=1))
        expo = 3 / 2
        r0 = 0.1
        cy2 = 1
        cxy = 0
        self.set_params(ampl=ampl, x0=x0, y0=y0, expo=expo, bkgnd=bkgnd,
                        r0=r0, cy2=cy2, cxy=cxy)

    def chi2(self, ampl=1, x0=0, y0=0, cy2=1, cxy=0, r0=0.1, expo=5/3, bkgnd=0):
        """Chi2 of the model vs. the attached data (also updates parameters)."""
        self.set_params(ampl=ampl, x0=x0, y0=y0, cy2=cy2, cxy=cxy,
                        r0=r0, expo=expo, bkgnd=bkgnd)
        res = (self.psf() - self.flux) / self.dflux
        return N.dot(res.ravel(), res.ravel())

    def fit(self, **kwargs):
        """
        Chi2-fit using current parameters as initial guess.

        Extra keyword arguments are forwarded to `iminuit.Minuit` (overriding
        the defaults below).  Targets the iminuit 1.x API (error_*/limit_*
        kwargs, get_fmin).
        """
        # Initialization
        init = self.get_params()                 # Initial guess
        init.update(error_x0=1, error_y0=1,      # Initial steps
                    error_ampl=1,
                    error_cy2=0.1, error_cxy=0.1,
                    error_r0=0.01, error_expo=0.01,
                    error_bkgnd=1)
        init.update(limit_ampl=(self.ampl / 10, self.ampl * 10),  # Limits
                    limit_cxy=(0, 10),
                    limit_cy2=(0.1, 10),
                    limit_expo=(0.1, 2 - 1e-3),  # expo >= 2 doesn't work (PSF takes negative values)
                    limit_r0=(1e-2, 1),
                    limit_bkgnd=(0, self.ampl))
        init.update(kwargs)
        self.minuit = iminuit.Minuit(self.chi2, **init)
        # Fit
        self.minuit.migrad()
        # Results
        fmin = self.minuit.get_fmin()
        if fmin.is_valid:
            # Save best fit in actual params
            self.set_params(**self.minuit.values)
        print("Chi2 = {}, DoF = {}".format(
            self.minuit.fval, self.flux.count() - self.minuit.narg))
        for key in self.minuit.parameters:
            print("{} = {} ± {}".format(
                key, self.minuit.values[key], self.minuit.errors[key]))
        return self.minuit

    def figure_psf(self, log=False):
        """
        Figure with a contour comparison (left) and radial profile/residual
        panels (right) of the attached data vs. the adjusted model.

        :param bool log: use logarithmic contour normalization
        :return: the matplotlib Figure
        """
        if self.flux is None:
            raise NotImplementedError()
        seeing = self.get_seeing()
        fig = P.figure(tight_layout=True)
        ax1 = fig.add_subplot(1, 2, 1, aspect='equal',
                              xlabel="x [arcsec]", ylabel="y [arcsec]",
                              title=u"Seeing={:.2f}'' @ {:.0f} Å\n"
                              u"({:.2f}'' @ 5000 Å)".format(
                                  seeing, self.lbda, self.get_seeing(5000)))
        ax2 = fig.add_subplot(2, 2, 2,
                              yscale='log', ylabel="Flux")
        ax4 = fig.add_subplot(2, 2, 4,
                              xlabel="r [arcsec]",
                              ylabel="Relative error")
        # Contours: observed image (gray) vs. adjusted model (magma)
        extent = self.get_extent()
        norm = P.matplotlib.colors.LogNorm() if log else None
        import astropy.visualization as AV
        imnorm = AV.ImageNormalize(
            self.flux,
            stretch=AV.LogStretch())
        ax1.imshow(self.flux, extent=extent,
                   norm=imnorm, cmap='gray_r', label="Observed", origin='lower')
        cnt = ax1.contour(self.flux, extent=extent,
                          norm=norm, cmap='gray_r', label="Observed", origin='lower')
        ax1.contour(self.psf(), levels=cnt.levels, extent=extent,
                    norm=norm, cmap='magma', label="Adjusted", origin='lower')
        # Elliptical radius: mark the center and the 2-seeing contour
        r = self.radius_elliptical()
        ax1.plot([self.x0], [self.y0], marker='*', color='g', mec='0.2')
        cnt = ax1.contour(r, levels=[2 * seeing], extent=extent,
                          colors='g', linewidths=0.5, label="r=2''")
        # Radial plot: data points and model profile
        mr = N.ma.MaskedArray(data=r, mask=self.flux.mask)
        ax2.plot(mr, self.flux, color='k', marker='.', ls='none')
        r = N.logspace(-2, N.log10(mr.max()), max(50, int(mr.count() ** 0.5)))
        p = self.radial_profile(r) * self.ampl + self.bkgnd
        l, = ax2.plot(r, p, label="n={:.2f}".format(self.expo))
        ax2.set_autoscaley_on(False)
        ax2.axhline(self.bkgnd, ls='--')
        ax2.legend(loc='best', fontsize='small')
        # Residuals relative to the model
        mp = N.ma.MaskedArray(data=self.psf(), mask=self.flux.mask)
        ax4.plot(mr, (self.flux - mp) / mp, color='k', marker='.', ls='none')
        ax4.axhline(0, color=l.get_color())
        return fig
class vonKarman_PSF(object):
    """Elliptical von Karman-turbulence PSF model sampled on a spatial grid.

    Same structure as `Kolmogorov_PSF`, but the radial profile is the von
    Karman PSF from `psfModels`, parameterized by the Fried radius `r0` and
    the outer scale `L0` instead of the Kolmogorov exponent.
    """

    def __init__(self, shape=(15, 15), scale=0.43, lbda=5e-7):
        """
        :param shape: spatial shape (ny, nx)
        :param float scale: spatial scale [arcsec/spx]
        :param float lbda: reference wavelength [Å]
            (NOTE(review): the default 5e-7 looks like meters while callers
            pass Å — confirm the intended unit)
        """
        self.ny, self.nx = shape
        self.scale = float(scale)   #: Spatial scale [arcsec/spx]
        #: Spatial cartesian coordinates [arcsec], centered on the FoV
        self.y, self.x = create_coords(
            shape, starts='auto', steps=self.scale, sparse=True)
        self.lbda = float(lbda)     #: Reference wavelength [Å]

        # PSF parameters (neutral defaults; see initial_guess/fit)
        self.ampl = 1   #: Peak amplitude
        self.x0 = 0     #: PSF x-center [arcsec]
        self.y0 = 0     #: PSF y-center [arcsec]
        self.cy2 = 1    #: Elliptical radius y2-coefficient
        self.cxy = 0    #: Elliptical radius xy-coefficient
        #: Reference Fried radius [m] at ref. wavelength
        self.r0 = 0.1
        self.L0 = 10    #: External length of coherence
        self.bkgnd = 0  #: Background level

        self.flux = None   #: Observed PSF
        self.dflux = None  #: Associated variance

    def get_seeing(self, lbda=None):
        """
        FWHM seeing [arcsec].

        :param lbda: wavelength [Å], default to ref. wavelength
        """
        if lbda is None:
            lbda = self.lbda
        return psfModels.seeing_fwhm_vK(
            lbda * 1e-10, r0ref=self.r0, lref=self.lbda * 1e-10, L0=self.L0)

    def __str__(self):
        """Header line plus one line per parameter."""
        s = "PSF({}×{}) @{:.0f} Å:".format(self.ny, self.nx, self.lbda)
        # .items() replaces the Python-2-only dict.iteritems()
        for key, val in self.get_params().items():
            s += "\n  {:>4s} = {}".format(key, val)
        if self.flux is not None:
            s += "\n  data: {} points, max={}".format(
                self.flux.count(), self.flux.max())
        return s

    def estimate_r0(self, seeing, lbda):
        """
        Estimate Fried radius [m] at reference wavelength from FWHM seeing [arcsec]
        at wavelength lbda [Å].

        :param seeing: FWHM seeing [arcsec]
        :param lbda: wavelength [Å]
        """
        r0 = psfModels.r0_from_seeing_vK(
            seeing, lbda * 1e-10, L0=self.L0)               # At lbda
        return psfModels.friedParamater(self.lbda * 1e-10,  # At self.lbda
                                        r0ref=r0, lref=lbda * 1e-10, L0=self.L0)

    def get_params(self):
        """Return the free parameters as an ordered name -> value mapping."""
        from collections import OrderedDict

        return OrderedDict((('ampl', self.ampl),
                            ('x0', self.x0),
                            ('y0', self.y0),
                            ('cy2', self.cy2),
                            ('cxy', self.cxy),
                            ('r0', self.r0),
                            ('L0', self.L0),
                            ('bkgnd', self.bkgnd)))

    def set_params(self, **kwargs):
        """Set known parameters (cast to float); raise on unknown names."""
        # .items() replaces the Python-2-only dict.iteritems()
        for key, val in kwargs.items():
            if not hasattr(self, key):
                raise AttributeError("Unknown parameter '{}'.".format(key))
            setattr(self, key, float(val))

    def get_extent(self):
        """
        Return FoV extent (left, right, bottom, top) [arcsec].
        """
        return (self.x[0, 0], self.x[0, -1],
                self.y[0, 0], self.y[-1, 0])  # L, R, B, T

    def radius_elliptical(self, **kwargs):
        """
        Elliptical radius [arcsec] of each grid point around (x0, y0).
        Any parameter may be overridden through keyword arguments.
        """
        dx = self.x - kwargs.get('x0', self.x0)
        dy = self.y - kwargs.get('y0', self.y0)
        cy2 = kwargs.get('cy2', self.cy2)
        cxy = kwargs.get('cxy', self.cxy)
        return N.abs(dx**2 + cy2 * dy**2 + cxy * dx * dy) ** 0.5  # [arcsec]

    def radial_profile(self, r, **kwargs):
        """
        PSF profile at radii r [arcsec].

        :param r: (generalized) radii [arcsec]
        """
        r0 = kwargs.get('r0', self.r0)
        L0 = kwargs.get('L0', self.L0)
        # Spline interpolator over the Hankel-transform profile
        spl = psfModels.psf_vonKarman_Hankel_interp(
            N.max(r), self.lbda * 1e-10, r0=r0, L0=L0)
        return spl(r)  # Interpolated values

    def psf(self):
        """Model PSF on the grid: ampl * profile(r_elliptical) + bkgnd."""
        r = self.radius_elliptical()   # Elliptical radius
        p = self.radial_profile(r)     # Peak-normalized PSF
        return self.ampl * p + self.bkgnd

    def set_data(self, flux, dflux, flux_scale=1e-12):
        """Attach observed flux and error maps, rescaled by 1/flux_scale."""
        assert N.shape(flux) == (self.ny, self.nx)
        self.flux = N.ma.MaskedArray(flux) / flux_scale
        self.dflux = N.ma.MaskedArray(dflux) / flux_scale

    def initial_guess(self):
        """Initialize parameters from the attached data (background from the
        1st quintile, center from the flux barycenter)."""
        if self.flux is None:
            raise NotImplementedError()
        bkgnd = N.percentile(self.flux.filled(0), 20)  # 1st quintile
        ampl = self.flux.max() - bkgnd
        weights = (self.flux - bkgnd).filled(0)
        x0 = N.average(self.x.squeeze(), weights=weights.sum(axis=0))
        y0 = N.average(self.y.squeeze(), weights=weights.sum(axis=1))
        L0 = 20
        r0 = 0.1
        cy2 = 1
        cxy = 0
        self.set_params(ampl=ampl, x0=x0, y0=y0, L0=L0, bkgnd=bkgnd,
                        r0=r0, cy2=cy2, cxy=cxy)

    def chi2(self, ampl=1, x0=0, y0=0, cy2=1, cxy=0, r0=0.1, L0=10, bkgnd=0):
        """Chi2 of the model vs. the attached data (also updates parameters)."""
        self.set_params(ampl=ampl, x0=x0, y0=y0, cy2=cy2, cxy=cxy,
                        r0=r0, L0=L0, bkgnd=bkgnd)
        res = (self.psf() - self.flux) / self.dflux
        return N.dot(res.ravel(), res.ravel())

    def fit(self, **kwargs):
        """
        Chi2-fit using current parameters as initial guess.

        Extra keyword arguments are forwarded to `iminuit.Minuit` (overriding
        the defaults below).  Targets the iminuit 1.x API (error_*/limit_*
        kwargs, get_fmin).
        """
        # Initialization
        init = self.get_params()                 # Initial guess
        init.update(error_x0=1, error_y0=1,      # Initial steps
                    error_ampl=1,
                    error_cy2=0.1, error_cxy=0.1,
                    error_r0=0.01, error_L0=1,
                    error_bkgnd=1)
        init.update(limit_ampl=(self.ampl / 10, self.ampl * 10),  # Limits
                    limit_cxy=(0, 10),
                    limit_cy2=(0.1, 10),
                    limit_L0=(1, 100),
                    limit_r0=(1e-2, 1),
                    limit_bkgnd=(0, self.ampl))
        init.update(kwargs)
        # The previous code passed frontend=ConsoleFrontend(), but only
        # `console` is imported from iminuit.frontends, so that raised a
        # NameError; dropped for consistency with Kolmogorov_PSF.fit.
        self.minuit = iminuit.Minuit(self.chi2, **init)
        # Fit
        self.minuit.migrad()
        # Results
        fmin = self.minuit.get_fmin()
        if fmin.is_valid:
            # Save best fit in actual params
            self.set_params(**self.minuit.values)
        print("Chi2 = {}, DoF = {}".format(
            self.minuit.fval, self.flux.count() - self.minuit.narg))
        for key in self.minuit.parameters:
            print("{} = {} ± {}".format(
                key, self.minuit.values[key], self.minuit.errors[key]))
        return self.minuit

    def figure_psf(self, log=False):
        """
        Figure with a contour comparison (left) and radial profile/residual
        panels (right) of the attached data vs. the adjusted model.

        :param bool log: use logarithmic contour normalization
        :return: the matplotlib Figure
        """
        if self.flux is None:
            raise NotImplementedError()
        seeing = self.get_seeing()
        fig = P.figure(tight_layout=True)
        ax1 = fig.add_subplot(1, 2, 1, aspect='equal',
                              xlabel="x [arcsec]", ylabel="y [arcsec]",
                              title=u"Seeing={:.2f}'' @ {:.0f} Å\n"
                              u"({:.2f}'' @ 5000 Å)".format(
                                  seeing, self.lbda, self.get_seeing(5000)))
        ax2 = fig.add_subplot(2, 2, 2,
                              yscale='log', ylabel="Flux")
        ax4 = fig.add_subplot(2, 2, 4,
                              xlabel="r [arcsec]",
                              ylabel="Relative error")
        # Contours: observed image (gray) vs. adjusted model (magma)
        extent = self.get_extent()
        norm = P.matplotlib.colors.LogNorm() if log else None
        import astropy.visualization as AV
        imnorm = AV.ImageNormalize(
            self.flux,
            stretch=AV.LogStretch())
        ax1.imshow(self.flux, extent=extent,
                   norm=imnorm, cmap='gray_r', label="Observed", origin='lower')
        cnt = ax1.contour(self.flux, extent=extent,
                          norm=norm, cmap='gray_r', label="Observed", origin='lower')
        ax1.contour(self.psf(), levels=cnt.levels, extent=extent,
                    norm=norm, cmap='magma', label="Adjusted", origin='lower')
        # Elliptical radius: mark the center and the 2-seeing contour
        r = self.radius_elliptical()
        ax1.plot([self.x0], [self.y0], marker='*', color='g', mec='0.2')
        cnt = ax1.contour(r, levels=[2 * seeing], extent=extent,
                          colors='g', linewidths=0.5, label="r=2''")
        # Radial plot: data points and model profile
        mr = N.ma.MaskedArray(data=r, mask=self.flux.mask)
        ax2.plot(mr, self.flux, color='k', marker='.', ls='none')
        r = N.logspace(-2, N.log10(mr.max()), max(50, int(mr.count() ** 0.5)))
        p = self.radial_profile(r, L0=self.L0) * self.ampl + self.bkgnd
        l, = ax2.plot(r, p, label="L0={:.2f}".format(self.L0))
        ax2.set_autoscaley_on(False)
        ax2.axhline(self.bkgnd, ls='--')
        ax2.legend(loc='best', fontsize='small')
        # Residuals relative to the model
        mp = N.ma.MaskedArray(data=self.psf(), mask=self.flux.mask)
        ax4.plot(mr, (self.flux - mp) / mp, color='k', marker='.', ls='none')
        ax4.axhline(0, color=l.get_color())
        return fig
if __name__ == '__main__':
    # Batch driver: fit a Kolmogorov PSF to every metaslice of every star in
    # the input FITS table and accumulate the fit results as new columns.
    ms = MetaSlices('Data/meta_slices_B_full.fits') #Read the table to treat
    table = ms.tab
    nstars = ms.ns # Number of rows/stars
    nwaves = ms.nw # Number of metaslices/waves
    N.random.seed(0) #Fix random for reproductability
    irows = N.arange(ms.ns) # Row indices
    N.random.shuffle(irows) # Shuffled indices
    t = Table(table[irows])
    # One PSF model instance reused for every fit; lbda is updated per slice.
    kpsf = Kolmogorov_PSF(shape=(ms.ny, ms.nx), lbda=ms.waves[0])
    pnames = kpsf.get_params().keys()
    # New columns: per parameter a (value, error) pair per metaslice, plus
    # chi2, degrees of freedom and the correlation matrix of the fit.
    # NOTE(review): 'DoF' is allocated but never filled below — confirm.
    for name in pnames:
        t[name] = N.zeros((len(t), nwaves, 2))
    t['chi2'] = N.zeros((len(t), nwaves))
    t['DoF'] = N.zeros((len(t), nwaves))
    t['cov_mat'] = N.zeros((len(t), nwaves, len(pnames), len(pnames)))
    counter = 0
    count = 20 #number of stars to treat before writing file
    for i in range(len(t)):
        for j in range(nwaves):
            print('etoile {0}, jd {1},metaslice {2}, wave {3}'.format(
                i, t[i]['jd'], j, t['wave'][i, j]))
            kpsf.lbda = ms.waves[j]
            kpsf.set_data(*ms.extract_metaslice(irows[i], kpsf.lbda))
            # Re-seed the parameters on the first slice of each star, or when
            # the previous fit left a degenerate zero amplitude.
            if j == 0 or kpsf.get_params()['ampl'] == 0:
                kpsf.initial_guess()
            # Failed fits are logged and skipped; their columns stay at zero.
            try:
                kpsf.fit()
                # matrix(correlation=True) is the iminuit 1.x API.
                t['cov_mat'][i, j] = N.array(kpsf.minuit.matrix(correlation=True))
                # NOTE(review): minuit.fcn() re-evaluates the cost at the chi2
                # *default* arguments, not at the best fit — minuit.fval looks
                # intended here; confirm.
                t['chi2'][i, j] = kpsf.minuit.fcn()
                for name in pnames:
                    t[name][i, j, 0] = kpsf.minuit.values[name]
                    t[name][i, j, 1] = kpsf.minuit.errors[name]
            except ValueError as err:
                print(err)
            except RuntimeError as err:
                print(err)
        counter += 1
        # Checkpoint every `count` stars.
        # NOTE(review): a final partial batch (< count stars) is never
        # written — confirm a trailing t.write() isn't needed.
        if counter == count:
            t.write('Data/meta_slices_BvK_treated.fits',overwrite=True) #File to write into
            counter = 0
"m.rey@ipnl.in2p3.fr"
] | m.rey@ipnl.in2p3.fr |
977c8b79c54b1cb10224b1f61fdd478aac3bad96 | 16435aa45aed532911ca7924fc910990c0d76fc1 | /manage.py | c9dfa60cc269a4b3d578fd0aebcbd371ada8bf5f | [] | no_license | konekato/view-atcoder-questions | 14fabf0adae5a5e7f180ebd946e3240c6d68ab05 | 84a5fb43b478bcecdffdc0194e1d887366e0be00 | refs/heads/master | 2020-12-15T10:53:53.883854 | 2020-02-29T07:23:10 | 2020-02-29T07:23:10 | 235,080,363 | 0 | 0 | null | 2020-02-15T06:53:10 | 2020-01-20T10:55:06 | HTML | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks (standard Django manage.py entry point)."""
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'view_atcoder_questions.settings')
    try:
        # Imported lazily so a missing Django install produces the friendly
        # error below instead of a bare ImportError at module load.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the subcommand given on the command line (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"18ec033@ms.dendai.ac.jp"
] | 18ec033@ms.dendai.ac.jp |
4eccb52479c5050a8cb64f03d50f62ec22ebf031 | 083d93a621f0fd411aabd9b1607e83aedd588d2c | /etg/propgridiface.py | 847a0e054be21e3f253b5d29483057e83d7d49fc | [] | no_license | jns4u/Phoenix | 0a8e5b50326d37048aa58d11023308517ace525b | 478e192ccf0d75a04b78c6600963614d1039dd53 | refs/heads/master | 2021-01-09T06:20:02.546100 | 2017-02-05T03:33:00 | 2017-02-05T03:33:00 | 80,965,252 | 1 | 0 | null | 2017-02-05T03:10:08 | 2017-02-05T03:10:08 | null | UTF-8 | Python | false | false | 2,648 | py | #---------------------------------------------------------------------------
# Name: etg/propgridiface.py
# Author: Robin Dunn
#
# Created: 23-Feb-2015
# Copyright: (c) 2015 by Total Control Software
# License: wxWindows License
#---------------------------------------------------------------------------
import etgtools
import etgtools.tweaker_tools as tools
PACKAGE = "wx"
MODULE = "_propgrid"
NAME = "propgridiface" # Base name of the file to generate for this script
DOCSTRING = ""
# The classes and/or the basename of the Doxygen XML files to be processed by
# this script.
ITEMS = [ 'wxPGPropArgCls',
'wxPropertyGridInterface',
]
#---------------------------------------------------------------------------
def run():
    """Parse the Doxygen XML for ITEMS and emit the tweaked wrapper module."""
    # Parse the XML file(s) building a collection of Extractor objects
    module = etgtools.ModuleDef(PACKAGE, MODULE, NAME, DOCSTRING)
    etgtools.parseDoxyXML(module, ITEMS)
    #-----------------------------------------------------------------
    # Tweak the parsed meta objects in the module object as needed for
    # customizing the generated code and docstrings.
    c = module.find('wxPGPropArgCls')
    assert isinstance(c, etgtools.ClassDef)
    # Drop the raw-pointer GetPtr overload; it is not wrapper-friendly.
    c.find('GetPtr').overloads[0].ignore()
    c = module.find('wxPropertyGridInterface')
    c.abstract = True
    # Only the non-const iterator variant is exposed to Python.
    for m in c.findAll('GetIterator'):
        if m.type == 'wxPropertyGridConstIterator':
            m.ignore()
    # Drop SetPropertyValue overloads whose C++ types don't map cleanly onto
    # Python; the remaining overloads cover these cases.
    c.find('SetPropertyValue').findOverload('int value').ignore()
    c.find('SetPropertyValue').findOverload('bool value').ignore()
    c.find('SetPropertyValue').findOverload('wxLongLong_t value').ignore()
    c.find('SetPropertyValue').findOverload('wxULongLong_t value').ignore()
    c.find('SetPropertyValue').findOverload('wxObject *value').ignore()
    module.addItem(
        tools.wxArrayPtrWrapperTemplate('wxArrayPGProperty', 'wxPGProperty', module))
    # wxPGPropArg is a typedef for "const wxPGPropArgCls&" so having the
    # wrappers treat it as a normal type can be problematic. ("new cannot be
    # applied to a reference type", etc.) Let's just ignore it and replace it
    # everywhere with the real type.
    module.find('wxPGPropArg').ignore()
    for item in module.allItems():
        if hasattr(item, 'type') and item.type == 'wxPGPropArg':
            item.type = 'const wxPGPropArgCls &'
    #-----------------------------------------------------------------
    tools.doCommonTweaks(module)
    tools.runGenerators(module)
#---------------------------------------------------------------------------
if __name__ == '__main__':
    run()
| [
"robin@alldunn.com"
] | robin@alldunn.com |
5e843d4b41898209060fd90c9b740c89bfce701b | d5d6f814794fb86543c69528188c7b6c6fe818d5 | /classifier.py | aed6f5a5c21b6d97d0ec0447687a4c94db75ea68 | [] | no_license | aguerra7002/InversionSampling | d61a79247cff82fe0856fdd40b89ec892dd1852f | 09d93e960caf9e900b0ff50714132fc9bc0f8878 | refs/heads/master | 2023-08-25T05:48:22.436982 | 2021-11-04T03:11:49 | 2021-11-04T03:11:49 | 424,456,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | # prerequisites
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLPClassifier(nn.Module):
    """Three-layer MLP classifier trained with extra 'invertibility' penalties.

    Besides the usual NLL classification loss, `loss_function` adds MSE terms
    comparing each hidden activation with its reconstruction obtained by
    inverting the following layer (exact leaky-ReLU inverse + Moore-Penrose
    pseudo-inverse of the weights), encouraging approximately invertible layers.
    """

    def __init__(self, x_dim, h_dim1, h_dim2, out_dim):
        """
        :param x_dim: flattened input dimension
        :param h_dim1: width of the first hidden layer
        :param h_dim2: width of the second hidden layer
        :param out_dim: number of classes
        """
        super(MLPClassifier, self).__init__()
        # encoder part
        self.fc1 = nn.Linear(x_dim, h_dim1)
        self.fc2 = nn.Linear(h_dim1, h_dim2)
        self.fc3 = nn.Linear(h_dim2, out_dim)

    def forward(self, x, return_latent=False):
        """Return per-class log-probabilities of shape (batch, out_dim).

        :param x: input batch of shape (batch, x_dim)
        :param return_latent: if True, also return the two hidden activations
        :return: log-probs, or (h1, h2, log-probs) when `return_latent`
        """
        h1 = F.leaky_relu(self.fc1(x), negative_slope=0.1)
        h2 = F.leaky_relu(self.fc2(h1), negative_slope=0.1)
        # dim given explicitly: implicit-dim log_softmax is deprecated and
        # warns; for 2D inputs dim=-1 matches the previous behavior.
        out = F.log_softmax(self.fc3(h2), dim=-1)
        if return_latent:
            return h1, h2, out
        else:
            return out

    def invert_leaky_relu(self, vec, slope=0.1):
        """Exact inverse of leaky_relu with the given negative slope."""
        new_vec = vec.clone()
        new_vec[vec < 0] /= slope
        return new_vec

    def loss_function(self, data, target):
        """NLL classification loss plus layer-inversion MSE penalties.

        :param data: input batch of any shape (batch, ...) whose flattened
            size matches fc1's input dimension
        :param target: class indices of shape (batch,)
        :return: scalar loss tensor
        """
        # Flatten to (batch, x_dim); -1 generalizes the previous hard-coded
        # 784 (28x28 MNIST) so any resolution matching fc1 works.
        data_flat = data.reshape((data.shape[0], -1))
        h1, h2, preds = self.forward(data_flat, return_latent=True)
        t0 = F.nll_loss(preds, target)
        # Reconstruct h1 by inverting fc2 (pseudo-inverse of the weights),
        # and penalize the reconstruction error.
        dist1 = (self.invert_leaky_relu(h2) - self.fc2.bias) @ torch.linalg.pinv(self.fc2.weight).T
        t1 = F.mse_loss(dist1, h1)
        # Likewise reconstruct the input by inverting fc1.
        dist2 = (self.invert_leaky_relu(h1) - self.fc1.bias) @ torch.linalg.pinv(self.fc1.weight).T
        t2 = F.mse_loss(dist2, data_flat)
        return t0 + t1 + t2
"alexguerra618@gmail.com"
] | alexguerra618@gmail.com |
54ecb86c49656278cd829b496017000c28b118b7 | 93fd5507aa5a529604f1b1530d1ddf811ac28e3c | /Api/Api/settings.py | 5183fb7b5a96d8d45e3876d6bdc367a6b399614f | [] | no_license | Teng0/Django_Api | 50d7293d32a5c1b07bf068daee689b6768d88dae | 9ba567a74c727e893640d401393a1cd8398e336a | refs/heads/main | 2023-05-22T23:53:50.763985 | 2021-06-15T12:41:18 | 2021-06-15T12:41:18 | 377,158,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,268 | py | """
Django settings for Api project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'django-insecure-25ox42mo25k8l#wds!qpr+f#vz=o&a4i37y3m*_dywx0fmddoa'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project-specific apps: Django REST Framework plus the API app itself.
    'rest_framework',
    'api_basic'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"tengosmail@gmail.com"
] | tengosmail@gmail.com |
b2d1bee1c1f29ad7eb9757329b61d459d81ec670 | 9ff9a4871217759235c8556446244b09ad3fd6c2 | /scripts/simple_comparison.py | 665144b3c1ee32bbe87fc09ee9d0b46ac649ba3a | [] | no_license | ATSpitzer/FlightFinder | 65111cf85da10ecaaf0a3a67b61a18867f049512 | 7656323b37d4eaa258a21c39c8cbaff078984565 | refs/heads/master | 2022-11-25T13:22:38.265190 | 2020-07-30T23:31:46 | 2020-07-30T23:31:46 | 277,891,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | from Page_Explorer.Trip.TripResults_Explorer import TripResultsExplorer
from ResultParser import combine_result_dicitonary, filter_combined_results
import json
import pandas
pandas.set_option('display.max_rows', None)
pandas.set_option('display.max_columns', None)
URL_1="https://us.trip.com/hotels/list?city=7291&countryId=108&checkin=2021/02/17&checkout=2021/02/18&optionId=7291&optionType=IntlCity&display=Labuan%20Bajo&crn=1&adult=1&children=0&searchBoxArg=t&travelPurpose=0&ctm_ref=ix_sb_dl&domestic=0"
def perform_search(url, country):
    """Run a Trip.com hotel search for *country* and dump the results.

    Writes the raw results to 'res_<country>.json' and returns them.
    """
    explorer = TripResultsExplorer(start_url=url, country=country)
    # explorer.driver.maximize_window()
    results = explorer.build_results()
    print(json.dumps(explorer.driver.get_cookies()))
    serialized = json.dumps(results)
    # print(pandas.read_json(serialized, orient='index'))
    with open('res_{n}.json'.format(n=country), 'w') as out_file:
        out_file.write(serialized)
    return results
# Run the same search for two storefront locales so prices can be compared.
usa = perform_search(url = URL_1, country='usa')
india = perform_search(url=URL_1, country='india')
# Merge both result sets into one combined dictionary.
end_results = {}
end_results = combine_result_dicitonary(end_results, usa, 'usa')
end_results = combine_result_dicitonary(end_results, india, 'india')
res_comb = json.dumps(end_results)
with open('res_combined.json','w') as j3:
    j3.write(res_comb)
    j3.close()  # NOTE(review): redundant -- the with-block closes the file anyway
#
# Filter the combined results by 'list-price' (see ResultParser.filter_combined_results).
filtered_res = filter_combined_results(end_results, 'list-price')
filt_res = json.dumps(filtered_res)
with open('res_filtered.json','w') as j4:
    j4.write(filt_res)
    j4.close()  # NOTE(review): redundant inside the with-block
#
#
table = pandas.read_json(json.dumps(filtered_res), orient='index')
print(str(table)) | [
"ATSpitzer@users.noreply.github.com"
] | ATSpitzer@users.noreply.github.com |
e83a5ab48045b3772e0757155a62cbe6deddfd54 | 54263a576ee5d2ef9e5bd9c20aa05f93a75065f4 | /peers/accounts/permissions.py | 60613cd54d8745da1932f96a2a08fead6bb8005c | [] | no_license | rmishra7/usgbc.peers | 9f6bfe84e7a05d05b42c2f7b25a72bf2fb311dde | 240a11f2766d0963d604dc41acc909771bb46dd6 | refs/heads/master | 2020-05-21T20:13:15.160419 | 2016-12-27T05:39:00 | 2016-12-27T05:39:00 | 65,134,232 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 338 | py |
from rest_framework import permissions
__all__ = [
'IsNotAuthenticated',
]
class IsNotAuthenticated(permissions.BasePermission):
    """Grant access only to anonymous (un-authenticated) users."""

    def has_permission(self, request, view):
        user = request.user
        if user and user.is_authenticated():
            return False
        return True
| [
"rmishra@usgbc.org"
] | rmishra@usgbc.org |
e934bf6fc00ef71de0e2e6196f8ddca60a64e648 | e3154403f4f93012e97c5163a537789431be50dd | /firstforpython/5.7/zoo.py | 6a7d9da63d9e8999c9119fda0fb47b626522d284 | [] | no_license | leeryu/python-for-sysadmin | c1e6debe7c44e10b8f7a9ec9a218adc0864b6acd | 7777a1b9807378cd6de742b708fcec8841b57356 | refs/heads/master | 2021-05-11T23:34:09.664208 | 2018-01-19T07:02:23 | 2018-01-19T07:02:23 | 117,513,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py |
def hours():
    """Return the zoo's opening-hours string."""
    return 'Open 9-5 daily'
"sexyuck@gmail.com"
] | sexyuck@gmail.com |
b392a03ab73267692b64ee4c97a84ddab5311947 | b692a9a9bc8853111742b038c54cc1b274a337b5 | /img_brightness_show.py | bc25cc33ebfcc65b33aed19c2c3552d76d8b9aa6 | [
"MIT"
] | permissive | tianfr/Image_Process_Methods | cac5f3ba0e67e9a1fb3d5659dcfdd034cac13867 | 0b5ba2329d9afa59841c0e6a45bd214996faaa6d | refs/heads/master | 2022-11-20T12:10:42.153316 | 2020-07-27T12:03:03 | 2020-07-27T12:03:03 | 282,885,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | #!usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:Sui yue
@describe: 灰度直方图,描述每个灰度级在图像矩阵中的像素个数或者占有率
@time: 2019/09/15
"""
import sys
import cv2
import numpy as np
import matplotlib.pyplot as plt
# 对于8位图,图像的灰度级范围式0~255之间的整数,通过定义函数来计算直方图
def calcGrayHist(image):
    """Return the 256-bin gray-level histogram of an 8-bit grayscale image.

    image: 2-D uint8 array; result[i] is the number of pixels with value i.
    Returns a (256,) uint64 array.
    """
    rows, cols = image.shape  # also validates that the input is 2-D
    # np.bincount counts every pixel in a single C-level pass instead of a
    # Python loop over rows*cols pixels (same result, far faster).
    grayHist = np.bincount(image.ravel(), minlength=256).astype(np.uint64)
    return grayHist
# Main: load a grayscale image, compute its histogram, and plot it.
if __name__ == "__main__":
    # Hard-coded sample path -- replace with your own image.
    image = cv2.imread(
        'D:\Computer Vision\Datasets\prostate_MR\DL_Image\DL_Image0001_0001.png', cv2.IMREAD_GRAYSCALE)
    cv2.imshow("image", image)
    print("Usage:python histogram.py imageFile")
    # Compute the gray-level histogram.
    grayHist = calcGrayHist(image)
    # Plot counts against the 256 gray levels.
    x_range = range(256)
    plt.plot(x_range, grayHist, 'r', linewidth=2, c='black')
    # Clamp the axes to the gray-level range and the tallest bin.
    y_maxValue = np.max(grayHist)
    plt.axis([0, 255, 0, y_maxValue])
    plt.xlabel('gray level')  # fixed: was plt.ylabel, which the next call overwrote
    plt.ylabel("number or pixels")
    plt.show()
    cv2.waitKeyEx(0)
| [
"747458467@qq.com"
] | 747458467@qq.com |
6ae98ec9b19602ca3b934564f4ae0d463cb011ee | a99a3781732dff2c0808d42fa0452f3baca4b0e2 | /launch_training_tournament.py | 231f0f17e8c5728b23ad1b706a7636d833e099a6 | [
"MIT"
] | permissive | linearregression/ai-challenger | 51e639ffc1b5c92f1d33b52d53e8e7f5b96b7fee | ae2819ba7b842135673aee3dfc3612783f8d7813 | refs/heads/master | 2020-12-30T19:57:58.638055 | 2015-10-20T16:20:03 | 2015-10-20T16:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | #!/usr/bin/env python
import json
import os
import urllib
import urllib2
import subprocess
import sys
HOST = 'http://127.0.0.1:8081'
SERVER_BINARY = './ai-challenger-grid/ai-challenger-grid'
def main():
    """Entry point: start a local grid server and run a training tournament.

    Usage: launch_training_tournament.py <map_name> <my bot executable>
    """
    # Run from the script's own directory so the relative paths below resolve.
    dirname = os.path.dirname(os.path.realpath(__file__))
    os.chdir(dirname)
    if not os.path.exists(SERVER_BINARY):
        print "Server binary {0} must exist".format(os.path.abspath(SERVER_BINARY))
        print "Please run 'make release' or extract already compiled package in {0}".format(dirname)
        sys.exit(1)
    if len(sys.argv) != 3:
        print "Usage:"
        print "  {0} <map_name> <my bot executable>".format(sys.argv[0])
        print "Available maps: 10x10, 40x40"
        sys.exit(2)
    mybot_exe = os.path.abspath(sys.argv[2])
    # Best-effort kill of any server left over from a previous run.
    subprocess.Popen(['killall', 'ai-challenger-grid'], stderr=subprocess.PIPE).communicate()
    print 'Launching training tournament for {0}'.format(mybot_exe)
    server = subprocess.Popen('./ai-challenger-grid/ai-challenger-grid')
    try:
        # Register our bot plus three reference opponents, then start the match.
        add_bot('my_bot', mybot_exe)
        add_bot('greedy', 'game-grid/greedy.py')
        add_bot('randy', 'game-grid/randy.py')
        add_bot('idle', 'test-grid/test-bots/idle.py')
        launch_tournament(sys.argv[1])
        open_in_browser(HOST + '/tournaments/0')
        print
        print 'Press Enter to shutdown server'
        print
        sys.stdin.readline()
    finally:
        # Always stop the spawned server, even on error or Ctrl-C.
        server.terminate()
def launch_tournament(map_name):
    """Ask the server to start a training tournament on *map_name* for my_bot."""
    req = urllib2.Request(HOST + '/launch-training/' + map_name + '/my_bot')
    req.add_header('Accept', 'application/json')
    # urllib2 only POSTs automatically when a body is attached; overriding
    # get_method forces a body-less POST.
    req.get_method = lambda: 'POST'
    urllib2.urlopen(req)
def add_bot(name, exe):
    """Register a bot executable with the grid server under *name*.

    *exe* is resolved to an absolute path so the server can run it from
    its own working directory.
    """
    exe = os.path.abspath(exe)
    print 'Adding bot {0}: {1}'.format(name, exe)
    try:
        # Variant-style payload ({tag, contents}) expected by /add-bot.
        data = json.dumps(
            {'botName':name,
            'botCommunication':
            {'tag': 'ExecutableBot'
            ,'contents': exe
            }
            })
        req = urllib2.Request(HOST + '/add-bot')
        req.add_header('Content-Type', 'application/json')
        req.add_data(data)  # attaching a body makes urllib2 issue a POST
        urllib2.urlopen(req)
    except urllib2.HTTPError as e:
        print e.reason
        raise  # re-raise so main() still tears the server down
def open_in_browser(f):
    """Open *f* with the platform's default opener utility.

    macOS -> 'open', Windows -> 'start'; anything else is assumed to be
    running X and gets 'xdg-open'.
    """
    platform = sys.platform
    if platform.startswith('darwin'):
        opener = 'open'
    elif platform.startswith('win'):
        opener = 'start'
    else:
        opener = 'xdg-open'
    subprocess.call([opener, f])
if __name__ == '__main__':
main() | [
"divanov@machinezone.com"
] | divanov@machinezone.com |
d7c53619199ac1dfc49c7119f880c6f0920483a1 | 14e4b6a6683de09fc3a7d5ace5b40aca1388d760 | /examples/gr0.py | f3846c19a5f1c22a55b077f39f95500544a09414 | [] | no_license | OptimalPandemic/CS476-3 | f43bfb995c4c90338ac0575d1e44d8d139731c62 | 6f7a59e758761ebf26dfc5f9a36a744d7e4ddca0 | refs/heads/master | 2021-01-20T00:58:42.093346 | 2016-11-05T02:07:48 | 2016-11-05T02:07:48 | 72,062,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | import sys
from turtle import Turtle, mainloop
class KeysMouseEvents:
    """Turtle-graphics demo: click to draw circles; 'c' clears, 'q' quits."""

    def __init__(self):
        """Create a turtle and wire up key/mouse handlers on its screen."""
        super().__init__()
        self.T = Turtle()
        self.screen = self.T.getscreen()
        self.screen.onkey(self.clear, "c")
        self.screen.onkey(self.quit, "q")
        self.screen.onclick(self.draw_fig)
        self.screen.listen()

    def clear(self):
        """Wipe the screen, then re-run setup so the turtle and bindings come back."""
        self.T.screen.clear()
        self.__init__()

    @staticmethod
    def quit():
        """Exit the whole program ('q' key handler)."""
        sys.exit(0)

    def draw_fig(self, x, y):
        """Mouse-click handler: move to (x, y) and draw a radius-20 circle."""
        print(x, y)
        self.T.goto(x, y)
        self.T.circle(20)

    def main(self):
        """Enter the Tk event loop (blocks until the window closes)."""
        mainloop()
def gr0():
    """Create the KeysMouseEvents demo and start its event loop."""
    draw = KeysMouseEvents()
    draw.main()
# Run the demo immediately when this script executes.
gr0()
| [
"austin@campuswise.com"
] | austin@campuswise.com |
c85c401ca39a394abef692d10332763a237fa7ed | 5764941cfdbb67c78afa7861b2df41c153637827 | /chap04树与图/03list_of_depth.py | 906015b4a3f3ee1a4c98c8b9515eed6f983bc9f3 | [
"MIT"
] | permissive | zzsyjl/crack_the_coding_interview | 4b496a5c693d1a8dd9c175132d968ff21b1ba82f | ea76b9f87c321880833fe69bee2b98f4b3f13ff0 | refs/heads/main | 2023-08-14T01:15:54.940998 | 2021-09-17T07:00:09 | 2021-09-17T07:00:09 | 373,381,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from collections import deque
import sys
sys.path.append(
'/Users/jlyang/python_projects/crack_the_coding_interview/chap02链表')
from linked_list import LinkedList
import unittest
"""
特定深度节点链表. 给定一个二叉树, 设计一个算法, 创建每个深度上的节点链表
分析:
这个东西应该也是很有用的. 我用层遍历的思路, 每次集体出一层的时候, 再集体进一层.
"""
from collections import deque
import sys
sys.path.append(
'/Users/jlyang/python_projects/crack_the_coding_interview/chap02链表')
class BinaryNode:
    """A binary-tree node: a name plus optional left/right children."""

    def __init__(self, name, left=None, right=None):
        self.name, self.left, self.right = name, left, right

    def __str__(self) -> str:
        return '%s' % self.name
def create_node_list_by_depth(root):
    """Return one LinkedList of nodes per tree level, via breadth-first walk."""
    levels = []
    frontier = deque([root])
    while frontier:
        level_list = LinkedList()
        # Drain exactly the nodes that make up the current level.
        for _ in range(len(frontier)):
            node = frontier.popleft()
            level_list.add(node)
            if node.left:
                frontier.append(node.left)
            if node.right:
                frontier.append(node.right)
        levels.append(level_list)
    return levels
def example():
    """Build a 3-level complete binary tree (0..6) and print each level's list."""
    root = BinaryNode(0)
    root.left = BinaryNode(1)
    root.right = BinaryNode(2)
    root.left.left = BinaryNode(3)
    root.left.right = BinaryNode(4)
    root.right.left = BinaryNode(5)
    root.right.right = BinaryNode(6)
    levels = create_node_list_by_depth(root)
    # One LinkedList per depth, root level first.
    for i in levels:
        print(i)
if __name__ == "__main__":
example()
| [
"287683119@qq.com"
] | 287683119@qq.com |
f613d66153900cdfab69753db317f2b3e2792278 | 64c8d431c751b1b7a7cb7224107ee40f67fbc982 | /code/python/external/pi3d/constants/__init__.py | a0dfd912eed894eb189c79ca89c51473d892341d | [
"MIT"
] | permissive | silky/echomesh | 6ac4755e4ff5ea3aa2b2b671c0979068c7605116 | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | refs/heads/master | 2021-01-12T20:26:59.294649 | 2013-11-16T23:29:05 | 2013-11-16T23:29:05 | 14,458,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | from __future__ import absolute_import, division, print_function, unicode_literals
"""
pi3d.constants contains constant values, mainly integers, from OpenGL ES 2.0.
"""
VERSION = '0.06'
STARTUP_MESSAGE = """
Pi3D module - version %(version)s
Copyright (c) Tim Skillman, 2012-2013
Copyright (c) Patrick Gaunt, 2012-2013
Copyright (c) Tom Ritchford, 2012-2013
Updates available from www.github.com/tipam/pi3d
""" % {'version': VERSION}
VERBOSE = False
# TODO: get rid of verbose in favor of logging.
# Pick up our constants extracted from the header files with prepare_constants.py
from pi3d.constants.egl import *
from pi3d.constants.gl2 import *
from pi3d.constants.gl2ext import *
from pi3d.constants.gl import *
# Define some extra constants that the automatic extraction misses.
# Null-handle / no-protection sentinel values (all zero) from the EGL and
# DispmanX headers.
EGL_DEFAULT_DISPLAY = 0
EGL_NO_CONTEXT = 0
EGL_NO_DISPLAY = 0
EGL_NO_SURFACE = 0
DISPMANX_PROTECTION_NONE = 0
# Lastly, load the libraries.
def _load_library(name):
  """Load shared library 'lib<name>.so' via ctypes.

  Returns the CDLL handle, or None after logging an error on failure.
  """
  try:
    import ctypes
    return ctypes.CDLL('lib%s.so' % name)
  except Exception:
    # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt still
    # propagate; any loading error is logged instead of raised.
    from echomesh.util import Log
    Log.logger(__name__).error("Couldn't load library %s" % name)
bcm = _load_library('bcm_host')  # Broadcom/Raspberry Pi VideoCore host interface
opengles = _load_library('GLESv2')  # OpenGL ES 2.0 entry points
openegl = _load_library('EGL')  # EGL display/context management
| [
"tom@swirly.com"
] | tom@swirly.com |
e5ec85f0684534bf8f010ac3e339a54ac37dfb2e | ccef8d9a16c0752fdf8de608c8b4f728f0586e1d | /resize_rotate_images.py | 7bb1f3d944d2380face6cc7ddb804df97d83c603 | [] | no_license | Sarath197/Intruder-Alert-System | fdcf79087cda7906fbc5ddf6a002dfcb4f4e76da | cb0bd495e21e428a9f1a453317e63ef30e162de3 | refs/heads/master | 2020-04-10T08:49:02.062196 | 2018-12-08T07:49:56 | 2018-12-08T07:49:56 | 160,915,874 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 768 | py | '''
Sarath Kumar
sarathsmartzs[at]gmail[dot]com
SACET, Chirala
Takes all the images in "input_path" and rotates , resizes them with given parameters.
Modified images are saved at "output_path".
'''
import cv2
from os import listdir
input_path= "input_images/"
output_path= "new_resize/"
# NOTE(review): files are listed from the absolute folder but read back via
# the relative input_path below -- works only when CWD is that folder's parent.
images= listdir("/home/sarath/Desktop/intrudur -2/"+ input_path)
for image in images:
    print(input_path+ image)
    img= cv2.imread(input_path+ image)
    # Normalize every image to 650x490 before rotating.
    resize_img= cv2.resize(img, (650, 490))
    (h, w)= resize_img.shape[:2]
    center= (w/2, h/2)
    # Rotate 270 degrees about the center with a 0.8 scale factor.
    matrix= cv2.getRotationMatrix2D(center, 270, 0.8)
    rotated_img = cv2.warpAffine(resize_img, matrix, (w, h))
    cv2.imwrite(output_path+ image, rotated_img)
    cv2.imshow("Win", rotated_img)
    cv2.waitKey(5)
| [
"noreply@github.com"
] | Sarath197.noreply@github.com |
22ba7a4d8a0971eeedfd25e58733e5baece63ad0 | c73db175fa1b9575609e7865dc544dacb3ee1fa7 | /app/errors.py | a4aa6e8389e3ef23d88a181763e8c56832fba4dc | [] | no_license | ArthurConan/test_parser | ccb1f51b45d710fdb6e65c36790f7fa652abe7b1 | 1c74c81dab907210f0f119f59a1a4348569e794c | refs/heads/master | 2021-03-28T01:12:11.760610 | 2020-03-16T22:10:45 | 2020-03-16T22:10:45 | 247,822,875 | 0 | 0 | null | 2021-03-20T03:10:47 | 2020-03-16T21:48:04 | Python | UTF-8 | Python | false | false | 1,088 | py | import asyncio
import aiohttp
import aioredis
# Exception groups used to wrap network / cache calls in a single `except`.

# Errors that can occur while performing HTTP requests with aiohttp.
# (Fixed: aiohttp.client_exceptions.ClientResponseError was listed twice.)
request_errors = (
    ConnectionRefusedError,
    ConnectionResetError,
    asyncio.TimeoutError,
    aiohttp.InvalidURL,
    aiohttp.client_exceptions.ClientConnectorError,
    aiohttp.client_exceptions.ClientResponseError,
    aiohttp.client_exceptions.ClientOSError,
    aiohttp.client_exceptions.ServerDisconnectedError,
    aiohttp.client_exceptions.ClientConnectionError,
)

# Errors that can occur while talking to Redis via aioredis.
redis_errors = (
    ConnectionRefusedError,
    ConnectionResetError,
    aioredis.RedisError,
    aioredis.ProtocolError,
    aioredis.ReplyError,
    aioredis.MaxClientsError,
    aioredis.AuthError,
    aioredis.PipelineError,
    aioredis.MultiExecError,
    aioredis.WatchVariableError,
    aioredis.ChannelClosedError,
    aioredis.ConnectionClosedError,
    aioredis.ConnectionForcedCloseError,
    aioredis.PoolClosedError,
    aioredis.MasterNotFoundError,
    aioredis.SlaveNotFoundError,
    aioredis.ReadOnlyError,
)
| [
"noreply@github.com"
] | ArthurConan.noreply@github.com |
003c3aaec70843e685a4dec277cb198174d2ec40 | 7eb49bb0e94816bbccf52d4f00459b45d4b2e425 | /aoc2018/__init__.py | 3f8996332d61d8ea2c9ee37771cbc99a31b45975 | [] | no_license | phorkyas-tg/advent-of-code | 0b65360c0d70f2be2d59cbe978a7e8585ce8cd7b | e61dfdde4489f965892d5f71966e20a0a36f1e79 | refs/heads/master | 2022-12-26T07:46:54.761010 | 2022-12-25T09:58:43 | 2022-12-25T09:58:43 | 228,563,146 | 0 | 0 | null | 2022-05-09T19:16:40 | 2019-12-17T07:55:43 | Python | UTF-8 | Python | false | false | 882 | py | from .AocInput import *
from .CalculateFrequency import CalculateFrequency, CalculateFrequencyDuplicate
from .CheckSum import CalculateChecksum
from .HammingDistance import GetCommonCharsFromIds
from .OverlappingSquares import GetMultipleClaimedSquares, GetIntactClaimId
from .GuardSchedule import GenerateGuardSchedule, CalculateStrategyOne, CalculateStrategyTwo
from .PolymerScanner import CalculateLenAfterReactions, GetShortestPolymerAfterImprovement
from .StepOrder import GenerateStepDict, CalculateStepOrder, CalculateWorkerTime
from .BattleMapSolver import CalculateBattleOutcome, BattleTillElfsWin
from .ManhattanArea import GetLargestFiniteArea, GetLargestAreaWithManhattanDistance
from .LicenseFileParser import GetSumOfMetaData, GetSumOfMetaDataAdvanced
from .MarbleGame import GetBestScore, GetBestScoreFast
from ._10_TheStarsAlign import GetStarMessage
| [
"tom.gothan@tracetronic.de"
] | tom.gothan@tracetronic.de |
8919b11db0a35f57941caa8a697d20d6bcaef424 | aac676acd9d9ab5d68ce46b72da8fe010a430bb1 | /Pacman/search/search.py | ab6ee7460169f3a9acf59e35a12262d56582c2cc | [] | no_license | SamuelFoo/Learn | 55609118b502269956784830bdd7e4849b014e1a | 5bb05a2cdb1b5f7294d4ab70df48a57e9d29365f | refs/heads/master | 2022-06-05T17:16:08.398649 | 2020-04-28T03:31:03 | 2020-04-28T03:31:03 | 259,520,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,098 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# TODO: Try using set() for visited, though for corners problem some mutability required (for performance)
# Maybe convert from tuple to list, make changes, then convert back.
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """
    # Every method aborts via util.raiseNotDefined() until a concrete
    # problem (see searchAgents.py) overrides it.
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions  # local import keeps game out of module scope
    s = Directions.SOUTH
    w = Directions.WEST
    # Hard-coded action list for the tinyMaze layout only.
    return [s, s, w, s, w, w, s, w]
def nullHeuristic(state, problem=None):
    """Trivial heuristic: always estimates zero cost to the nearest goal."""
    return 0
def firstSearch(problem, ds, heuristic=nullHeuristic):
    """Generic graph search driven by the frontier data structure *ds*.

    ds: util.Stack -> DFS, util.Queue -> BFS, util.PriorityQueue -> UCS/A*
    (priority = path cost so far + heuristic estimate).
    Returns the list of actions reaching a goal, or None if none exists.
    """
    # isinstance replaces the old `type(ds) == type(pq)` comparison and the
    # throwaway PriorityQueue it needed.
    prioritized = isinstance(ds, util.PriorityQueue)
    startState = problem.getStartState()
    if prioritized:
        ds.push((startState, []), heuristic(startState, problem))
    else:
        ds.push((startState, []))
    visited = []  # list, not set: states are not guaranteed hashable
    while not ds.isEmpty():
        currNode, pathToCurrNode = ds.pop()
        if currNode in visited:
            continue  # already expanded via an earlier/cheaper path
        visited.append(currNode)
        if problem.isGoalState(currNode):
            return pathToCurrNode
        for nextState, action, _stepCost in problem.getSuccessors(currNode):
            newPath = pathToCurrNode + [action]
            if prioritized:
                cost = problem.getCostOfActions(newPath) + heuristic(nextState, problem)
                ds.update((nextState, newPath), cost)
            else:
                ds.push((nextState, newPath))
    return None  # frontier exhausted without reaching a goal
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.
    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.
    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    print("Start:", problem.getStartState())
    print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
    print("Start's successors:", problem.getSuccessors(problem.getStartState()))
    """
    "*** YOUR CODE HERE ***"
    # LIFO frontier -> the most recently discovered node expands first.
    return firstSearch(problem, util.Stack())
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    "*** YOUR CODE HERE ***"
    # FIFO frontier -> nodes expand in order of discovery depth.
    return firstSearch(problem, util.Queue())
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE ***"
    # Priority queue ordered by path cost (heuristic defaults to zero).
    return firstSearch(problem, util.PriorityQueue())
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE ***"
    # Same as UCS, but the priority adds the supplied heuristic estimate.
    return firstSearch(problem, util.PriorityQueue(), heuristic)
# Abbreviations
# Short aliases so the algorithms can be referred to by name elsewhere.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"fooenzesamuel@gmail.com"
] | fooenzesamuel@gmail.com |
49acb7c799821f6f485dc8243c3203145bd9385f | c6db8eccba0f863e464fa23e7c8c5f27d6da277b | /CS/Programming_Languages/Python/Modules/exterior/topics/gui/dearPyGUI/tutorials/_3_item_usage/_3_3_configuration_state_info/configure_items.py | ea4e913f1183433230106f6806d466fcd30d277d | [] | no_license | corridda/Studies | ceabb94f48bd03a31e4414e9af841d6a9b007cf9 | 1aacf52f2762e05a416c9e73ebe20794cb5d21cf | refs/heads/master | 2023-02-05T18:51:04.217528 | 2023-01-28T09:21:03 | 2023-01-28T09:21:03 | 216,492,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import dearpygui.dearpygui as dpg
dpg.create_context()
with dpg.window(label="Tutorial"):
    # configuration set when button is created
    dpg.add_button(label="Apply", width=300)
    # user data and callback set any time after button has been created
    btn = dpg.add_button(label="Apply 2")
    dpg.set_item_label(btn, "Button 57")
    dpg.set_item_width(btn, 200)
# Open the built-in item registry for inspecting the widget tree.
dpg.show_item_registry()
dpg.create_viewport(title='Custom Title', width=800, height=600)
dpg.setup_dearpygui()
dpg.show_viewport()
# Blocks until the viewport is closed; then tear the context down.
dpg.start_dearpygui()
dpg.destroy_context()
| [
"corridda@yandex.ru"
] | corridda@yandex.ru |
2d2a0919eaf9d4900549e260e76a29a86aff5212 | 9f1b8a1ada57198e2a06d88ddcdc0eda0c683df7 | /submission - lab9/set 2/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9/VICTORIA ALEXANDRA ALERS_19376_assignsubmission_file_Lab9.py | 23b6a370b40815a0efeb963db12ada5ea16a12bf | [] | no_license | sendurr/spring-grading | 90dfdced6327ddfb5c311ae8f42ae1a582768b63 | 2cc280ee3e0fba02e95b6e9f45ad7e13bc7fad54 | refs/heads/master | 2020-04-15T17:42:10.781884 | 2016-08-29T20:38:17 | 2016-08-29T20:38:17 | 50,084,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #1________________________________
from math import sin, exp
class F:
    """Evaluator for f(x) = exp(-a*w) * sin(w*x) with configurable a and w."""

    def __init__(self, a, w):
        self.a = a
        self.w = w

    def value(self, x):
        """Evaluate the function at *x* using the current a and w."""
        return exp(-self.a * self.w) * sin(self.w * x)
from math import pi
# Exercise 1 driver: evaluate f at pi, change an attribute, re-evaluate.
f = F(a=1.0, w=0.1)
print (f.value(x=pi))
f.a = 2
print (f.value(pi))
#2---------------------------------------------
class Simple:
    """Wrap a single value *i*; double() replaces it with i + i."""

    def __init__(self, i):
        self.i = i

    def double(self):
        # i + i doubles numbers and repeats sequences (e.g. strings).
        self.i = self.i + self.i
# Exercise 2 driver: double an int four times, then double a string twice.
s1=Simple(4)
for i in range(4):
    s1.double()
print(s1.i)
s2=Simple('Hello')
s2.double(); s2.double()
print(s2.i)
s2.i=100  # the attribute can also be reassigned directly
| [
"sendurr@hotmail.com"
] | sendurr@hotmail.com |
a55ea6fb88744fafbc82c42cd4a4bc6df2028af1 | 98e1f43d148d556bb2ce76515f10e509e854ee1a | /hogehoge/python/sura2yomeruPython/chap5/chap5-3-2.py | 771b54a197290a202e71f36d7fea17be8a6411c1 | [] | no_license | hiroshikinofu/hoihoi | 0688d4a02f4eeab5b2f2a74610addacca5c5b705 | a404940c2066d25714f5da752c9840bfa11d0e5b | refs/heads/master | 2018-10-14T05:52:12.695354 | 2018-08-21T00:40:40 | 2018-08-21T00:40:40 | 117,177,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | # 日付一覧に曜日も表示する
# python3
from datetime import date, timedelta
# Weekday labels Mon..Sun as single kanji characters; date.weekday() returns
# 0 for Monday, which indexes the first character.
youbi = '月火水木金土日'
start = date(2018, 6, 18)
# Print 14 consecutive dates starting at *start*, each with its weekday kanji.
for d in range(14):
    # Offset the start date by d days.
    cur = start + timedelta(days=d)
    wd = cur.weekday()
    print(cur, youbi[wd])
| [
"hiroshikinofu@gmail.com"
] | hiroshikinofu@gmail.com |
7b91c8ada0db683715b51a9cca5e470b4c885b26 | 281210f0b78371bb991df7be50e3fea41c259418 | /learning_users/settings.py | bece60ed129a538b3da095e6677c40113f13c11d | [] | no_license | Faiz4work/first-django-deployment | 5cd0ca8a29059a7c9cc0beadbb088784a166b643 | 2e361e262b674c67986db4d9daac3d649bac236e | refs/heads/master | 2020-05-02T20:03:08.624399 | 2019-04-03T07:41:28 | 2019-04-03T07:41:28 | 178,177,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,716 | py | """
Django settings for learning_users project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
MEDIA_DIR = os.path.join(BASE_DIR,'media')
STATIC_DIR = os.path.join(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): committed secret key -- move it to an environment variable
# before deploying.
SECRET_KEY = '#+a0ioc$2_x8#ui3)o$mk2=14=mgdy(2^xbh7+841w*an(9vq9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# A private LAN address plus localhost, for development access from other devices.
ALLOWED_HOSTS = ['192.168.0.17','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'basic_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_users.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_users.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password hashers tried in order: the first is used for new passwords,
# the rest allow verifying hashes created with older algorithms.
PASSWORD_HASHERS = [
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
]
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {'min_length':9}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR,]
MEDIA_ROOT = MEDIA_DIR
MEDIA_URL = '/media/'
# Project-specific login route used by Django's authentication redirects.
LOGIN_URL = '/basic_app/user_login'
| [
"faizahmed11234@gmail.com"
] | faizahmed11234@gmail.com |
425c324708d6569908e314be60f581c65bb1afea | 82dfca511e209dca52e349ef31f09b633415b85b | /Metodo Grafico/metodografico.py | adf535bbc9e568c59e54c44837ef97ba8451e1c7 | [] | no_license | Cristian-Blanco/investigacion-operaciones | 9abf1f8db8dfbffbc4325420792e61217bf43b92 | 091a320a87f62e001e7e24c00aabdaeb17378a1a | refs/heads/master | 2023-03-11T13:23:36.628644 | 2021-02-24T23:34:32 | 2021-02-24T23:34:32 | 301,534,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,439 | py | import sympy as sym
from sympy import symbols, plot_implicit, And
from sympy.plotting import plot
#VARIABLES
x, y = symbols('x, y') #Declaramos las variables X,Y
diccionario = {} #Este diccionario lo utilizaremos para guardar los maximos o Minimos (Depende el caso)
lista = [] #La lista nos ayudara a organizar los maximos y minimos para los ejercicios 6 en adelante
#VARIABLES CON FUNCIONES
primer_ejercicio = y < (5-2*x)
segundo_ejercicio = y <= 5
tercer_ejercicio = 2*(2*x-y)<2*(x+y)-4
#Funciones Para ejercicios de MAXIMOS Y MINIMOS
# Objective function for exercise 6
def primerFuncionMaximos(x,y):
    """Evaluate the exercise-6 objective P(x, y) = 4x + 6y at one vertex.

    The value is recorded in the module-level dict ``diccionario``
    (objective value -> (x, y)) and a short display label is returned.
    """
    valor = 4 * x + 6 * y
    diccionario[valor] = (x, y)
    return "x: %s y: %s" % (x, y)
# Objective function for exercise 7
def segundoFuncionMaximos(x,y):
    """Evaluate the exercise-7 objective P(x, y) = 6x + 3y at one vertex.

    The value is recorded in the module-level dict ``diccionario``
    (objective value -> (x, y)) and a short display label is returned.
    """
    valor = 6 * x + 3 * y
    diccionario[valor] = (x, y)
    return "x: %s y: %s" % (x, y)
# Objective function for exercise 8
def tercerFuncionMinimos(x,y):
    """Evaluate the exercise-8 objective P(x, y) = 9x - y at one vertex.

    The value is recorded in the module-level dict ``diccionario``
    (objective value -> (x, y)) and a short display label is returned.
    """
    valor = 9 * x - y
    diccionario[valor] = (x, y)
    return "x: %s y: %s" % (x, y)
# Plotting exercises: graph single inequalities and systems of inequalities
print("EJERCICIO 1.\nGrafica y < 5-2x")
plot_implicit(primer_ejercicio, (x,-100,100), (y,-100,100))# shade the feasible region: plot_implicit(expr, (x-range), (y-range))
print("\n\nEJERCICIO 2.\nGrafica y <= 5")
plot_implicit(segundo_ejercicio, (x,-50,50), (y,-50,50))# shade the feasible region
print("\n\nEJERCICIO 3.\nGrafica 2(2x-y)<2(x+y)-4")
plot_implicit(tercer_ejercicio, (x,-100,100), (y,-100,100)) # shade the feasible region
print("\n\nEJERCICIO 4.\nGrafica: \n 2x+y > 3\n 2y-1 > 0\n x>=y")
plot_implicit(And(2*x+y > 3, 2*y - 1 > 0, x>=y), (x,-10,10), (y,-10,10))# region satisfying ALL constraints: plot_implicit(And(...), ranges)
print("\n\nEJERCICIO 5.\nGrafica: \n 2x+3y <= 60\n x >= 0\n y >= 0")
plot_implicit(And((2*x+3*y)<=60, x >= 0, y >= 0), (x,-40,40), (y,-40,40))# region satisfying ALL constraints
# MAXIMISE / MINIMISE A FUNCTION SUBJECT TO CONSTRAINTS
# EXERCISE 6
print("\n\nEJERCICIO 6.\nMaximizar La Funcion sujeta a Las siguientes Restricciones:")
print("Max. P = 4x + 6y\n S.A : 2X+Y<=180\n x+2y<=160\n x+y<=100\n x>=0\n y>=0")
print("Primero Dibujamos las rectas de las Restricciones:\n 2x+y-180=0\n x+2y-160=0\n x+y-100=0\n x=0\n y=0")
plot(180-2*x,(160-x)/2,100-x,0,(x,-100,100)) # draw every constraint line solved for y: plot(f1, f2, ..., (range))
print("Ahora solo escojemos la zona de estudio que es la siguiente:\n")
plot_implicit(And(2*x+y<=180, x+2*y<=160, x+y <=100, x>=0, y>= 0),(x,-100,100), (y,-100,100)) # shade the feasible region
# sym.solve finds the intersection of each pair of constraint lines (each list holds expressions set equal to 0)
inter_1 = sym.solve([2*x+y-180, x+y-100], dict=True)
inter_2 = sym.solve([x+2*y-160, x+y-100], dict=True)
inter_3 = sym.solve([2*x+y-180, y+0], dict=True)
inter_4 = sym.solve([x+0, y+0], dict=True)
inter_5 = sym.solve([x+2*y-160, x+0], dict=True)
print("Los puntos extremos son los siguientes:\n")
print("1:"+primerFuncionMaximos(inter_1[0][x],inter_1[0][y])+"\n2:"+primerFuncionMaximos(inter_2[0][x],inter_2[0][y])+"\n3:"+primerFuncionMaximos(inter_3[0][x],inter_3[0][y])+"\n4:"+primerFuncionMaximos(inter_4[0][x],inter_4[0][y])+"\n5:"+primerFuncionMaximos(inter_5[0][x],inter_5[0][y]))
for i in diccionario.keys(): # collect the objective values into the list
    lista.append(i)
lista.sort() # ascending order
valores = diccionario[lista[-1]] # (x, y) of the largest objective value
print("El maximo absoluto es el punto (x: "+str(valores[0])+" y: "+str(valores[1])+")\n ya que evaluados en la funcion P da como valor maximo : "+str(lista[-1]))
# EXERCISE 7
# HAND-MADE EXAMPLE: "MAXIMISE THE FUNCTION SUBJECT TO THE CONSTRAINTS"
print("\n\nEJERCICIO 7.\nMaximizar La Funcion sujeta a Las siguientes Restricciones:")
print("Max. P = 6x + 3y\n S.A : x-3y<=7\n 3x+y<=14\n y-x<=21")
print("Primero Dibujamos las rectas de las Restricciones:\n x-3y-7=0\n 3x+y-14=0\n y-x-21=0")
plot((7-x)/-3, 14-3*x, 21+x, (x,-50,50)) # draw every constraint line solved for y: plot(f1, f2, ..., (range))
print("Ahora solo escojemos la zona de estudio que es la siguiente:\n")
plot_implicit(And(x-3*y<=7, 3*x+y<=14, y-x<=21),(x,-50,50), (y,-50,50)) # shade the feasible region
diccionario={}
lista=[]
# sym.solve finds the intersection of each pair of constraint lines (expressions set equal to 0)
inter_1 = sym.solve([x-3*y-7, 3*x+y-14], dict=True)
inter_2 = sym.solve([3*x+y-14, y-x-21], dict=True)
inter_3 = sym.solve([y-x-21, x-3*y-7], dict=True)
print("Los puntos extremos son los siguientes:\n")
print("1:"+segundoFuncionMaximos(inter_1[0][x],inter_1[0][y])+"\n2:"+segundoFuncionMaximos(inter_2[0][x],inter_2[0][y])+"\n3:"+segundoFuncionMaximos(inter_3[0][x],inter_3[0][y]))
for i in diccionario.keys(): # collect the objective values into the list
    lista.append(i)
lista.sort() # ascending order
valores = diccionario[lista[-1]] # (x, y) of the largest objective value
print("El maximo absoluto es el punto (x: "+str(valores[0])+" y: "+str(valores[1])+")\n ya que evaluados en la funcion P da como valor maximo : "+str(lista[-1]))
# EXERCISE 8
# HAND-MADE EXAMPLE: "MINIMISE THE FUNCTION SUBJECT TO THE CONSTRAINTS"
# NOTE(review): the banner below says "Max." but this exercise minimises P.
print("\n\nEJERCICIO 8.\nMinimizar La Funcion sujeta a Las siguientes Restricciones:")
print("Max. P = 9*x - y\n S.A : 3x+6y>=18\n 7x+3y>=10\n x+y<=5")
print("Primero Dibujamos las rectas de las Restricciones:\n 3x+6y-18=0\n 7x+3y-10=0\n x+y-5=0")
plot((18-3*x)/6, (10-7*x)/3, 5-x, (x,-10,10)) # draw every constraint line solved for y: plot(f1, f2, ..., (range))
print("Ahora solo escojemos la zona de estudio que es la siguiente:\n")
plot_implicit(And(3*x+6*y>=18, 7*x+3*y>=10, x+y<=5),(x,-10,10), (y,-10,10)) # shade the feasible region
diccionario={}
lista=[]
# sym.solve finds the intersection of each pair of constraint lines (expressions set equal to 0)
inter_1 = sym.solve([x+y-5, 7*x+3*y-10], dict=True)
inter_2 = sym.solve([3*x+6*y-18, x+y-5], dict=True)
inter_3 = sym.solve([3*x+6*y-18, 7*x+3*y-10], dict=True)
print("Los puntos extremos son los siguientes:\n")
print("1:"+tercerFuncionMinimos(inter_1[0][x],inter_1[0][y])+"\n2:"+tercerFuncionMinimos(inter_2[0][x],inter_2[0][y])+"\n3:"+tercerFuncionMinimos(inter_3[0][x],inter_3[0][y]))
for i in diccionario.keys(): # collect the objective values into the list
    lista.append(i)
lista.sort() # ascending order
valores = diccionario[lista[0]] # (x, y) of the smallest objective value
print("El minimo absoluto es el punto (x: "+str(valores[0])+" y: "+str(valores[1])+")\n ya que evaluados en la funcion P da como valor minimo : "+str(lista[0]))
"cristianjmb10@gmail.com"
] | cristianjmb10@gmail.com |
e6297b19b200728ecedcc82b222603e469f71954 | b071bedf74a7e9cdde9040947433447e77de98fc | /portfolio/migrations/0004_login_email.py | 5987218bc89e2d3730d33ff4f5f47f909a4b6caf | [] | no_license | Yomnaali22/Django3_project | 0e9b28024c67c9916d62cd4462861dfd37795425 | 33e6c45417a3277a311df956bbf692a93d9433ca | refs/heads/master | 2023-03-25T01:10:21.708135 | 2021-03-17T16:26:38 | 2021-03-17T16:26:38 | 338,165,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | # Generated by Django 3.1.4 on 2021-02-26 22:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the (capitalised) `Email` field to the
    # `login` model.
    # NOTE(review): default='string' is a placeholder, not a valid e-mail
    # address -- existing rows receive that literal value.
    dependencies = [
        ('portfolio', '0003_auto_20210227_0010'),
    ]
    operations = [
        migrations.AddField(
            model_name='login',
            name='Email',
            field=models.EmailField(default='string', max_length=254),
        ),
    ]
| [
"Yomnaali1718@gmail.com"
] | Yomnaali1718@gmail.com |
0844bbe4a8d7dc1254333da5414f1afff5a87ca7 | f4b694982027ac362de1e9d6755f2943d0355a06 | /DECSKS-24_--_Boundary_conditions_revisited_on_a_cell_centered_perspective/DECSKS/main.py | 64e3792512a19fa7015a7435945c2d43bd03da74 | [] | no_license | dsirajud/IPython-notebooks | 55275e44191c16f5393571522787993f931cfd98 | 6ad9d978c611558525fc9d716af101dc841a393b | refs/heads/master | 2021-01-15T15:33:57.119172 | 2016-07-13T20:08:29 | 2016-07-13T20:08:29 | 35,054,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,042 | py | #!/usr/bin/env python
#============================================================================##
#DECSKS - DEterministic Convected Scheme Kinetic Solver for Boltzmann systems #
#-----------------------------------------------------------------------------#
# 1D1V Vlasov-Poisson system, two species #
# #
# __author__ = David Sirajuddin #
# __version__ = 2.3 #
# __email__ = sirajuddin@wisc.edu #
# __status__ = in development #
# #
# Python code is crafted with some attention to PEP8 style standards #
# https://www.python.org/dev/peps/pep-0008/ for #
# #
# Python 2.7.3 #
# NumPy 1.11.0.dev0+fe64f97 #
# #
# is not compatible with Python 3+ and/or earlier Numpy releases #
# #
# coding conventions: #
# packages/modules -- lowercase, no underscore #
# classes -- CapWords #
# instances -- lowercase (if no symbol conflict), with underscores #
# functions -- lowercase, with underscores unless proper name #
# #
# other (local) conventions: #
# numpy arrays -- lowercase (if no symbol conflict), with underscores #
# iterators -- i, j, n used according to f(x_i, v_j, t^n), #
# phase space var -- z = {x, y, z, vx, vy, z} #
#=============================================================================#
import _mypath # adds relative path to sys.path for flexible deployment
import DECSKS
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
# =========================================================================== #

# Python 2 script (uses print statements); time.clock() was removed in 3.8.
#rm_plots = int(raw_input('remove ALL plot files after simulation is done (1 = yes, 0 = no)?: '))
rm_plots = 0
tic = time.clock()

# Read the simulation input deck (grid sizes, BCs, species parameters, ...).
sim_params = DECSKS.lib.read.inputfile('./etc/params_s18-20.dat')

# both species will use same grid x, vx. Can reuse the same vx and ax here
# given serial implementation. In parallel applications, distinct vx_i, vx_e
# ax_i, ax_e may be desirable depending on how the parallelization is approached
x = DECSKS.lib.domain.Setup(sim_params, var = 'x')
vx = DECSKS.lib.domain.Setup(sim_params, var = 'v', dim = 'x')
ax = DECSKS.lib.domain.Setup(sim_params, 'a', 'x')
t = DECSKS.lib.domain.Setup(sim_params, var = 't')

# set up two species
fe, fi = DECSKS.lib.density.setup(sim_params, t, x, vx) # NOTE mu and tau in ion density must match those just below
# Cell-averaged electron density over the domain (sanity-check print below).
ne_avg = np.sum(fe)*x.width * vx.width / x.L
print ne_avg
# store total mass for conservation checks, TODO do not need the x phase space variable pass in this function
sim_params['me_0'] = np.sum(fe)
sim_params['mi_0'] = np.sum(fi)
print "TIME ZERO, masses are"
print "fe = %g" % np.sum(fe)
print "fi = %g" % np.sum(fi)

# CAUTION: make sure you use the same mu and tau in lib.density for fe, fi as indicated in params.dat input deck
# as the mu specified in params.dat will be used to compute the ion acceleration ax ~ 1 / mu * Ex compared to the
# electron acceleration term ax ~ -Ex
# Echo the distribution-function boundary-condition settings as a sanity check.
print sim_params['BC']['f']['x']['type']
print sim_params['BC']['f']['vx']['type']
print sim_params['BC']['f']['x']['lower']
print sim_params['BC']['f']['x']['upper']
print sim_params['BC']['f']['vx']['lower']
print sim_params['BC']['f']['vx']['upper']
#print sim_params['compute_electric_potential_phi_handle'][x.str] # = None if fourier solver
# print sim_params['phi_BC']['x'] # = None if fourier solver

# Plot the initial (n = 0) electron and ion distributions.
Plot = DECSKS.lib.plots.PlotSetup(fe, 0, t, x, vx, sim_params, species = 'electron')
Plot(n = 0)
Plot = DECSKS.lib.plots.PlotSetup(fi, 0, t, x, vx, sim_params, species = 'ion')
Plot(n = 0)
#Ex_2D = eval(sim_params['compute_electric_field_orchestrator_handle']['x'])(fe, fi, x, vx, 0, sim_params)
#Ex = Ex_2D[:,0] # all columns of the original 2D array are the same, choose the zeroeth column arbitrarily
#phi = eval(sim_params['compute_electric_potential_phi_handle'][x.str])(fe, fi, x, vx, 0, sim_params)
#phi = phi[:,0]
#print phi.shape
# Record the t = 0 diagnostics (only written if enabled in the input deck).
DECSKS.lib.diagnostics.calcs_and_writeout(sim_params,fe, fi, 0, x, vx, sim_params['mu'])
#matplotlib.pyplot.plot(x.gridvalues, phi, linewidth = 2, color = 'blue')
#matplotlib.pyplot.grid()
##matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
#matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
#matplotlib.pyplot.ylabel(r'$\phi (t^n,x)$', fontsize = 18)
#matplotlib.pyplot.title(r's18-21d Electric potential $\phi (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], 0.0, 0))
#it_str = 'it%05d' % 0
#matplotlib.pyplot.savefig('./plots/' + 'phi_s18-21_' + it_str)
#matplotlib.pyplot.clf()
#matplotlib.pyplot.plot(x.gridvalues, Ex, linewidth = 2, color = 'blue')
#matplotlib.pyplot.grid()
#matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
#matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
#matplotlib.pyplot.ylabel(r'$E (t^n,x)$', fontsize = 18)
#matplotlib.pyplot.title(r's18-21d Electric field $E (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], 0.0, 0))
#it_str = 'it%05d' % 0
#matplotlib.pyplot.savefig('./plots/' + 'Ex_s18-21_' + it_str)
#matplotlib.pyplot.clf()
#print sim_params['sigma']['x']['lower']
#print sim_params['sigma']['x']['upper']
print 'simulation has started, status updates are broadcasted after each timestep'
print t.stepnumbers
# Main time-stepping loop: advance both species one split step per iteration.
for n in t.stepnumbers:

    fe, fi = DECSKS.lib.split.scheme(
        fe, fi,
        t, x, vx, ax,
        n,
        sim_params
        )

    # sim_params['sigma_n']['x']['lower'][n] = sim_params['sigma']['x']['lower']
    # sim_params['sigma_n']['x']['upper'][n] = sim_params['sigma']['x']['upper']

    # print sim_params['sigma']['x']['lower']
    # print sim_params['sigma']['x']['upper']

    # Plot both species at this time step.
    Plot = DECSKS.lib.plots.PlotSetup(fe, n, t, x, vx, sim_params, species = 'electron')
    Plot(n)
    Plot = DECSKS.lib.plots.PlotSetup(fi, n, t, x, vx, sim_params, species = 'ion')
    Plot(n)
# Ex_2D = eval(sim_params['compute_electric_field_orchestrator_handle']['x'])(fe, fi, x, vx, n, sim_params)
# Ex = Ex_2D[:,0] # all columns of the original 2D array are the same, choose the zeroeth column arbitrarily
# phi = eval(sim_params['compute_electric_potential_phi_handle'][x.str])(fe, fi, x, vx, 0, sim_params)
# phi = phi[:,0]
# matplotlib.pyplot.plot(x.gridvalues, phi, linewidth = 2, color = 'blue')
# matplotlib.pyplot.grid()
# # matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
# matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
# matplotlib.pyplot.ylabel(r'$\phi (t^n,x)$', fontsize = 18)
# matplotlib.pyplot.title(r's18-21d Electric potential $\phi (t^n, x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], n*t.width, n))
# it_str = 'it%05d' % n
# matplotlib.pyplot.savefig('./plots/' + 'phi_s18-21_' + it_str)
# matplotlib.pyplot.clf()
# matplotlib.pyplot.plot(x.gridvalues, Ex, linewidth = 2, color = 'blue')
# matplotlib.pyplot.grid()
# # matplotlib.pyplot.axis([x.gridvalues[0], x.gridvalues[-1], 0, 35])
# matplotlib.pyplot.xlabel(r'position $x$', fontsize = 18)
# matplotlib.pyplot.ylabel(r'$E (t^n,x)$', fontsize = 18)
# matplotlib.pyplot.title(r's18-21d Electric field $E (x)$: $N_x$ = %d, $N_v$ = %d, $t^n$ = %2.3f, n = %03d' % (sim_params['active_dims'][0], sim_params['active_dims'][1], n*t.width, n))
# it_str = 'it%05d' % n
# matplotlib.pyplot.savefig('./plots/' + 'Ex_s18-21_' + it_str)
# matplotlib.pyplot.clf()
# calcs performed and outputs written only if "record outputs? = yes"
# in ./etc/params.dat
DECSKS.lib.diagnostics.calcs_and_writeout(sim_params,fe, fi, n, x, vx, sim_params['mu'])
DECSKS.lib.status.check_and_clean(t, n, tic, rm_plots)
#sigma_n_left = sim_params['sigma_n']['x']['lower']
#sigma_n_right = sim_params['sigma_n']['x']['upper']
#plt.plot(t.times, sigma_n_left, linewidth = 2, label = r'$\sigma (t, x= -10)$')
#plt.plot(t.times,sigma_n_right, linewidth = 2, label = r'$\sigma (t, x= +10)$')
#plt.grid()
#plt.xlabel(r'time step $n$', fontsize = 18)
#plt.ylabel(r'$\sigma (t,x)$', fontsize = 18)
#plt.legend(loc = 'best')
#phi_left = sim_params['sigma_n']['x']['lower'] # E = -1/2 sigma, phi = 1/2 sigma, here sigma = ni - ne
#phi_right = sim_params['sigma_n']['x']['upper']
#plt.plot(trange,phi_left, linewidth = 2, label = r'$\phi (t, x= -10)$')
#plt.plot(trange,phi_right, linewidth = 2, label = r'$\phi (t, x= +10)$')
#plt.grid()
#plt.xlabel(r'time step $n$', fontsize = 18)
#plt.ylabel(r'$\phi (t,x)$', fontsize = 18)
#plt.legend(loc = 'best')
#plt.show()
# Report total wall/CPU time.  NOTE(review): time.clock() is CPU time on
# Unix under Python 2 and was removed in Python 3.8.
toc = time.clock()
simtime = toc - tic
print "simulation completed in %g seconds = %g minutes = %g hours " % (
    simtime,
    simtime/60.,
    simtime/3600.)
# =============================================================================== #
# END
| [
"sirajuddin@wisc.edu"
] | sirajuddin@wisc.edu |
c00448b92e9ee8ba2796820e55c64a9ef18a190f | 911794909018514bf742b8411eade0d4e4daccaa | /backend/backend/urls.py | 1a3614c13f2768258949e278d7155324e2270442 | [] | no_license | iza17hor/Python-JavaScript-TodoList | b0328dd1bf01007550b98729cdb533c550281243 | 8e08b05b12450ba78b3bef26aeb0741ba1f9a2f1 | refs/heads/main | 2023-03-07T03:33:39.985566 | 2021-02-25T21:58:04 | 2021-02-25T21:58:04 | 342,390,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # All API routes are delegated to the `apka` app's urlconf.
    path('api/', include('apka.urls'))
]
| [
"iza17hor@gmail.com"
] | iza17hor@gmail.com |
1fdca67de448f93540d8a9c52f91526ca3245071 | 0693aa6a146126c43a3114b79d2f458abde4d58a | /julia-version/genGraph.py | 363b90b33362990c8f867c1703a8346f1515c068 | [] | no_license | ReNginx/LRAstar | 5301ddd63c19265671b730477f48cda1788e9b7c | eebd14f7b55d0ab0694e4e5691cae43504558e1d | refs/heads/master | 2020-04-03T22:47:13.997184 | 2019-05-29T04:14:23 | 2019-05-29T04:14:23 | 155,609,582 | 0 | 1 | null | 2018-11-08T22:52:09 | 2018-10-31T19:04:30 | C++ | UTF-8 | Python | false | false | 5,577 | py | import numpy as np
import turtle as tl
import math
import heapq
ang_gap = 30
dis_gap = 1
class segment:
    """A 2-D line segment between points p1 and p2 (stored as numpy arrays)."""
    def __init__(self, p1, p2):
        self.p1 = np.array(p1)
        self.p2 = np.array(p2)
        self.d = self.p2 - self.p1  # direction vector p2 - p1

    def intersect(self, seg):
        # Proper-crossing test using 2-D cross-product signs: the endpoints of
        # each segment must lie strictly on opposite sides of the other's
        # supporting line.  np.cross on 2-D vectors yields the scalar z
        # component; .T on the 1-D arrays self.d / seg.d is a no-op.
        # NOTE: the strict '< 0' comparisons mean touching endpoints and
        # collinear overlaps are NOT reported as intersections.
        return np.cross(((seg.p1 - self.p1).reshape(1,-1)), self.d.T) * np.cross(((seg.p2 - self.p1).reshape(1,-1)), self.d.T) < 0 \
            and np.cross(((self.p1 - seg.p1).reshape(1,-1)), seg.d.T) * np.cross(((self.p2 - seg.p1).reshape(1,-1)), seg.d.T) < 0
class robot:
    """Square mobile robot: centre (x, y) plus heading th in degrees."""

    d = math.sqrt(0.5)*10  # distance from the centre to each corner of the body

    def __init__(self, x, y, th):
        self.x, self.y, self.th = x, y, th

    def get_seg_list(self):
        """Return the four boundary segments of the robot's footprint."""
        corners = []
        for offset in (45, 135, -135, -45):
            theta = (self.th + offset) / 180 * math.pi
            corners.append((self.x + robot.d * math.cos(theta),
                            self.y + robot.d * math.sin(theta)))
        edges = []
        for k in range(4):
            edges.append(segment(corners[k], corners[(k + 1) % 4]))
        return edges

    def get_pos(self):
        """Return the configuration as an (x, y, th) tuple."""
        return (self.x, self.y, self.th)

    def set_pos(self, x, y, th):
        """Teleport the robot to configuration (x, y, th)."""
        self.x, self.y, self.th = x, y, th

    def is_collide(self, obs):
        """True when any robot edge properly crosses any edge of *obs*."""
        for own_edge in self.get_seg_list():
            if any(own_edge.intersect(other) for other in obs.get_seg_list()):
                return True
        return False
class obstacle:
    """Static obstacle described purely by its list of boundary segments."""

    def __init__(self,seg_list):
        # Keep a reference to the caller-supplied list (no copy is made).
        self.seg_list = seg_list

    def get_seg_list(self):
        """Return the boundary segment list."""
        return self.seg_list
def advance(x, y, th, d, nth):
    """Step distance *d* from (x, y) along absolute heading *nth* (degrees).

    The landing point is snapped to the integer grid; the returned heading
    is *nth* folded back below 360.  The previous heading *th* is unused
    beyond matching the (x, y, th) state convention.
    """
    rad = nth / 180 * math.pi
    nx = int(round(x + d * math.cos(rad)))
    ny = int(round(y + d * math.sin(rad)))
    wrapped = nth - 360 if nth >= 360 else nth
    return (nx, ny, wrapped)
def show_seg(seg):
    """Draw a single segment with the global turtle *tl*."""
    # NOTE(review): if the pen happens to be down on entry, the initial
    # setpos will also draw -- callers here keep the pen up between calls.
    tl.setpos(seg.p1)  # jump to the first endpoint
    tl.pendown()
    tl.goto(seg.p2)    # trace the segment
    tl.penup()         # leave the pen up for the next caller
def show_obj(obj):
    """Draw every boundary segment of *obj* (anything with get_seg_list())."""
    tl.penup()
    for edge in obj.get_seg_list():
        show_seg(edge)
def dist(p1, p2):
    """Euclidean distance between the (x, y) parts of two configurations.

    Only the first two components are used, so full (x, y, th) poses may be
    passed directly; the heading component is deliberately ignored.  Uses
    math.hypot, which avoids intermediate overflow/underflow of dx**2 + dy**2.
    """
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
def astar(obs, rob, target = (1, 0.50, 180)):
    """Grid A* search over (x, y, heading) states, drawing progress via turtle.

    States are expanded by stepping dis_gap in every ang_gap heading; the
    search stops when a state within 0.5 of *target* (x, y only) is found,
    then the path is traced back through `visited` and the robot is drawn
    along it.
    NOTE(review): relies on module globals left/right/bottom/top, ang_gap and
    dis_gap; *rob* is mutated as a scratch collision probe.
    """
    # Priority queue of (f = g + h, g, state); on ties the tuples compare
    # g and then the state tuple itself.
    q = []
    heapq.heappush(q, (dist(rob.get_pos(), target), 0, rob.get_pos()))
    visited = {rob.get_pos(): False}  # state -> predecessor; False marks the root
    flag = True  # stays True until the goal neighbourhood is reached
    while len(q) > 0 and flag:
        cur = heapq.heappop(q)
        print(cur)
        di= cur[1]    # g: cost accrued so far
        cur = cur[2]  # the (x, y, th) state itself
        #tl.goto(cur[0]*100, cur[1]*100)
        # tl.pendown()
        # tl.penup()
        for ang in range(0, 360, ang_gap):
            nxt = advance(cur[0], cur[1], cur[2], dis_gap, ang+cur[2])
            rob.set_pos(*nxt)
            # Discard successors that leave the world rectangle.
            if not(left <= nxt[0] and nxt[0] <= right and bottom <= nxt[1] and nxt[1] <= top):
                continue
            if not rob.is_collide(obs) and nxt not in visited:
                visited[nxt] = cur
                if dist(nxt, target) < 0.5:
                    flag = False  # goal reached; exit both loops
                    break
                heapq.heappush(q, (dist(nxt, target)+di+dist(nxt, cur), di+dist(nxt, cur), nxt))
                show_seg(segment((cur[0], cur[1]), (nxt[0], nxt[1])))
    if not flag:
        # Walk the predecessor chain from the goal back to the start,
        # drawing the robot at each configuration on the path.
        # NOTE(review): `nxt` is the last successor generated above; if the
        # queue empties without reaching the goal this branch never runs.
        while visited[nxt] != False:
            rob.set_pos(nxt[0], nxt[1], nxt[2])
            show_obj(rob)
            nxt = visited[nxt]
            print(nxt)
if __name__ == "__main__":
    # Earlier world-rectangle experiments, kept for reference:
    # top = 1000
    # bottom = -1000
    # left = -1000
    # right = 1000
    # top = 2.5
    # bottom = -3
    # left = -3
    # right = 2.5
    # Active world bounds (also read by astar() through module globals).
    top = 55
    bottom = 0
    left = 0
    right = 55
    tl.setworldcoordinates(left,bottom,right,top)
    tl.speed(0)  # fastest turtle animation
    # Obstacle corner points: two boundary walls plus interior barriers.
    p1 = (0,0)
    p2 = (0,40)
    p3 = (40,0)
    p4 = (0, 14)
    p5 = (2, 14)
    p6 = (8, 14)
    p7 = (40, 14)
    p8 = (10, 14)
    p9 = (10, 40)
    target = (10, 5, 180)
    obs = obstacle([segment(p1, p2),
                    segment(p1, p3),
                    segment(p6, p7),
                    segment(p4, p5),
                    segment(p8, p9)])
    show_obj(obs)
    # Enumerate every (x, y, heading) grid state and assign it a 1-based id.
    # NOTE(review): `id` shadows the builtin of the same name.
    id = dict()
    edg = []
    cnt = 0
    for i in np.arange(bottom, top, dis_gap):
        for j in np.arange(left, right, dis_gap):
            for ang in np.arange(0, 360, ang_gap):
                cnt += 1
                id[(i,j,ang)] = cnt
                print(i,j,ang)
    #print(id(1, 55, 60))
    # Build the edge list: from each state try every heading step.  An edge is
    # (from-id, to-id, real cost, lazy cost); the real cost is the sentinel
    # 0x3f3f3f3f ("infinite") when either endpoint configuration collides.
    for i in np.arange(bottom, top, dis_gap):
        print(i)
        for j in np.arange(left, right, dis_gap):
            for ang in np.arange(0, 360, ang_gap):
                cur = (i,j,ang)
                idCur = id[cur]
                for newAng in np.arange(0, 360, ang_gap):
                    nxt = advance(cur[0], cur[1], cur[2], dis_gap, newAng+cur[2])
                    if not(left <= nxt[0] and nxt[0] < right and bottom <= nxt[1] and nxt[1] < top):
                        continue
                    idNxt = id[nxt]
                    # Emit each undirected edge only once (smaller id first).
                    if (idNxt < idCur):
                        continue
                    lazy = round(dist(cur, nxt))
                    real = lazy
                    rob = robot(i,j,ang)
                    if (rob.is_collide(obs)):
                        real = 0x3f3f3f3f
                    rob.set_pos(*nxt)
                    if rob.is_collide(obs):
                        real = 0x3f3f3f3f
                    # if (real != 0x3f3f3f3f):
                    #     show_seg(segment((cur[0], cur[1]), (nxt[0], nxt[1])))
                    edg.append((idCur, idNxt, real, lazy))
    # Dump the graph for the external (Julia) solver: node count, edge count,
    # start id, goal id, a constant, then the four fields of every edge,
    # one value per line.
    # NOTE(review): the goal id uses (10, 6, 180) while `target` above is
    # (10, 5, 180) -- confirm which configuration is intended.
    f= open("input.txt","w")
    print(len(id), file=f)
    print(len(edg), file=f)
    print(id[(30,30,0)], file=f)
    print(id[(10,6,180)], file=f)
    print(10,file=f)
    for e in edg:
        for i in range(4):
            print(e[i], file=f)
    f.close()
| [
"969765303@qq.com"
] | 969765303@qq.com |
59b8200d9e426b9b624b7b691217b17661c322ac | 2c505ac232a9c56cc46503c5c2e77c2a7cdb13bc | /app.py | a64ac8d46c208fd50731e557ae399d2a78f18f86 | [] | no_license | underhood31/EzyChair | 4b612e6ee94e7c96a36fddb4fc22f846c14a69a0 | ba5771cd8dfddfc62762478fc4002a7b706d5da5 | refs/heads/master | 2020-04-26T14:02:31.391383 | 2019-03-05T16:40:51 | 2019-03-05T16:40:51 | 173,599,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import flask
from flask import render_template
from flask import request
app = flask.Flask(__name__)
@app.route('/')
def homepage():
    """Serve the EzyChair landing page at the site root."""
    page = render_template("./EzyChair.html")
    return page
if __name__ == "__main__" :
    # debug=True enables the reloader and interactive debugger --
    # development convenience only; never run with debug in production.
    app.run(debug=True)
"manavjeet18295@iiitd.ac.in"
] | manavjeet18295@iiitd.ac.in |
db65f69a9e0e554a65106f54ff445628c3458f7c | 839d8d7ccfa54d046e22e31a2c6e86a520ee0fb5 | /icore/high/thread/thread_queue.py | 7e1d562ecf087f475cb24370b431819ad85ae3b5 | [] | no_license | Erich6917/python_corepython | 7b584dda737ef914780decca5dd401aa33328af5 | 0176c9be2684b838cf9613db40a45af213fa20d1 | refs/heads/master | 2023-02-11T12:46:31.789212 | 2021-01-05T06:21:24 | 2021-01-05T06:21:24 | 102,881,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | # -*- coding: utf-8 -*-
# @Time : 2017/12/29
# @Author : LIYUAN134
# @File : thread_queue.py
# @Commment:
#
# -*- coding: UTF-8 -*-
import Queue
import threading
import time
exitFlag = 0
class myThread(threading.Thread):
    # Worker thread: repeatedly pulls items from the shared queue `q`
    # (via process_data) until the module-level exitFlag is set.
    def __init__(self, threadID, name, q):
        threading.Thread.__init__(self)
        self.threadID = threadID  # numeric id, informational only
        self.name = name          # also consumed by threading.Thread as the thread name
        self.q = q                # work queue shared by all workers
    def run(self):
        print "Starting " + self.name
        process_data(self.name, self.q)
        print "Exiting " + self.name
def process_data(threadName, q):
    # Consume items from queue q until the global exitFlag becomes truthy.
    # queueLock guards the empty-check + get pair so two workers cannot both
    # pass the empty() test and then race for the last item.
    # NOTE(review): exitFlag is read without synchronisation -- workable for
    # a single int flag under CPython, but a worker may sleep up to 1 s
    # before noticing shutdown.
    # NOTE(review): the empty() check uses the global workQueue while get()
    # uses the parameter q; in this script both refer to the same queue.
    while not exitFlag:
        queueLock.acquire()
        if not workQueue.empty():
            data = q.get()
            queueLock.release()
            print "%s processing %s" % (threadName, data)
        else:
            queueLock.release()
            time.sleep(1)
threadList = ["Thread-1", "Thread-2", "Thread-3"]
nameList = ["One", "Two", "Three", "Four", "Five"]
queueLock = threading.Lock()
workQueue = Queue.Queue(10)  # shared work queue, capacity 10
threads = []
threadID = 1
# Create the worker threads
for tName in threadList:
    thread = myThread(threadID, tName, workQueue)
    thread.start()
    threads.append(thread)
    threadID += 1
# Fill the queue
queueLock.acquire()
for word in nameList:
    workQueue.put(word)
queueLock.release()
# Busy-wait until the queue has been drained
while not workQueue.empty():
    pass
# Tell the workers it is time to exit
exitFlag = 1
# Wait for all worker threads to finish
for t in threads:
    t.join()
print "Exiting Main Thread"
| [
"1065120559@qq.com"
] | 1065120559@qq.com |
8f9d55a7ec42f1f6e1469f2eb18176ef4fb5e402 | d85abbce34bc6aa43b9069168688c063949b0b8f | /trivia/trivia/wsgi.py | c5baa3e6f87a5578683e53c3c070dd9d5e413d0a | [] | no_license | wilsonramstead/TriviaGame | 048b6e3929c16da0c40d88338d667f9d5779fa65 | 8d4b562a381c271c23eda5a7e0790932f7e737cd | refs/heads/master | 2020-06-14T08:35:21.263753 | 2019-07-04T05:20:51 | 2019-07-04T05:20:51 | 194,960,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | """
WSGI config for trivia project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the app is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trivia.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"ramsteadwilson@gmail.com"
] | ramsteadwilson@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.