index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
67,752 | luozhouyang/stupidtree | refs/heads/master | /stupidtree/core/node_test.py | # Copyright (c) 2018 luozhouyang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from .node import Node
class TestNode(unittest.TestCase):
    """Equality semantics of Node: data, tag and parent must all agree."""

    def test_equality(self):
        # Same fixtures as before; only the n0/n4 pair is expected to be equal.
        tagged = Node(data='', tag='TAG_0', parent=None)
        twin = Node(data='', tag=None, parent=None)
        nodes = [
            Node(data='', tag=None, parent=None),     # n0: baseline
            Node(data='A', tag=None, parent=None),    # n1: differs in data
            tagged,                                   # n2: differs in tag
            Node(data='', tag=None, parent=tagged),   # n3: differs in parent
            twin,                                     # n4: same as n0
            Node(data='', tag='TAG_1', parent=None),  # n5: other tag
            Node(data='', tag='TAG_0', parent=twin),  # n6: tag and parent set
        ]
        # Exhaustively compare every unordered pair instead of listing all
        # twenty-one assertions by hand.
        for i in range(len(nodes)):
            for j in range(i + 1, len(nodes)):
                if (i, j) == (0, 4):
                    self.assertEqual(nodes[i], nodes[j])
                else:
                    self.assertNotEqual(nodes[i], nodes[j])


if __name__ == "__main__":
    unittest.main()
| {"/stupidtree/examples/address/pcd_tree_test.py": ["/stupidtree/examples/address/pcd_tree.py"]} |
67,753 | luozhouyang/stupidtree | refs/heads/master | /stupidtree/examples/address/pcd_tree.py | # Copyright (c) 2018 luozhouyang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
from stupidtree.core.indexed_tree import IndexedTree
from stupidtree.core.indexer import NodeDictIndexer
from stupidtree.examples.address.level import Level
from stupidtree.examples.address.node import AddressNode
class PCDInterface(abc.ABC):
    """Contract for address trees exposing per-level node accessors."""

    @abc.abstractmethod
    def provinces(self):
        """Return the set of province-level nodes."""
        raise NotImplementedError()

    @abc.abstractmethod
    def cities(self):
        """Return the set of city-level nodes."""
        raise NotImplementedError()

    @abc.abstractmethod
    def districts(self):
        """Return the set of district-level nodes."""
        raise NotImplementedError()

    @abc.abstractmethod
    def contains(self, key):
        """Return whether the tree holds any node whose tag equals ``key``."""
        raise NotImplementedError()
class PCDTree(IndexedTree, PCDInterface):
    """
    Chinese address tree. Addresses contain Province, City and District
    levels; nodes are grouped into per-level sets on insert so the
    ``provinces()``/``cities()``/``districts()`` accessors required by
    ``PCDInterface`` are O(1).

    Fixes relative to the original:
      * The level sets were stored as ``self.provinces`` etc., shadowing the
        identically named accessor methods on every instance, so calling
        ``tree.provinces()`` raised TypeError. The sets now live in
        ``self._provinces`` etc. and the methods work as the interface intends.
      * ``indexer=NodeDictIndexer()`` was a mutable default argument, silently
        shared by every tree constructed without an explicit indexer.
    """

    def __init__(self, indexer=None):
        """
        Construct tree.
        :param indexer: nodes' indexer; a fresh NodeDictIndexer when omitted
        """
        super().__init__(indexer=indexer if indexer is not None else NodeDictIndexer())
        self._provinces = set()
        self._cities = set()
        self._districts = set()

    def _level_set(self, node):
        """Return the set tracking ``node``'s level, or None for other levels
        (COUNTRY and anything below DISTRICT are intentionally untracked)."""
        return {
            Level.PROVINCE: self._provinces,
            Level.CITY: self._cities,
            Level.DISTRICT: self._districts,
        }.get(node.tag)

    def on_insert(self, node):
        super().on_insert(node)
        bucket = self._level_set(node)
        if bucket is not None:
            bucket.add(node)

    def on_remove(self, node):
        super().on_remove(node)
        bucket = self._level_set(node)
        if bucket is not None:
            # discard, not remove: removing a node that was never tracked at
            # this level must not raise KeyError.
            bucket.discard(node)

    def _create_root_node(self, words, depth):
        # The root always represents the whole country.
        return AddressNode(data='', level=Level.COUNTRY, parent=None)

    def _create_node(self, node, words, depth):
        # depth 0 is the first level below the root, i.e. Level(2) == PROVINCE.
        return AddressNode(data=words[depth], level=Level(depth + 2),
                           parent=node)

    def provinces(self):
        return self._provinces

    def cities(self):
        return self._cities

    def districts(self):
        return self._districts

    def contains(self, key):
        # self.get(key) yields the nodes indexed under ``key`` (possibly None
        # or empty); truthiness covers both cases.
        return bool(self.get(key))
| {"/stupidtree/examples/address/pcd_tree_test.py": ["/stupidtree/examples/address/pcd_tree.py"]} |
67,754 | luozhouyang/stupidtree | refs/heads/master | /stupidtree/examples/address/level_test.py | # Copyright (c) 2018 luozhouyang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from .level import Level
class TestLevel(unittest.TestCase):
    """Level members carry the expected integer ranks and are all distinct."""

    def test_level(self):
        expected = [
            (Level.COUNTRY, 1),
            (Level.PROVINCE, 2),
            (Level.CITY, 3),
            (Level.DISTRICT, 4),
            (Level.ROAD, 5),
        ]
        # Each member equals its integer rank.
        for member, rank in expected:
            self.assertEqual(rank, member)
        # Every pair of distinct members compares unequal.
        for i in range(len(expected)):
            for j in range(i + 1, len(expected)):
                self.assertNotEqual(expected[i][0], expected[j][0])


if __name__ == "__main__":
    unittest.main()
| {"/stupidtree/examples/address/pcd_tree_test.py": ["/stupidtree/examples/address/pcd_tree.py"]} |
67,755 | luozhouyang/stupidtree | refs/heads/master | /stupidtree/core/indexer_test.py | # Copyright (c) 2018 luozhouyang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
from .indexer import NodeDictIndexer
from .node import Node
class TestNodeDictIndexer(unittest.TestCase):
    """NodeDictIndexer deduplicates equal nodes per key and supports removal."""

    def test_node_dict_indexer(self):
        n0 = Node(data='A', tag='TAG_0', parent=None)
        n1 = Node(data='B', tag='TAG_1', parent=None)
        # n2 is equal to n0, so indexing it must not add a second entry.
        n2 = Node(data='A', tag='TAG_0', parent=None)
        indexer = NodeDictIndexer()
        indexer.put(n0.data, n0)
        indexer.put(n1.data, n1)
        indexer.put(n2.data, n2)
        self.assertEqual(1, len(indexer.get(n0.data)))
        self.assertEqual(1, len(indexer.get(n1.data)))
        indexer.remove(n0)
        # Fix: removed a leftover debug loop that printed the remaining nodes
        # to stdout on every test run.
        self.assertEqual(0, len(indexer.get(n0.data)))
        self.assertEqual(1, len(indexer.get(n1.data)))
        indexer.remove(n1)
        self.assertEqual(0, len(indexer.get(n1.data)))


if __name__ == "__main__":
    unittest.main()
| {"/stupidtree/examples/address/pcd_tree_test.py": ["/stupidtree/examples/address/pcd_tree.py"]} |
67,756 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0005_auto_20210601_0943.py | # Generated by Django 3.2.3 on 2021-06-01 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: convert Employee document/image columns to FileFields
    storing uploads under per-document media folders."""

    dependencies = [
        ('employee', '0004_auto_20210519_0944'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employee',
            name='aadharBackImage',
            field=models.FileField(upload_to='media/aadhar/'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='aadharFrontImage',
            field=models.FileField(upload_to='media/aadhar/'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='pancardImage',
            field=models.FileField(upload_to='media/pancard/'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='passbookImage',
            field=models.FileField(upload_to='media/bankAccount/'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='profileImage',
            field=models.FileField(upload_to='media/profile/'),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,757 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/views.py | from django.shortcuts import render
from rest_framework import mixins
from rest_framework import generics
from django_filters.rest_framework import DjangoFilterBackend
from django.shortcuts import get_list_or_404, get_object_or_404
from .models import Attendance
from .serializers import AttendanceSerializer, FetchAttendanceSerializer
class FetchAttendance(generics.ListCreateAPIView):
    """
    List/create endpoint returning attendance rows with the related employee
    pre-joined (select_related avoids one query per row).

    Fixes: removed a class-body ``print(str(queryset.query))`` that dumped SQL
    to stdout at import time, and repaired ``get_object`` which called
    ``self.queryset()`` — a QuerySet is not callable, so it raised TypeError.
    """
    queryset = Attendance.objects.select_related('empID')
    serializer_class = FetchAttendanceSerializer

    def get_object(self):
        # DRF's get_queryset() re-evaluates the class queryset per request.
        return get_object_or_404(self.get_queryset())
class AttendanceInput(mixins.ListModelMixin,
                      mixins.CreateModelMixin,
                      generics.GenericAPIView):
    """
    GET: list attendance rows, filterable by empID and daydate.
    POST: create a new attendance row.
    """
    # get method handler
    queryset = Attendance.objects.all().order_by("id")
    serializer_class = AttendanceSerializer
    filter_backends = (DjangoFilterBackend,)
    # NOTE(review): ``filter_fields`` is the legacy django-filter name; recent
    # releases expect ``filterset_fields`` — confirm the installed version.
    filter_fields = ('empID', 'daydate',)

    def get(self, request, *args, **kwargs):
        # Delegate to ListModelMixin (pagination + serialization).
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # Delegate to CreateModelMixin (validation + save).
        return self.create(request, *args, **kwargs)
class AttendanceList(mixins.RetrieveModelMixin,
                     mixins.UpdateModelMixin,
                     mixins.DestroyModelMixin,
                     generics.GenericAPIView):
    """Retrieve, update or delete a single attendance row addressed by ``id``."""
    # get method handler
    queryset = Attendance.objects.all().order_by("id")
    serializer_class = AttendanceSerializer
    lookup_field = 'id'

    def get(self, request, id):
        # Fetch one row by its primary key.
        return self.retrieve(request, id=id)

    def put(self, request, id):
        # Full update of the addressed row.
        return self.update(request, id=id)

    def delete(self, request, id):
        return self.destroy(request, id=id)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,758 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0004_alter_attendance_daydate.py | # Generated by Django 3.2.3 on 2021-05-21 05:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Attendance.daydate an auto-populated DateField."""

    dependencies = [
        ('attendance', '0003_alter_attendance_daydate'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='daydate',
            field=models.DateField(auto_now_add=True),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,759 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0003_alter_attendance_daydate.py | # Generated by Django 3.2.3 on 2021-05-21 05:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Attendance.daydate to a plain DateTimeField."""

    dependencies = [
        ('attendance', '0002_alter_attendance_status'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='daydate',
            field=models.DateTimeField(),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,760 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/urls.py |
from django.urls import path
# from .views import Register, EmployeeList, EmployeeDetail
from .views import EmployeeDetails, EmployeeList
# Employee app routes: registration plus per-employee detail by integer id.
urlpatterns = [
    path('register/', EmployeeList.as_view(), name="register"),
    path('employeeList/<int:id>/', EmployeeDetails.as_view(), name="employeeList"),
]
# Fix: removed the stale commented-out route table referencing the retired
# Register/EmployeeDetail views.
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,761 | shashikant1997k/wlAttendance_api | refs/heads/main | /user/migrations/0003_rename_token_user_accesstoken.py | # Generated by Django 3.2.3 on 2021-05-25 11:12
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename User.token to User.accessToken."""

    dependencies = [
        ('user', '0002_rename_passowrd_user_password'),
    ]

    operations = [
        migrations.RenameField(
            model_name='user',
            old_name='token',
            new_name='accessToken',
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,762 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0001_initial.py | # Generated by Django 3.2.3 on 2021-05-17 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the Employee table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Employee',
            fields=[
                ('id', models.BigAutoField(auto_created=True,
                 primary_key=True, serialize=False, verbose_name='ID')),
                ('empID', models.CharField(max_length=20, unique=True)),
                ('name', models.CharField(max_length=150)),
                ('aadharNumber', models.CharField(max_length=20, unique=True)),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('mobile', models.CharField(max_length=20, unique=True)),
                ('branch', models.CharField(max_length=255)),
                ('address', models.CharField(max_length=255)),
                ('role', models.CharField(max_length=50)),
                ('date_joined', models.DateField()),
                ('dob', models.DateField()),
                # Image stored as text here; later migrations move it to a
                # FileField.
                ('image', models.TextField()),
            ],
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,763 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0019_alter_attendance_daydate.py | # Generated by Django 3.2.3 on 2021-05-24 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop auto_now_add from Attendance.daydate (plain DateField)."""

    dependencies = [
        ('attendance', '0018_alter_attendance_empid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='daydate',
            field=models.DateField(),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,764 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0003_auto_20210519_0317.py | # Generated by Django 3.2.3 on 2021-05-19 03:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add bank/ID-document fields to Employee, rename image to
    profileImage, and give several text fields empty-string defaults."""

    dependencies = [
        ('employee', '0002_auto_20210517_1116'),
    ]

    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='image',
            new_name='profileImage',
        ),
        migrations.AddField(
            model_name='employee',
            name='IFSCCode',
            field=models.CharField(default='', max_length=50),
        ),
        # Document images default to a placeholder URL; replaced with real
        # placeholders in migration 0004 and FileFields in 0005.
        migrations.AddField(
            model_name='employee',
            name='aadharBackImage',
            field=models.TextField(default='https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png'),
        ),
        migrations.AddField(
            model_name='employee',
            name='aadharFrontImage',
            field=models.TextField(default='https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png'),
        ),
        migrations.AddField(
            model_name='employee',
            name='accountNumber',
            field=models.CharField(default='', max_length=50),
        ),
        migrations.AddField(
            model_name='employee',
            name='bankname',
            field=models.CharField(default='', max_length=150),
        ),
        migrations.AddField(
            model_name='employee',
            name='pancard',
            field=models.CharField(default='', max_length=20, unique=True),
            # One-off default used only to populate existing rows.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='employee',
            name='pancardImage',
            field=models.TextField(default='https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png'),
        ),
        migrations.AddField(
            model_name='employee',
            name='passbookImage',
            field=models.TextField(default='https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='address',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='employee',
            name='branch',
            field=models.CharField(default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='employee',
            name='empID',
            field=models.CharField(max_length=20, unique=True),
        ),
        migrations.AlterField(
            model_name='employee',
            name='name',
            field=models.CharField(default='', max_length=150),
        ),
        migrations.AlterField(
            model_name='employee',
            name='role',
            field=models.CharField(default='', max_length=50),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,765 | shashikant1997k/wlAttendance_api | refs/heads/main | /user/serializers.py | from django.db.models import Q # for queries
from uuid import uuid4

from django.contrib.auth.hashers import check_password, make_password
from django.core.exceptions import ValidationError

from rest_framework import serializers
from rest_framework.validators import UniqueValidator

from .models import User
class UserSerializer(serializers.ModelSerializer):
    """Registration serializer: unique username/email, write-only hashed password."""

    email = serializers.EmailField(
        required=True,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )
    username = serializers.CharField(
        required=True,
        validators=[UniqueValidator(queryset=User.objects.all())]
    )

    class Meta:
        model = User
        fields = ['id', 'username', 'email', 'password', 'role', 'accessToken']
        extra_kwargs = {
            'password': {'write_only': True}
        }

    def create(self, validated_data):
        # Hash before persisting so the raw password never reaches the DB.
        hashed = make_password(validated_data['password'])
        validated_data['password'] = hashed
        return super().create(validated_data)
class UserLoginSerializer(serializers.ModelSerializer):
    """
    Login serializer.

    ``email`` accepts either an email address (contains '@') or a username.
    On success, a fresh UUID access token is stored on the user and returned.

    Bug fixes relative to the original:
      * Credentials were checked with ``Q(password=<raw password>)`` even
        though registration (UserSerializer.create) stores hashes produced by
        make_password, so login always failed for properly registered users.
        The password is now verified with check_password.
      * ``if not email and not password`` only rejected requests missing both
        values; a request missing just the email then crashed on
        ``'@' in email``. Either missing value is now rejected up front.
    """

    # to accept either username or email
    email = serializers.CharField()
    password = serializers.CharField()
    accessToken = serializers.CharField(required=False, read_only=True)

    def validate(self, data):
        """Verify credentials, mark the user logged in, and issue a token."""
        email = data.get("email", None)
        password = data.get("password", None)
        if not email or not password:
            raise ValidationError("Details not entered.")
        # Resolve the user by email or username, then verify the password
        # against the stored hash.
        lookup = {'email': email} if '@' in email else {'username': email}
        try:
            user = User.objects.get(**lookup)
        except User.DoesNotExist:
            raise ValidationError(
                {"message": "User credentials are not correct.", "code": "401"})
        if not check_password(password, user.password):
            raise ValidationError(
                {"message": "User credentials are not correct.", "code": "401"})
        if user.ifLogged:
            raise ValidationError(
                {"message": "User already logged in.", "code": "203"})
        user.ifLogged = True
        data['accessToken'] = uuid4()
        user.accessToken = data['accessToken']
        user.save()
        return data

    class Meta:
        model = User
        fields = (
            'email',
            'password',
            'accessToken',
        )
        read_only_fields = (
            'accessToken',
        )
class UserLogoutSerializer(serializers.ModelSerializer):
    """
    Logout serializer: looks the user up by access token, clears the token and
    the logged-in flag, and reports a status string.

    Fixes: removed a debug ``print`` that leaked the raw token into server
    logs, and narrowed a bare ``except Exception`` that also swallowed
    programming errors (and needlessly re-wrapped the "not logged in" error).
    """

    accessToken = serializers.CharField()
    status = serializers.CharField(required=False, read_only=True)

    def validate(self, data):
        accessToken = data.get("accessToken", None)
        try:
            user = User.objects.get(accessToken=accessToken)
        except User.DoesNotExist as e:
            raise ValidationError(str(e))
        if not user.ifLogged:
            raise ValidationError("User is not logged in.")
        user.ifLogged = False
        user.accessToken = ""
        user.save()
        data['status'] = "User is logged out."
        return data

    class Meta:
        model = User
        fields = (
            'accessToken',
            'status',
        )
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,766 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0013_alter_attendance_empid.py | # Generated by Django 3.2.3 on 2021-05-24 08:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Attendance.empID a nullable FK to Employee
    (rows are deleted in cascade with their employee)."""

    dependencies = [
        ('employee', '0004_auto_20210519_0944'),
        ('attendance', '0012_alter_attendance_empid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='empID',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='employee.employee'),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,767 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0008_auto_20210521_0847.py | # Generated by Django 3.2.3 on 2021-05-21 08:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: convert in/out timing columns from CharField to TimeField."""

    dependencies = [
        ('attendance', '0007_auto_20210521_0846'),
    ]

    operations = [
        # NOTE(review): default='' is not a valid time value for a TimeField —
        # presumably tolerated by the backend in use; confirm.
        migrations.AlterField(
            model_name='attendance',
            name='timing_in',
            field=models.TimeField(default=''),
        ),
        migrations.AlterField(
            model_name='attendance',
            name='timing_out',
            field=models.TimeField(default=''),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,768 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0002_auto_20210517_1116.py | # Generated by Django 3.2.3 on 2021-05-17 11:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add Employee.isActive, relax empID uniqueness, and give
    the image column a placeholder default."""

    dependencies = [
        ('employee', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='isActive',
            # NOTE(review): default=1 is an int on a CharField — presumably
            # intended as '1'; confirm backend coercion.
            field=models.CharField(default=1, max_length=2),
        ),
        migrations.AlterField(
            model_name='employee',
            name='empID',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='employee',
            name='image',
            field=models.TextField(default='https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png'),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,769 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0007_auto_20210521_0846.py | # Generated by Django 3.2.3 on 2021-05-21 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: store in/out timings as short CharFields (later converted
    to TimeField in 0008)."""

    dependencies = [
        ('attendance', '0006_auto_20210521_0823'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='timing_in',
            field=models.CharField(default='', max_length=12),
        ),
        migrations.AlterField(
            model_name='attendance',
            name='timing_out',
            field=models.CharField(default='', max_length=12),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,770 | shashikant1997k/wlAttendance_api | refs/heads/main | /user/urls.py | from django.urls import path
from .views import Register, Login, Logout
# User auth routes: login, registration, and logout endpoints.
urlpatterns = [
    path('userLogin/', Login.as_view(), name="userLogin"),
    path('userRegister/', Register.as_view(), name="userRegister"),
    path('userLogout/', Logout.as_view(), name="userLogout"),
]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,771 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0004_auto_20210519_0944.py | # Generated by Django 3.2.3 on 2021-05-19 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: stores the four document images as TextField URLs with
    # public placeholder defaults (superseded by ImageFields in 0006).
    dependencies = [
        ('employee', '0003_auto_20210519_0317'),
    ]
    operations = [
        migrations.AlterField(
            model_name='employee',
            name='aadharBackImage',
            field=models.TextField(default='https://blog.qburst.com/wp-content/uploads/2019/10/01_aadhar_front_side_original.jpg'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='aadharFrontImage',
            field=models.TextField(default='https://blog.qburst.com/wp-content/uploads/2019/10/01_aadhar_front_side_original.jpg'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='pancardImage',
            field=models.TextField(default='https://yourspj.files.wordpress.com/2011/06/fake-pan-card_yourspj.jpg'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='passbookImage',
            field=models.TextField(default='https://qph.fs.quoracdn.net/main-qimg-14d1798dac81721780d1404cb5620251'),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,772 | shashikant1997k/wlAttendance_api | refs/heads/main | /user/migrations/0001_initial.py | # Generated by Django 3.2.3 on 2021-05-25 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the User model.
    # NOTE(review): 'passowrd' is misspelled and 'token' differs from the
    # current model's 'password' / 'accessToken' fields — presumably renamed
    # by a later migration; never edit applied migration history.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=100, unique=True)),
                ('email', models.CharField(max_length=100, unique=True)),
                ('passowrd', models.CharField(default='', max_length=50)),
                ('role', models.CharField(default='', max_length=50)),
                ('ifLogged', models.BooleanField(default=False)),
                ('token', models.CharField(default='', max_length=500, null=True)),
            ],
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,773 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0002_alter_attendance_status.py | # Generated by Django 3.2.3 on 2021-05-20 05:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: narrows Attendance.status from 20 chars to a 2-char code.
    dependencies = [
        ('attendance', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='status',
            field=models.CharField(max_length=2),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,774 | shashikant1997k/wlAttendance_api | refs/heads/main | /user/models.py | from django.db import models
class User(models.Model):
    """Application account used by the login/register/logout views."""

    username = models.CharField(max_length=100, unique=True)
    email = models.CharField(max_length=100, unique=True)
    password = models.CharField(max_length=50, default="")
    role = models.CharField(max_length=50, default="")
    # Presumably toggled by the login/logout views — TODO confirm.
    ifLogged = models.BooleanField(default=False)
    accessToken = models.CharField(max_length=500, null=True, default="")

    def __str__(self):
        # Bug fix: the template "{} -{}" has two placeholders but only one
        # argument was supplied, so str(user) raised IndexError. Follow the
        # "<primary> -<secondary>" pattern used by the other models.
        return "{} -{}".format(self.username, self.email)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,775 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/migrations/0006_auto_20210602_0446.py | # Generated by Django 3.2.3 on 2021-06-02 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employee', '0005_auto_20210601_0943'),
]
operations = [
migrations.AlterField(
model_name='employee',
name='aadharBackImage',
field=models.ImageField(upload_to='media/aadhar/'),
),
migrations.AlterField(
model_name='employee',
name='aadharFrontImage',
field=models.ImageField(upload_to='media/aadhar/'),
),
migrations.AlterField(
model_name='employee',
name='pancardImage',
field=models.ImageField(upload_to='media/pancard/'),
),
migrations.AlterField(
model_name='employee',
name='passbookImage',
field=models.ImageField(upload_to='media/bankAccount/'),
),
migrations.AlterField(
model_name='employee',
name='profileImage',
field=models.ImageField(upload_to='media/profile/'),
),
]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,776 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/serializers.py | from django.db.models import Q # for queries
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from .models import Employee
class EmployeeRegisterSerializer(serializers.ModelSerializer):
    """Serializes the full set of Employee fields for register/list/detail APIs."""

    class Meta:
        model = Employee
        fields = [
            'id',
            'empID',
            'name',
            'aadharNumber',
            'aadharFrontImage',
            'aadharBackImage',
            'pancard',
            'pancardImage',
            'email',
            'mobile',
            'branch',
            'address',
            'role',
            'date_joined',
            'dob',
            'profileImage',
            'isActive',
            'bankname',
            'accountNumber',
            'IFSCCode',
            'passbookImage',
        ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,777 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/models.py | from django.db import models
from employee.models import Employee
class Attendance(models.Model):
    """One attendance record (in/out punch plus status) per employee per day."""

    # Soft FK keyed on the business field Employee.empID; SET_NULL keeps the
    # record if the employee row is removed, db_constraint=False skips the
    # DB-level foreign-key constraint.
    empID = models.ForeignKey(
        Employee, on_delete=models.SET_NULL, to_field='empID', db_constraint=False, null=True)
    # Stamped automatically when the row is created.
    daydate = models.DateField(auto_now_add=True)
    # daydate = models.DateField()
    # Punch times kept as plain strings; format enforced by callers — TODO confirm.
    timing_in = models.CharField(max_length=12, default="")
    timing_out = models.CharField(max_length=12, default="")
    status = models.CharField(max_length=2)

    def __str__(self):
        return "{} -{}".format(self.empID, self.daydate)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,778 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/views.py | from django.shortcuts import render
from rest_framework import mixins
from rest_framework import generics
from django_filters.rest_framework import DjangoFilterBackend
from .models import Employee
from .serializers import EmployeeRegisterSerializer
from django.db import IntegrityError
from rest_framework.response import Response
from rest_framework import status
class EmployeeList(mixins.ListModelMixin,
                   mixins.CreateModelMixin,
                   generics.GenericAPIView):
    """List employees (GET, filterable by branch/empID) or create one (POST)."""
    # get method handler
    queryset = Employee.objects.all().order_by("id")
    serializer_class = EmployeeRegisterSerializer
    filter_backends = (DjangoFilterBackend,)
    # NOTE(review): `filter_fields` is the legacy django-filter name; recent
    # versions expect `filterset_fields` — confirm against the installed version.
    filter_fields = ('branch', 'empID',)

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return self.create(request, *args, **kwargs)
class EmployeeDetails(mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin,
                      mixins.DestroyModelMixin,
                      generics.GenericAPIView):
    """Retrieve / update / delete a single Employee, looked up by `id`."""
    queryset = Employee.objects.all()
    serializer_class = EmployeeRegisterSerializer
    lookup_field = 'id'

    def get(self, request, id):
        return self.retrieve(request, id=id)

    def put(self, request, id):
        try:
            # Bug fix: super(mixins.UpdateModelMixin, self).update(...) skips
            # UpdateModelMixin in the MRO, and no class after it defines
            # update(), so every PUT raised AttributeError. self.update()
            # resolves to UpdateModelMixin.update as intended.
            return self.update(request, id=id)
        except IntegrityError:
            # Surface unique-field collisions (empID, email, ...) as a 400
            # instead of a server error.
            content = {'error': 'IntegrityError'}
            return Response(content, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, id):
        return self.destroy(request, id=id)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,779 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0011_alter_attendance_empid.py | # Generated by Django 3.2.3 on 2021-05-24 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: reverts empID to a plain unique CharField business key.
    dependencies = [
        ('attendance', '0010_alter_attendance_empid'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='empID',
            field=models.CharField(max_length=20, unique=True),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,780 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/urls.py |
from django.urls import path
from .views import AttendanceInput, AttendanceList, FetchAttendance
# Attendance API routes: record a punch, list records by id, and fetch
# records with embedded employee data.
urlpatterns = [
    path('attendanceInput/', AttendanceInput.as_view(), name="attendanceInput"),
    path('attendanceList/<int:id>/',
         AttendanceList.as_view(), name="attendanceList"),
    path('fetchAttendance/',
         FetchAttendance.as_view(), name="fetchAttendance"),
]
# urlpatterns = [
# # register todo get, post
# path('register/', EmployeeList.as_view()),
# # register todo put, patch delete
# path('employee-list/<int:pk>', EmployeeDetail.as_view()),
# ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,781 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/serializers.py | from django.db.models import Q # for queries
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from .models import Attendance
from employee.serializers import EmployeeRegisterSerializer
class AttendanceSerializer(serializers.ModelSerializer):
    """Flat serializer for Attendance rows (empID exposed as the raw FK value)."""

    class Meta:
        model = Attendance
        fields = [
            'id', 'empID', 'daydate',
            'timing_in', 'timing_out', 'status',
        ]
class FetchAttendanceSerializer(serializers.ModelSerializer):
    """Attendance rows with the related Employee embedded as `empData`."""

    # Nested read of the FK target through the employee serializer.
    empData = EmployeeRegisterSerializer(source="empID")

    class Meta:
        model = Attendance
        fields = [
            'id', 'empData', 'empID', 'daydate',
            'timing_in', 'timing_out', 'status',
        ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,782 | shashikant1997k/wlAttendance_api | refs/heads/main | /employee/models.py | from django.db import models
class Employee(models.Model):
    """Master employee record: identity, KYC document images, contact and bank details."""

    # Business key; Attendance.empID targets this field (to_field='empID').
    empID = models.CharField(max_length=20, unique=True)
    name = models.CharField(max_length=150, default="")
    aadharNumber = models.CharField(max_length=20, unique=True)
    # Earlier revisions stored document images as placeholder URLs:
    # aadharFrontImage = models.TextField(
    #     default="https://blog.qburst.com/wp-content/uploads/2019/10/01_aadhar_front_side_original.jpg")
    # aadharBackImage = models.TextField(
    #     default="https://blog.qburst.com/wp-content/uploads/2019/10/01_aadhar_front_side_original.jpg")
    aadharFrontImage = models.ImageField(upload_to='media/aadhar/')
    aadharBackImage = models.ImageField(upload_to='media/aadhar/')
    pancard = models.CharField(max_length=20, unique=True)
    # pancardImage = models.TextField(
    #     default="https://yourspj.files.wordpress.com/2011/06/fake-pan-card_yourspj.jpg")
    pancardImage = models.ImageField(upload_to='media/pancard/')
    email = models.EmailField(max_length=255, unique=True)
    mobile = models.CharField(max_length=20, unique=True)
    branch = models.CharField(max_length=255, default="")
    address = models.CharField(max_length=255, default="")
    role = models.CharField(max_length=50, default="")
    date_joined = models.DateField()
    dob = models.DateField()
    # NOTE(review): CharField with an integer default (1); default="1" would
    # be type-correct — confirm before changing, as it would emit a migration.
    isActive = models.CharField(max_length=2, default=1)
    # profileImage = models.TextField(
    #     default="https://cdn.pixabay.com/photo/2015/03/04/22/35/head-659652_960_720.png")
    profileImage = models.ImageField(upload_to='media/profile/')
    bankname = models.CharField(max_length=150, default="")
    accountNumber = models.CharField(max_length=50, default="")
    IFSCCode = models.CharField(max_length=50, default="")
    # passbookImage = models.TextField(
    #     default="https://qph.fs.quoracdn.net/main-qimg-14d1798dac81721780d1404cb5620251")
    passbookImage = models.ImageField(upload_to='media/bankAccount/')

    def __str__(self):
        return "{} -{}".format(self.name, self.email)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,783 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/admin.py | from django.contrib import admin
from .models import Attendance
# Expose Attendance records in the Django admin.
admin.site.register(Attendance)
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,784 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0022_alter_attendance_empid.py | # Generated by Django 3.2.3 on 2021-06-03 02:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: turns empID into a soft FK (no DB constraint) to
    # Employee.empID with SET_NULL on delete — matches the current model.
    dependencies = [
        ('employee', '0006_auto_20210602_0446'),
        ('attendance', '0021_alter_attendance_empid'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='empID',
            field=models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='employee.employee', to_field='empID'),
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,785 | shashikant1997k/wlAttendance_api | refs/heads/main | /attendance/migrations/0001_initial.py | # Generated by Django 3.2.3 on 2021-05-20 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema for Attendance. empID started life as a
    # unique CharField and the timings as auto-set TimeFields; later
    # migrations in this app rework all three.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Attendance',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('empID', models.CharField(max_length=20, unique=True)),
                ('daydate', models.DateTimeField(auto_now_add=True)),
                ('timing_in', models.TimeField(auto_now_add=True)),
                ('timing_out', models.TimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=20)),
            ],
        ),
    ]
| {"/attendance/views.py": ["/attendance/models.py", "/attendance/serializers.py"], "/employee/urls.py": ["/employee/views.py"], "/user/serializers.py": ["/user/models.py"], "/employee/serializers.py": ["/employee/models.py"], "/attendance/models.py": ["/employee/models.py"], "/employee/views.py": ["/employee/models.py", "/employee/serializers.py"], "/attendance/urls.py": ["/attendance/views.py"], "/attendance/serializers.py": ["/attendance/models.py", "/employee/serializers.py"], "/attendance/admin.py": ["/attendance/models.py"]} |
67,786 | go925315/CNN | refs/heads/master | /AlexNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class AlexNet(nn.Module):
    """AlexNet-style CNN for 10-class classification; returns log-probabilities."""

    def __init__(self):
        super(AlexNet, self).__init__()
        conv_stack = [
            # Stage 1: conv + pool.
            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 2: conv + pool.
            nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            # Stage 3: three convs + pool.
            # NOTE(review): the first 3x3 conv uses padding=2 where the classic
            # AlexNet uses padding=1; the adaptive pool below absorbs the size
            # difference — confirm intent.
            nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        # Force a fixed 6x6 spatial size so the classifier works for any
        # sufficiently large input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 10),
        )

    def forward(self, x):
        feats = self.avgpool(self.features(x))
        flat = torch.flatten(feats, 1)
        return F.log_softmax(self.classifier(flat), dim=1)
| {"/main.py": ["/GoogleNet.py"]} |
67,787 | go925315/CNN | refs/heads/master | /main.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import transforms
import matplotlib.pyplot as plt
import numpy as np
import time
# import AlexNet
# import VGG
import GoogleNet
# os.environ["CUDA_VISIBLE_DEVICES"]="1"
# Fixed CUDA device used by both train() and test(); no CPU fallback.
device = torch.device("cuda:0")
print(device)
def test(model, testloader):
    """Evaluate `model` on `testloader` and print top-1 accuracy.

    Runs on the module-level `device`, in eval mode under no_grad.
    """
    model.to(device)
    model.eval()
    n_correct, n_seen = 0, 0
    with torch.no_grad():
        for images, labels in testloader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            n_seen += labels.size(0)
            n_correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * n_correct / n_seen))
def train():
    """Train a GoogleNet on CIFAR-10 for 100 epochs, evaluating after each.

    Downloads CIFAR-10 into ../data if missing, logs per-batch timing and
    loss roughly ten times per epoch, and calls the module-level test()
    after every epoch. Uses the module-level `device`.
    """
    Batch_size = 16
    EPOCH = 100
    # Resize to 256 so the network's conv stack sees a large enough input;
    # normalize each channel to roughly [-1, 1].
    transform = transforms.Compose(
        [transforms.Resize(256),
         transforms.ToTensor(),
         transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)),]
    )
    trainset = torchvision.datasets.CIFAR10(root='../data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=Batch_size,
                                              shuffle=True, num_workers=0)
    testset = torchvision.datasets.CIFAR10(root='../data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=Batch_size,
                                             shuffle=False, num_workers=0)
    # Alternative architectures kept for quick switching:
    # net = torch.load('AlexNet.pkl')
    # net = AlexNet.AlexNet()
    # net = VGG.VGG('VGG19')
    net = GoogleNet.GoogleNet()
    print(net)
    net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    train_data = len(trainloader)  # batches per epoch, used for log cadence
    end = time.time()
    for epochs in range(EPOCH):
        net.train(True)
        batch_time_mean = []
        end = time.time()
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Track wall-clock time per batch for the periodic report.
            batch_time = time.time() - end
            end = time.time()
            batch_time_mean.append(batch_time)
            # Log ~10 times per epoch (skip the first couple of batches).
            if i % (int(train_data /10)) == 0 and i > 1:
                print('[%d, %5d] loss: %f' % (epochs+1, i+1, loss.item()))
                print('batch time = %f' % (np.mean(batch_time_mean)))
        test(net, testloader)
    # torch.save(net, 'AlexNet.pkl')
# torch.save(net, 'AlexNet.pkl')
# Script entry point: run training (which also evaluates after each epoch).
if __name__ == "__main__":
    train()
67,788 | go925315/CNN | refs/heads/master | /ResNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchsummary
class basicBlock(nn.Module):
    """ResNet-18/34 residual block: two 3x3 convs plus an identity or 1x1-projected shortcut."""

    # Output channels = out_channals * expansion.
    expansion = 1

    def __init__(self, in_channels, out_channals, stride=1, downsampling=False):
        super(basicBlock, self).__init__()
        self.downsampling = downsampling
        # Main path: conv3x3(stride) -> BN -> ReLU -> conv3x3 -> BN.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channals, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channals),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channals, out_channals, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channals),
        )
        # Projection shortcut when the caller signals a shape change.
        if self.downsampling:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channals, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channals),
            )

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsampling else x
        return F.relu(self.conv(x) + shortcut)
class bottleneck(nn.Module):
    """ResNet-50/101/152 bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (4x) with shortcut."""

    # Output channels = out_channals * expansion.
    expansion = 4

    def __init__(self, in_channels, out_channals, stride=1, downsampling=False):
        super(bottleneck, self).__init__()
        self.downsampling = downsampling
        # Main path: 1x1 reduce -> 3x3(stride) -> 1x1 expand, BN after each.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channals, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channals),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channals, out_channals, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channals),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channals, out_channals * self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channals * self.expansion),
        )
        # Projection shortcut when the caller signals a shape change.
        if self.downsampling:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channals * self.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channals * self.expansion),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.downsample(x) if self.downsampling else x
        return self.relu(self.conv(x) + shortcut)
class ResNet(nn.Module):
    """Generic ResNet: stem -> four residual stages -> global pool -> FC.

    NOTE(review): the final Linear is followed by Sigmoid, so outputs are
    per-class values in (0, 1) rather than raw logits — confirm this matches
    the loss used by callers.
    """

    def __init__(self, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool (input downsampled 4x overall).
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        self.inplanes = 64
        self.conv2 = self._make_layer(block, layers[0], 64, stride=1)
        self.conv3 = self._make_layer(block, layers[1], 128, stride=2)
        self.conv4 = self._make_layer(block, layers[2], 256, stride=2)
        self.conv5 = self._make_layer(block, layers[3], 512, stride=2)
        self.avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
        self.fc = nn.Sequential(
            nn.Linear(512 * block.expansion, num_classes),
            nn.Sigmoid(),
        )

    def _make_layer(self, block, layer, planes, stride=1):
        # The first block of a stage always gets a projection shortcut
        # (it may change stride and/or channel count); the rest keep shape.
        stage = [block(in_channels=self.inplanes, out_channals=planes, stride=stride, downsampling=True)]
        self.inplanes = planes * block.expansion
        stage.extend(
            block(in_channels=self.inplanes, out_channals=planes, stride=1)
            for _ in range(1, layer)
        )
        return nn.Sequential(*stage)

    def forward(self, x):
        out = x
        for stage in (self.conv1, self.conv2, self.conv3,
                      self.conv4, self.conv5, self.avg_pool):
            out = stage(out)
        return self.fc(torch.flatten(out, 1))
def ResNet18():
    """ResNet-18: basicBlock x [2, 2, 2, 2]."""
    return ResNet(basicBlock, [2,2,2,2])
def ResNet34():
    """ResNet-34: basicBlock x [3, 4, 6, 3]."""
    return ResNet(basicBlock, [3,4,6,3])
def ResNet50():
    """ResNet-50: bottleneck x [3, 4, 6, 3]."""
    return ResNet(bottleneck, [3,4,6,3])
def ResNet101():
    """ResNet-101: bottleneck x [3, 4, 23, 3]."""
    return ResNet(bottleneck, [3,4,23,3])
def ResNet152():
    """ResNet-152: bottleneck x [3, 8, 36, 3]."""
    return ResNet(bottleneck, [3,8,36,3])
if __name__ == "__main__":
    # Smoke test: build the deepest variant and run one random image through it.
    net = ResNet152()
    # print(net.fc[0])
    x = torch.randn(1,3,224,224)
    print(net(x))
| {"/main.py": ["/GoogleNet.py"]} |
67,789 | go925315/CNN | refs/heads/master | /VGG.py | import torch
import torch.nn as nn
# Per-variant conv specs: each int is the output channel count of a
# 3x3 conv block, 'M' is a 2x2/2 max-pool.
cfg = {
    'VGG11' : [64, 'M', 128, 'M', 256, 256 , 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG-style CNN built from a layer spec in the module-level `cfg` table.

    The classifier is sized for a 512-channel 8x8 feature map, i.e. an input
    such as 3x256x256 after the five pooling stages.
    """

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Sequential(
            nn.Linear(512 * 8 * 8, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 10),
        )

    def forward(self, x):
        feats = self.features(x)
        flat = torch.flatten(feats, 1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate a spec list (ints and 'M' markers) into a Sequential."""
        layers = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            layers.extend([
                nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                nn.BatchNorm2d(spec),
                nn.ReLU(inplace=True),
            ])
            channels = spec
        # Trailing 1x1/1 average pool is a no-op kept for parity with the
        # reference implementation.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
if __name__ == "__main__":
    # Smoke test: construct the deepest variant and print its layout.
    net = VGG('VGG19')
    print(net)
| {"/main.py": ["/GoogleNet.py"]} |
67,790 | go925315/CNN | refs/heads/master | /GoogleNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchsummary
class basicConv2d(nn.Module):
    """Conv2d -> BatchNorm -> ReLU, with conv kwargs passed straight through."""

    def __init__(self, in_channels, out_channals, **kwargs):
        super(basicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channals, **kwargs)
        self.bn = nn.BatchNorm2d(out_channals)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class inceptionBlock(nn.Module):
    """GoogLeNet Inception module: four parallel branches concatenated on channels.

    NOTE(review): every 1x1 conv is created with padding=1, so each branch
    (including the pool branch via its 1x1) grows the feature map by 2 pixels
    per side. The branches stay size-consistent with each other, but this
    differs from the canonical Inception module — confirm intent.
    """

    def __init__(self, _in_channels, n1x1, n3x3reduce, n3x3, n5x5reduce, n5x5, poolproj):
        super(inceptionBlock, self).__init__()
        # Branch 1: plain 1x1 conv.
        self.b1_1x1 = basicConv2d(_in_channels, n1x1, kernel_size=1, padding=1)
        # Branch 2: 1x1 reduce -> 3x3.
        self.b2_1x1 = basicConv2d(_in_channels, n3x3reduce, kernel_size=1, padding=1)
        self.b2_3x3 = basicConv2d(n3x3reduce, n3x3, kernel_size=3, padding=1)
        # Branch 3: 1x1 reduce -> 5x5.
        self.b3_1x1 = basicConv2d(_in_channels, n5x5reduce, kernel_size=1, padding=1)
        self.b3_5x5 = basicConv2d(n5x5reduce, n5x5, kernel_size=5, padding=2)
        # Branch 4: 3x3 max-pool -> 1x1 projection.
        self.b4_pool = nn.MaxPool2d(3, padding=1, stride=1)
        self.b4_1x1 = basicConv2d(_in_channels, poolproj, kernel_size=1, padding=1)

    def forward(self, x):
        branches = [
            self.b1_1x1(x),
            self.b2_3x3(self.b2_1x1(x)),
            self.b3_5x5(self.b3_1x1(x)),
            self.b4_1x1(self.b4_pool(x)),
        ]
        return torch.cat(branches, dim=1)
class GoogleNet(nn.Module):
    """GoogLeNet (Inception v1) variant with a 10-class head; returns log-probabilities."""

    def __init__(self):
        super(GoogleNet, self).__init__()
        # Stem convolutions.
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=2),
            nn.BatchNorm2d(192),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Inception stages 3a-3b.
        self.block3 = nn.Sequential(
            inceptionBlock(192, 64, 96, 128, 16, 32, 32),
            inceptionBlock(256, 128, 128, 192, 32, 96, 64),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Inception stages 4a-4e.
        self.block4 = nn.Sequential(
            inceptionBlock(480, 192, 96, 208, 16, 48, 64),
            inceptionBlock(512, 160, 112, 224, 24, 64, 64),
            inceptionBlock(512, 128, 128, 256, 24, 64, 64),
            inceptionBlock(512, 112, 144, 288, 32, 64, 64),
            inceptionBlock(528, 256, 160, 320, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Inception stages 5a-5b.
        self.block5 = nn.Sequential(
            inceptionBlock(832, 256, 160, 320, 32, 128, 128),
            inceptionBlock(832, 384, 192, 384, 48, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avg_pool = nn.Sequential(
            # nn.AvgPool2d(kernel_size=7, stride=1)
            nn.AdaptiveAvgPool2d((1, 1))
        )
        self.dropout = nn.Sequential(nn.Dropout(0.2))
        self.fc = nn.Sequential(nn.Linear(1024, 10))

    def forward(self, x):
        out = x
        for stage in (self.block1, self.block2, self.block3,
                      self.block4, self.block5, self.avg_pool):
            out = stage(out)
        out = self.dropout(torch.flatten(out, 1))
        return F.log_softmax(self.fc(out), dim=1)
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
if __name__ == "__main__":
    # Smoke test: build the network and print its layout.
    net = GoogleNet()
    print(net)
    # x = torch.randn(1,3,256,256)
    # print(net(x))
    # if torch.cuda.is_available():
    #     net.cuda()
    # torchsummary.summary(net, (3, 224, 224))
| {"/main.py": ["/GoogleNet.py"]} |
67,799 | sreejithr/IntelligentMapMiner | refs/heads/master | /persistent_store.py | import copy
# import json
# import requests
import redis
class RedisStore:
    """Redis-backed store of coordinates, cluster centers and addresses, kept in sets.

    Each call opens its own connection to a localhost Redis, mirroring the
    original behavior; pop-style getters consume entries from the sets.
    """

    def store_coordinate(self, list_of_coordinates):
        conn = redis.Redis("localhost")
        for point in list_of_coordinates:
            conn.sadd("coordinates", ','.join(str(part) for part in point))

    def store_centers(self, list_of_centers):
        conn = redis.Redis("localhost")
        for center in list_of_centers:
            conn.sadd("centers", ','.join(str(part) for part in center))

    def store_address(self, coordinate, address):
        # NOTE(review): `coordinate` is accepted but never persisted — only
        # the address string is stored; confirm whether a coordinate→address
        # mapping was intended.
        conn = redis.Redis("localhost")
        conn.sadd("addresses", address)

    def get_coordinate(self):
        return redis.Redis("localhost").spop("coordinates")

    def get_center(self):
        return redis.Redis("localhost").spop("centers")
# class ErrorResponse:
# def __init__(self, message):
# self.message = message
# def json(self):
# return {'message': self.message}
# class CouchDBStore:
# """
# Uses RESTFul APIs to connect with CouchDB
# """
# url = None
# headers = {}
# def __init__(self, host="localhost", port="5984",
# headers={"Content-type": "application/json"}):
# self.host = host
# self.port = port
# self.url = "http://{}:{}".format(self.host, self.port)
# self.headers = headers
# def get_id(self, parameter=None):
# # We fetch a UUID from CouchDB server
# response = requests.get("{}/_uuids/".format(self.url)).json()
# return response["uuids"][0]
# def put(self, db_name, data, document_id=None):
# try:
# # We use the uuid as id and make a new document in the db
# if isinstance(data, dict):
# if not document_id:
# data["_id"] = self.get_id()
# response = requests.post("{}/{}/".format(self.url, db_name),
# data=json.dumps(data), headers=self.headers)
# else:
# data["_id"] = document_id
# response = requests.put("{}/{}/{}".format(self.url, db_name,
# document_id), data=json.dumps(data), headers=self.headers)
# return response
# return ErrorResponse("Data provided is not a dict")
# except Exception as e:
# raise
# # TODO: Change this to catch specific exceptions
# return ErrorResponse("{} raised at put()".format(e))
# def get(self, db_name, coordinate=None, document_id=None):
# try:
# if document_id:
# response = requests.get("{}/{}/{}".format(self.url, db_name,
# document_id))
# elif coordinate:
# raise NotImplementedError
# else:
# response = requests.get("{}/{}/_all_docs".format(self.url,
# db_name))
# return response
# except Exception as e:
# raise
# # TODO: Change this to catch specific exceptions
# return ErrorResponse("{} raised at get()".format(e))
# def document_count(self, db_name):
# response = requests.get("{}/{}/".format(self.url, db_name))
# return response.json()["doc_count"]
# def get_n(self, db_name, n, processed=False):
# if processed:
# response = requests.get("{}/{}/_all_docs&limit={}".format(self.url,
# db_name, n))
# else:
# response = requests.get("{}/{}/_design/retrieve/_view/unprocessed?limit={}"
# .format(self.url, db_name, n))
# return response
# def delete(self, db_name, document_id):
# document = self.get(db_name, document_id=document_id).json()
# rev = document['_rev']
# return requests.delete("{}/{}/{}?rev={}".format(self.url, db_name, document_id,
# rev)).json()
# class PersistentStore(CouchDBStore):
# """
# TODO: Fill the docstring
# """
# def store(self, db_name, list_of_data, document_id=None):
# """
# Format is { "coordinate": ["14.562323", "75.232754"],
# "address": "407, Building 2, Gera Gardens, Pune" }
# """
# for data in list_of_data:
# try:
# response = self.put(db_name=db_name, data=data,
# document_id=document_id)
# self.put(db_name="coordinates", data={"_id": response.json()["id"],
# "coordinate": data["coordinate"], "calculated": "false"})
# except Exception as e:
# raise
# # TODO: Change this to catch specific exceptions
# return ErrorResponse("{} raised at store()".format(e))
# return response
# def update(self, db_name, document_id, data):
# try:
# document = self.retrieve(db_name=db_name, document_id=document_id)
# for key in data:
# document[key] = data[key]
# self.put(db_name=db_name, data=document)
# except Exception as e:
# raise
# return {"ok": "false", "message": "{} raised at update()".format(e)}
# def retrieve(self, db_name, coordinate=None, document_id=None, n=10,
# processed=None):
# if document_id:
# response = self.get(db_name=db_name, document_id=document_id)
# elif coordinate:
# response = self.get(db_name=db_name, coordinate=coordinate)
# elif processed is not None:
# response = self.get_n(db_name=db_name, n=n, processed=processed)
# else:
# response = self.get(db_name=db_name)
# # TODO: Change this to catch specific exceptions
# return response.json()
# def retrieve_for_processing(self, db_name, n=10):
# document = self.retrieve(db_name=db_name, n=n, processed=False)
# for document in document["rows"]:
# self.update(db_name=db_name, document_id=document["id"],
# data={"address": "processing"})
# return document["rows"]
# def count(self, db_name):
# """
# Returns the number of documents in the specified database
# """
# return self.document_count(db_name)
# def remove(self, db_name, document_id):
# return self.delete(db_name, document_id)
def save_to_file(address_data):
    """Flatten a Google-geocoder address-component list into one CSV row.

    address_data: list of geocoder components, each a dict with 'types'
        (list) and 'long_name'. Processed in reverse (most-general first).
    Returns a comma-separated string with one slot per entry in `keys`,
    empty slots for missing components.

    NOTE(review): when a component's type is unknown, it is assigned to the
    next unused key popped from `keys_reversed` — this positional fallback
    assumes components arrive in the same order as `keys`; confirm.
    """
    final = {}
    # Address-component types we care about, column order of the output row.
    keys = ["street_number", "route", "neighborhood", "sublocality",
            "administrative_area_level_2", "administrative_area_level_1", "country",
            "postal_code"]
    address_data = address_data[::-1]
    keys_reversed = copy.copy(keys)
    # Append the raw component list to a local debug log.
    with open('log.log', 'a') as f:
        f.write(str(address_data) + '\n')
    for component in address_data:
        try:
            if component['types'][0] not in keys:
                print "#", component['types'][0], "#"
                final[keys_reversed.pop()] = component['long_name']
            else:
                final[component['types'][0]] = component['long_name']
                keys_reversed.pop()
        except IndexError:
            # Component with an empty 'types' list, or keys exhausted.
            print component['types']
    # Emit values in the fixed `keys` order; blank field when absent.
    result = ''
    for key in keys:
        try:
            result += final[key]+','
        except KeyError:
            result += ','
    return result
67,800 | sreejithr/IntelligentMapMiner | refs/heads/master | /coordinate_finder.py | import requests
import mercator
from map_extract_tool.map_extract import OpenCVMapAnalyzer
class GETException(Exception):
    """Raised when downloading the Static Maps image over HTTP fails."""

    def __init__(self, message):
        self.message = message
        # Bug fix: the original called `super.__init__(self)` — that is the
        # builtin `super` type itself, which raises a TypeError at raise
        # time. Use the proper (Python-2-compatible) super call and pass
        # the message along so str(exc) is meaningful.
        super(GETException, self).__init__(message)
def map_analyzer_pixel_to_map_pixel(center_pixel_x, center_pixel_y,
                                    pixel_coordinate, image_resolution):
    """
    Converts pixel coordinates to latlng coordinates

    Translate a pixel position inside a downloaded tile image into absolute
    map pixel coordinates.

    center_pixel_x, center_pixel_y: absolute pixel position of the tile
        centre (from world_coordinate_to_pixel_coordinate).
    pixel_coordinate: [x, y] inside the tile image.
    image_resolution: [width, height] of the tile image.
    """
    # Bug fix: the y offset previously subtracted image_resolution[0]/2
    # (half the *width*) instead of image_resolution[1]/2 (half the
    # height). Square tiles masked the error.
    return [center_pixel_x + (pixel_coordinate[0] - image_resolution[0] / 2),
            center_pixel_y + (pixel_coordinate[1] - image_resolution[1] / 2)]
def add_pixel_to_latlng(lat, lng, pixel_x, pixel_y, zoom_level):
    """Offset a lat/lng position by (pixel_x, pixel_y) screen pixels at the
    given zoom level and return the resulting [lat, lng]."""
    # Project into pixel space, where the offset is a simple addition...
    world = mercator.latlng_to_world_coordinate(lat, lng)
    px = mercator.world_coordinate_to_pixel_coordinate(world[0], world[1], zoom_level)
    shifted_x, shifted_y = px[0] + pixel_x, px[1] + pixel_y
    # ...then project back to lat/lng.
    back = mercator.pixel_coordinate_to_world_coordinate(shifted_x, shifted_y,
                                                         zoom_level)
    return mercator.world_coordinate_to_latlng(back[0], back[1])
def get_coordinates(center_lat, center_lng, zoom_level, image_resolution,
                    input_file_path, output_file_path):
    """
    Extracts coordinates from map image using the OpenCV module

    Downloads the static map tile centred on (center_lat, center_lng),
    extracts candidate points as pixel positions, and converts each one to
    a [lat, lng] pair via the Mercator helpers.
    """
    get_static_map_image([center_lat, center_lng], zoom_level, image_resolution,
                         input_file_path)
    map_analyzer = OpenCVMapAnalyzer()
    # extract_points appears to return a flat list x1, y1, x2, y2, ... —
    # TODO confirm against OpenCVMapAnalyzer.
    pixels = map_analyzer.extract_points(str(input_file_path),
                                         str(output_file_path))
    # We pair pixels by twos and make a list of pixel coordinates
    # NOTE(review): the loop condition below never changes (`pixels` is not
    # popped); termination actually relies on the IndexError raised when
    # the reversed copy is exhausted.
    pixels_rev = pixels[::-1]
    pixel_coordinates = []
    while len(pixels)!=0:
        try:
            pixel_coordinates.append([pixels_rev.pop(), pixels_rev.pop()])
        except IndexError:
            break
    # Absolute pixel position of the tile centre at this zoom level.
    w1, w2 = mercator.latlng_to_world_coordinate(center_lat, center_lng)
    x, y = mercator.world_coordinate_to_pixel_coordinate(w1, w2, zoom_level)
    coordinates = []
    for pixel_coordinate in pixel_coordinates:
        # Tile-local pixel -> absolute pixel -> world -> lat/lng.
        new_x, new_y = map_analyzer_pixel_to_map_pixel(x, y, pixel_coordinate,
                                                       image_resolution)
        w1, w2 = mercator.pixel_coordinate_to_world_coordinate(new_x, new_y,
                                                               zoom_level)
        coordinates.append(mercator.world_coordinate_to_latlng(w1, w2))
    return coordinates
def get_static_map_image(latlng, zoom_level, image_resolution, input_filename):
    """Download a Google Static Maps tile centred on *latlng* and write the
    raw image bytes to *input_filename*.

    Raises GETException on connection or HTTP errors.
    """
    latlng = [str(each) for each in latlng]
    image_resolution = [str(each) for each in image_resolution]
    # SECURITY NOTE(review): the API key is hard-coded and committed; it
    # should be moved to configuration/environment and the key revoked.
    url = 'http://maps.googleapis.com/maps/api/staticmap?center={}&zoom={}&size={}&sensor=false&key={}'.format(
        ','.join(latlng), zoom_level, 'x'.join(image_resolution), 'AIzaSyBvIP511WOQ71H2fixLG-GvHjOCOK3KLhE')
    try:
        response = requests.get(url)
    except requests.exceptions.ConnectionError:
        raise GETException("Problem connecting with Static Image API")
    except requests.exceptions.HTTPError:
        raise GETException("Received invalid HTTP response from Static Image API")
    # NOTE(review): binary content written with mode 'w' — fine on
    # Python 2 (this file uses print statements), but must be 'wb' if
    # ever ported to Python 3.
    with open(input_filename, 'w') as f:
        f.write(response.content)
        f.flush()
67,801 | sreejithr/IntelligentMapMiner | refs/heads/master | /server.py | import os
import json
import copy
import time
from flask import (Flask, request, render_template)
from persistent_store import RedisStore
from coordinate_finder import (get_coordinates, add_pixel_to_latlng)
# Base URL of this Flask app; derived URLs are exposed to the templates.
WEB_SERVER = "http://127.0.0.1:5000/"

app = Flask(__name__)
# App-level configuration: server URLs and working directories.
app.config['WEB_SERVER'] = WEB_SERVER
app.config['RESULT_FOLDER'] = "results"
app.config['CSV_UPLOAD_SERVER'] = WEB_SERVER + "upload/"
app.config['OUTPUT_FILE_NAME'] = "addresses.csv"
app.config['UPLOAD_FOLDER'] = "uploads"
app.config['IMAGE_FOLDER'] = "images"
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page."""
    context = {"WEB_SERVER": app.config['WEB_SERVER']}
    return render_template("index.html", **context)
@app.route('/kickstart', methods=['POST'])
def kickstart():
    """Tile the user-selected bounding box into static-map centres, queue
    them in Redis, and start downloading/extracting immediately.

    Form fields: has_coordinates ('true'/'false'), zoom_level, and — when
    has_coordinates is 'true' — 'sw'/'ne' as "lat,lng" strings for the
    south-west and north-east corners of the box.
    """
    if request.method == 'POST':
        # Extract necessary information from the request
        has_coordinates = request.form['has_coordinates']
        zoom_level = request.form['zoom_level']
        # Fixed tile size requested from the Static Maps API (width, height).
        image_resolution = [600, 600]
        if has_coordinates == 'true':
            sw, ne = request.form['sw'], request.form['ne']
            sw = [float(each) for each in sw.split(',')]
            ne = [float(each) for each in ne.split(',')]
            # Assign information to respective variables. Make a copy of latitude
            # (lat) for the sake of the while loop ahead
            lat, lng = sw[0], sw[1]
            max_lat, max_lng = ne[0], ne[1]
            centers = []
            initial_lat = copy.copy(lat)
            # We find out the centers of all the static images to be obtained.
            # Outer loop sweeps east one image-width at a time; inner loop
            # sweeps north one image-height at a time.
            while lng < max_lng:
                lat = initial_lat
                while lat < max_lat:
                    centers.append([lat, lng])
                    lat = add_pixel_to_latlng(float(lat), float(lng), 0,
                        -image_resolution[0], zoom_level)[0]
                lng = add_pixel_to_latlng(float(lat), float(lng),
                    image_resolution[1], 0, zoom_level)[1]
            print "No of tiles: ", len(centers)
            storage = RedisStore()
            storage.store_centers(centers)
        return download_and_extract_coordinates(zoom_level, image_resolution)
def download_and_extract_coordinates(zoom_level, image_resolution):
    """Drain the queued tile centres from Redis: download each static map
    tile, run the OpenCV extractor over it, and queue every coordinate
    found. Returns the total number of extracted coordinates as a string.
    """
    storage = RedisStore()
    center = storage.get_center()
    coordinates = []
    while center:
        center = [float(each) for each in center.split(',')]
        # NOTE(review): absolute developer-machine paths — these should
        # come from app configuration.
        input_file_path =\
            os.path.join('/Users/sreejith/MQuotient/maps/google_miner/images',
                'map{}_{}.jpg'.format(center[0], center[1]))
        output_file_path =\
            os.path.join('/Users/sreejith/MQuotient/maps/google_miner/images',
                'result{}_{}.jpg'.format(center[0], center[1]))
        # Throttle requests to the Static Maps API.
        time.sleep(2)
        coordinate = get_coordinates(float(center[0]), float(center[1]),
            zoom_level, image_resolution, input_file_path, output_file_path)
        coordinates += coordinate
        storage.store_coordinate(coordinate)
        center = storage.get_center()
    coordinate_count = len(coordinates)
    print "No of coordinates: ", coordinate_count
    return str(coordinate_count)
# @app.route('/upload', methods=['GET', 'POST'])
# def upload():
# if request.method == 'POST':
# uploaded_file = request.files['file']
# if uploaded_file:
# filename = secure_filename(uploaded_file.filename)
# uploaded_file.save(os.path.join(app.config['UPLOAD_FOLDER'],
# filename))
# error_msg = ""
# try:
# storage = PersistentStore()
# with open(os.path.join(app.config['UPLOAD_FOLDER'], filename)) as f:
# csv_data = f.read().split('\n')
# list_of_data = []
# for coordinate in csv_data:
# list_of_data.append(dict(coordinate = coordinate.split(','),
# address=None))
# response = storage.store(db_name="addresses",
# list_of_data=list_of_data)
# except (OSError, IOError):
# error_msg = "Error occured while uploading the file. Try again"
# return render_template('upload_error.html', ERROR_MSG=error_msg)
# return render_template('postupload.html') #, COORDINATES=json_data)
# return render_template('upload.html', WEB_SERVER=app.config['CSV_UPLOAD_SERVER'])
@app.route('/data', methods=['POST'])
def accept_data():
    """Receive a reverse-geocoded result from a worker client, log the raw
    body, and persist it via RedisStore. Returns "Success"."""
    if request.method == 'POST':
        # Parse the body once (the original called json.loads(request.data)
        # twice for the same payload).
        payload = json.loads(request.data)
        # NOTE(review): the full payload — not just an address field — is
        # stored as the "address"; confirm this is intended.
        address = payload
        coordinate = payload['coordinate']
        # Append the raw request body to a local audit log.
        with open('addresses.log', 'a') as f:
            f.write(request.data + '\n')
            f.flush()
        storage = RedisStore()
        storage.store_address(coordinate, address)
        return "Success"
@app.route('/coordinate', methods=['GET'])
def vend_coordinates():
storage = RedisStore()
result = storage.get_coordinate()
time.sleep(3)
print "Vending coordinate {} to client".format(result)
if result is not None:
return str(result)
return "null"
# Development entry point — Flask debug server only; not for production.
if __name__ == '__main__':
    app.debug = True
    app.run()
| {"/coordinate_finder.py": ["/mercator.py"]} |
67,802 | sreejithr/IntelligentMapMiner | refs/heads/master | /test_persistent_store.py | import unittest
from persistent_store import PersistentStore
# CouchDB database names exercised by the tests.
DB_NAME = "addresses"
SECONDARY_DB_NAME = "coordinates"
class TestPersistentStore(unittest.TestCase):
    """Integration tests for PersistentStore against a live CouchDB.

    NOTE(review): PersistentStore is currently commented out in
    persistent_store.py, so this module's import fails as-is.
    """

    def setUp(self):
        # Nine coordinate documents with no address yet.
        self.data = [{'coordinate': ['18.56988', '73.93912'], 'address': None},
                     {'coordinate': ['18.46416', '73.84365'], 'address': None},
                     {'coordinate': ['18.46421', '73.84325'], 'address': None},
                     {'coordinate': ['18.46429', '73.83855'], 'address': None},
                     {'coordinate': ['18.46437', '73.8384'], 'address': None},
                     {'coordinate': ['18.4644', '73.83821'], 'address': None},
                     {'coordinate': ['18.46452', '73.83778'], 'address': None},
                     {'coordinate': ['18.4642', '73.83793'], 'address': None},
                     {'coordinate': ['18.46413', '73.8381'], 'address': None}]
        # One document inserted under a fixed id for the by-id tests.
        self.single_data = [{'coordinate': ['100.56988', '100.93912'],
                             'address': None}]
        self.persistent_store = PersistentStore()

    def test_store(self):
        # Test the 'addresses' db
        initial_count = self.persistent_store.count(DB_NAME)
        initial_coordinate_count = self.persistent_store.count(SECONDARY_DB_NAME)
        response = self.persistent_store.store(DB_NAME, list_of_data=self.data)
        final_count = self.persistent_store.count(DB_NAME)
        self.assertEqual((final_count - initial_count), 9)
        # Test the 'coordinates' db
        final_coordinate_count = self.persistent_store.count(SECONDARY_DB_NAME)
        self.assertEqual((final_coordinate_count - initial_coordinate_count), 9)

    def test_retrieve(self):
        # Test bulk retrieve
        count = self.persistent_store.count(DB_NAME)
        response = self.persistent_store.retrieve(DB_NAME)
        self.assertEqual(len(response['rows']), count)
        # Test retrieve by document_id
        self.persistent_store.store(DB_NAME, list_of_data=self.single_data,
            document_id="55595afcb06b1089c831004882012197")
        response = self.persistent_store.retrieve(DB_NAME,
            document_id="55595afcb06b1089c831004882012197")
        self.assertEqual(response["coordinate"], ['100.56988','100.93912'])
        # Clean up the fixture document.
        self.persistent_store.remove(DB_NAME, "55595afcb06b1089c831004882012197")

    def test_retrieve_for_processing(self):
        # Only prints the result — no assertion (Python 2 print statement).
        response = self.persistent_store.retrieve_for_processing(DB_NAME, n=3)
        print response
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {"/coordinate_finder.py": ["/mercator.py"]} |
67,803 | sreejithr/IntelligentMapMiner | refs/heads/master | /mercator.py | import math
# Google Maps tiles are 256x256 px at zoom 0; the "world coordinate"
# space spans exactly one such tile.
TILE_SIZE = 256
# World-space origin: the centre of the zoom-0 tile.
pixelOrigin_x = TILE_SIZE/2.0
pixelOrigin_y = TILE_SIZE/2.0
# Web-Mercator scale factors (pixels per degree / per radian of longitude).
pixelsPerLngDegree = TILE_SIZE/360.0
pixelsPerLngRadian = TILE_SIZE/(2*math.pi)
def latlng_to_world_coordinate(lat, lng):
    """Project a latitude/longitude pair into Web-Mercator "world
    coordinates" (zoom-0 pixel space); returns [x, y]."""
    x = pixelOrigin_x + lng * pixelsPerLngDegree
    # Mercator y: inverse Gudermannian of the latitude.
    siny = math.sin(math.radians(lat))
    y = pixelOrigin_y + 0.5 * math.log((1 + siny)/(1 - siny)) * -pixelsPerLngRadian
    return [x, y]
def world_coordinate_to_pixel_coordinate(w1, w2, zoom_level):
    """Scale world coordinates up to absolute integer pixel coordinates at
    *zoom_level* (each zoom step doubles the map size)."""
    scale = 2 ** int(zoom_level)
    return [int(w1 * scale), int(w2 * scale)]
def pixel_coordinate_to_world_coordinate(x, y, zoom_level):
    """Inverse of world_coordinate_to_pixel_coordinate: scale absolute
    pixel coordinates back down to (float) world coordinates."""
    scale = 2 ** int(zoom_level)
    # The "+0.0" forces true division under Python 2.
    return [(x + 0.0) / scale, (y + 0.0) / scale]
def world_coordinate_to_latlng(w1, w2):
    """Inverse Web-Mercator projection: world coordinates -> [lat, lng]."""
    lat_radian = (w2 - pixelOrigin_y) / -pixelsPerLngRadian
    # Gudermannian function recovers latitude from the Mercator y value.
    lat = math.degrees(2 * math.atan(math.exp(lat_radian)) - math.pi / 2)
    lng = (w1 - pixelOrigin_x) / pixelsPerLngDegree
    return [lat, lng]
| {"/coordinate_finder.py": ["/mercator.py"]} |
67,878 | mugiwara-forks/hablemos-discordpy-bot | refs/heads/main | /cogs/convo_starter.py | from random import choice
from .convo_db import random_question, tables, tables_values, tables_keys, tables_first_two_characters
from .general import General as gen
from discord.ext import commands
from discord import Embed
# Embed Message
# DeepL link kept for the (currently disabled) attribution footer below.
DEEPL_URL = "https://www.deepl.com/translator"
# SUGGESTION_FORM = "https://docs.google.com/forms/d/1yDMkL0NLlPWWuNy2veMr3PLoNjYc2LTD_pnqYurP91c/"
# FOOTER_ENG = f"Questions translated using [DeepL]({DEEPL_URL}). Feel free to use [this link]({SUGGESTION_FORM}) " \
#              f"to report a mistake or suggest a question"
# FOOTER_ESP = f"\nPreguntas traducidas con [DeepL]({DEEPL_URL}). Utiliza [este enlace]({SUGGESTION_FORM}) " \
#              f"para reportar un error o sugerir una pregunta"

# User-facing error strings for the $topic command.
ERROR_MESSAGE = "The proper format is `$topic <topic>` eg. `$topic movies`. Please see " \
                "`$help topic` for more info"
NOT_FOUND = "Topic not found! Please type ``$lst`` to see a list of topics"

# Spa and Eng Channel IDs
# Channels where the Spanish question is shown first.
spa_channels = [809349064029241344, 243858509123289089, 388539967053496322, 477630693292113932]
# personal server, spa-eng, spa-eng, esp-ing
# eng_channels = []

# Embed question
# Accent colours picked at random for each question embed.
colors = [0x7289da, 0xe74c3c, 0xe67e22, 0xf1c40f, 0xe91e63, 0x9b59b6,
          0x3498db, 0x2ecc71, 0x1abc9c]
def embed_question(question_1a, question_1b):
    """Build the question embed: one language as the title, the other
    (bolded) as the description, with a randomly chosen accent colour."""
    emb = Embed(color=choice(colors))
    emb.clear_fields()
    emb.title = question_1a
    emb.description = f"**{question_1b}**"
    # emb.add_field(name="\u200b", value=footer, inline=False)  # attribution footer (disabled)
    return emb
class ConvoStarter(commands.Cog):
    """Cog exposing the `$topic` command: posts a random bilingual
    conversation-starter question, Spanish-first in the configured
    Spanish-speaking channels and English-first elsewhere."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['top', ])
    async def topic(self, ctx, *category):
        """
        Command used to suggestion a random conversation topic. Type `$topic <category>`. Just typing `$topic` will suggest a topic from the `general` category.
        Type `$lst` to see the list of categories.
        Example: `$topic food`"""
        table = ""
        if len(category) > 1:
            await ctx.send(ERROR_MESSAGE)
            return
        elif len(category) == 0:
            # No category given: default to the general table.
            table = "generales"
        else:
            if category[0] in tables_keys:
                table = tables[category[0]]
            elif category[0] == 'rand' or category[0] == 'random' or category[0] == 'ra':
                table = choice(tables_values)
            elif category[0][0:2] in tables_first_two_characters:
                # Two-character prefix match, e.g. `$topic mo` -> movies.
                table = tables[tables_keys[tables_first_two_characters.index(category[0][0:2])]]
            else:
                await ctx.send(NOT_FOUND)
                return
        # Row fetched from SQLite; indexed as (spanish, english) below —
        # TODO confirm column order against the preguntas.db schema.
        question_spa_eng: tuple = random_question(table)
        if ctx.channel.id in spa_channels:
            emb = embed_question(question_spa_eng[0], question_spa_eng[1])
            await gen.safe_send(ctx.channel, ctx, embed=emb)
        else:
            emb = embed_question(question_spa_eng[1], question_spa_eng[0])
            await gen.safe_send(ctx.channel, ctx, embed=emb)
def setup(bot):
    # discord.py extension hook, called by Bot.load_extension().
    bot.add_cog(ConvoStarter(bot))
| {"/cogs/convo_starter.py": ["/cogs/convo_db.py", "/cogs/general.py"]} |
67,879 | mugiwara-forks/hablemos-discordpy-bot | refs/heads/main | /hablemos.py | import os
from discord import Game, Embed, Color
from discord.ext.commands import Bot, CommandNotFound, Cog
from dotenv import load_dotenv
# Read BOT_TOKEN (and any other settings) from the local .env file.
load_dotenv('.env')

# Command prefix for every bot command.
PREFIX = "$"
# Cog modules loaded at startup.
cog_extensions = ['cogs.convo_starter', 'cogs.general']
def embed_message(title, user, channel, guild, message):
    """Build a grey embed summarising a command invocation (user, channel,
    guild and raw message content) for the private log channel."""
    emb = Embed(color=Color.greyple())
    emb.title = title
    for label, value in (("User", user), ("Channel", channel),
                         ("Guild", guild), ("Message", message)):
        emb.add_field(name=label, value=value, inline=False)
    return emb
class Hablemos(Bot):
    """Top-level bot: loads the cogs, sets presence on ready, and mirrors
    command errors/usages to a private log channel."""

    def __init__(self):
        super().__init__(description="Bot by Jaleel#6408", command_prefix=PREFIX, owner_id=216848576549093376,
                         help_command=None)
        # Load every cog listed in cog_extensions at construction time.
        for extension in cog_extensions:
            self.load_extension(extension)
            print(f"{extension} loaded")

    async def on_ready(self):
        # error log in my personal server
        self.error_channel = self.get_guild(523754549953953793).get_channel(811845363890913300)
        print("BOT LOADED!")
        await self.change_presence(activity=Game(f'{PREFIX}help for help'))

    async def on_command_error(self, ctx, error):
        # Only CommandNotFound is reported (to the log channel, not the
        # user); any other command error is silently dropped here.
        ignored = (CommandNotFound,)
        if isinstance(error, ignored):
            await self.error_channel.send(embed=embed_message(title="Command not found",
                                                              user=f"{ctx.author}, {ctx.author.id}",
                                                              channel=f"{ctx.channel}, {ctx.channel.id}",
                                                              guild=f"{ctx.guild}, {ctx.guild.id}",
                                                              message=ctx.message.content))

    async def on_command_completion(self, ctx):
        # Audit log of every successful command invocation.
        await self.error_channel.send(
            f"Succesfully used by {ctx.author}, {ctx.channel},{ctx.guild}, {ctx.message.content}")
# Instantiate and start the bot; run() blocks until shutdown.
bot = Hablemos()
bot.run(os.getenv('BOT_TOKEN'))
| {"/cogs/convo_starter.py": ["/cogs/convo_db.py", "/cogs/general.py"]} |
67,880 | mugiwara-forks/hablemos-discordpy-bot | refs/heads/main | /cogs/general.py | from discord.ext import commands
from discord import Embed, Color, Forbidden
SOURCE_URL = 'https://github.com/Jaleel-VS/hablemos-discordpy-bot#sources'
REPO = 'https://github.com/Jaleel-VS/hablemos-discordpy-bot'
DPY = 'https://discordpy.readthedocs.io/en/latest/'
def green_embed(text):
    """Wrap *text* in a pure-green (#00ff00) Discord embed; the standard
    reply style for this bot."""
    green = Color(int('00ff00', 16))
    return Embed(description=text, color=green)
class General(commands.Cog):
    """Cog with meta commands: custom help, category list, bot info, ping."""

    def __init__(self, bot):
        self.bot = bot

    async def safe_send(self, destination, content=None, *, embed=None):
        # Send wrapper that logs (instead of raising) when the bot lacks
        # permission to post.
        # NOTE(review): callers pass a Context as `destination` (and
        # ConvoStarter even calls this unbound with a channel as `self`),
        # so the Forbidden handler's `destination.channel` assumes a
        # Context — confirm before changing the calling convention.
        try:
            return await destination.send(content, embed=embed)
        except Forbidden:
            print(f"I don't have permission to send messages in:\nChannel: #{destination.channel.name}"
                  f"\nGuild: {destination.guild.id}")

    @commands.command()
    async def help(self, ctx, arg=''):
        # With an argument: show that command's aliases and docstring.
        # Without: show the general overview.
        if arg:
            requested = self.bot.get_command(arg)
            if not requested:
                await self.safe_send(ctx, "I was unable to find the command you requested")
                return
            message = ""
            message += f"**;{requested.qualified_name}**\n"
            if requested.aliases:
                message += f"Aliases: `{'`, `'.join(requested.aliases)}`\n"
            if requested.help:
                message += requested.help
            emb = green_embed(message)
            await self.safe_send(ctx, embed=emb)
        else:
            to_send = """
Type `$help <command>` for more info on any command or category.
__**General**__:
`info` -  Display information and a GitHub link to the source code
__**Conversation starters**__:
`topic` - Displays random conversation starter
`lst` - Lists available categories
"""
            await self.safe_send(ctx, embed=green_embed(to_send))

    @commands.command(aliases=['list', ])
    async def lst(self, ctx):
        """
        Lists available categories
        """
        categories = f"""
To use any one of the undermentioned topics type `$topic <category>`.
`$topic` or `$top` defaults to `general`
command(category) - description:
`general` - General questions
`personal` - Personal questions
`open` - Open-ended questions
`strange` - Strange/weird questions
`phil` - Philosophical questions
`games` - Questions related to games
`tv` - Questions about series/anime/cartoons
`books` - Questions related to books
`music` - Questions related to music
`tech` - Questions about technology
`sport` - Questions related to sports
`food` - Questions related to food
`lang`- Questions related to language learning
`fashion` - Questions related to fashion and clothes
`holi` - Questions related to holidays and seasons
`movies` - Questions related to movies
`travel` - Questions related to travel
`edu` - Questions about education
`random`, `rand` - A random question from any of the above categories
[Source]({SOURCE_URL})
"""
        await self.safe_send(ctx, embed=green_embed(categories))

    @commands.command()
    async def info(self, ctx):
        """
        Information about the bot
        """
        text = f"""
The bot was coded in Python using the [discord.py]({DPY}) API and SQLite3 as the database.
To report an error or make a suggestion please message <@216848576549093376>
[Github Repository]({REPO})
"""
        await self.safe_send(ctx, embed=green_embed(text))

    @commands.command()
    async def ping(self, ctx):
        # Reports the websocket latency, not full round-trip time.
        await self.safe_send(ctx,
                             embed=green_embed(f"**Command processing time**: {round(self.bot.latency * 1000, 2)}ms"))
def setup(bot):
    # discord.py extension hook, called by Bot.load_extension().
    bot.add_cog(General(bot))
| {"/cogs/convo_starter.py": ["/cogs/convo_db.py", "/cogs/general.py"]} |
67,881 | mugiwara-forks/hablemos-discordpy-bot | refs/heads/main | /cogs/convo_db.py | import sqlite3
# Sqlite3 tables dictionary
# Command category name -> SQLite table name (tables are named in Spanish).
tables = {'general': 'generales', 'personal': 'personales', 'tv': 'televisión', 'movies': 'películas',
          'books': 'libros', "music": 'música',
          'tech': 'tecnología', 'sport': 'deportes', 'food': 'comida_cocina', 'travel': 'viajes', 'fashion': 'ropa',
          'holi': 'feriados', 'edu': 'educación', 'strange': 'extrañas', 'phil': 'filo', 'lang': 'idiomas',
          'games': 'juegos', 'open': 'open'}

# Derived lookup lists used by the $topic command:
# full category names, their two-character prefixes, and the table names.
tables_keys = list(tables.keys())
tables_first_two_characters = [key[0:2] for key in tables_keys]
tables_values = list(tables.values())
# SQLITE QUERY
# SQLITE QUERY
# Module-level connection shared by random_question(); the db path is
# relative to the bot's working directory.
connection = sqlite3.connect("cogs/utils/preguntas.db")

"""
SQLITE QUERY TO GET RANDOM QUESTION FROM SPECIFIED TABLE
{0} - English or Spanish question
{1} - topic/category
"""
# NOTE(review): the note above is stale — the query below has a single
# format slot, {0}, which is the (whitelisted) table name.
SELECT_RANDOM_QUESTION = """
SELECT * FROM {0}
ORDER BY RANDOM()
LIMIT 1;
"""
def random_question(table):
    """Return one random question row from *table*, or None when the table
    name is not a known category."""
    # Whitelist guard — `table` is interpolated into the SQL as an
    # identifier, so only known table names may ever be formatted in.
    if table not in tables_values:
        return
    with connection:
        cur = connection.cursor()
        cur.execute(SELECT_RANDOM_QUESTION.format(table))
        return cur.fetchone()
# Below is a query and function to insert records into the database
#
# INSERT = 'INSERT INTO juegos (questions_spa, questions_eng) VALUES (?, ?);'
#
# def insert_into(lin1, lin2):
# with connection:
# connection.execute(INSERT, (lin1, lin2))
#
# with open("es.txt", "r", encoding='utf 8') as archivo1, open("en.txt", "r", encoding='utf 8') as archivo2:
# for line1, line2 in zip(archivo1, archivo2):
# lone_l = line1.strip()
# ltwo_l = line2.strip()
# insert_into(lone_l, ltwo_l)
| {"/cogs/convo_starter.py": ["/cogs/convo_db.py", "/cogs/general.py"]} |
67,882 | ayush-2810/Milaap | refs/heads/master | /child/admin.py | from django.contrib import admin
# Register your models here.
from .models import esehi

# Expose the esehi (child/member) model in the Django admin site.
admin.site.register(esehi)
| {"/child/views.py": ["/child/forms.py"]} |
67,883 | ayush-2810/Milaap | refs/heads/master | /child/views.py | import os
import js2py
import sqlite3
from django.template.loader import render_to_string
import cv2
from django.contrib.auth.models import User
import numpy as np
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from PIL import Image
from django.core.mail import send_mail
from child.forms import addmemberform
from django.template import Template,Context
from .forms import UserRegisterForm
from .models import esehi
from .tokens import account_activation_token
from django.core.mail import EmailMessage
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode
import requests
def register(request):
    """Sign-up view: on a valid POST, create the user and redirect to the
    login page; otherwise render the (possibly bound) form."""
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account Created for {username}!')
            return redirect('/child/login')
        # Invalid POST: fall through and re-render with the bound form.
    else:
        form = UserRegisterForm()
    return render(request, 'child/register.html', {"form": form})
@login_required
def congrats(request):
    """Capture face samples from the local webcam for the newest member and
    retrain the LBPH face recogniser over the whole DataSet directory.

    Blocking view: it opens the webcam and OpenCV windows, so it only works
    when the server runs on the operator's own machine.
    """
    faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cam=cv2.VideoCapture(0)
    # Use the highest existing member id as the label for the new samples.
    members=esehi.objects.all()
    id=0
    for member in members:
        if(id<member.id):
            id=member.id
    sample=0
    # Grab frames until 21 face crops have been saved.
    while(True):
        ret,img=cam.read()
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces=faceDetect.detectMultiScale(gray,1.3,5)
        for(x,y,w,h) in faces:
            sample=sample+1
            # Save each grayscale crop as DataSet/User.<id>.<sample>.jpg
            cv2.imwrite('DataSet/User.'+str(id)+"."+str(sample)+'.jpg',gray[y:y+h,x:x+w])
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
            cv2.waitKey(100)
        cv2.imshow("Face",img);
        if(sample>20):
            break
    cam.release()
    cv2.destroyAllWindows()
    # Retrain the recogniser from every stored sample image.
    recognizer=cv2.face.LBPHFaceRecognizer_create();
    path='DataSet'
    def getImageWithID(path):
        # Load each sample and parse the member id out of the file name
        # (User.<id>.<sample>.jpg).
        imagePaths=[os.path.join(path,f) for f in os.listdir(path)]
        faces=[]
        IDs=[]
        for imagePath in imagePaths:
            faceImg=Image.open(imagePath).convert('L')
            facenp=np.array(faceImg,'uint8')
            ID=int(os.path.split(imagePath)[-1].split('.')[1])
            faces.append(facenp)
            IDs.append(ID)
            cv2.waitKey(10)
        return IDs,faces
    Ids,faces=getImageWithID(path)
    recognizer.train(faces,np.array(Ids))
    recognizer.write('recognizer/trainningData.yml')
    return render(request,'child/congrats.html')
@login_required
def laststep(request):
    """Final page after a member's face dataset has been captured."""
    return render(request,'child/laststep.html')

def home(request):
    """Public landing page."""
    return render(request,'child/index.html')

def login(request):
    """Login page (authentication itself is handled elsewhere)."""
    return render(request,'child/login.html')

def success(request):
    """Plain-text confirmation returned after an upload."""
    return HttpResponse('successfuly uploaded')
@login_required
def addmember(request):
    """Add a family member: on a valid POST, save the record with the
    current user attached and continue to the face-capture step."""
    if request.method == 'POST':
        form = addmemberform(request.POST, request.FILES)
        if form.is_valid():
            # Attach the logged-in user before the actual save.
            member = form.save(commit=False)
            member.user = request.user
            member.save()
            return redirect('/child/laststep')
    else:
        form = addmemberform()
    return render(request, 'child/addmember.html', {"form": form})
def aboutus(request):
    """Static "about us" page."""
    return render(request,'child/aboutus.html')

def howitworks(request):
    """Static "how it works" page."""
    return render(request,'child/howitworks.html')

@login_required
def dashboard(request):
    """Dashboard for a logged-in user."""
    return render(request,'child/dashboard.html')

@login_required
def allmembers(request):
    """List all registered members."""
    # Debug output: whether any member exists at all.
    print((esehi.objects.all().count()) > 0)
    return render(request,'child/allmembers.html')

@login_required
def searchmember(request):
    """Entry page for the webcam-based member search."""
    return render(request,'child/searchmember.html')
@login_required
def addtolost(request,id):
    """Mark the member with primary key `id` as lost.

    NOTE(review): the persistence is commented out (the `lost` model is
    unused), so this currently only fetches the record and renders the
    confirmation page.
    """
    data = esehi.objects.filter(id=id).values()
    # u=lost(**data[0])
    # u.save()
    return render(request,'child/addtolost.html')
def display_ip():
    """Look up this machine's approximate location via geojs.io and return
    [region, latitude, longitude] (all as provided by the service)."""
    # First fetch the public IP, then its geo record.
    my_ip = requests.get('https://get.geojs.io/v1/ip.json').json()['ip']
    geo_data = requests.get(
        'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json').json()
    return [geo_data['region'], geo_data['latitude'], geo_data['longitude']]
@login_required
def searchresult(request):
    """Identify a child via the webcam face recogniser, then email the
    guardian a permission link (including the searcher's approximate
    location) rather than showing the details directly."""
    faceDetect=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    def getans(Id):
        # Fetch the child row for a recogniser label straight from SQLite.
        # NOTE(review): SQL built by concatenation — Id is an int from the
        # recogniser so injection is unlikely, but parameterised queries
        # would be safer and consistent with good sqlite3 usage.
        conn = sqlite3.connect("db.sqlite3")
        cmd = "SELECT * from child_esehi WHERE id="+str(Id)
        cursor = conn.execute(cmd)
        profile = None
        for row in cursor:
            profile = row
        conn.close()
        return profile
    cam=cv2.VideoCapture(0)
    rec=cv2.face.LBPHFaceRecognizer_create();
    rec.read('recognizer\\trainningData.yml')
    id=0
    flag=0
    font=cv2.FONT_HERSHEY_COMPLEX_SMALL
    # Grab frames until a detected face matches a stored profile (or the
    # operator presses 'q').
    while(True):
        ret,img=cam.read()
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        faces=faceDetect.detectMultiScale(gray,1.3,5)
        for(x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
            id,conf=rec.predict(gray[y:y+h,x:x+w])
            profile = getans(id)
            if profile!=None:
                print(profile)
                cv2.destroyAllWindows()
                flag=1
                break
            #cv2.putText(img,str(id),(x,y+h), font, 4,(255,255,255),2,cv2.LINE_AA)
        cv2.imshow("Face",img);
        if(cv2.waitKey(1)==ord('q') or flag==1):
            break;
    cam.release()
    cv2.destroyAllWindows()
    # Email the guardian a link that, when clicked, grants this user access.
    current_site=get_current_site(request)
    mail_subject='Give Permisssion to access Details of child'
    ip_request = requests.get('https://get.geojs.io/v1/ip.json')
    my_ip = ip_request.json()['ip'] # ip_request.json() => {ip: 'XXX.XXX.XX.X'}
    geo_request_url = 'https://get.geojs.io/v1/ip/geo/' + my_ip + '.json'
    geo_request = requests.get(geo_request_url)
    geo_data = geo_request.json()
    r=display_ip()
    # NOTE(review): display_ip returns [region, latitude, longitude], but
    # the template context maps 'long' to r[1] (latitude) and 'lat' to
    # r[2] (longitude) — verify against the email template.
    message = render_to_string('child/acc_active_email.html',{'user':request.user,'domain':current_site.domain,'uid':urlsafe_base64_encode(force_bytes(id)),'token':account_activation_token.make_token(request.user),'region':r[0],'long':r[1],'lat':r[2]})
    # NOTE(review): recipient address is hard-coded — it should come from
    # the matched child's guardian record.
    to_email='akeshav53@gmail.com'
    email=EmailMessage(mail_subject,message,to=[to_email])
    email.send()
    messages.success(request,f'We have sent the confirmation mail')
    return redirect('/child')
    # return render(request,'child/searchresult.html',{'profile':profile})
def activate(request,uidb64,token,year):
    """Email-confirmation endpoint: clicking the emailed link grants the
    requesting user (pk=`year`) access to the child record whose pk is
    base64-encoded in `uidb64`, by setting perms/uperms on the record.

    NOTE(review): esehi.DoesNotExist is not in the except tuple, so an
    unknown child id raises a 500 instead of the invalid-link page.
    """
    try:
        child_id=force_text(urlsafe_base64_decode(uidb64))
        user=User.objects.get(pk=year)
        child1=esehi.objects.get(pk=child_id)
    except (TypeError,ValueError,OverflowError,User.DoesNotExist):
        user=None
    if user is not None and account_activation_token.check_token(user,token):
        # Grant access: flag the record and remember which user may view it.
        child1.perms=True
        child1.uperms=year
        child1.save()
        return HttpResponse('<h2>Access Granted</h2>')
    else:
        return HttpResponse('activation link is invalid!')
def deletefromlost(request,id):
    """Remove the member with the given id from the lost list.

    NOTE(review): the actual delete is commented out below, so this view
    currently reports success without modifying the database — confirm
    whether the delete should be re-enabled.
    """
    # lost.objects.filter(id=id).delete()
    return HttpResponse("Member has been successfully removed from lost list of our database.")
def childdetails(request):
    """Render details of the child record the logged-in user may access.

    Selects the row in child_esehi with perms=1 whose uperms equals the
    current user's pk.  If several rows match, the last one wins (original
    behaviour, preserved).

    Fixes over the original:
    - the query is parameterized instead of built by string concatenation
      (defense-in-depth against SQL injection);
    - the connection is closed in a ``finally`` block, so it is no longer
      leaked when the query raises.
    """
    cmd = "SELECT * from child_esehi WHERE perms=1 AND uperms=?"
    profile = None
    conn = sqlite3.connect("db.sqlite3")
    try:
        cursor = conn.execute(cmd, (request.user.pk,))
        for row in cursor:
            print(row)  # debug trace, kept from the original
            profile = row
    finally:
        conn.close()
    return render(request, 'child/searchresult.html', {'profile': profile})
| {"/child/views.py": ["/child/forms.py"]} |
67,884 | ayush-2810/Milaap | refs/heads/master | /child/forms.py | from django import forms
from child.models import esehi
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class addmemberform(forms.ModelForm):
    """ModelForm for adding a member record (esehi model).

    Every field overrides the default widget so the rendered input carries
    Bootstrap's ``form-control`` class and a placeholder.
    """
    name=forms.CharField(widget=forms.TextInput(
        attrs={
            'class':'form-control',
            'placeholder':'Enter your Name Here'
        }
    ))
    # NOTE(review): phone numbers may carry leading zeros or a '+' prefix,
    # which IntegerField rejects — confirm CharField is not more appropriate.
    mobilenumber=forms.IntegerField(widget=forms.TextInput(
        attrs={
            'class':'form-control',
            'placeholder':'Enter your Mobile Number Here'
        }
    ))
    gender=forms.CharField(widget=forms.TextInput(
        attrs={
            'class':'form-control',
            'placeholder':'Enter your Gender'
        }
    ))
    address=forms.CharField(widget=forms.TextInput(
        attrs={
            'class':'form-control',
            'placeholder':'Enter your Address'
        }
    ))
    zip1=forms.IntegerField(widget=forms.TextInput(
        attrs={
            'class':'form-control',
            'placeholder':'Enter your Zip Code'
        }
    ))
    image=forms.ImageField()

    class Meta:
        # Bind the form to the esehi model; list order controls field order.
        model=esehi
        fields=['name','mobilenumber','gender','address','zip1','image']
class UserRegisterForm(UserCreationForm):
    """Sign-up form: Django's UserCreationForm plus a required email field."""
    email=forms.EmailField()

    class Meta:
        model=User
        # NOTE(review): the trailing "| {...} |" text below looks like
        # paste/dataset residue fused onto this line — remove it.
        fields=['username','email','password1','password2'] | {"/child/views.py": ["/child/forms.py"]} |
67,885 | ayush-2810/Milaap | refs/heads/master | /child/templatetags/extratags.py | from child.models import esehi
from django import template
register = template.Library()
@register.filter(name='filter')
def filter(t):
    """Template filter: count of esehi rows whose id equals ``t``.

    Since ``id`` is presumably the primary key, this is effectively an
    existence check returning 0 or 1.  NOTE(review): the function name
    shadows the builtin ``filter``; the registered template name must stay
    'filter', but the Python identifier could be renamed.
    """
    return esehi.objects.filter(id=t).count()
#@register.filter(name='add1')
#def add1():
# return esehi.objects.all().count() | {"/child/views.py": ["/child/forms.py"]} |
67,886 | ayush-2810/Milaap | refs/heads/master | /detect.py | import requests
# Call the Kairos face-recognition "detect" endpoint on a fixed image URL
# and dump the raw response.  (The payload also carries enroll-style fields
# subject_id/gallery_name — confirm against the Kairos API which endpoint
# is intended.)
# SECURITY NOTE(review): API credentials are hard-coded below — move them
# to environment variables or a config file, and rotate the exposed key.
headers = {
    "app_id": "4985f625",
    "app_key": "aa9e5d2ec3b00306b2d9588c3a25d68e"
}
data={
    "image":"https://pbs.twimg.com/profile_images/1150960759838371841/UhAIoM9q_400x400.jpg",
    "subject_id":"Elizabeth",
    "gallery_name":"MyGallery"
}
url = "http://api.kairos.com/detect"
# make request
r = requests.post(url, data=data, headers=headers)
print(r.content) | {"/child/views.py": ["/child/forms.py"]} |
67,887 | ayush-2810/Milaap | refs/heads/master | /child/urls.py | from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth.views import (
LoginView, LogoutView
)
from . import views
from django.conf.urls.static import static
# URL routes for the child app.  Django matches top-down, so order matters.
urlpatterns=[
    path('',views.home,name="home"),
    # Auth reuses Django's class-based LoginView/LogoutView with app templates.
    path('login/',LoginView.as_view(template_name='child/login.html')),
    path('addmember/',views.addmember),
    path('aboutus/',views.aboutus,name="aboutus"),
    path('howitworks/',views.howitworks,name="howitworks"),
    path('dashboard/',views.dashboard),
    path('logout/',LogoutView.as_view(template_name='child/logout.html')),
    path('register/',views.register),
    path('allmembers/',views.allmembers,name='child/allmembers'),
    path('laststep/',views.laststep),
    path('congrats/',views.congrats),
    path('search/',views.searchmember),
    path('searchresult/',views.searchresult),
    path('addtolost/<int:id>',views.addtolost,name='child/addtolost'),
    path('deletefromlost/<int:id>',views.deletefromlost,name='child/deletefromlost'),
    # Email-activation link: uidb64 = encoded child id, token = per-user token,
    # year = requesting user's pk (consumed by views.activate).
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/(?P<year>[0-9]{1,10})/$',views.activate, name='activate'),
    path('childdetails/',views.childdetails),
]
| {"/child/views.py": ["/child/forms.py"]} |
67,892 | tupm2208/alco_receiver | refs/heads/master | /alco_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import alco_pb2 as alco__pb2
class CaregiverResultStub(object):
    """The greeting service definition.

    NOTE: generated by the gRPC protocol compiler — do not hand-edit;
    change the .proto and regenerate instead.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: one CaregiverResultPushingAction in, one
        # CaregiverResultPushingResponse out.
        self.push_result = channel.unary_unary(
                '/alco.CaregiverResult/push_result',
                request_serializer=alco__pb2.CaregiverResultPushingAction.SerializeToString,
                response_deserializer=alco__pb2.CaregiverResultPushingResponse.FromString,
                )
class CaregiverResultServicer(object):
    """The greeting service definition.

    NOTE: generated code — servers subclass this and override push_result
    (see AlcoServer in alco_server.py).  Regenerate from the .proto rather
    than editing.
    """

    def push_result(self, request, context):
        """Sends a greeting
        """
        # Default stub behaviour: report UNIMPLEMENTED until overridden.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_CaregiverResultServicer_to_server(servicer, server):
    # Generated registration helper: wires the servicer's push_result
    # handler into the server under the fully-qualified service name.
    rpc_method_handlers = {
            'push_result': grpc.unary_unary_rpc_method_handler(
                    servicer.push_result,
                    request_deserializer=alco__pb2.CaregiverResultPushingAction.FromString,
                    response_serializer=alco__pb2.CaregiverResultPushingResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'alco.CaregiverResult', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
 # This class is part of an EXPERIMENTAL API.
class CaregiverResult(object):
    """The greeting service definition.

    NOTE: generated convenience wrapper for gRPC's experimental
    single-call API — regenerate from the .proto rather than editing.
    """

    @staticmethod
    def push_result(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot invocation: opens a channel to `target` and calls the
        # push_result RPC without the caller building a stub.
        return grpc.experimental.unary_unary(request, target, '/alco.CaregiverResult/push_result',
            alco__pb2.CaregiverResultPushingAction.SerializeToString,
            alco__pb2.CaregiverResultPushingResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| {"/alco_server.py": ["/alco_pb2_grpc.py"]} |
67,893 | tupm2208/alco_receiver | refs/heads/master | /alco_server.py | # Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python AsyncIO implementation of the GRPC helloworld.Greeter server."""
import logging
import grpc
import alco_pb2
import alco_pb2_grpc
from concurrent import futures
import numpy as np
from PIL import Image
import pickle
import os
from datetime import datetime
import json
_ONE_MEGABYTES = 1024 * 1024
class AlcoServer(alco_pb2_grpc.CaregiverResultServicer):
    """gRPC servicer that persists pushed detection results to disk.

    Each request is written as a JSON record plus a JPEG frame under
    /ram/<YYYYMMDD>/<YYYYMMDDHH>/{second_log|detected_log}/{json,frame}/.
    """

    def __init__(self):
        # Fixed: the original called super() with the *base* class as the
        # first argument, which skips the base class in the MRO; plain
        # super() is the correct zero-argument form.
        super().__init__()

    def push_result(self, request, context):
        """Handle one push_result RPC.

        Returns a response with pushing_status 200 on success, 400 on any
        failure (best-effort behaviour preserved from the original, but
        the bare ``except:`` is narrowed and failures are now logged).
        """
        try:
            data = {
                'delivery_time': request.delivery_time,
                'detected_time': request.detected_time,
                'detected_value': request.detected_value,
            }
            date = datetime.strptime(data['delivery_time'], '%Y%m%d-%H:%M:%S.%f')
            day_folder = date.strftime('%Y%m%d')
            hour_folder = date.strftime('%Y%m%d%H')
            # pushing_mode '1' selects the per-second log; anything else
            # goes to the detected-event log.
            subdir = 'second_log' if request.pushing_mode == '1' else 'detected_log'
            base = os.path.join('/ram/' + day_folder, hour_folder, subdir)
            path_json = os.path.join(base, 'json')
            path_image = os.path.join(base, 'frame')
            # exist_ok=True replaces the original try/except-pass, which
            # also swallowed unrelated errors such as permission failures.
            os.makedirs(path_json, exist_ok=True)
            os.makedirs(path_image, exist_ok=True)
            # SECURITY NOTE(review): pickle.loads on bytes received over
            # the network can execute arbitrary code if a client is
            # malicious — prefer sending raw/encoded image bytes.
            im = Image.fromarray(np.array(pickle.loads(request.image)))
            im.save(os.path.join(path_image, data['delivery_time'] + '.jpg'))
            data['image'] = data['delivery_time'] + '.jpg'
            with open(os.path.join(path_json, data['delivery_time'] + '.json'), 'w') as outfile:
                json.dump(data, outfile)
            return alco_pb2.CaregiverResultPushingResponse(pushing_status=200)
        except Exception:
            # Deliberate best-effort: report failure to the client instead
            # of crashing the server, but leave a trace in the log.
            logging.exception("push_result failed")
            return alco_pb2.CaregiverResultPushingResponse(pushing_status=400)
def serve():
    """Configure and run the gRPC server on port 5000 (blocks until stopped)."""
    # Raise both send and receive message caps to 100 MB.
    message_limit = 100 * _ONE_MEGABYTES
    channel_options = [
        ('grpc.max_send_message_length', message_limit),
        ('grpc.max_receive_message_length', message_limit),
    ]
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=5),
                         options=channel_options)
    alco_pb2_grpc.add_CaregiverResultServicer_to_server(AlcoServer(), server)
    bind_address = '[::]:5000'
    server.add_insecure_port(bind_address)
    logging.info("Starting server on %s", bind_address)
    server.start()
    try:
        server.wait_for_termination()
    except KeyboardInterrupt:
        # Zero-second grace period: stop immediately, refusing new
        # connections and cutting off in-flight RPCs.
        server.stop(0)
if __name__ == '__main__':
    # Script entry point: enable INFO-level logging, then block in serve().
    logging.basicConfig(level=logging.INFO)
    serve()
| {"/alco_server.py": ["/alco_pb2_grpc.py"]} |
67,902 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_plots.py | import sys
import os.path as o
import os
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
# os.chdir('../')
from wrapper_base import read_experiment_results
from data_farming_base import DesignPoint, DataFarmingExperiment, DataFarmingMetaExperiment
from csv import DictReader
# --- Demo: data-farm random search (RNDSRCH) on the continuous
# newsvendor problem (CNTNEWS-1), varying only the sample_size factor. ---
solver_factor_headers = ["sample_size"]

# One design point per row of random_search_design.txt.
myMetaExperiment = DataFarmingMetaExperiment(solver_name="RNDSRCH",
                                             problem_name="CNTNEWS-1",
                                             solver_factor_headers=solver_factor_headers,
                                             solver_factor_settings_filename="",  # "solver_factor_settings",
                                             design_filename="random_search_design",
                                             solver_fixed_factors={},
                                             problem_fixed_factors={},
                                             oracle_fixed_factors={})
# NOTE(review): DataFarmingMetaExperiment.run() in data_farming_base.py
# accepts only n_macroreps — the crn_across_solns keyword below would
# raise TypeError against that signature; confirm which version is current.
myMetaExperiment.run(n_macroreps=20, crn_across_solns=True)
myMetaExperiment.post_replicate(n_postreps=100, n_postreps_init_opt=100, crn_across_budget=True, crn_across_macroreps=False)

# Reload the pickled results for design point 0 and plot its progress curves.
file_name_path = "data_farming_experiments/outputs/" + "RNDSRCH_on_CNTNEWS-1_designpt_0" + ".pickle"
myexperiment = read_experiment_results(file_name_path=file_name_path)
myexperiment.plot_progress_curves(plot_type="all")

# myMetaExperiment.calculate_statistics()  # solve_tols=[0.10], beta=0.50)
# myMetaExperiment.print_to_csv(csv_filename="meta_raw_results")
print("I ran this.")

# SCRATCH
# --------------------------------
# from csv import DictReader
# # open file in read mode
# with open('example_design_matrix.csv', 'r') as read_obj:
#     # pass the file object to DictReader() to get the DictReader object
#     csv_dict_reader = DictReader(read_obj)
#     # iterate over each line as a ordered dictionary
#     for row in csv_dict_reader:
#         # row variable is a dictionary that represents a row in csv
#         print(row) | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,903 | evaz1121/simopt | refs/heads/master | /simopt/test/test_sscont_oracle.py | import unittest
from rng.mrg32k3a import MRG32k3a
from oracles.sscont import SSCont
class TestSSContOracle(unittest.TestCase):
    """Smoke-test the SSCont oracle: one replication, sanity-check ranges."""

    def test_replicate(self):
        """Run a single replication with fixed RNG substreams and check
        that every response lies in its valid range."""
        myoracle = SSCont()
        # One dedicated substream per oracle randomness source, all from
        # stream 0, so the test is deterministic.
        rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myoracle.n_rngs)]
        responses, gradients = myoracle.replicate(rng_list)
        # Presumably an (s,S) policy: any order raises inventory position
        # to S, so order size is at least S - s.
        self.assertTrue(responses["avg_order"] >= myoracle.factors["S"] - myoracle.factors["s"])
        # Rates are proportions in [0, 1]; costs/stockouts are non-negative.
        self.assertTrue((0 <= responses["order_rate"]) & (responses["order_rate"] <= 1))
        self.assertTrue((0 <= responses["on_time_rate"]) & (responses["on_time_rate"] <= 1))
        self.assertTrue((0 <= responses["stockout_rate"]) & (responses["stockout_rate"] <= 1))
        self.assertTrue(0 <= responses["avg_stockout"])
        self.assertTrue(0 <= responses["avg_backorder_costs"])
        self.assertTrue(0 <= responses["avg_order_costs"])
        self.assertTrue(0 <= responses["avg_holding_costs"])
if __name__ == '__main__':
unittest.main()
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,904 | evaz1121/simopt | refs/heads/master | /simopt/data_farming_base.py | import numpy as np
import os
import csv
import pickle
from copy import deepcopy
from directory import oracle_directory
from rng.mrg32k3a import MRG32k3a
from wrapper_base import Experiment
class DesignPoint(object):
    """
    Base class for design points represented as dictionaries of factors.

    Attributes
    ----------
    oracle : Oracle object
        oracle to simulate
    oracle_factors : dict
        oracle factor names and values
    rng_list : list of rng.MRG32k3a objects
        rngs for oracle to use when running replications at the solution
    n_reps : int
        number of replications run at a design point
    responses : dict
        responses observed from replications
    gradients : dict of dict
        gradients of responses (w.r.t. oracle factors) observed from replications

    Arguments
    ---------
    oracle : Oracle object
        oracle with factors oracle_factors
    """
    def __init__(self, oracle):
        super().__init__()
        # Create separate copy of Oracle object for use at this design point,
        # so later factor updates on the template oracle do not leak in.
        self.oracle = deepcopy(oracle)
        self.oracle_factors = self.oracle.factors
        self.n_reps = 0
        self.responses = {}
        self.gradients = {}

    def attach_rngs(self, rng_list, copy=True):
        """
        Attach a list of random-number generators to the design point.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            list of random-number generators used to run simulation replications
        copy : bool
            if True, attach deep copies so this design point's rng state
            advances independently of the caller's generators
        """
        if copy:
            self.rng_list = [deepcopy(rng) for rng in rng_list]
        else:
            self.rng_list = rng_list

    def simulate(self, m=1):
        """
        Simulate m replications for the current oracle factors.
        Append results to the responses and gradients dictionaries.

        Arguments
        ---------
        m : int > 0
            number of macroreplications to run at the design point
        """
        for _ in range(m):
            # Generate a single replication of oracle, as described by design point.
            responses, gradients = self.oracle.replicate(rng_list=self.rng_list)
            # If first replication, set up recording responses and gradients.
            if self.n_reps == 0:
                self.responses = {response_key: [] for response_key in responses}
                self.gradients = {response_key: {factor_key: [] for factor_key in gradients[response_key]} for response_key in responses}
            # Append responses and gradients.
            for key in self.responses:
                self.responses[key].append(responses[key])
            for outerkey in self.gradients:
                for innerkey in self.gradients[outerkey]:
                    self.gradients[outerkey][innerkey].append(gradients[outerkey][innerkey])
            self.n_reps += 1
            # Advance rngs to start of next subsubstream, so each
            # replication draws fresh randomness.
            for rng in self.rng_list:
                rng.advance_subsubstream()
class DataFarmingExperiment(object):
    """
    Base class for data-farming experiments consisting of an oracle
    and design of associated factors.

    Attributes
    ----------
    oracle : Oracle object
        oracle on which the experiment is run
    design : list of DesignPoint objects
        list of design points forming the design
    n_design_pts : int
        number of design points in the design

    Arguments
    ---------
    oracle_name : string
        name of oracle on which the experiment is run
    factor_settings_filename : string
        name of .txt file containing factor ranges and # of digits
    factor_headers : list of strings
        ordered list of factor names appearing in factor settings/design file
    design_filename : string
        name of .txt file containing design matrix
    oracle_fixed_factors : dictionary
        non-default values of oracle factors that will not be varied
    """
    def __init__(self, oracle_name, factor_settings_filename, factor_headers, design_filename=None, oracle_fixed_factors=None):
        # Fixed: a mutable {} default would be shared across instances;
        # None-default with fresh dict preserves the old call signature.
        if oracle_fixed_factors is None:
            oracle_fixed_factors = {}
        # Initialize oracle object with fixed factors.
        self.oracle = oracle_directory[oracle_name](fixed_factors=oracle_fixed_factors)
        if design_filename is None:
            # Create oracle factor design from .txt file of factor settings.
            # Hard-coded for a single-stack NOLHS.
            # SECURITY NOTE(review): the filename is interpolated into a
            # shell command; only call with trusted filenames.
            command = "stack_nolhs.rb -s 1 ./data_farming_experiments/" + factor_settings_filename + ".txt > ./data_farming_experiments/" + factor_settings_filename + "_design.txt"
            os.system(command)
            # Append design to base filename.
            design_filename = factor_settings_filename + "_design"
        # Read in design matrix from .txt file.
        # ndmin=2 keeps a single-factor design as an (n, 1) matrix instead
        # of collapsing to 1-D, which would break len() and the [i][j]
        # indexing below.
        design_table = np.loadtxt("./data_farming_experiments/" + design_filename + ".txt", ndmin=2)
        # Count number of design points (rows).
        self.n_design_pts = len(design_table)
        # Create all design points.
        self.design = []
        design_pt_factors = {}
        for i in range(self.n_design_pts):
            for j in range(len(factor_headers)):
                # Parse oracle factors for next design point.
                design_pt_factors[factor_headers[j]] = design_table[i][j]
            # Update oracle factors according to next design point.
            self.oracle.factors.update(design_pt_factors)
            # Create new design point and add to design.
            self.design.append(DesignPoint(self.oracle))

    def run(self, n_reps=10, crn_across_design_pts=True):
        """
        Run a fixed number of macroreplications at each design point.

        Arguments
        ---------
        n_reps : int
            number of replications run at each design point
        crn_across_design_pts : Boolean
            use CRN across design points?
        """
        # Setup random number generators for oracle.
        # Use stream 0 for all runs; start with substreams 0, 1, ..., oracle.n_rngs-1.
        main_rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(self.oracle.n_rngs)]
        # All design points share the same rng objects (copy=False), so
        # CRN is achieved simply by resetting substreams between points.
        for design_pt in self.design:
            design_pt.attach_rngs(rng_list=main_rng_list, copy=False)
            # Simulate n_reps replications from each design point.
            design_pt.simulate(n_reps)
            # Manage random number streams.
            if crn_across_design_pts:
                # Reset rngs to start of current substream.
                for rng in main_rng_list:
                    rng.reset_substream()
            else:  # If not using CRN...
                # ...advance each rng n_rngs substreams ahead so the next
                # design point sees independent randomness.
                for rng in main_rng_list:
                    for _ in range(len(main_rng_list)):
                        rng.advance_substream()

    def print_to_csv(self, csv_filename="raw_results"):
        """
        Extract observed responses from simulated design points.
        Publish to .csv output file.

        Argument
        --------
        csv_filename : string
            name of .csv file to print output to
        """
        with open("./data_farming_experiments/" + csv_filename + ".csv", mode="w", newline="") as output_file:
            csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            # Print headers.
            oracle_factor_names = list(self.oracle.specifications.keys())
            response_names = list(self.design[0].responses.keys())
            csv_writer.writerow(["DesignPt#"] + oracle_factor_names + ["MacroRep#"] + response_names)
            for designpt_index in range(self.n_design_pts):
                designpt = self.design[designpt_index]
                # Parse list of oracle factors.
                oracle_factor_list = [designpt.oracle_factors[oracle_factor_name] for oracle_factor_name in oracle_factor_names]
                for mrep in range(designpt.n_reps):
                    # Parse list of responses; one output row per macroreplication.
                    response_list = [designpt.responses[response_name][mrep] for response_name in response_names]
                    csv_writer.writerow([designpt_index] + oracle_factor_list + [mrep] + response_list)
class DataFarmingMetaExperiment(object):
    """
    Base class for data-farming meta experiments consisting of problem-solver
    pairs and a design of associated factors.

    Attributes
    ----------
    design : list of Experiment objects
        list of design points forming the design
    n_design_pts : int
        number of design points in the design

    Arguments
    ---------
    solver_name : string
        name of solver
    problem_name : string
        name of problem
    solver_factor_settings_filename : string
        name of .txt file containing solver factor ranges and # of digits
    solver_factor_headers : list of strings
        ordered list of solver factor names appearing in factor settings/design file
    design_filename : string
        name of .txt file containing design matrix
    solver_fixed_factors : dict
        dictionary of user-specified solver factors that will not be varied
    problem_fixed_factors : dict
        dictionary of user-specified problem factors that will not be varied
    oracle_fixed_factors : dict
        dictionary of user-specified oracle factors that will not be varied
    """
    # NOTE(review): the three {} defaults below are mutable default
    # arguments shared across calls — prefer None defaults converted to
    # fresh dicts inside the body.
    def __init__(self, solver_name, problem_name, solver_factor_headers, solver_factor_settings_filename=None, design_filename=None, solver_fixed_factors={}, problem_fixed_factors={}, oracle_fixed_factors={}):
        # TO DO: Extend to allow a design on problem/oracle factors too.
        # Currently supports designs on solver factors only.
        if design_filename is None:
            # Create solver factor design from .txt file of factor settings.
            # Hard-coded for a single-stack NOLHS.
            command = "stack_nolhs.rb -s 1 ./data_farming_experiments/" + solver_factor_settings_filename + ".txt > ./data_farming_experiments/" + solver_factor_settings_filename + "_design.txt"
            os.system(command)
            # Append design to base filename.
            design_filename = solver_factor_settings_filename + "_design"
        # Read in design matrix from .txt file.
        design_table = np.loadtxt("./data_farming_experiments/" + design_filename + ".txt")
        # Count number of design_points.
        self.n_design_pts = len(design_table)
        # Create all design points.
        self.design = []
        design_pt_solver_factors = {}
        for i in range(self.n_design_pts):
            # TO DO: Fix this issue with numpy 1D and 2D arrays handled differently
            if len(solver_factor_headers) == 1:
                # TO DO: Resolve type-casting issues:
                # E.g., sample_size must be an integer for RNDSRCH, but np.loadtxt will make it a float.
                # parse solver factors for next design point
                design_pt_solver_factors[solver_factor_headers[0]] = int(design_table[i])
            else:
                for j in range(len(solver_factor_headers)):
                    # Parse solver factors for next design point.
                    design_pt_solver_factors[solver_factor_headers[j]] = design_table[i, j]
            # Merge solver fixed factors and solver factors specified for design point.
            new_design_pt_solver_factors = {**solver_fixed_factors, **design_pt_solver_factors}
            # In Python 3.9, will be able to use: dict1 | dict2.
            # Create new design point and add to design0.
            file_name_path = "data_farming_experiments/outputs/" + solver_name + "_on_" + problem_name + "_designpt_" + str(i) + ".pickle"
            new_design_pt = Experiment(solver_name, problem_name, new_design_pt_solver_factors, problem_fixed_factors, oracle_fixed_factors, file_name_path=file_name_path)
            self.design.append(new_design_pt)

    # Largely taken from MetaExperiment class in wrapper_base.py.
    # NOTE(review): demo_plots.py calls run(n_macroreps=..., crn_across_solns=...);
    # this signature accepts only n_macroreps — confirm which is current.
    def run(self, n_macroreps=10):
        """
        Run n_macroreps of each problem-solver design point.

        Arguments
        ---------
        n_macroreps : int
            number of macroreplications for each design point
        """
        for design_pt_index in range(self.n_design_pts):
            # If the problem-solver pair has not been run in this way before,
            # run it now.
            experiment = self.design[design_pt_index]
            if (getattr(experiment, "n_macroreps", None) != n_macroreps):
                print("Running Design Point " + str(design_pt_index) + ".")
                experiment.clear_runs()
                experiment.run(n_macroreps)

    # Largely taken from MetaExperiment class in wrapper_base.py.
    def post_replicate(self, n_postreps, n_postreps_init_opt, crn_across_budget=True, crn_across_macroreps=False):
        """
        For each design point, run postreplications at solutions
        recommended by the solver on each macroreplication.

        Arguments
        ---------
        n_postreps : int
            number of postreplications to take at each recommended solution
        n_postreps_init_opt : int
            number of postreplications to take at initial x0 and optimal x*
        crn_across_budget : bool
            use CRN for post-replications at solutions recommended at different times?
        crn_across_macroreps : bool
            use CRN for post-replications at solutions recommended on different macroreplications?
        """
        for design_pt_index in range(self.n_design_pts):
            experiment = self.design[design_pt_index]
            # If the problem-solver pair has not been post-processed in this way before,
            # post-process it now.
            if (getattr(experiment, "n_postreps", None) != n_postreps
                    or getattr(experiment, "n_postreps_init_opt", None) != n_postreps_init_opt
                    or getattr(experiment, "crn_across_budget", None) != crn_across_budget
                    or getattr(experiment, "crn_across_macroreps", None) != crn_across_macroreps):
                print("Post-processing Design Point " + str(design_pt_index) + ".")
                experiment.clear_postreps()
                experiment.post_replicate(n_postreps, n_postreps_init_opt, crn_across_budget, crn_across_macroreps)

    def calculate_statistics(self, solve_tols=[0.05, 0.10, 0.20, 0.50], beta=0.50):
        """
        For each design point, calculate statistics from each macroreplication.
            - area under estimated progress curve
            - alpha-solve time

        Arguments
        ---------
        solve_tols : list of floats in (0,1]
            relative optimality gap(s) definining when a problem is solved
        beta : float in (0,1)
            quantile to compute, e.g., beta quantile
        """
        # NOTE(review): the beta argument is not forwarded below —
        # compute_solvability_quantiles is called with a hard-coded
        # beta=0.50; confirm intent.
        for design_pt_index in range(self.n_design_pts):
            experiment = self.design[design_pt_index]
            experiment.clear_stats()
            experiment.compute_area_stats(compute_CIs=False)
            experiment.compute_solvability(solve_tols=solve_tols)
            experiment.compute_solvability_quantiles(beta=0.50, compute_CIs=False)
            experiment.record_experiment_results()

    def print_to_csv(self, csv_filename="meta_raw_results"):
        """
        Extract observed statistics from simulated design points.
        Publish to .csv output file.

        Argument
        --------
        csv_filename : string
            name of .csv file to print output to
        """
        with open("./data_farming_experiments/" + csv_filename + ".csv", mode="w", newline="") as output_file:
            csv_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            # Print headers.
            base_experiment = self.design[0]
            solver_factor_names = list(base_experiment.solver.specifications.keys())
            problem_factor_names = []  # list(base_experiment.problem.specifications.keys())
            oracle_factor_names = list(base_experiment.problem.oracle.specifications.keys())
            csv_writer.writerow(["DesignPt#"]
                                + solver_factor_names
                                + problem_factor_names
                                + oracle_factor_names
                                + ["MacroRep#"]
                                + ["Final Relative Optimality Gap"]
                                + ["Area Under Progress Curve"]
                                + ["0.05-Solve Time", "0.05-Solved? (Y/N)"]
                                + ["0.10-Solve Time", "0.10-Solved? (Y/N)"]
                                + ["0.20-Solve Time", "0.20-Solved? (Y/N)"]
                                + ["0.50-Solve Time", "0.50-Solved? (Y/N)"])
            for designpt_index in range(self.n_design_pts):
                experiment = self.design[designpt_index]
                # Parse lists of factors.
                solver_factor_list = [experiment.solver.factors[solver_factor_name] for solver_factor_name in solver_factor_names]
                problem_factor_list = []
                oracle_factor_list = [experiment.problem.oracle.factors[oracle_factor_name] for oracle_factor_name in oracle_factor_names]
                for mrep in range(experiment.n_macroreps):
                    # Parse list of statistics.
                    # NOTE(review): np.infty was removed in NumPy 2.0 —
                    # use np.inf when upgrading.
                    statistics_list = [experiment.all_prog_curves[mrep][-1],
                                       experiment.areas[mrep],
                                       experiment.solve_times[0][mrep],
                                       int(experiment.solve_times[0][mrep] < np.infty),
                                       experiment.solve_times[1][mrep],
                                       int(experiment.solve_times[1][mrep] < np.infty),
                                       experiment.solve_times[2][mrep],
                                       int(experiment.solve_times[2][mrep] < np.infty),
                                       experiment.solve_times[3][mrep],
                                       int(experiment.solve_times[3][mrep] < np.infty)
                                       ]
                    print_list = [designpt_index] + solver_factor_list + problem_factor_list + oracle_factor_list + [mrep] + statistics_list
                    csv_writer.writerow(print_list)
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,905 | evaz1121/simopt | refs/heads/master | /simopt/base.py | #!/usr/bin/env python
"""
Summary
-------
Provide base classes for solvers, problems, and oracles.
Listing
-------
Solver : class
Problem : class
Oracle : class
Solution : class
"""
import numpy as np
from copy import deepcopy
from rng.mrg32k3a import MRG32k3a
class Solver(object):
    """
    Base class to implement simulation-optimization solvers.

    Attributes
    ----------
    name : string
        name of solver
    objective_type : string
        description of objective types:
            "single" or "multi"
    constraint_type : string
        description of constraints types:
            "unconstrained", "box", "deterministic", "stochastic"
    variable_type : string
        description of variable types:
            "discrete", "continuous", "mixed"
    gradient_needed : bool
        indicates if gradient of objective function is needed
    factors : dict
        changeable factors (i.e., parameters) of the solver
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)
    rng_list : list of rng.MRG32k3a objects
        list of RNGs used for the solver's internal purposes
    solution_progenitor_rngs : list of rng.MRG32k3a objects
        list of RNGs used as a baseline for simulating solutions

    Arguments
    ---------
    fixed_factors : dict
        dictionary of user-specified solver factors
    """
    def __init__(self, fixed_factors):
        # Set factors of the solver.
        # Fill in missing factors with default values.
        # (Note: self.factors aliases the caller's dict and is mutated here.)
        self.factors = fixed_factors
        for key in self.specifications:
            if key not in fixed_factors:
                self.factors[key] = self.specifications[key]["default"]

    def __eq__(self, other):
        """
        Check if two solvers are equivalent.

        Arguments
        ---------
        other : base.Solver object
            other Solver object to compare to self

        Returns
        -------
        bool
            Are the two solvers equivalent?
        """
        if type(self) == type(other):
            if self.factors == other.factors:
                return True
            else:
                # Diagnostic print for interactive use.
                print("Solver factors do not match.")
                return False
        else:
            print("Solver types do not match.")
            return False

    def attach_rngs(self, rng_list):
        """
        Attach a list of random-number generators to the solver.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            list of random-number generators used for the solver's internal purposes
        """
        self.rng_list = rng_list

    def solve(self, problem):
        """
        Run a single macroreplication of a solver on a problem.

        Abstract: concrete solvers must override this.

        Arguments
        ---------
        problem : Problem object
            simulation-optimization problem to solve

        Returns
        -------
        recommended_solns : list of Solution objects
            list of solutions recommended throughout the budget
        intermediate_budgets : list of ints
            list of intermediate budgets when recommended solutions changes
        """
        raise NotImplementedError

    def check_crn_across_solns(self):
        # The crn_across_solns factor is always permissible.
        return True

    def check_solver_factor(self, factor_name):
        """
        Determine if the setting of a solver factor is permissible.

        Arguments
        ---------
        factor_name : string
            name of factor for dictionary lookup (i.e., key)

        Returns
        -------
        is_permissible : bool
            indicates if solver factor is permissible
        """
        is_permissible = True
        # `*=` on booleans acts as logical AND (bool is an int subtype);
        # the result is truthy iff every check passed.
        is_permissible *= self.check_factor_datatype(factor_name)
        is_permissible *= self.check_factor_list[factor_name]()
        return is_permissible
        # raise NotImplementedError

    def check_solver_factors(self):
        """
        Determine if the joint settings of solver factors are permissible.

        Returns
        -------
        is_simulatable : bool
            indicates if solver factors are permissible
        """
        return True
        # raise NotImplementedError

    def check_factor_datatype(self, factor_name):
        """
        Determine if a factor's data type matches its specification.

        Arguments
        ---------
        factor_name : string
            string corresponding to name of factor to check

        Returns
        -------
        is_right_type : bool
            indicates if factor is of specified data type
        """
        is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"])
        return is_right_type

    def create_new_solution(self, x, problem):
        """
        Create a new solution object with attached rngs primed
        to simulate replications.

        Arguments
        ---------
        x : tuple
            vector of decision variables
        problem : base.Problem object
            problem being solved by the solvers

        Returns
        -------
        new_solution : base.Solution object
            new solution
        """
        # Create new solution with attached rngs.
        new_solution = Solution(x, problem)
        new_solution.attach_rngs(rng_list=self.solution_progenitor_rngs, copy=True)
        # Manipulate progenitor rngs to prepare for next new solution.
        if not self.factors["crn_across_solns"]:  # If CRN are not used ...
            # ...advance each rng to start of the substream = current substream + # of oracle RNGs.
            for rng in self.solution_progenitor_rngs:
                for _ in range(problem.oracle.n_rngs):
                    rng.advance_substream()
        return new_solution

    def rebase(self, n_reps):
        """
        Rebase the progenitor rngs to start at a later subsubstream index.

        Arguments
        ---------
        n_reps : int >= 0
            subsubstream index to skip to (stream and substream indices
            are preserved)
        """
        new_rngs = []
        for rng in self.solution_progenitor_rngs:
            stream_index = rng.s_ss_sss_index[0]
            substream_index = rng.s_ss_sss_index[1]
            new_rngs.append(MRG32k3a(s_ss_sss_index=[stream_index, substream_index, n_reps]))
        self.solution_progenitor_rngs = new_rngs
class Problem(object):
    """
    Base class to implement simulation-optimization problems.

    Attributes
    ----------
    name : string
        name of problem
    dim : int
        number of decision variables
    n_objectives : int
        number of objectives
    n_stochastic_constraints : int
        number of stochastic constraints
    minmax : tuple of int (+/- 1)
        indicator of maximization (+1) or minimization (-1) for each objective
    constraint_type : string
        description of constraints types:
            "unconstrained", "box", "deterministic", "stochastic"
    variable_type : string
        description of variable types:
            "discrete", "continuous", "mixed"
    lower_bounds : tuple
        lower bound for each decision variable
    upper_bounds : tuple
        upper bound for each decision variable
    gradient_available : bool
        indicates if gradient of objective function is available
    optimal_value : float
        optimal objective function value
    optimal_solution : tuple
        optimal solution
    oracle : Oracle object
        associated simulation oracle that generates replications
    oracle_default_factors : dict
        default values for overriding oracle-level default factors
    oracle_fixed_factors : dict
        combination of overridden oracle-level factors and defaults
    oracle_decision_factors : set of str
        set of keys for factors that are decision variables
    rng_list : list of rng.MRG32k3a objects
        list of RNGs used to generate a random initial solution
        or a random problem instance
    factors : dict
        changeable factors of the problem, including:
            initial_solution : tuple
                default initial solution from which solvers start
            budget : int > 0
                max number of replications (fn evals) for a solver to take
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)

    Arguments
    ---------
    fixed_factors : dict
        dictionary of user-specified problem factors
    oracle_fixed_factors : dict
        subset of user-specified non-decision factors to pass through to the oracle
    """
    def __init__(self, fixed_factors, oracle_fixed_factors):
        # Set factors of the problem.
        # Fill in missing factors with default values.
        # NOTE: self.factors aliases the caller's fixed_factors dict and is
        # mutated in place here.
        self.factors = fixed_factors
        for key in self.specifications:
            if key not in fixed_factors:
                self.factors[key] = self.specifications[key]["default"]
        # Set subset of factors of the simulation oracle.
        # Fill in missing oracle factors with problem-level default values.
        # NOTE: oracle_fixed_factors is also mutated in place.
        for key in self.oracle_default_factors:
            if key not in oracle_fixed_factors:
                oracle_fixed_factors[key] = self.oracle_default_factors[key]
        self.oracle_fixed_factors = oracle_fixed_factors
        # super().__init__()

    def __eq__(self, other):
        """
        Check if two problems are equivalent.

        Equivalence requires identical concrete types, identical problem
        factors, and identical non-decision oracle factors. Mismatches are
        reported via print for diagnostic purposes.

        Arguments
        ---------
        other : base.Problem object
            other Problem object to compare to self

        Returns
        -------
        bool
            Are the two problems equivalent?
        """
        if type(self) == type(other):
            if self.factors == other.factors:
                # Check if non-decision-variable factors of oracles are the same.
                non_decision_factors = set(self.oracle.factors.keys()) - self.oracle_decision_factors
                for factor in non_decision_factors:
                    if self.oracle.factors[factor] != other.oracle.factors[factor]:
                        print("Oracle factors do not match")
                        return False
                return True
            else:
                print("Problem factors do not match.")
                return False
        else:
            print("Problem types do not match.")
            return False

    def check_initial_solution(self):
        # The initial solution is valid iff it satisfies the problem's
        # deterministic constraints.
        return self.check_deterministic_constraints(x=self.factors["initial_solution"])

    def check_budget(self):
        # The budget must be a strictly positive number of replications.
        return self.factors["budget"] > 0

    def check_problem_factor(self, factor_name):
        """
        Determine if the setting of a problem factor is permissible.

        Arguments
        ---------
        factor_name : string
            name of factor for dictionary lookup (i.e., key)

        Returns
        -------
        is_permissible : bool
            indicates if problem factor is permissible
        """
        is_permissible = True
        # Multiplication of booleans acts as logical AND here
        # (the result is truthy only if both checks pass).
        is_permissible *= self.check_factor_datatype(factor_name)
        is_permissible *= self.check_factor_list[factor_name]()
        return is_permissible
        # raise NotImplementedError

    def check_problem_factors(self):
        """
        Determine if the joint settings of problem factors are permissible.

        Base-class implementation accepts any combination; subclasses may
        override to enforce cross-factor constraints.

        Returns
        -------
        is_simulatable : bool
            indicates if problem factors are permissible
        """
        return True
        # raise NotImplementedError

    def check_factor_datatype(self, factor_name):
        """
        Determine if a factor's data type matches its specification.

        Arguments
        ---------
        factor_name : string
            string corresponding to name of factor to check

        Returns
        -------
        is_right_type : bool
            indicates if factor is of specified data type
        """
        is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"])
        return is_right_type

    def attach_rngs(self, rng_list):
        """
        Attach a list of random-number generators to the problem.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            list of random-number generators used to generate a random initial solution
            or a random problem instance
        """
        self.rng_list = rng_list

    def vector_to_factor_dict(self, vector):
        """
        Convert a vector of variables to a dictionary with factor keys.

        Must be implemented by concrete subclasses.

        Arguments
        ---------
        vector : tuple
            vector of values associated with decision variables

        Returns
        -------
        factor_dict : dictionary
            dictionary with factor keys and associated values
        """
        raise NotImplementedError

    def factor_dict_to_vector(self, factor_dict):
        """
        Convert a dictionary with factor keys to a vector
        of variables.

        Must be implemented by concrete subclasses.

        Arguments
        ---------
        factor_dict : dictionary
            dictionary with factor keys and associated values

        Returns
        -------
        vector : tuple
            vector of values associated with decision variables
        """
        raise NotImplementedError

    def response_dict_to_objectives(self, response_dict):
        """
        Convert a dictionary with response keys to a vector
        of objectives.

        Must be implemented by concrete subclasses.

        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values

        Returns
        -------
        objectives : tuple
            vector of objectives
        """
        raise NotImplementedError

    def response_dict_to_stoch_constraints(self, response_dict):
        """
        Convert a dictionary with response keys to a vector
        of left-hand sides of stochastic constraints: E[Y] >= 0.

        Base-class implementation returns an empty tuple (no stochastic
        constraints); subclasses with stochastic constraints override this.

        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values

        Returns
        -------
        stoch_constraints : tuple
            vector of LHSs of stochastic constraint
        """
        stoch_constraints = ()
        return stoch_constraints

    def deterministic_objectives_and_gradients(self, x):
        """
        Compute deterministic components of objectives for a solution `x`.

        Base-class implementation contributes zero to every objective and
        every gradient entry.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        det_objectives : tuple
            vector of deterministic components of objectives
        det_objectives_gradients : tuple
            vector of gradients of deterministic components of objectives
        """
        det_objectives = (0,) * self.n_objectives
        det_objectives_gradients = tuple([(0,) * self.dim for _ in range(self.n_objectives)])
        return det_objectives, det_objectives_gradients

    def deterministic_stochastic_constraints_and_gradients(self, x):
        """
        Compute deterministic components of stochastic constraints
        for a solution `x`.

        Base-class implementation contributes zero to every constraint and
        every gradient entry.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        det_stoch_constraints : tuple
            vector of deterministic components of stochastic
            constraints
        det_stoch_constraints_gradients : tuple
            vector of gradients of deterministic components of
            stochastic constraints
        """
        det_stoch_constraints = (0,) * self.n_stochastic_constraints
        det_stoch_constraints_gradients = tuple([(0,) * self.dim for _ in range(self.n_stochastic_constraints)])
        return det_stoch_constraints, det_stoch_constraints_gradients

    def check_deterministic_constraints(self, x):
        """
        Check if a solution `x` satisfies the problem's deterministic
        constraints.

        Base-class implementation treats every solution as feasible;
        subclasses with deterministic constraints override this.

        Arguments
        ---------
        x : tuple
            vector of decision variables

        Returns
        -------
        satisfies : bool
            indicates if solution `x` satisfies the deterministic constraints.
        """
        return True

    def get_random_solution(self, rand_sol_rng):
        """
        Generate a random solution for starting or restarting solvers.

        Base-class implementation is a no-op (returns None); subclasses
        are expected to override this.

        Arguments
        ---------
        rand_sol_rng : rng.MRG32k3a object
            random-number generator used to sample a new random solution

        Returns
        -------
        x : tuple
            vector of decision variables
        """
        pass

    def simulate(self, solution, m=1):
        """
        Simulate `m` i.i.d. replications at solution `x`.

        Appends raw replication results to the solution's storage arrays,
        advances the solution's rngs, and refreshes its summary statistics.

        Arguments
        ---------
        solution : Solution object
            solution to evaluate
        m : int
            number of replications to simulate at `x`
        """
        if m < 1:
            print('--* Error: Number of replications must be at least 1. ')
            print('--* Aborting. ')
        else:
            # Pad numpy arrays if necessary.
            if solution.n_reps + m > solution.storage_size:
                solution.pad_storage(m)
            # Set the decision factors of the oracle.
            self.oracle.factors.update(solution.decision_factors)
            for _ in range(m):
                # Generate one replication at x.
                responses, gradients = self.oracle.replicate(solution.rng_list)
                # Convert gradient subdictionaries to vectors mapping to decision variables.
                # TEMPORARILY COMMENT OUT GRADIENTS
                # vector_gradients = {keys: self.factor_dict_to_vector(gradient_dict) for (keys, gradient_dict) in gradients.items()}
                # Convert responses and gradients to objectives and gradients and add
                # to those of deterministic components of objectives.
                solution.objectives[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_objectives(responses), solution.det_objectives)]
                # solution.objectives_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_obj, det_obj)] for stoch_obj, det_obj in zip(self.response_dict_to_objectives(vector_gradients), solution.det_objectives_gradients)]
                if self.n_stochastic_constraints > 0:
                    # Convert responses and gradients to stochastic constraints and gradients and add
                    # to those of deterministic components of stochastic constraints.
                    solution.stoch_constraints[solution.n_reps] = [sum(pairs) for pairs in zip(self.response_dict_to_stoch_constraints(responses), solution.det_stoch_constraints)]
                    # solution.stoch_constraints_gradients[solution.n_reps] = [[sum(pairs) for pairs in zip(stoch_stoch_cons, det_stoch_cons)] for stoch_stoch_cons, det_stoch_cons in zip(self.response_dict_to_stoch_constraints(vector_gradients), solution.det_stoch_constraints_gradients)]
                # Increment replication counter.
                solution.n_reps += 1
                # Advance rngs to start of next subsubstream so the next
                # replication uses fresh randomness.
                for rng in solution.rng_list:
                    rng.advance_subsubstream()
            # Update summary statistics.
            solution.recompute_summary_statistics()

    def simulate_up_to(self, solutions, n_reps):
        """
        Simulate a set of solutions up to a given number of replications.

        Arguments
        ---------
        solutions : set
            a set of base.Solution objects
        n_reps : int > 0
            common number of replications to simulate each solution up to
        """
        for solution in solutions:
            # If more replications needed, take them.
            if solution.n_reps < n_reps:
                n_reps_to_take = n_reps - solution.n_reps
                self.simulate(solution=solution, m=n_reps_to_take)
class Oracle(object):
    """
    Base class to implement simulation oracles (models) featured in
    simulation-optimization problems.

    Attributes
    ----------
    name : string
        name of oracle
    n_rngs : int
        number of random-number generators used to run a simulation replication
    n_responses : int
        number of responses (performance measures)
    factors : dict
        changeable factors of the simulation model
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)
    check_factor_list : dict
        switch case for checking factor simulatability

    Arguments
    ---------
    fixed_factors : dict
        dictionary of user-specified oracle factors
    """
    def __init__(self, fixed_factors):
        # Set factors of the simulation oracle.
        # Fill in missing factors with default values.
        # NOTE: self.factors aliases the caller's fixed_factors dict and is
        # mutated in place here.
        self.factors = fixed_factors
        for key in self.specifications:
            if key not in fixed_factors:
                self.factors[key] = self.specifications[key]["default"]

    def __eq__(self, other):
        """
        Check if two oracles are equivalent.

        Equivalence requires identical concrete types and identical factor
        dictionaries. Mismatches are reported via print for diagnostics.

        Arguments
        ---------
        other : base.Oracle object
            other Oracle object to compare to self

        Returns
        -------
        bool
            Are the two oracles equivalent?
        """
        if type(self) == type(other):
            if self.factors == other.factors:
                return True
            else:
                print("Oracle factors do not match.")
                return False
        else:
            print("Oracle types do not match.")
            return False

    def check_simulatable_factor(self, factor_name):
        """
        Determine if a simulation replication can be run with the given factor.

        Arguments
        ---------
        factor_name : string
            name of factor for dictionary lookup (i.e., key)

        Returns
        -------
        is_simulatable : bool
            indicates if oracle specified by factors is simulatable
        """
        is_simulatable = True
        # Multiplication of booleans acts as logical AND here
        # (the result is truthy only if both checks pass).
        is_simulatable *= self.check_factor_datatype(factor_name)
        is_simulatable *= self.check_factor_list[factor_name]()
        return is_simulatable
        # raise NotImplementedError

    def check_simulatable_factors(self):
        """
        Determine if a simulation replication can be run with the given factors.

        Base-class implementation accepts any combination; subclasses may
        override to enforce cross-factor constraints.

        Returns
        -------
        is_simulatable : bool
            indicates if oracle specified by factors is simulatable
        """
        return True
        # raise NotImplementedError

    def check_factor_datatype(self, factor_name):
        """
        Determine if a factor's data type matches its specification.

        Arguments
        ---------
        factor_name : string
            string corresponding to name of factor to check

        Returns
        -------
        is_right_type : bool
            indicates if factor is of specified data type
        """
        is_right_type = isinstance(self.factors[factor_name], self.specifications[factor_name]["datatype"])
        return is_right_type

    def replicate(self, rng_list):
        """
        Simulate a single replication for the current oracle factors.

        Must be implemented by concrete subclasses.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            rngs for oracle to use when simulating a replication

        Returns
        -------
        responses : dict
            performance measures of interest
        gradients : dict of dicts
            gradient estimate for each response
        """
        raise NotImplementedError
class Solution(object):
    """
    Base class for solutions represented as vectors of decision variables
    and dictionaries of decision factors.

    Attributes
    ----------
    x : tuple
        vector of decision variables
    dim : int
        number of decision variables describing `x`
    decision_factors : dict
        decision factor names and values
    rng_list : list of rng.MRG32k3a objects
        rngs for oracle to use when running replications at the solution
    n_reps : int
        number of replications run at the solution
    det_objectives : tuple
        deterministic components added to objectives
    det_objectives_gradients : tuple of tuples (# objectives x dimension)
        gradients of deterministic components added to objectives
    det_stoch_constraints : tuple
        deterministic components added to LHS of stochastic constraints
    det_stoch_constraints_gradients : tuple (# stochastic constraints x dimension)
        gradients of deterministic components added to LHS stochastic constraints
    storage_size : int
        max number of replications that can be recorded in current storage
    objectives : numpy array (# replications x # objectives)
        objective(s) estimates from each replication
    objectives_gradients : numpy array (# replications x # objectives x dimension)
        gradient estimates of objective(s) from each replication
    stochastic_constraints : numpy array (# replications x # stochastic constraints)
        stochastic constraint estimates from each replication
    stochastic_constraints_gradients : numpy array (# replications x # stochastic constraints x dimension)
        gradient estimates of stochastic constraints from each replication

    Arguments
    ---------
    x : tuple
        vector of decision variables
    problem : Problem object
        problem to which x is a solution
    """
    def __init__(self, x, problem):
        super().__init__()
        self.x = x
        self.dim = len(x)
        self.decision_factors = problem.vector_to_factor_dict(x)
        self.n_reps = 0
        # Deterministic components are fixed per solution; stochastic
        # estimates from replications are added on top of them.
        self.det_objectives, self.det_objectives_gradients = problem.deterministic_objectives_and_gradients(self.x)
        self.det_stoch_constraints, self.det_stoch_constraints_gradients = problem.deterministic_stochastic_constraints_and_gradients(self.x)
        init_size = 100  # Initialize numpy arrays to store up to 100 replications.
        self.storage_size = init_size
        # Raw data storage; padded later by pad_storage() as needed.
        self.objectives = np.zeros((init_size, problem.n_objectives))
        self.objectives_gradients = np.zeros((init_size, problem.n_objectives, problem.dim))
        if problem.n_stochastic_constraints > 0:
            self.stoch_constraints = np.zeros((init_size, problem.n_stochastic_constraints))
            self.stoch_constraints_gradients = np.zeros((init_size, problem.n_stochastic_constraints, problem.dim))
        else:
            # None signals "no stochastic constraints" to downstream code.
            self.stoch_constraints = None
            self.stoch_constraints_gradients = None
        # Summary statistics
        # self.objectives_mean = np.full((problem.n_objectives), np.nan)
        # self.objectives_var = np.full((problem.n_objectives), np.nan)
        # self.objectives_stderr = np.full((problem.n_objectives), np.nan)
        # self.objectives_cov = np.full((problem.n_objectives, problem.n_objectives), np.nan)
        # self.objectives_gradients_mean = np.full((problem.n_objectives, problem.dim), np.nan)
        # self.objectives_gradients_var = np.full((problem.n_objectives, problem.dim), np.nan)
        # self.objectives_gradients_stderr = np.full((problem.n_objectives, problem.dim), np.nan)
        # self.objectives_gradients_cov = np.full((problem.n_objectives, problem.dim, problem.dim), np.nan)
        # self.stoch_constraints_mean = np.full((problem.n_stochastic_constraints), np.nan)
        # self.stoch_constraints_var = np.full((problem.n_stochastic_constraints), np.nan)
        # self.stoch_constraints_stderr = np.full((problem.n_stochastic_constraints), np.nan)
        # self.stoch_constraints_cov = np.full((problem.n_stochastic_constraints, problem.n_stochastic_constraints), np.nan)
        # self.stoch_constraints_gradients_mean = np.full((problem.n_stochastic_constraints, problem.dim), np.nan)
        # self.stoch_constraints_gradients_var = np.full((problem.n_stochastic_constraints, problem.dim), np.nan)
        # self.stoch_constraints_gradients_stderr = np.full((problem.n_stochastic_constraints, problem.dim), np.nan)
        # self.stoch_constraints_gradients_cov = np.full((problem.n_stochastic_constraints, problem.dim, problem.dim), np.nan)

    def attach_rngs(self, rng_list, copy=True):
        """
        Attach a list of random-number generators to the solution.

        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            list of random-number generators used to run simulation replications
        copy : bool
            if True, attach deep copies so the caller's rngs are not
            advanced by this solution's replications
        """
        if copy:
            self.rng_list = [deepcopy(rng) for rng in rng_list]
        else:
            self.rng_list = rng_list

    def pad_storage(self, m):
        """
        Append zeros to numpy arrays for summary statistics.

        Arguments
        ---------
        m : int
            number of replications to simulate
        """
        # Size of data storage.
        n_objectives = len(self.det_objectives)
        base_pad_size = 100  # Default is to append space for 100 more replications.
        # If more space needed, append in multiples of 100.
        pad_size = int(np.ceil(m / base_pad_size)) * base_pad_size
        self.storage_size += pad_size
        self.objectives = np.concatenate((self.objectives, np.zeros((pad_size, n_objectives))))
        self.objectives_gradients = np.concatenate((self.objectives_gradients, np.zeros((pad_size, n_objectives, self.dim))))
        if self.stoch_constraints is not None:
            n_stochastic_constraints = len(self.det_stoch_constraints)
            self.stoch_constraints = np.concatenate((self.stoch_constraints, np.zeros((pad_size, n_stochastic_constraints))))
            self.stoch_constraints_gradients = np.concatenate((self.stoch_constraints_gradients, np.zeros((pad_size, n_stochastic_constraints, self.dim))))

    def recompute_summary_statistics(self):
        """
        Recompute summary statistics of the solution.

        Only the first `n_reps` rows of the storage arrays hold real data,
        so statistics are computed over those slices. Sample statistics use
        ddof=1 (unbiased), hence the n_reps > 1 guard for objectives.
        """
        self.objectives_mean = np.mean(self.objectives[:self.n_reps], axis=0)
        if self.n_reps > 1:
            self.objectives_var = np.var(self.objectives[:self.n_reps], axis=0, ddof=1)
            self.objectives_stderr = np.std(self.objectives[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            self.objectives_cov = np.cov(self.objectives[:self.n_reps], rowvar=False, ddof=1)
        # TEMPORARILY COMMENT OUT GRADIENTS
        # self.objectives_gradients_mean = np.mean(self.objectives_gradients[:self.n_reps], axis=0)
        # if self.n_reps > 1:
        #     self.objectives_gradients_var = np.var(self.objectives_gradients[:self.n_reps], axis=0, ddof=1)
        #     self.objectives_gradients_stderr = np.std(self.objectives_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
        #     self.objectives_gradients_cov = np.array([np.cov(self.objectives_gradients[:self.n_reps, obj], rowvar=False, ddof=1) for obj in range(len(self.det_objectives))])
        if self.stoch_constraints is not None:
            # NOTE(review): unlike the objectives branch, the ddof=1
            # statistics below are not guarded by n_reps > 1 and will
            # produce NaN (with a runtime warning) when n_reps == 1 —
            # confirm whether a guard is intended here.
            self.stoch_constraints_mean = np.mean(self.stoch_constraints[:self.n_reps], axis=0)
            self.stoch_constraints_var = np.var(self.stoch_constraints[:self.n_reps], axis=0, ddof=1)
            self.stoch_constraints_stderr = np.std(self.stoch_constraints[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            self.stoch_constraints_cov = np.cov(self.stoch_constraints[:self.n_reps], rowvar=False, ddof=1)
            # self.stoch_constraints_gradients_mean = np.mean(self.stoch_constraints_gradients[:self.n_reps], axis=0)
            # self.stoch_constraints_gradients_var = np.var(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1)
            # self.stoch_constraints_gradients_stderr = np.std(self.stoch_constraints_gradients[:self.n_reps], axis=0, ddof=1) / np.sqrt(self.n_reps)
            # self.stoch_constraints_gradients_cov = np.array([np.cov(self.stoch_constraints_gradients[:self.n_reps, stcon], rowvar=False, ddof=1) for stcon in range(len(self.det_stoch_constraints))])
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,906 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_run_wrapper.py | import numpy as np
import sys
import os.path as o
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
from rng.mrg32k3a import MRG32k3a
from base import Solver, Problem, Oracle, Solution
from wrapper_base import Experiment, read_experiment_results, MetaExperiment
mymetaexperiment = MetaExperiment(solver_names=["RNDSRCH"], problem_names=["MM1-1", "CNTNEWS-1", "FACSIZE-1"], fixed_factors_filename="all_factors")
mymetaexperiment.run(n_macroreps=2, crn_across_solns=True)
mymetaexperiment.post_replicate(n_postreps=20, n_postreps_init_opt=100, crn_across_budget=True, crn_across_macroreps=False)
# mymetaexperiment.plot_area_scatterplot(plot_CIs=True, all_in_one=False)
# mymetaexperiment.plot_solvability_profiles(solve_tol=0.1)
# myexperiment = Experiment(solver_name="RNDSRCH", problem_name="MM1-1")
# # myexperiment = Experiment(solver_name="RNDSRCH", problem_name="CNTNEWS-1")
# myexperiment.run(n_macroreps=5, crn_across_solns=True)
# myexperiment.post_replicate(n_postreps=20, n_postreps_init_opt=100, crn_across_budget=True, crn_across_macroreps=False)
# myexperiment3 = read_experiment_results(file_name="RNDSRCH_on_CNTNEWS-1")
# myexperiment.post_replicate(n_postreps=20, n_postreps_init_opt=100, crn_across_budget=True, crn_across_macroreps=False)
# # myexperiment3.compute_area_stats()
# myexperiment3.plot_solvability_curves(solve_tols=[0.1])
# myexperiment3.compute_solvability_quantiles(beta=0.5)
# print(myexperiment3.solve_time_quantiles)
# myexperiment3.plot_progress_curves(plot_type="all")
# myexperiment3.plot_progress_curves(plot_type="mean")
# myexperiment3.plot_progress_curves(plot_type="quantile")
# myexperiment3.plot_progress_curves(plot_type="all", normalize=False)
# myexperiment3.plot_progress_curves(plot_type="mean", normalize=False)
# myexperiment3.plot_progress_curves(plot_type="quantile", normalize=False)
print('I ran this.')
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,907 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_oracle.py | """
This script is intended to help with debugging an oracle.
It imports an oracle, initializes an oracle object with given factors,
sets up pseudorandom number generators, and runs one or more replications.
"""
import sys
import os.path as o
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
# Import random number generator.
from rng.mrg32k3a import MRG32k3a
# Import oracle.
# Replace <filename> with name of .py file containing oracle class.
# Replace <oracle_class_name> with name of oracle class.
# Ex: from oracles.mm1queue import MM1Queue
from oracles.<filename> import <oracle_class_name>
# Fix factors of oracle. Specify a dictionary of factors.
# Look at Oracle class definition to get names of factors.
# Ex: for the MM1Queue class,
# fixed_factors = {"lambda": 3.0,
# "mu": 8.0}
fixed_factors = {} # Resort to all default values.
# Initialize an instance of the specified oracle class.
# Replace <oracle_class_name> with name of oracle class.
# Ex: myoracle = MM1Queue(fixed_factors)
myoracle = <oracle_class_name>(fixed_factors)
# Working example for MM1 oracle. (Commented out)
# -----------------------------------------------
# from oracles.mm1queue import MM1Queue
# fixed_factors = {"lambda": 3.0, "mu": 8.0}
# myoracle = MM1Queue(fixed_factors)
# -----------------------------------------------
# The rest of this script requires no changes.
# Check that all factors describe a simulatable oracle.
# Check fixed factors individually.
for key, value in myoracle.factors.items():
print(f"The factor {key} is set as {value}. Is this simulatable? {bool(myoracle.check_simulatable_factor(key))}.")
# Check all factors collectively.
print(f"Is the specified oracle simulatable? {bool(myoracle.check_simulatable_factors())}.")
# Create a list of RNG objects for the simulation oracle to use when
# running replications.
rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myoracle.n_rngs)]
# Run a single replication of the oracle.
responses, gradients = myoracle.replicate(rng_list)
print("\nFor a single replication:")
print("\nResponses:")
for key, value in responses.items():
print(f"\t {key} is {value}.")
print("\n Gradients:")
for outerkey in gradients:
print(f"\tFor the response {outerkey}:")
for innerkey, value in gradients[outerkey].items():
print(f"\t\tThe gradient w.r.t. {innerkey} is {value}.")
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,908 | evaz1121/simopt | refs/heads/master | /simopt/rng/__init__.py | from .mrg32k3a import MRG32k3a
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,909 | evaz1121/simopt | refs/heads/master | /simopt/wrapper_base.py | #!/usr/bin/env python
"""
Summary
-------
Provide base classes for experiments and meta experiments.
Plus helper functions for reading/writing data and plotting.
Listing
-------
Curve : class
mean_of_curves : function
quantile_of_curves : function
cdf_of_curves_crossing_times : function
quantile_cross_jump : function
difference_of_curves : function
max_difference_of_curves : function
Experiment : class
trim_solver_results : function
read_experiment_results : function
post_normalize : function
bootstrap_sample_all : function
bootstrap_procedure : function
functional_of_curves : function
compute_bootstrap_CI : function
plot_bootstrap_CIs : function
report_max_halfwidth : function
plot_progress_curves : function
plot_solvability_cdfs : function
plot_area_scatterplots : function
plot_solvability_profiles : function
setup_plot : function
save_plot : function
MetaExperiment : class
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy.core.defchararray import endswith
from scipy.stats import norm
import pickle
import importlib
from copy import deepcopy
from rng.mrg32k3a import MRG32k3a
from base import Solution
from directory import solver_directory, problem_directory
class Curve(object):
"""
Base class for all curves.
Attributes
----------
x_vals : list of floats
values of horizontal components
y_vals : list of floats
values of vertical components
n_points : int
number of values in x- and y- vectors
Parameters
----------
x_vals : list of floats
values of horizontal components
y_vals : list of floats
values of vertical components
"""
def __init__(self, x_vals, y_vals):
if len(x_vals) != len(y_vals):
print("Vectors of x- and y- values must be of same length.")
self.x_vals = x_vals
self.y_vals = y_vals
self.n_points = len(x_vals)
def lookup(self, x):
"""
Lookup the y-value of the curve at an intermediate x-value.
Parameters
----------
x : float
x-value at which to lookup the y-value
Returns
-------
y : float
y-value corresponding to x
"""
if x < self.x_vals[0]:
y = np.nan
else:
idx = np.max(np.where(np.array(self.x_vals) <= x))
y = self.y_vals[idx]
return y
def compute_crossing_time(self, threshold):
"""
Compute the first time at which a curve drops below a given threshold.
Parameters
----------
threshold : float
value for which to find first crossing time
Returns
-------
crossing_time : float
first time at which a curve drops below threshold
"""
# Crossing time is defined as infinity if the curve does not drop
# below threshold.
crossing_time = np.inf
# Pass over curve to find first crossing time.
for i in range(self.n_points):
if self.y_vals[i] < threshold:
crossing_time = self.x_vals[i]
break
return crossing_time
def compute_area_under_curve(self):
"""
Compute the area under a curve.
Returns
-------
area : float
area under the curve
"""
area = np.dot(self.y_vals[:-1], np.diff(self.x_vals))
return area
def curve_to_mesh(self, mesh):
"""
Create a curve defined at equally spaced x values.
Parameters
----------
mesh : list of floats
list of uniformly spaced x values
Returns
-------
mesh_curve : wrapper_base.Curve object
curve with equally spaced x values
"""
mesh_curve = Curve(x_vals=mesh, y_vals=[self.lookup(x) for x in mesh])
return mesh_curve
def curve_to_full_curve(self):
"""
Create a curve with duplicate x- and y-values to indicate steps.
Returns
-------
full_curve : wrapper_base.Curve object
curve with duplicate x- and y-values
"""
duplicate_x_vals = [x for x in self.x_vals for _ in (0, 1)]
duplicate_y_vals = [y for y in self.y_vals for _ in (0, 1)]
full_curve = Curve(x_vals=duplicate_x_vals[1:], y_vals=duplicate_y_vals[:-1])
return full_curve
def plot(self, color_str="C0", curve_type="regular"):
"""
Plot a curve.
Parameters
----------
color_str : str
string indicating line color, e.g., "C0", "C1", etc.
Returns
-------
handle : list of matplotlib.lines.Line2D objects
curve handle, to use when creating legends
"""
if curve_type == "regular":
linestyle = "-"
linewidth = 2
elif curve_type == "conf_bound":
linestyle = "--"
linewidth = 1
handle, = plt.step(self.x_vals,
self.y_vals,
color=color_str,
linestyle=linestyle,
linewidth=linewidth,
where="post"
)
return handle
def mean_of_curves(curves):
"""
Compute pointwise (w.r.t. x values) mean of curves.
Starting and ending x values must coincide for all curves.
Parameters
----------
curves : list of wrapper_base.Curve objects
collection of curves to aggregate
Returns
-------
mean_curve : wrapper_base.Curve object
mean curve
"""
unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals])
mean_y_vals = [np.mean([curve.lookup(x_val) for curve in curves]) for x_val in unique_x_vals]
mean_curve = Curve(x_vals=unique_x_vals.tolist(), y_vals=mean_y_vals)
return mean_curve
def quantile_of_curves(curves, beta):
"""
Compute pointwise (w.r.t. x values) quantile of curves.
Starting and ending x values must coincide for all curves.
Parameters
----------
curves : list of wrapper_base.Curve objects
collection of curves to aggregate
beta : float
quantile level
Returns
-------
quantile_curve : wrapper_base.Curve object
quantile curve
"""
unique_x_vals = np.unique([x_val for curve in curves for x_val in curve.x_vals])
quantile_y_vals = [np.quantile([curve.lookup(x_val) for curve in curves], q=beta) for x_val in unique_x_vals]
quantile_curve = Curve(x_vals=unique_x_vals.tolist(), y_vals=quantile_y_vals)
return quantile_curve
def cdf_of_curves_crossing_times(curves, threshold):
"""
Compute the cdf of crossing times of curves.
Parameters
----------
curves : list of wrapper_base.Curve objects
collection of curves to aggregate
threshold : float
value for which to find first crossing time
Returns
-------
cdf_curve : wrapper_base.Curve object
cdf of crossing times
"""
n_curves = len(curves)
crossing_times = [curve.compute_crossing_time(threshold) for curve in curves]
unique_x_vals = [0] + list(np.unique([crossing_time for crossing_time in crossing_times if crossing_time < np.inf])) + [1]
cdf_y_vals = [sum(crossing_time <= x_val for crossing_time in crossing_times) / n_curves for x_val in unique_x_vals]
cdf_curve = Curve(x_vals=unique_x_vals, y_vals=cdf_y_vals)
return cdf_curve
def quantile_cross_jump(curves, threshold, beta):
    """
    Compute a simple curve with a jump at the quantile of the crossing times.
    Parameters
    ----------
    curves : list of wrapper_base.Curve objects
        collection of curves to aggregate
    threshold : float
        value for which to find first crossing time
    beta : float
        quantile level
    Returns
    -------
    jump_curve : wrapper_base.Curve object
        piecewise-constant curve with a jump at the quantile crossing time (if finite)
    """
    crossing_times = [curve.compute_crossing_time(threshold=threshold) for curve in curves]
    time_quantile = np.quantile(crossing_times, q=beta)
    # np.quantile evaluates to np.nan if forced to interpolate between a
    # finite and an infinite value. These are rare cases. Since crossing
    # times are non-negative, such a quantile is effectively +infinity,
    # so the curve never jumps.
    if np.isnan(time_quantile) or time_quantile == np.inf:
        return Curve(x_vals=[0, 1], y_vals=[0, 0])
    return Curve(x_vals=[0, time_quantile, 1], y_vals=[0, 1, 1])
def difference_of_curves(curve1, curve2):
    """
    Compute the difference of two curves (Curve 1 - Curve 2).
    Parameters
    ----------
    curve1, curve2 : wrapper_base.Curve objects
        curves to take the difference of
    Returns
    -------
    difference_curve : wrapper_base.Curve object
        difference of curves
    """
    # Union of both curves' x values, sorted and de-duplicated.
    grid = np.unique(curve1.x_vals + curve2.x_vals)
    gaps = []
    for x in grid:
        gaps.append(curve1.lookup(x) - curve2.lookup(x))
    return Curve(x_vals=grid.tolist(), y_vals=gaps)
def max_difference_of_curves(curve1, curve2):
    """
    Compute the maximum difference of two curves (Curve 1 - Curve 2)
    Parameters
    ----------
    curve1, curve2 : wrapper_base.Curve objects
        curves to take the difference of
    Returns
    -------
    max_diff : float
        maximum difference of curves
    """
    # The pointwise maximum of the difference curve over its evaluation grid.
    return max(difference_of_curves(curve1, curve2).y_vals)
class Experiment(object):
    """
    Base class for running one solver on one problem.
    Attributes
    ----------
    solver : base.Solver object
        simulation-optimization solver
    problem : base.Problem object
        simulation-optimization problem
    n_macroreps : int > 0
        number of macroreplications run
    file_name_path : str
        path of .pickle file for saving wrapper_base.Experiment object
    all_recommended_xs : list of lists of tuples
        sequences of recommended solutions from each macroreplication
    all_intermediate_budgets : list of lists
        sequences of intermediate budgets from each macroreplication
    n_postreps : int
        number of postreplications to take at each recommended solution
    crn_across_budget : bool
        use CRN for post-replications at solutions recommended at different times?
    crn_across_macroreps : bool
        use CRN for post-replications at solutions recommended on different macroreplications?
    all_post_replicates : list of lists of lists
        all post-replicates from all solutions from all macroreplications
    all_est_objectives : numpy array of arrays
        estimated objective values of all solutions from all macroreplications
    n_postreps_init_opt : int
        number of postreplications to take at initial solution (x0) and
        optimal solution (x*)
    crn_across_init_opt : bool
        use CRN for post-replications at solutions x0 and x*?
    x0 : tuple
        initial solution (x0)
    x0_postreps : list
        post-replicates at x0
    xstar : tuple
        proxy for optimal solution (x*)
    xstar_postreps : list
        post-replicates at x*
    objective_curves : list of wrapper_base.Curve objects
        curves of estimated objective function values,
        one for each macroreplication
    progress_curves : list of wrapper_base.Curve objects
        progress curves, one for each macroreplication
    Arguments
    ---------
    solver_name : str
        name of solver
    problem_name : str
        name of problem
    solver_rename : str
        user-specified name for solver
    problem_rename : str
        user-specified name for problem
    solver_fixed_factors : dict
        dictionary of user-specified solver factors
    problem_fixed_factors : dict
        dictionary of user-specified problem factors
    oracle_fixed_factors : dict
        dictionary of user-specified oracle factors
    file_name_path : str
        path of .pickle file for saving wrapper_base.Experiment object
    """
    # NOTE(review): the mutable default dicts below are shared across calls;
    # this is safe only if the solver/problem constructors never mutate
    # their fixed_factors argument — TODO confirm.
    def __init__(self, solver_name, problem_name, solver_rename=None, problem_rename=None, solver_fixed_factors={}, problem_fixed_factors={}, oracle_fixed_factors={}, file_name_path=None):
        # Look up the solver class by name and instantiate it, applying the
        # optional user-specified rename.
        if solver_rename is None:
            self.solver = solver_directory[solver_name](fixed_factors=solver_fixed_factors)
        else:
            self.solver = solver_directory[solver_name](name=solver_rename, fixed_factors=solver_fixed_factors)
        # Same for the problem (which also forwards oracle factors).
        if problem_rename is None:
            self.problem = problem_directory[problem_name](fixed_factors=problem_fixed_factors, oracle_fixed_factors=oracle_fixed_factors)
        else:
            self.problem = problem_directory[problem_name](name=problem_rename, fixed_factors=problem_fixed_factors, oracle_fixed_factors=oracle_fixed_factors)
        # Default pickle destination is derived from the (possibly renamed)
        # solver/problem names.
        if file_name_path is None:
            self.file_name_path = f"./experiments/outputs/{self.solver.name}_on_{self.problem.name}.pickle"
        else:
            self.file_name_path = file_name_path
    def check_compatibility(self):
        """
        Check whether the experiment's solver and problem are compatible.
        Returns
        -------
        error_str : str
            error message in the event problem and solver are incompatible
        """
        # An empty string means "compatible"; otherwise each failed check
        # appends one line describing the incompatibility.
        error_str = ""
        # Check number of objectives.
        if self.solver.objective_type == "single" and self.problem.n_objectives > 1:
            error_str += "Solver cannot solve a multi-objective problem.\n"
        elif self.solver.objective_type == "multi" and self.problem.n_objectives == 1:
            error_str += "Multi-objective solver being run on a single-objective problem.\n"
        # Check constraint types. The list is ordered from least to most
        # restrictive; a solver must handle at least the problem's level.
        constraint_types = ["unconstrained", "box", "deterministic", "stochastic"]
        if constraint_types.index(self.solver.constraint_type) < constraint_types.index(self.problem.constraint_type):
            error_str += "Solver can handle upto " + self.solver.constraint_type + " constraints, but problem has " + self.problem.constraint_type + " constraints.\n"
        # Check variable types.
        if self.solver.variable_type == "discrete" and self.problem.variable_type != "discrete":
            error_str += "Solver is for discrete variables but problem variables are " + self.problem.variable_type + ".\n"
        elif self.solver.variable_type == "continuous" and self.problem.variable_type != "continuous":
            error_str += "Solver is for continuous variables but problem variables are " + self.problem.variable_type + ".\n"
        # Check for existence of gradient estimates.
        if self.solver.gradient_needed and not self.problem.gradient_available:
            error_str += "Gradient-based solver does not have access to gradient for this problem.\n"
        return error_str
    def run(self, n_macroreps):
        """
        Run n_macroreps of the solver on the problem.
        Arguments
        ---------
        n_macroreps : int
            number of macroreplications of the solver to run on the problem
        """
        self.n_macroreps = n_macroreps
        self.all_recommended_xs = []
        self.all_intermediate_budgets = []
        # Create, initialize, and attach random number generators
        #     Stream 0: reserved for taking post-replications
        #     Stream 1: reserved for bootstrapping
        #     Stream 2: reserved for overhead ...
        #         Substream 0: rng for random problem instance
        #         Substream 1: rng for random initial solution x0 and
        #                      restart solutions
        #         Substream 2: rng for selecting random feasible solutions
        #         Substream 3: rng for solver's internal randomness
        #     Streams 3, 4, ..., n_macroreps + 2: reserved for
        #                                         macroreplications
        # NOTE(review): rng1 and rng3 are labeled "unused" below yet are
        # attached to the solver — TODO confirm which substreams the solver
        # actually consumes.
        rng0 = MRG32k3a(s_ss_sss_index=[2, 0, 0])  # unused
        rng1 = MRG32k3a(s_ss_sss_index=[2, 1, 0])  # unused
        rng2 = MRG32k3a(s_ss_sss_index=[2, 2, 0])
        rng3 = MRG32k3a(s_ss_sss_index=[2, 3, 0])  # unused
        self.solver.attach_rngs([rng1, rng2, rng3])
        # Run n_macroreps of the solver on the problem.
        # Report recommended solutions and corresponding intermediate budgets.
        for mrep in range(self.n_macroreps):
            print(f"Running macroreplication {mrep + 1} of {self.n_macroreps} of Solver {self.solver.name} on Problem {self.problem.name}.")
            # Create, initialize, and attach RNGs used for simulating solutions.
            # Each macroreplication gets its own dedicated stream (mrep + 2
            # follows the stream layout documented above... note the comment
            # above says streams start at 3 — TODO confirm intended offset).
            progenitor_rngs = [MRG32k3a(s_ss_sss_index=[mrep + 2, ss, 0]) for ss in range(self.problem.oracle.n_rngs)]
            self.solver.solution_progenitor_rngs = progenitor_rngs
            # print([rng.s_ss_sss_index for rng in progenitor_rngs])
            # Run the solver on the problem.
            recommended_solns, intermediate_budgets = self.solver.solve(problem=self.problem)
            # Trim solutions recommended after final budget
            recommended_solns, intermediate_budgets = trim_solver_results(problem=self.problem, recommended_solns=recommended_solns, intermediate_budgets=intermediate_budgets)
            # Extract decision-variable vectors (x) from recommended solutions.
            # Record recommended solutions and intermediate budgets.
            self.all_recommended_xs.append([solution.x for solution in recommended_solns])
            self.all_intermediate_budgets.append(intermediate_budgets)
        # Save Experiment object to .pickle file.
        self.record_experiment_results()
    def check_run(self):
        """
        Check if the experiment has been run.
        Returns
        -------
        ran : bool
            has the experiment been run?
        """
        # run() sets all_recommended_xs; its absence means run() never completed.
        if getattr(self, "all_recommended_xs", None) is None:
            ran = False
        else:
            ran = True
        return ran
    def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False):
        """
        Run postreplications at solutions recommended by the solver.
        Arguments
        ---------
        n_postreps : int
            number of postreplications to take at each recommended solution
        crn_across_budget : bool
            use CRN for post-replications at solutions recommended at different times?
        crn_across_macroreps : bool
            use CRN for post-replications at solutions recommended on different macroreplications?
        """
        self.n_postreps = n_postreps
        self.crn_across_budget = crn_across_budget
        self.crn_across_macroreps = crn_across_macroreps
        # Create, initialize, and attach RNGs for oracle.
        # Stream 0: reserved for post-replications.
        # Skip over first set of substreams dedicated for sampling x0 and x*.
        baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, self.problem.oracle.n_rngs + rng_index, 0]) for rng_index in range(self.problem.oracle.n_rngs)]
        # Initialize matrix containing
        #     all postreplicates of objective,
        #     for each macroreplication,
        #     for each budget.
        self.all_post_replicates = [[[] for _ in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)]
        # Simulate intermediate recommended solutions.
        for mrep in range(self.n_macroreps):
            for budget_index in range(len(self.all_intermediate_budgets[mrep])):
                x = self.all_recommended_xs[mrep][budget_index]
                # Fresh Solution object per recommended x so no replicate
                # state carries over between solutions.
                fresh_soln = Solution(x, self.problem)
                fresh_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
                self.problem.simulate(solution=fresh_soln, m=self.n_postreps)
                # Store results
                self.all_post_replicates[mrep][budget_index] = list(fresh_soln.objectives[:fresh_soln.n_reps][:, 0])  # 0 <- assuming only one objective
                if crn_across_budget:
                    # Reset each rng to start of its current substream so the
                    # same random numbers are reused at every budget.
                    for rng in baseline_rngs:
                        rng.reset_substream()
            if crn_across_macroreps:
                # Reset each rng to start of its current substream.
                for rng in baseline_rngs:
                    rng.reset_substream()
            else:
                # Advance each rng to start of
                # substream = current substream + # of oracle RNGs.
                for rng in baseline_rngs:
                    for _ in range(self.problem.oracle.n_rngs):
                        rng.advance_substream()
        # Store estimated objective for each macrorep for each budget.
        self.all_est_objectives = [[np.mean(self.all_post_replicates[mrep][budget_index]) for budget_index in range(len(self.all_intermediate_budgets[mrep]))] for mrep in range(self.n_macroreps)]
        # Save Experiment object to .pickle file.
        self.record_experiment_results()
    def check_postreplicate(self):
        """
        Check if the experiment has been postreplicated.
        Returns
        -------
        postreplicated : bool
            has the experiment been postreplicated?
        """
        # post_replicate() sets all_est_objectives as its final product.
        if getattr(self, "all_est_objectives", None) is None:
            postreplicated = False
        else:
            postreplicated = True
        return postreplicated
    def bootstrap_sample(self, bootstrap_rng, normalize=True):
        """
        Generate a bootstrap sample of estimated objective curves or estimated
        progress curves.
        Parameters
        ----------
        bootstrap_rng : MRG32k3a object
            random number generator to use for bootstrapping
        normalize : Boolean
            normalize progress curves w.r.t. optimality gaps?
        Returns
        -------
        bootstrap_curves : list of wrapper_base.Curve objects
            bootstrapped estimated objective curves or estimated progress
            curves of all solutions from all bootstrapped macroreplications
        """
        bootstrap_curves = []
        # Uniformly resample M macroreplications (with replacement) from 0, 1, ..., M-1.
        # Subsubstream 0: reserved for this outer-level bootstrapping.
        bs_mrep_idxs = bootstrap_rng.choices(range(self.n_macroreps), k=self.n_macroreps)
        # Advance RNG subsubstream to prepare for inner-level bootstrapping.
        bootstrap_rng.advance_subsubstream()
        # Subsubstream 1: reserved for bootstrapping at x0 and x*.
        # Bootstrap sample post-replicates at common x0.
        # Uniformly resample L postreps (with replacement) from 0, 1, ..., L-1.
        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt)
        # Compute the mean of the resampled postreplications.
        bs_initial_obj_val = np.mean([self.x0_postreps[postrep] for postrep in bs_postrep_idxs])
        # Reset subsubstream if using CRN across budgets.
        # This means the same postreplication indices will be used for resampling at x0 and x*.
        if self.crn_across_init_opt:
            bootstrap_rng.reset_subsubstream()
        # Bootstrap sample postreplicates at reference optimal solution x*.
        # Uniformly resample L postreps (with replacement) from 0, 1, ..., L.
        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps_init_opt), k=self.n_postreps_init_opt)
        # Compute the mean of the resampled postreplications.
        bs_optimal_obj_val = np.mean([self.xstar_postreps[postrep] for postrep in bs_postrep_idxs])
        # Compute initial optimality gap.
        bs_initial_opt_gap = bs_initial_obj_val - bs_optimal_obj_val
        # Advance RNG subsubstream to prepare for inner-level bootstrapping.
        # Will now be at start of subsubstream 2.
        bootstrap_rng.advance_subsubstream()
        # Bootstrap within each bootstrapped macroreplication.
        # Option 1: Simpler (default) CRN scheme, which makes for faster code.
        if self.crn_across_budget and not self.crn_across_macroreps:
            for idx in range(self.n_macroreps):
                mrep = bs_mrep_idxs[idx]
                # Inner-level bootstrapping over intermediate recommended solutions.
                est_objectives = []
                # Same postreplication indices for all intermediate budgets on
                # a given macroreplciation.
                bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps)
                for budget in range(len(self.all_intermediate_budgets[mrep])):
                    # If solution is x0...
                    if self.all_recommended_xs[mrep][budget] == self.x0:
                        est_objectives.append(bs_initial_obj_val)
                    # ...else if solution is x*...
                    elif self.all_recommended_xs[mrep][budget] == self.xstar:
                        est_objectives.append(bs_optimal_obj_val)
                    # ... else solution other than x0 or x*.
                    else:
                        # Compute the mean of the resampled postreplications.
                        est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs]))
                # Record objective or progress curve.
                if normalize:
                    frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]]
                    norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives]
                    new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)
                    bootstrap_curves.append(new_progress_curve)
                else:
                    new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives)
                    bootstrap_curves.append(new_objective_curve)
        # Option 2: Non-default CRN behavior.
        else:
            for idx in range(self.n_macroreps):
                mrep = bs_mrep_idxs[idx]
                # Inner-level bootstrapping over intermediate recommended solutions.
                est_objectives = []
                for budget in range(len(self.all_intermediate_budgets[mrep])):
                    # If solution is x0...
                    if self.all_recommended_xs[mrep][budget] == self.x0:
                        est_objectives.append(bs_initial_obj_val)
                    # ...else if solution is x*...
                    elif self.all_recommended_xs[mrep][budget] == self.xstar:
                        est_objectives.append(bs_optimal_obj_val)
                    # ... else solution other than x0 or x*.
                    else:
                        # Uniformly resample N postreps (with replacement) from 0, 1, ..., N-1.
                        bs_postrep_idxs = bootstrap_rng.choices(range(self.n_postreps), k=self.n_postreps)
                        # Compute the mean of the resampled postreplications.
                        est_objectives.append(np.mean([self.all_post_replicates[mrep][budget][postrep] for postrep in bs_postrep_idxs]))
                    # Reset subsubstream if using CRN across budgets.
                    if self.crn_across_budget:
                        bootstrap_rng.reset_subsubstream()
                # If using CRN across macroreplications...
                if self.crn_across_macroreps:
                    # ...reset subsubstreams...
                    bootstrap_rng.reset_subsubstream()
                # ...else if not using CRN across macrorep...
                else:
                    # ...advance subsubstream.
                    bootstrap_rng.advance_subsubstream()
                # Record objective or progress curve.
                if normalize:
                    frac_intermediate_budgets = [budget / self.problem.factors["budget"] for budget in self.all_intermediate_budgets[mrep]]
                    norm_est_objectives = [(est_objective - bs_optimal_obj_val) / bs_initial_opt_gap for est_objective in est_objectives]
                    new_progress_curve = Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives)
                    bootstrap_curves.append(new_progress_curve)
                else:
                    new_objective_curve = Curve(x_vals=self.all_intermediate_budgets[mrep], y_vals=est_objectives)
                    bootstrap_curves.append(new_objective_curve)
        return bootstrap_curves
    def clear_run(self):
        """
        Delete results from run() method and any downstream results.
        """
        attributes = ["n_macroreps",
                      "all_recommended_xs",
                      "all_intermediate_budgets"]
        # Attributes that were never set are silently skipped.
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                pass
        # Downstream results depend on run() output, so clear them too.
        self.clear_postreplicate()
    def clear_postreplicate(self):
        """
        Delete results from post_replicate() method and any downstream results.
        """
        attributes = ["n_postreps",
                      "crn_across_budget",
                      "crn_across_macroreps",
                      "all_post_replicates",
                      "all_est_objectives"]
        # Attributes that were never set are silently skipped.
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                pass
        # Downstream normalization results depend on these, so clear them too.
        self.clear_postnorm()
    def clear_postnorm(self):
        """
        Delete results from post_normalize() associated with experiment.
        """
        attributes = ["n_postreps_init_opt",
                      "crn_across_init_opt",
                      "x0",
                      "x0_postreps",
                      "xstar",
                      "xstar_postreps",
                      "objective_curves",
                      "progress_curves"
                      ]
        # Attributes that were never set are silently skipped.
        for attribute in attributes:
            try:
                delattr(self, attribute)
            except Exception:
                pass
    def record_experiment_results(self):
        """
        Save wrapper_base.Experiment object to .pickle file.
        """
        # Overwrites any existing pickle at self.file_name_path.
        with open(self.file_name_path, "wb") as file:
            pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)
def trim_solver_results(problem, recommended_solns, intermediate_budgets):
    """
    Trim solutions recommended by solver after problem's max budget.
    Arguments
    ---------
    problem : base.Problem object
        Problem object on which the solver was run
    recommended_solns : list of base.Solution objects
        solutions recommended by the solver
    intermediate_budgets : list of ints >= 0
        intermediate budgets at which solver recommended different solutions
    Returns
    -------
    recommended_solns : list of base.Solution objects
        trimmed list of recommended solutions (modified in place)
    intermediate_budgets : list of ints >= 0
        trimmed list of intermediate budgets (modified in place)
    """
    max_budget = problem.factors["budget"]
    # Remove solutions corresponding to intermediate budgets exceeding max budget.
    # Delete from the back so earlier indices remain valid.
    invalid_idxs = [idx for idx, budget in enumerate(intermediate_budgets) if budget > max_budget]
    for invalid_idx in sorted(invalid_idxs, reverse=True):
        del recommended_solns[invalid_idx]
        del intermediate_budgets[invalid_idx]
    # If no solution is recommended at the final budget,
    # re-recommend the latest recommended solution.
    # (Necessary for clean plotting of progress curves.)
    # Guard against empty lists: previously intermediate_budgets[-1]
    # raised IndexError when every recommendation exceeded the budget.
    if intermediate_budgets and intermediate_budgets[-1] < max_budget:
        recommended_solns.append(recommended_solns[-1])
        intermediate_budgets.append(max_budget)
    return recommended_solns, intermediate_budgets
def read_experiment_results(file_name_path):
    """
    Read in wrapper_base.Experiment object from .pickle file.
    Arguments
    ---------
    file_name_path : string
        path of .pickle file for reading wrapper_base.Experiment object
    Returns
    -------
    experiment : wrapper_base.Experiment object
        experiment that has been run or has been post-processed
    """
    # Unpickle the stored experiment; the file handle is closed on exit.
    with open(file_name_path, "rb") as picklefile:
        return pickle.load(picklefile)
def post_normalize(experiments, n_postreps_init_opt, crn_across_init_opt=True, proxy_init_val=None, proxy_opt_val=None, proxy_opt_x=None):
    """
    Construct objective curves and (normalized) progress curves
    for a collection of experiments on a given problem.
    Parameters
    ----------
    experiments : list of wrapper_base.Experiment objects
        experiments of different solvers on a common problem
    n_postreps_init_opt : int
        number of postreplications to take at initial x0 and optimal x*
    crn_across_init_opt : bool
        use CRN for post-replications at solutions x0 and x*?
    proxy_init_val : float
        known objective function value of initial solution
    proxy_opt_val : float
        proxy for or bound on optimal objective function value
    proxy_opt_x : tuple
        proxy for optimal solution
    """
    # Check that all experiments have the same problem and same
    # post-experimental setup. Mismatches are reported but not fatal.
    ref_experiment = experiments[0]
    for experiment in experiments:
        # Check if problems are the same.
        if experiment.problem != ref_experiment.problem:
            print("At least two experiments have different problem instances.")
        # Check if experiments have common number of macroreps.
        if experiment.n_macroreps != ref_experiment.n_macroreps:
            print("At least two experiments have different numbers of macro-replications.")
        # Check if experiment has been post-replicated and with common number of postreps.
        if getattr(experiment, "n_postreps", None) is None:
            # BUG FIX: Experiment stores .solver/.problem objects, not
            # .solver_name/.problem_name attributes; the old attribute
            # access raised AttributeError instead of printing this warning.
            print(f"The experiment of {experiment.solver.name} on {experiment.problem.name} has not been post-replicated.")
        elif getattr(experiment, "n_postreps", None) != getattr(ref_experiment, "n_postreps", None):
            print("At least two experiments have different numbers of post-replications.")
            print("Estimation of optimal solution x* may be based on different numbers of post-replications.")
    # Take post-replications at common x0.
    # Create, initialize, and attach RNGs for oracle.
    # Stream 0: reserved for post-replications.
    # BUG FIX: use the reference experiment's problem rather than the stale
    # loop variable `experiment` (equivalent only when all problems match).
    baseline_rngs = [MRG32k3a(s_ss_sss_index=[0, rng_index, 0]) for rng_index in range(ref_experiment.problem.oracle.n_rngs)]
    x0 = ref_experiment.problem.factors["initial_solution"]
    if proxy_init_val is not None:
        # Deterministic proxy: duplicate it so later bootstrapping still works.
        x0_postreps = [proxy_init_val] * n_postreps_init_opt
    else:
        initial_soln = Solution(x0, ref_experiment.problem)
        initial_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
        ref_experiment.problem.simulate(solution=initial_soln, m=n_postreps_init_opt)
        x0_postreps = list(initial_soln.objectives[:n_postreps_init_opt][:, 0])  # 0 <- assuming only one objective
    if crn_across_init_opt:
        # Reset each rng to start of its current substream so x* post-reps
        # reuse the same random numbers as the x0 post-reps.
        for rng in baseline_rngs:
            rng.reset_substream()
    # Determine (proxy for) optimal solution and/or (proxy for) its
    # objective function value. If deterministic (proxy for) f(x*),
    # create duplicate post-replicates to facilitate later bootstrapping.
    # If proxy for f(x*) is specified...
    if proxy_opt_val is not None:
        xstar = None
        xstar_postreps = [proxy_opt_val] * n_postreps_init_opt
    # ...else if proxy for x* is specified...
    elif proxy_opt_x is not None:
        xstar = proxy_opt_x
        # Take post-replications at xstar.
        opt_soln = Solution(xstar, ref_experiment.problem)
        opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
        ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt)
        xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0])  # 0 <- assuming only one objective
    # ...else if f(x*) is known...
    elif ref_experiment.problem.optimal_value is not None:
        xstar = None
        xstar_postreps = [ref_experiment.problem.optimal_value] * n_postreps_init_opt
    # ...else if x* is known...
    elif ref_experiment.problem.optimal_solution is not None:
        xstar = ref_experiment.problem.optimal_solution
        # Take post-replications at xstar.
        opt_soln = Solution(xstar, ref_experiment.problem)
        opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
        ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt)
        xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0])  # 0 <- assuming only one objective
    # ...else determine x* empirically as estimated best solution
    # found by any solver on any macroreplication.
    else:
        # TO DO: Simplify this block of code.
        # Find the experiment achieving the best estimated objective
        # (minmax[0] orients min/max problems so "best" is always a max).
        best_est_objectives = np.zeros(len(experiments))
        for experiment_idx in range(len(experiments)):
            experiment = experiments[experiment_idx]
            exp_best_est_objectives = np.zeros(experiment.n_macroreps)
            for mrep in range(experiment.n_macroreps):
                exp_best_est_objectives[mrep] = np.max(experiment.problem.minmax[0] * np.array(experiment.all_est_objectives[mrep]))
            best_est_objectives[experiment_idx] = np.max(exp_best_est_objectives)
        best_experiment_idx = np.argmax(best_est_objectives)
        best_experiment = experiments[best_experiment_idx]
        # BUG FIX: size and orient by best_experiment, not the stale loop
        # variable `experiment` (the last experiment in the list).
        best_exp_best_est_objectives = np.zeros(best_experiment.n_macroreps)
        for mrep in range(best_experiment.n_macroreps):
            best_exp_best_est_objectives[mrep] = np.max(best_experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[mrep]))
        best_mrep = np.argmax(best_exp_best_est_objectives)
        best_budget_idx = np.argmax(best_experiment.problem.minmax[0] * np.array(best_experiment.all_est_objectives[best_mrep]))
        xstar = best_experiment.all_recommended_xs[best_mrep][best_budget_idx]
        # Take post-replications at x*.
        opt_soln = Solution(xstar, ref_experiment.problem)
        opt_soln.attach_rngs(rng_list=baseline_rngs, copy=False)
        ref_experiment.problem.simulate(solution=opt_soln, m=n_postreps_init_opt)
        xstar_postreps = list(opt_soln.objectives[:n_postreps_init_opt][:, 0])  # 0 <- assuming only one objective
    # Compute signed initial optimality gap = f(x0) - f(x*).
    initial_obj_val = np.mean(x0_postreps)
    opt_obj_val = np.mean(xstar_postreps)
    initial_opt_gap = initial_obj_val - opt_obj_val
    # Store x0 and x* info and compute progress curves for each Experiment.
    for experiment in experiments:
        # DOUBLE-CHECK FOR SHALLOW COPY ISSUES.
        experiment.n_postreps_init_opt = n_postreps_init_opt
        experiment.crn_across_init_opt = crn_across_init_opt
        experiment.x0 = x0
        experiment.x0_postreps = x0_postreps
        experiment.xstar = xstar
        experiment.xstar_postreps = xstar_postreps
        # Construct objective and progress curves.
        experiment.objective_curves = []
        experiment.progress_curves = []
        for mrep in range(experiment.n_macroreps):
            est_objectives = []
            # Substitute estimates at x0 and x* (based on N postreplicates)
            # with new estimates (based on L postreplicates).
            for budget in range(len(experiment.all_intermediate_budgets[mrep])):
                if experiment.all_recommended_xs[mrep][budget] == x0:
                    est_objectives.append(np.mean(x0_postreps))
                elif experiment.all_recommended_xs[mrep][budget] == xstar:
                    est_objectives.append(np.mean(xstar_postreps))
                else:
                    est_objectives.append(experiment.all_est_objectives[mrep][budget])
            experiment.objective_curves.append(Curve(x_vals=experiment.all_intermediate_budgets[mrep], y_vals=est_objectives))
            # Normalize by initial optimality gap.
            norm_est_objectives = [(est_objective - opt_obj_val) / initial_opt_gap for est_objective in est_objectives]
            frac_intermediate_budgets = [budget / experiment.problem.factors["budget"] for budget in experiment.all_intermediate_budgets[mrep]]
            experiment.progress_curves.append(Curve(x_vals=frac_intermediate_budgets, y_vals=norm_est_objectives))
        # Save Experiment object to .pickle file.
        experiment.record_experiment_results()
def bootstrap_sample_all(experiments, bootstrap_rng, normalize=True):
    """
    Generate bootstrap samples of estimated progress curves (normalized
    and unnormalized) from a set of experiments.
    Arguments
    ---------
    experiments : list of list of wrapper_base.Experiment objects
        experiments of different solvers and/or problems
    bootstrap_rng : MRG32k3a object
        random number generator to use for bootstrapping
    normalize : bool
        normalize progress curves w.r.t. optimality gaps?
    Returns
    -------
    bootstrap_curves : list of list of list of wrapper_base.Curve objects
        bootstrapped estimated objective curves or estimated progress curves
        of all solutions from all macroreplications
    """
    bootstrap_curves = []
    # Obtain a bootstrap sample from each solver-problem pair, preserving
    # the [solver][problem] nesting of the input.
    for solver_experiments in experiments:
        solver_row = []
        for experiment in solver_experiments:
            solver_row.append(experiment.bootstrap_sample(bootstrap_rng, normalize))
            # Reset substream for next solver-problem pair.
            bootstrap_rng.reset_substream()
        bootstrap_curves.append(solver_row)
    # Advance substream of random number generator to prepare for next bootstrap sample.
    bootstrap_rng.advance_substream()
    return bootstrap_curves
def bootstrap_procedure(experiments, n_bootstraps, plot_type, beta=None, solve_tol=None, estimator=None, normalize=True):
    """
    Construct bootstrap confidence-interval bounds for a functional of a
    set of experiments.
    Parameters
    ----------
    experiments : list of list of wrapper_base.Experiment objects
        experiments of different solvers and/or problems
    n_bootstraps : int > 0
        number of times to generate a bootstrap sample of estimated progress curves
    plot_type : string
        indicates which type of plot to produce
            "mean" : estimated mean progress curve
            "quantile" : estimated beta quantile progress curve
            "area_mean" : mean of area under progress curve
            "area_std_dev" : standard deviation of area under progress curve
            "solve_time_quantile" : beta quantile of solve time
            "solve_time_cdf" : cdf of solve time
            "cdf_solvability" : cdf solvability profile
            "quantile_solvability" : quantile solvability profile
            "diff_cdf_solvability" : difference of cdf solvability profiles
            "diff_quantile_solvability" : difference of quantile solvability profiles
    beta : float in (0,1)
        quantile to plot, e.g., beta quantile
    solve_tol : float in (0,1]
        relative optimality gap definining when a problem is solved
    estimator : float or wrapper_base.Curve object
        main estimator, e.g., mean convergence curve from an experiment
    normalize : bool
        normalize progress curves w.r.t. optimality gaps?
    Returns
    -------
    bs_CI_lower_bounds, bs_CI_upper_bounds = floats or wrapper_base.Curve objects
        lower and upper bound(s) of bootstrap CI(s), as floats or curves
    """
    # Stream 1 of the random number generator is dedicated to bootstrapping.
    bootstrap_rng = MRG32k3a(s_ss_sss_index=[1, 0, 0])
    # Apply the functional to each of n_bootstraps resampled curve sets.
    bootstrap_replications = []
    for _ in range(n_bootstraps):
        sample_curves = bootstrap_sample_all(experiments, bootstrap_rng=bootstrap_rng, normalize=normalize)
        bootstrap_replications.append(functional_of_curves(sample_curves, plot_type, beta=beta, solve_tol=solve_tol))
    # Distinguish cases where functional returns a scalar vs a curve.
    scalar_plot_types = {"area_mean", "area_std_dev", "solve_time_quantile"}
    curve_plot_types = {"mean", "quantile", "solve_time_cdf", "cdf_solvability", "quantile_solvability", "diff_cdf_solvability", "diff_quantile_solvability"}
    if plot_type in scalar_plot_types:
        # Functional returns a scalar: one CI.
        bs_CI_lower_bounds, bs_CI_upper_bounds = compute_bootstrap_CI(bootstrap_replications, conf_level=0.95, bias_correction=True, overall_estimator=estimator)
    elif plot_type in curve_plot_types:
        # Functional returns a curve: a pointwise CI at every budget that
        # appears in any bootstrap replication.
        unique_budgets = list(np.unique([budget for curve in bootstrap_replications for budget in curve.x_vals]))
        lower_y_vals = []
        upper_y_vals = []
        for budget in unique_budgets:
            replicates_at_budget = [curve.lookup(x=budget) for curve in bootstrap_replications]
            lower_bound, upper_bound = compute_bootstrap_CI(replicates_at_budget,
                                                            conf_level=0.95,
                                                            bias_correction=True,
                                                            overall_estimator=estimator.lookup(x=budget)
                                                            )
            lower_y_vals.append(lower_bound)
            upper_y_vals.append(upper_bound)
        bs_CI_lower_bounds = Curve(x_vals=unique_budgets, y_vals=lower_y_vals)
        bs_CI_upper_bounds = Curve(x_vals=unique_budgets, y_vals=upper_y_vals)
    return bs_CI_lower_bounds, bs_CI_upper_bounds
def functional_of_curves(bootstrap_curves, plot_type, beta=0.5, solve_tol=0.1):
    """
    Compute a functional of the bootstrapped objective/progress curves.
    Parameters
    ----------
    bootstrap_curves : list of list of list of wrapper_base.Curve objects
        bootstrapped estimated objective curves or estimated progress curves
        of all solutions from all macroreplications
    plot_type : string
        indicates which type of plot to produce
            "mean" : estimated mean progress curve
            "quantile" : estimated beta quantile progress curve
            "area_mean" : mean of area under progress curve
            "area_std_dev" : standard deviation of area under progress curve
            "solve_time_quantile" : beta quantile of solve time
            "solve_time_cdf" : cdf of solve time
            "cdf_solvability" : cdf solvability profile
            "quantile_solvability" : quantile solvability profile
            "diff_cdf_solvability" : difference of cdf solvability profiles
            "diff_quantile_solvability" : difference of quantile solvability profiles
    beta : float in (0,1)
        quantile to plot, e.g., beta quantile
    solve_tol : float in (0,1]
        relative optimality gap defining when a problem is solved
    Returns
    -------
    functional : float or wrapper_base.Curve object
        functional of bootstrapped curves, e.g., mean progress curves,
        mean area under progress curve, quantile of crossing time, etc.
        None if plot_type is not recognized.
    """
    if plot_type == "mean":
        # Single experiment --> returns a curve.
        functional = mean_of_curves(bootstrap_curves[0][0])
    elif plot_type == "quantile":
        # Single experiment --> returns a curve.
        functional = quantile_of_curves(bootstrap_curves[0][0], beta=beta)
    elif plot_type == "area_mean":
        # Single experiment --> returns a scalar.
        functional = np.mean([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]])
    elif plot_type == "area_std_dev":
        # Single experiment --> returns a scalar.
        functional = np.std([curve.compute_area_under_curve() for curve in bootstrap_curves[0][0]], ddof=1)
    elif plot_type == "solve_time_quantile":
        # Single experiment --> returns a scalar
        functional = np.quantile([curve.compute_crossing_time(threshold=solve_tol) for curve in bootstrap_curves[0][0]], q=beta)
    elif plot_type == "solve_time_cdf":
        # Single experiment --> returns a curve.
        functional = cdf_of_curves_crossing_times(bootstrap_curves[0][0], threshold=solve_tol)
    elif plot_type == "cdf_solvability":
        # One solver, multiple problems --> returns a curve.
        functional = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[0]])
    elif plot_type == "quantile_solvability":
        # One solver, multiple problems --> returns a curve.
        functional = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[0]])
    elif plot_type == "diff_cdf_solvability":
        # Two solvers, multiple problems --> returns a curve.
        solvability_profile_1 = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[0]])
        solvability_profile_2 = mean_of_curves([cdf_of_curves_crossing_times(curves=progress_curves, threshold=solve_tol) for progress_curves in bootstrap_curves[1]])
        functional = difference_of_curves(solvability_profile_1, solvability_profile_2)
    elif plot_type == "diff_quantile_solvability":
        # Two solvers, multiple problems --> returns a curve.
        solvability_profile_1 = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[0]])
        solvability_profile_2 = mean_of_curves([quantile_cross_jump(curves=progress_curves, threshold=solve_tol, beta=beta) for progress_curves in bootstrap_curves[1]])
        functional = difference_of_curves(solvability_profile_1, solvability_profile_2)
    else:
        # Previously "functional" was left unbound here, so the trailing
        # return raised UnboundLocalError after printing the warning.
        print("Not a valid plot type.")
        functional = None
    return functional
def compute_bootstrap_CI(observations, conf_level=0.95, bias_correction=True, overall_estimator=None):
    """
    Construct a bootstrap confidence interval for an estimator.
    Parameters
    ----------
    observations : list
        estimators from all bootstrap instances
    conf_level : float in (0,1)
        confidence level for confidence intervals, i.e., 1-gamma
    bias_correction : bool
        use bias-corrected bootstrap CIs (via percentile method)?
    overall_estimator : float
        estimator to compute bootstrap confidence interval of
        (required for bias-corrected CI)
    Returns
    -------
    bs_CI_lower_bound : float
        lower bound of bootstrap CI
    bs_CI_upper_bound : float
        upper bound of bootstrap CI
    Raises
    ------
    ValueError
        if bias_correction is True but no overall_estimator is given
    """
    # Compute bootstrapping confidence interval via percentile method.
    # See Efron (1981) "Nonparameteric Standard Errors and Confidence Intervals."
    if bias_correction:
        if overall_estimator is None:
            # Previously this only printed a warning and then crashed with a
            # TypeError on the comparison below; fail fast and clearly instead.
            raise ValueError("Estimator required to compute bias-corrected CIs.")
        # For biased-corrected CIs, see equation (4.4) on page 146.
        z0 = norm.ppf(np.mean([obs < overall_estimator for obs in observations]))
        zconflvl = norm.ppf(conf_level)
        q_lower = norm.cdf(2 * z0 - zconflvl)
        q_upper = norm.cdf(2 * z0 + zconflvl)
    else:
        # For uncorrected CIs, see equation (4.3) on page 146.
        q_lower = (1 - conf_level) / 2
        q_upper = 1 - (1 - conf_level) / 2
    bs_CI_lower_bound = np.quantile(observations, q=q_lower)
    bs_CI_upper_bound = np.quantile(observations, q=q_upper)
    return bs_CI_lower_bound, bs_CI_upper_bound
def plot_bootstrap_CIs(bs_CI_lower_bounds, bs_CI_upper_bounds, color_str="C0"):
    """
    Plot bootstrap confidence intervals.
    Parameters
    ----------
    bs_CI_lower_bounds, bs_CI_upper_bounds : wrapper_base.Curve objects
        lower and upper bounds of bootstrap CIs, as curves
    color_str : str
        string indicating line color, e.g., "C0", "C1", etc.
    """
    bs_CI_lower_bounds.plot(color_str=color_str, curve_type="conf_bound")
    bs_CI_upper_bounds.plot(color_str=color_str, curve_type="conf_bound")
    # Shade space between curves.
    # Convert to full curves (once each, not per keyword argument) to get
    # piecewise-constant shaded areas.
    full_lower = bs_CI_lower_bounds.curve_to_full_curve()
    full_upper = bs_CI_upper_bounds.curve_to_full_curve()
    plt.fill_between(x=full_lower.x_vals,
                     y1=full_lower.y_vals,
                     y2=full_upper.y_vals,
                     color=color_str,
                     alpha=0.2
                     )
def report_max_halfwidth(curve_pairs, normalize):
    """
    Compute and print caption for max halfwidth of one or more bootstrap CI curves
    Parameters
    ----------
    curve_pairs : list of list of wrapper_base.Curve objects
        list of paired bootstrap CI curves
    normalize : bool
        normalize progress curves w.r.t. optimality gaps?
    """
    # Smallest lower-bound value and largest upper-bound value over all pairs.
    min_lower_bound = min(min(pair[0].y_vals) for pair in curve_pairs)
    max_upper_bound = max(max(pair[1].y_vals) for pair in curve_pairs)
    # Max halfwidth of the bootstrap confidence intervals over all pairs.
    max_halfwidth = max(0.5 * max_difference_of_curves(pair[1], pair[0]) for pair in curve_pairs)
    # Position and print the caption about the max halfwidth.
    if normalize:
        xloc, yloc = 0.05, -0.35
    else:
        # xloc = 0.05 * budget of the problem
        xloc = 0.05 * curve_pairs[0][0].x_vals[-1]
        yloc = min_lower_bound - 0.25 * (max_upper_bound - min_lower_bound)
    caption = f"The max halfwidth of the bootstrap CIs is {round(max_halfwidth, 2)}."
    plt.text(x=xloc, y=yloc, s=caption)
def check_common_problem_and_reference(experiments):
    """
    Check if a collection of experiments have the same problem, x0, and x*.
    Prints a warning for each attribute on which any experiment disagrees
    with the first one.
    Parameters
    ----------
    experiments : list of wrapper_base.Experiment objects
        experiments of different solvers on a common problem
    """
    baseline = experiments[0]
    for exp in experiments:
        if exp.problem != baseline.problem:
            print("At least two experiments have different problem instances.")
        if exp.x0 != baseline.x0:
            print("At least two experiments have different starting solutions.")
        if exp.xstar != baseline.xstar:
            print("At least two experiments have different optimal solutions.")
def plot_progress_curves(experiments, plot_type, beta=0.50, normalize=True, all_in_one=True, plot_CIs=True, print_max_hw=True):
    """
    Plot individual or aggregate progress curves for one or more solvers
    on a single problem.
    Parameters
    ----------
    experiments : list of wrapper_base.Experiment objects
        experiments of different solvers on a common problem
    plot_type : string
        indicates which type of plot to produce
            "all" : all estimated progress curves
            "mean" : estimated mean progress curve
            "quantile" : estimated beta quantile progress curve
    beta : float in (0,1)
        quantile to plot, e.g., beta quantile
    normalize : bool
        normalize progress curves w.r.t. optimality gaps?
    all_in_one : bool
        plot curves together or separately
    plot_CIs : bool
        plot bootstrapping confidence intervals?
    print_max_hw : bool
        print caption with max half-width
    """
    # Validate plot_type up front. Previously an invalid plot_type was only
    # caught inside the plotting loops, leaving "handle"/"estimator" unbound
    # and raising UnboundLocalError after printing the warning.
    if plot_type not in {"all", "mean", "quantile"}:
        print("Not a valid plot type.")
        return
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        ref_experiment = experiments[0]
        setup_plot(plot_type=plot_type,
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   normalize=normalize,
                   budget=ref_experiment.problem.factors["budget"],
                   beta=beta
                   )
        solver_curve_handles = []
        if print_max_hw:
            curve_pairs = []
        for exp_idx in range(n_experiments):
            experiment = experiments[exp_idx]
            color_str = "C" + str(exp_idx)
            if plot_type == "all":
                # Plot all estimated progress curves.
                if normalize:
                    handle = experiment.progress_curves[0].plot(color_str=color_str)
                    for curve in experiment.progress_curves[1:]:
                        curve.plot(color_str=color_str)
                else:
                    handle = experiment.objective_curves[0].plot(color_str=color_str)
                    for curve in experiment.objective_curves[1:]:
                        curve.plot(color_str=color_str)
            elif plot_type == "mean":
                # Plot estimated mean progress curve.
                if normalize:
                    estimator = mean_of_curves(experiment.progress_curves)
                else:
                    estimator = mean_of_curves(experiment.objective_curves)
                handle = estimator.plot(color_str=color_str)
            elif plot_type == "quantile":
                # Plot estimated beta-quantile progress curve.
                if normalize:
                    estimator = quantile_of_curves(experiment.progress_curves, beta)
                else:
                    estimator = quantile_of_curves(experiment.objective_curves, beta)
                handle = estimator.plot(color_str=color_str)
            solver_curve_handles.append(handle)
            if plot_CIs and plot_type != "all":
                # Note: "experiments" needs to be a list of list of Experiments.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=100,
                                                                     plot_type=plot_type,
                                                                     beta=beta,
                                                                     estimator=estimator,
                                                                     normalize=normalize
                                                                     )
                plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                if print_max_hw:
                    curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
        plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="upper right")
        if print_max_hw and plot_type != "all":
            report_max_halfwidth(curve_pairs=curve_pairs, normalize=normalize)
        save_plot(solver_name="SOLVER SET",
                  problem_name=ref_experiment.problem.name,
                  plot_type=plot_type,
                  normalize=normalize
                  )
    else:  # Plot separately, one figure per experiment.
        for experiment in experiments:
            setup_plot(plot_type=plot_type,
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       normalize=normalize,
                       budget=experiment.problem.factors["budget"],
                       beta=beta
                       )
            if plot_type == "all":
                # Plot all estimated progress curves.
                if normalize:
                    for curve in experiment.progress_curves:
                        curve.plot()
                else:
                    for curve in experiment.objective_curves:
                        curve.plot()
            elif plot_type == "mean":
                # Plot estimated mean progress curve.
                if normalize:
                    estimator = mean_of_curves(experiment.progress_curves)
                else:
                    estimator = mean_of_curves(experiment.objective_curves)
                estimator.plot()
            elif plot_type == "quantile":
                # Plot estimated beta-quantile progress curve.
                if normalize:
                    estimator = quantile_of_curves(experiment.progress_curves, beta)
                else:
                    estimator = quantile_of_curves(experiment.objective_curves, beta)
                estimator.plot()
            if plot_CIs and plot_type != "all":
                # Note: "experiments" needs to be a list of list of Experiments.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=100,
                                                                     plot_type=plot_type,
                                                                     beta=beta,
                                                                     estimator=estimator,
                                                                     normalize=normalize
                                                                     )
                plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                if print_max_hw:
                    report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=normalize)
            save_plot(solver_name=experiment.solver.name,
                      problem_name=experiment.problem.name,
                      plot_type=plot_type,
                      normalize=normalize
                      )
def plot_solvability_cdfs(experiments, solve_tol=0.1, all_in_one=True, plot_CIs=True, print_max_hw=True):
    """
    Plot the solvability cdf for one or more solvers on a single problem.

    Each cdf is the empirical cdf of the macroreplications' solve times,
    i.e., the times at which each progress curve first crosses solve_tol
    (computed via cdf_of_curves_crossing_times).

    Parameters
    ----------
    experiments : list of wrapper_base.Experiment objects
        experiments of different solvers on a common problem
    solve_tol : float in (0,1]
        relative optimality gap defining when a problem is solved
    all_in_one : bool
        plot curves together or separately
    plot_CIs : bool
        plot bootstrapping confidence intervals?
    print_max_hw : bool
        print caption with max half-width
    """
    # Check if problems are the same with the same x0 and x*.
    check_common_problem_and_reference(experiments)
    # Set up plot.
    n_experiments = len(experiments)
    if all_in_one:
        # One figure containing one cdf (and optional CI band) per solver.
        ref_experiment = experiments[0]
        setup_plot(plot_type="solve_time_cdf",
                   solver_name="SOLVER SET",
                   problem_name=ref_experiment.problem.name,
                   solve_tol=solve_tol
                   )
        solver_curve_handles = []
        if print_max_hw:
            curve_pairs = []
        for exp_idx in range(n_experiments):
            experiment = experiments[exp_idx]
            # Cycle matplotlib's default color palette ("C0", "C1", ...).
            color_str = "C" + str(exp_idx)
            # Plot cdf of solve times.
            estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol)
            handle = estimator.plot(color_str=color_str)
            solver_curve_handles.append(handle)
            if plot_CIs:
                # Note: "experiments" needs to be a list of list of Experiments.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=100,
                                                                     plot_type="solve_time_cdf",
                                                                     solve_tol=solve_tol,
                                                                     estimator=estimator,
                                                                     normalize=True
                                                                     )
                plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
                if print_max_hw:
                    curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
        plt.legend(handles=solver_curve_handles, labels=[experiment.solver.name for experiment in experiments], loc="lower right")
        if print_max_hw:
            report_max_halfwidth(curve_pairs=curve_pairs, normalize=True)
        save_plot(solver_name="SOLVER SET",
                  problem_name=ref_experiment.problem.name,
                  plot_type="solve_time_cdf",
                  normalize=True,
                  extra=solve_tol
                  )
    else:  # Plot separately.
        # One figure per experiment, saved under that experiment's solver name.
        for experiment in experiments:
            setup_plot(plot_type="solve_time_cdf",
                       solver_name=experiment.solver.name,
                       problem_name=experiment.problem.name,
                       solve_tol=solve_tol
                       )
            estimator = cdf_of_curves_crossing_times(experiment.progress_curves, threshold=solve_tol)
            estimator.plot()
            if plot_CIs:
                # Note: "experiments" needs to be a list of list of Experiments.
                bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[[experiment]],
                                                                     n_bootstraps=100,
                                                                     plot_type="solve_time_cdf",
                                                                     solve_tol=solve_tol,
                                                                     estimator=estimator,
                                                                     normalize=True
                                                                     )
                plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                if print_max_hw:
                    report_max_halfwidth(curve_pairs=[[bs_CI_lb_curve, bs_CI_ub_curve]], normalize=True)
            save_plot(solver_name=experiment.solver.name,
                      problem_name=experiment.problem.name,
                      plot_type="solve_time_cdf",
                      normalize=True,
                      extra=solve_tol
                      )
def plot_area_scatterplots(experiments, all_in_one=True, plot_CIs=True, print_max_hw=True):
    """
    Plot a scatter plot of mean and standard deviation of area under progress curves.
    Either one plot for each solver or one plot for all solvers.
    Parameters
    ----------
    experiments : list of list of wrapper_base.Experiment objects
        experiments used to produce plots
    all_in_one : bool
        plot curves together or separately
    plot_CIs : bool
        plot bootstrapping confidence intervals?
    print_max_hw : bool
        print caption with max half-width
    """
    # Set up plot.
    n_solvers = len(experiments)
    n_problems = len(experiments[0])
    if all_in_one:
        # Matplotlib marker strings are case-sensitive; "^" (triangle up)
        # replaces the invalid "V", which raised an error for the 8th solver.
        marker_list = ["o", "v", "s", "*", "P", "X", "D", "^", ">", "<"]
        setup_plot(plot_type="area",
                   solver_name="SOLVER SET",
                   problem_name="PROBLEM SET"
                   )
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curve_handles = []
        # TO DO: Build up capability to print max half-width.
        if print_max_hw:
            curve_pairs = []
        for solver_idx in range(n_solvers):
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                color_str = "C" + str(solver_idx)
                marker_str = marker_list[solver_idx % len(marker_list)]  # Cycle through list of marker types.
                # Plot mean and standard deviation of area under progress curve.
                areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves]
                mean_estimator = np.mean(areas)
                std_dev_estimator = np.std(areas, ddof=1)
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of Experiments.
                    mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                       n_bootstraps=100,
                                                                       plot_type="area_mean",
                                                                       estimator=mean_estimator,
                                                                       normalize=True
                                                                       )
                    std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                             n_bootstraps=100,
                                                                             plot_type="area_std_dev",
                                                                             estimator=std_dev_estimator,
                                                                             normalize=True
                                                                             )
                    # if print_max_hw:
                    #     curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
                    # Asymmetric error bars: distance from estimator to CI bounds.
                    x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]]
                    y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]]
                    handle = plt.errorbar(x=mean_estimator,
                                          y=std_dev_estimator,
                                          xerr=x_err,
                                          yerr=y_err,
                                          color=color_str,
                                          marker=marker_str,
                                          elinewidth=1
                                          )
                else:
                    handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color=color_str, marker=marker_str)
            solver_curve_handles.append(handle)
        plt.legend(handles=solver_curve_handles, labels=solver_names, loc="upper right")
        save_plot(solver_name="SOLVER SET",
                  problem_name="PROBLEM SET",
                  plot_type="area_scatterplot",
                  normalize=True
                  )
    else:
        # One scatterplot per solver, covering all of its problems.
        for solver_idx in range(n_solvers):
            ref_experiment = experiments[solver_idx][0]
            setup_plot(plot_type="area",
                       solver_name=ref_experiment.solver.name,
                       problem_name="PROBLEM SET"
                       )
            if print_max_hw:
                curve_pairs = []
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                # Plot mean and standard deviation of area under progress curve.
                areas = [curve.compute_area_under_curve() for curve in experiment.progress_curves]
                mean_estimator = np.mean(areas)
                std_dev_estimator = np.std(areas, ddof=1)
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of Experiments.
                    mean_bs_CI_lb, mean_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                       n_bootstraps=100,
                                                                       plot_type="area_mean",
                                                                       estimator=mean_estimator,
                                                                       normalize=True
                                                                       )
                    std_dev_bs_CI_lb, std_dev_bs_CI_ub = bootstrap_procedure(experiments=[[experiment]],
                                                                             n_bootstraps=100,
                                                                             plot_type="area_std_dev",
                                                                             estimator=std_dev_estimator,
                                                                             normalize=True
                                                                             )
                    # if print_max_hw:
                    #     curve_pairs.append([bs_CI_lb_curve, bs_CI_ub_curve])
                    x_err = [[mean_estimator - mean_bs_CI_lb], [mean_bs_CI_ub - mean_estimator]]
                    y_err = [[std_dev_estimator - std_dev_bs_CI_lb], [std_dev_bs_CI_ub - std_dev_estimator]]
                    handle = plt.errorbar(x=mean_estimator,
                                          y=std_dev_estimator,
                                          xerr=x_err,
                                          yerr=y_err,
                                          marker="o",
                                          color="C0",
                                          elinewidth=1
                                          )
                else:
                    handle = plt.scatter(x=mean_estimator, y=std_dev_estimator, color="C0", marker="o")
            save_plot(solver_name=experiment.solver.name,
                      problem_name="PROBLEM SET",
                      plot_type="area_scatterplot",
                      normalize=True
                      )
def plot_solvability_profiles(experiments, plot_type, all_in_one=True, plot_CIs=True, print_max_hw=True, solve_tol=0.1, beta=0.5, ref_solver=None):
    """
    Plot the (difference of) solvability profiles for each solver on a set of problems.
    Parameters
    ----------
    experiments : list of list of wrapper_base.Experiment objects
        experiments used to produce plots
    plot_type : string
        indicates which type of plot to produce
            "cdf_solvability" : cdf-solvability profile
            "quantile_solvability" : quantile-solvability profile
            "diff_cdf_solvability" : difference of cdf-solvability profiles
            "diff_quantile_solvability" : difference of quantile-solvability profiles
    all_in_one : bool
        plot curves together or separately
    plot_CIs : bool
        plot bootstrapping confidence intervals?
    print_max_hw : bool
        print caption with max half-width
    solve_tol : float in (0,1]
        relative optimality gap defining when a problem is solved
    beta : float in (0,1)
        quantile to compute, e.g., beta quantile
    ref_solver : str
        name of solver used as benchmark for difference profiles
    """
    # Set up plot.
    n_solvers = len(experiments)
    n_problems = len(experiments[0])
    if all_in_one:
        if plot_type == "cdf_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       solve_tol=solve_tol
                       )
        elif plot_type == "quantile_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       beta=beta,
                       solve_tol=solve_tol
                       )
        elif plot_type == "diff_cdf_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       solve_tol=solve_tol
                       )
        elif plot_type == "diff_quantile_solvability":
            setup_plot(plot_type=plot_type,
                       solver_name="SOLVER SET",
                       problem_name="PROBLEM SET",
                       beta=beta,
                       solve_tol=solve_tol
                       )
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curves = []
        solver_curve_handles = []
        for solver_idx in range(n_solvers):
            solver_sub_curves = []
            color_str = "C" + str(solver_idx)
            # For each problem compute the cdf or quantile of solve times.
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                if plot_type in {"cdf_solvability", "diff_cdf_solvability"}:
                    sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol)
                if plot_type in {"quantile_solvability", "diff_quantile_solvability"}:
                    sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta)
                solver_sub_curves.append(sub_curve)
            # Plot solvability profile for the solver.
            # Exploit the fact that each solvability profile is an average of more basic curves.
            solver_curve = mean_of_curves(solver_sub_curves)
            # CAUTION: Using mean above requires an equal number of macro-replications per problem.
            solver_curves.append(solver_curve)
            if plot_type in {"cdf_solvability", "quantile_solvability"}:
                handle = solver_curve.plot(color_str=color_str)
                solver_curve_handles.append(handle)
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of Experiments.
                    bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]],
                                                                         n_bootstraps=100,
                                                                         plot_type=plot_type,
                                                                         solve_tol=solve_tol,
                                                                         beta=beta,
                                                                         estimator=solver_curve,
                                                                         normalize=True
                                                                         )
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
        if plot_type == "cdf_solvability":
            plt.legend(handles=solver_curve_handles, labels=solver_names, loc="lower right")
            save_plot(solver_name="SOLVER SET",
                      problem_name="PROBLEM SET",
                      plot_type=plot_type,
                      normalize=True,
                      extra=solve_tol
                      )
        elif plot_type == "quantile_solvability":
            plt.legend(handles=solver_curve_handles, labels=solver_names, loc="lower right")
            save_plot(solver_name="SOLVER SET",
                      problem_name="PROBLEM SET",
                      plot_type=plot_type,
                      normalize=True,
                      extra=[solve_tol, beta]
                      )
        elif plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}:
            non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver]
            ref_solver_idx = solver_names.index(ref_solver)
            for solver_idx in range(n_solvers):
                # Compare indices by value ("!="), not identity ("is not"),
                # which only worked because CPython caches small ints.
                if solver_idx != ref_solver_idx:
                    diff_solver_curve = difference_of_curves(solver_curves[solver_idx], solver_curves[ref_solver_idx])
                    color_str = "C" + str(solver_idx)
                    handle = diff_solver_curve.plot(color_str=color_str)
                    solver_curve_handles.append(handle)
                    if plot_CIs:
                        # Note: "experiments" needs to be a list of list of Experiments.
                        bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx], experiments[ref_solver_idx]],
                                                                             n_bootstraps=100,
                                                                             plot_type=plot_type,
                                                                             solve_tol=solve_tol,
                                                                             beta=beta,
                                                                             estimator=diff_solver_curve,
                                                                             normalize=True
                                                                             )
                        plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve, color_str=color_str)
            offset_labels = [f"{non_ref_solver} - {ref_solver}" for non_ref_solver in non_ref_solvers]
            plt.legend(handles=solver_curve_handles, labels=offset_labels, loc="lower right")
            if plot_type == "diff_cdf_solvability":
                save_plot(solver_name="SOLVER SET",
                          problem_name="PROBLEM SET",
                          plot_type=plot_type,
                          normalize=True,
                          extra=solve_tol
                          )
            elif plot_type == "diff_quantile_solvability":
                save_plot(solver_name="SOLVER SET",
                          problem_name="PROBLEM SET",
                          plot_type=plot_type,
                          normalize=True,
                          extra=[solve_tol, beta]
                          )
    else:
        # Plot each solver (or solver difference) in its own figure.
        solver_names = [solver_experiments[0].solver.name for solver_experiments in experiments]
        solver_curves = []
        for solver_idx in range(n_solvers):
            solver_sub_curves = []
            # For each problem compute the cdf or quantile of solve times.
            for problem_idx in range(n_problems):
                experiment = experiments[solver_idx][problem_idx]
                if plot_type in {"cdf_solvability", "diff_cdf_solvability"}:
                    sub_curve = cdf_of_curves_crossing_times(curves=experiment.progress_curves, threshold=solve_tol)
                if plot_type in {"quantile_solvability", "diff_quantile_solvability"}:
                    sub_curve = quantile_cross_jump(curves=experiment.progress_curves, threshold=solve_tol, beta=beta)
                solver_sub_curves.append(sub_curve)
            # Plot solvability profile for the solver.
            # Exploit the fact that each solvability profile is an average of more basic curves.
            solver_curve = mean_of_curves(solver_sub_curves)
            solver_curves.append(solver_curve)
            if plot_type in {"cdf_solvability", "quantile_solvability"}:
                # Set up plot.
                if plot_type == "cdf_solvability":
                    setup_plot(plot_type=plot_type,
                               solver_name=experiments[solver_idx][0].solver.name,
                               problem_name="PROBLEM SET",
                               solve_tol=solve_tol
                               )
                elif plot_type == "quantile_solvability":
                    setup_plot(plot_type=plot_type,
                               solver_name=experiments[solver_idx][0].solver.name,
                               problem_name="PROBLEM SET",
                               beta=beta,
                               solve_tol=solve_tol
                               )
                handle = solver_curve.plot()
                if plot_CIs:
                    # Note: "experiments" needs to be a list of list of Experiments.
                    bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx]],
                                                                         n_bootstraps=100,
                                                                         plot_type=plot_type,
                                                                         solve_tol=solve_tol,
                                                                         beta=beta,
                                                                         estimator=solver_curve,
                                                                         normalize=True
                                                                         )
                    plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                if plot_type == "cdf_solvability":
                    save_plot(solver_name=experiments[solver_idx][0].solver.name,
                              problem_name="PROBLEM SET",
                              plot_type=plot_type,
                              normalize=True,
                              extra=solve_tol
                              )
                elif plot_type == "quantile_solvability":
                    save_plot(solver_name=experiments[solver_idx][0].solver.name,
                              problem_name="PROBLEM SET",
                              plot_type=plot_type,
                              normalize=True,
                              extra=[solve_tol, beta]
                              )
        if plot_type in {"diff_cdf_solvability", "diff_quantile_solvability"}:
            non_ref_solvers = [solver_name for solver_name in solver_names if solver_name != ref_solver]
            ref_solver_idx = solver_names.index(ref_solver)
            for solver_idx in range(n_solvers):
                # Compare indices by value ("!="), not identity ("is not").
                if solver_idx != ref_solver_idx:
                    if plot_type == "diff_cdf_solvability":
                        setup_plot(plot_type=plot_type,
                                   solver_name=experiments[solver_idx][0].solver.name,
                                   problem_name="PROBLEM SET",
                                   solve_tol=solve_tol
                                   )
                    elif plot_type == "diff_quantile_solvability":
                        setup_plot(plot_type=plot_type,
                                   solver_name=experiments[solver_idx][0].solver.name,
                                   problem_name="PROBLEM SET",
                                   beta=beta,
                                   solve_tol=solve_tol
                                   )
                    diff_solver_curve = difference_of_curves(solver_curves[solver_idx], solver_curves[ref_solver_idx])
                    handle = diff_solver_curve.plot()
                    if plot_CIs:
                        # Note: "experiments" needs to be a list of list of Experiments.
                        bs_CI_lb_curve, bs_CI_ub_curve = bootstrap_procedure(experiments=[experiments[solver_idx], experiments[ref_solver_idx]],
                                                                             n_bootstraps=100,
                                                                             plot_type=plot_type,
                                                                             solve_tol=solve_tol,
                                                                             beta=beta,
                                                                             estimator=diff_solver_curve,
                                                                             normalize=True
                                                                             )
                        plot_bootstrap_CIs(bs_CI_lb_curve, bs_CI_ub_curve)
                    if plot_type == "diff_cdf_solvability":
                        save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                  problem_name="PROBLEM SET",
                                  plot_type=plot_type,
                                  normalize=True,
                                  extra=solve_tol
                                  )
                    elif plot_type == "diff_quantile_solvability":
                        save_plot(solver_name=experiments[solver_idx][0].solver.name,
                                  problem_name="PROBLEM SET",
                                  plot_type=plot_type,
                                  normalize=True,
                                  extra=[solve_tol, beta]
                                  )
def setup_plot(plot_type, solver_name="SOLVER SET", problem_name="PROBLEM SET", normalize=True, budget=None, beta=None, solve_tol=None):
    """
    Create new figure. Add labels to plot and reformat axes.
    Parameters
    ----------
    plot_type : string
        indicates which type of plot to produce
            "all" : all estimated progress curves
            "mean" : estimated mean progress curve
            "quantile" : estimated beta quantile progress curve
            "solve_time_cdf" : cdf of solve time
            "cdf_solvability" : cdf solvability profile
            "quantile_solvability" : quantile solvability profile
            "diff_cdf_solvability" : difference of cdf solvability profiles
            "diff_quantile_solvability" : difference of quantile solvability profiles
            "area" : area scatterplot
    solver_name : string
        name of solver
    problem_name : string
        name of problem
    normalize : Boolean
        normalize progress curves w.r.t. optimality gaps?
    budget : int
        budget of problem, measured in function evaluations
    beta : float in (0,1)
        quantile to compute, e.g., beta quantile
    solve_tol : float in (0,1]
        relative optimality gap defining when a problem is solved
    """
    plt.figure()
    # Set up axes and axis labels.
    if normalize:
        plt.xlabel("Fraction of Budget", size=14)
        plt.ylabel("Fraction of Initial Optimality Gap", size=14)
        plt.xlim((0, 1))
        plt.ylim((-0.1, 1.1))
    else:
        plt.xlabel("Budget", size=14)
        plt.ylabel("Objective Function Value", size=14)
        plt.xlim((0, budget))
    plt.tick_params(axis="both", which="major", labelsize=12)
    # Specify title (plus alternative y-axis label and alternative axes).
    if plot_type == "all":
        if normalize:
            title = f"{solver_name} on {problem_name}\nProgress Curves"
        else:
            title = f"{solver_name} on {problem_name}\nObjective Curves"
    elif plot_type == "mean":
        if normalize:
            title = f"{solver_name} on {problem_name}\nMean Progress Curve"
        else:
            title = f"{solver_name} on {problem_name}\nMean Objective Curve"
    elif plot_type == "quantile":
        if normalize:
            title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Progress Curve"
        else:
            title = f"{solver_name} on {problem_name}\n{round(beta, 2)}-Quantile Objective Curve"
    elif plot_type == "solve_time_cdf":
        plt.ylabel("Fraction of Macroreplications Solved", size=14)
        title = f"{solver_name} on {problem_name}\nCDF of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "cdf_solvability":
        plt.ylabel("Problem Averaged Solve Fraction", size=14)
        title = f"CDF-Solvability Profile for {solver_name}\nProfile of CDFs of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "quantile_solvability":
        plt.ylabel("Fraction of Problems Solved", size=14)
        title = f"Quantile Solvability Profile for {solver_name}\nProfile of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times"
    elif plot_type == "diff_cdf_solvability":
        plt.ylabel("Difference in Problem Averaged Solve Fraction", size=14)
        title = f"Difference of CDF-Solvability Profile for {solver_name}\nDifference of Profiles of CDFs of {round(solve_tol, 2)}-Solve Times"
        plt.plot([0, 1], [0, 0], color="black", linestyle="--")
        plt.ylim((-1, 1))
    elif plot_type == "diff_quantile_solvability":
        plt.ylabel("Difference in Fraction of Problems Solved", size=14)
        title = f"Difference of Quantile Solvability Profile for {solver_name}\nDifference of Profiles of {round(beta, 2)}-Quantiles of {round(solve_tol, 2)}-Solve Times"
        plt.plot([0, 1], [0, 0], color="black", linestyle="--")
        plt.ylim((-1, 1))
    elif plot_type == "area":
        plt.xlabel("Mean Area", size=14)
        # Match the font size used by every other axis label.
        plt.ylabel("Std Dev of Area", size=14)
        plt.xlim((0, 1))
        plt.ylim((0, 0.5))
        title = f"{solver_name}\nAreas Under Progress Curves"
    else:
        # Fallback so an unrecognized plot type still yields a labeled figure
        # instead of raising UnboundLocalError on "title" below.
        title = f"{solver_name} on {problem_name}"
    plt.title(title, size=14)
def save_plot(solver_name, problem_name, plot_type, normalize, extra=None):
    """
    Save the current matplotlib figure as a .png file under experiments/plots/.

    Arguments
    ---------
    solver_name : string
        name of solver
    problem_name : string
        name of problem
    plot_type : string
        indicates which type of plot to produce
            "all" : all estimated progress curves
            "mean" : estimated mean progress curve
            "quantile" : estimated beta quantile progress curve
            "solve_time_cdf" : cdf of solve time
            "cdf_solvability" : cdf solvability profile
            "quantile_solvability" : quantile solvability profile
            "diff_cdf_solvability" : difference of cdf solvability profiles
            "diff_quantile_solvability" : difference of quantile solvability profiles
            "area" : area scatterplot
    normalize : Boolean
        normalize progress curves w.r.t. optimality gaps?
    extra : float (or list of floats)
        extra number(s) specifying quantile (e.g., beta) and/or solve tolerance
    """
    # Form string name for plot filename.
    if plot_type == "all":
        plot_name = "all_prog_curves"
    elif plot_type == "mean":
        plot_name = "mean_prog_curve"
    elif plot_type == "quantile":
        plot_name = f"quantile_prog_curve"
    elif plot_type == "solve_time_cdf":
        plot_name = f"cdf_{extra}_solve_times"
    elif plot_type == "cdf_solvability":
        plot_name = f"profile_cdf_{extra}_solve_times"
    elif plot_type == "quantile_solvability":
        # Here extra is [solve tolerance, beta quantile].
        plot_name = f"profile_{extra[1]}_quantile_{extra[0]}_solve_times"
    elif plot_type == "diff_cdf_solvability":
        plot_name = "diff_cdf_solvability_profile"
    elif plot_type == "diff_quantile_solvability":
        plot_name = "diff_quantile_solvability_profile"
    elif plot_type == "area":
        plot_name = "area_scatterplot"
    if not normalize:
        plot_name = plot_name + "_unnorm"
    # BUG FIX: interpolate plot_name (which encodes the extra parameters and
    # the "_unnorm" suffix) rather than plot_type; previously plot_name was
    # computed but never used, so e.g. normalized and unnormalized plots of
    # the same type overwrote the same file.
    path_name = f"experiments/plots/{solver_name}_on_{problem_name}_{plot_name}.png"
    # Reformat path_name to be suitable as a string literal.
    path_name = path_name.replace("\\", "")
    path_name = path_name.replace("$", "")
    path_name = path_name.replace(" ", "_")
    plt.savefig(path_name, bbox_inches="tight")
class MetaExperiment(object):
    """
    Base class for running one or more solver on one or more problem.

    Attributes
    ----------
    solver_names : list of strings
        list of solver names
    n_solvers : int > 0
        number of solvers
    problem_names : list of strings
        list of problem names
    n_problems : int > 0
        number of problems
    all_solver_fixed_factors : dict of dict
        fixed solver factors for each solver
            outer key is solver name
            inner key is factor name
    all_problem_fixed_factors : dict of dict
        fixed problem factors for each problem
            outer key is problem name
            inner key is factor name
    all_oracle_fixed_factors : dict of dict
        fixed oracle factors for each problem
            outer key is problem name
            inner key is factor name
    experiments : list of list of Experiment objects
        all problem-solver pairs

    Arguments
    ---------
    solver_names : list of strings
        list of solver names
    problem_names : list of strings
        list of problem names
    solver_renames : list of strings
        user-specified names for solvers
    problem_renames : list of strings
        user-specified names for problems
    fixed_factors_filename : string
        name of .py file containing dictionaries of fixed factors
        for solvers/problems/oracles.
    """
    def __init__(self, solver_names, problem_names, solver_renames=None, problem_renames=None, fixed_factors_filename=None):
        self.n_solvers = len(solver_names)
        self.n_problems = len(problem_names)
        # Use user-specified aliases for solvers/problems when provided.
        if solver_renames is None:
            self.solver_names = solver_names
        else:
            self.solver_names = solver_renames
        if problem_renames is None:
            self.problem_names = problem_names
        else:
            self.problem_names = problem_renames
        # Read in fixed solver/problem/oracle factors from .py file in the Experiments folder.
        # File should contain three dictionaries of dictionaries called
        #   - all_solver_fixed_factors
        #   - all_problem_fixed_factors
        #   - all_oracle_fixed_factors
        if fixed_factors_filename is None:
            # No file given -> every solver/problem gets an empty factor dict.
            self.all_solver_fixed_factors = {solver_name: {} for solver_name in self.solver_names}
            self.all_problem_fixed_factors = {problem_name: {} for problem_name in self.problem_names}
            self.all_oracle_fixed_factors = {problem_name: {} for problem_name in self.problem_names}
        else:
            # Import the factors module dynamically from experiments/inputs/.
            fixed_factors_filename = "experiments.inputs." + fixed_factors_filename
            all_factors = importlib.import_module(fixed_factors_filename)
            self.all_solver_fixed_factors = getattr(all_factors, "all_solver_fixed_factors")
            self.all_problem_fixed_factors = getattr(all_factors, "all_problem_fixed_factors")
            self.all_oracle_fixed_factors = getattr(all_factors, "all_oracle_fixed_factors")
        # Create all problem-solver pairs (i.e., instances of Experiment class)
        self.experiments = []
        for solver_idx in range(self.n_solvers):
            solver_experiments = []
            for problem_idx in range(self.n_problems):
                try:
                    # If a file exists, read in Experiment object.
                    with open(f"experiments/outputs/{self.solver_names[solver_idx]}_on_{self.problem_names[problem_idx]}.pickle", "rb") as file:
                        next_experiment = pickle.load(file)
                    # TO DO: Check if the solver/problem/oracle factors in the file match
                    # those for the MetaExperiment.
                except Exception:
                    # Broad except: any failure to load (missing file, bad pickle)
                    # falls back to creating a new Experiment object.
                    # If no file exists, create new Experiment object.
                    print(f"No experiment file exists for {self.solver_names[solver_idx]} on {self.problem_names[problem_idx]}. Creating new experiment.")
                    next_experiment = Experiment(solver_name=solver_names[solver_idx],
                                                 problem_name=problem_names[problem_idx],
                                                 solver_rename=self.solver_names[solver_idx],
                                                 problem_rename=self.problem_names[problem_idx],
                                                 solver_fixed_factors=self.all_solver_fixed_factors[self.solver_names[solver_idx]],
                                                 problem_fixed_factors=self.all_problem_fixed_factors[self.problem_names[problem_idx]],
                                                 oracle_fixed_factors=self.all_oracle_fixed_factors[self.problem_names[problem_idx]])
                solver_experiments.append(next_experiment)
            self.experiments.append(solver_experiments)
    def check_compatibility(self):
        """
        Check whether all experiments' solvers and problems are compatible.

        Returns
        -------
        error_str : str
            error message in the event any problem and solver are incompatible
            (empty string if all pairs are compatible)
        """
        error_str = ""
        for solver_idx in range(self.n_solvers):
            for problem_idx in range(self.n_problems):
                # Delegate the pairwise check to each Experiment object.
                new_error_str = self.experiments[solver_idx][problem_idx].check_compatibility()
                if new_error_str != "":
                    error_str += f"For solver {self.solver_names[solver_idx]} and problem {self.problem_names[problem_idx]}... {new_error_str}"
        return error_str
    def run(self, n_macroreps):
        """
        Run n_macroreps of each solver on each problem.

        Arguments
        ---------
        n_macroreps : int
            number of macroreplications of the solver to run on the problem
        """
        for solver_idx in range(self.n_solvers):
            for problem_idx in range(self.n_problems):
                experiment = self.experiments[solver_idx][problem_idx]
                # If the problem-solver pair has not been run in this way before,
                # run it now and save result to .pickle file.
                if (getattr(experiment, "n_macroreps", None) != n_macroreps):
                    print(f"Running {n_macroreps} macro-replications of {experiment.solver.name} on {experiment.problem.name}.")
                    experiment.clear_run()
                    experiment.run(n_macroreps)
    def post_replicate(self, n_postreps, crn_across_budget=True, crn_across_macroreps=False):
        """
        For each problem-solver pair, run postreplications at solutions
        recommended by the solver on each macroreplication.

        Arguments
        ---------
        n_postreps : int
            number of postreplications to take at each recommended solution
        crn_across_budget : bool
            use CRN for post-replications at solutions recommended at different times?
        crn_across_macroreps : bool
            use CRN for post-replications at solutions recommended on different macroreplications?
        """
        for solver_index in range(self.n_solvers):
            for problem_index in range(self.n_problems):
                experiment = self.experiments[solver_index][problem_index]
                # If the problem-solver pair has not been post-replicated in this way before,
                # post-process it now.
                if (getattr(experiment, "n_postreps", None) != n_postreps
                        or getattr(experiment, "crn_across_budget", None) != crn_across_budget
                        or getattr(experiment, "crn_across_macroreps", None) != crn_across_macroreps):
                    print(f"Post-processing {experiment.solver.name} on {experiment.problem.name}.")
                    experiment.clear_postreplicate()
                    experiment.post_replicate(n_postreps, crn_across_budget, crn_across_macroreps)
    def post_normalize(self, n_postreps_init_opt, crn_across_init_opt=True):
        """
        Construct objective curves and (normalized) progress curves
        for all collections of experiments on all given problem.

        Parameters
        ----------
        n_postreps_init_opt : int
            number of postreplications to take at initial x0 and optimal x*
        crn_across_init_opt : bool
            use CRN for post-replications at solutions x0 and x*?
        """
        for problem_idx in range(self.n_problems):
            # Normalize all solvers' experiments on a common problem together,
            # via the module-level post_normalize helper.
            experiments_same_problem = [self.experiments[solver_idx][problem_idx] for solver_idx in range(self.n_solvers)]
            post_normalize(experiments=experiments_same_problem,
                           n_postreps_init_opt=n_postreps_init_opt,
                           crn_across_init_opt=crn_across_init_opt)
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,910 | evaz1121/simopt | refs/heads/master | /simopt/test/__init__.py | # from . import test_matmodops, test_mrg32k3a
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,911 | evaz1121/simopt | refs/heads/master | /simopt/test/test_matmodops.py | import unittest
from rng.matmodops import *
# Shared fixtures: a 3x3 matrix A, its element-wise negation, a 3x1
# vector b, its negation, and the modulus m used in the tests below.
A = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]
     ]
Aneg = [[-1, -2, -3],
        [-4, -5, -6],
        [-7, -8, -9]
        ]
b = [1, 2, 3]
bneg = [-1, -2, -3]
m = 3
class TestMatModOps(unittest.TestCase):
    """Checks for the 3x3/3x1 matrix multiply-and-modulus helpers."""

    def test_mat33_mat31_mult(self):
        product = mat33_mat31_mult(A, b)
        self.assertEqual(product, [14, 32, 50])

    def test_mat33_mat33_mult(self):
        square = mat33_mat33_mult(A, A)
        self.assertEqual(square, [[30, 36, 42], [66, 81, 96], [102, 126, 150]])

    def test_mat31_mod(self):
        self.assertEqual(mat31_mod(b, m), [1, 2, 0])

    def test_mat31_mod_neg(self):
        # Negative entries must wrap around into [0, m).
        self.assertEqual(mat31_mod(bneg, m), [2, 1, 0])

    def test_mat33_mod(self):
        expected = [[1, 2, 0], [1, 2, 0], [1, 2, 0]]
        self.assertEqual(mat33_mod(A, m), expected)

    def test_mat33_mod_neg(self):
        expected = [[2, 1, 0], [2, 1, 0], [2, 1, 0]]
        self.assertEqual(mat33_mod(Aneg, m), expected)

    def test_mat33_mat33_mod(self):
        zero = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        self.assertEqual(mat33_mat33_mod(A, A, m), zero)

    def test_mat33_power_mod_power0(self):
        identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        self.assertEqual(mat33_power_mod(A, 0, m), identity)

    def test_mat33_power_mod_power1(self):
        expected = [[1, 2, 0], [1, 2, 0], [1, 2, 0]]
        self.assertEqual(mat33_power_mod(A, 1, m), expected)

    def test_mat33_power_mod_power2(self):
        zero = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        self.assertEqual(mat33_power_mod(A, 2, m), zero)
if __name__ == '__main__':
unittest.main() | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,912 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_df_wrapper.py | import sys
import os.path as o
import os
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
# os.chdir('../')
# from oracles.mm1queue import MM1Queue
from data_farming_base import DesignPoint, DataFarmingExperiment, DataFarmingMetaExperiment
from csv import DictReader
# factor_headers = ["purchase_price", "sales_price", "salvage_price", "order_quantity"]
# myexperiment = DataFarmingExperiment(oracle_name="CNTNEWS", factor_settings_filename="oracle_factor_settings", factor_headers=factor_headers, design_filename=None, oracle_fixed_factors={})
# myexperiment.run(n_reps=10, crn_across_design_pts=False)
# myexperiment.print_to_csv(csv_filename="cntnews_data_farming_output")
solver_factor_headers = ["sample_size"]
myMetaExperiment = DataFarmingMetaExperiment(solver_name="RNDSRCH",
problem_name="FACSIZE-2",
solver_factor_headers=solver_factor_headers,
solver_factor_settings_filename="", # solver_factor_settings",
design_filename="random_search_design",
solver_fixed_factors={},
problem_fixed_factors={},
oracle_fixed_factors={})
myMetaExperiment.run(n_macroreps=20)
myMetaExperiment.post_replicate(n_postreps=100, n_postreps_init_opt=100, crn_across_budget=True, crn_across_macroreps=False)
# myMetaExperiment.calculate_statistics() # solve_tols=[0.10], beta=0.50)
# myMetaExperiment.print_to_csv(csv_filename="meta_raw_results")
print("I ran this.")
# SCRATCH
# --------------------------------
# from csv import DictReader
# # open file in read mode
# with open('example_design_matrix.csv', 'r') as read_obj:
# # pass the file object to DictReader() to get the DictReader object
# csv_dict_reader = DictReader(read_obj)
# # iterate over each line as a ordered dictionary
# for row in csv_dict_reader:
# # row variable is a dictionary that represents a row in csv
# print(row) | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,913 | evaz1121/simopt | refs/heads/master | /simopt/rng/matmodops.py | #!/usr/bin/env python
"""
Summary
-------
Useful matrix/modulus operations for mrg32k3a generator.
Listing
-------
mat33_mat31_mult
mat33_mat33_mult
mat31_mod
mat33_mod
mat33_mat33_mod
mat33_power_mod
"""
def mat33_mat31_mult(A, b):
    """
    Multiply a 3x3 matrix by a 3x1 matrix.

    Arguments
    ---------
    A : list of list of float
        3x3 matrix
    b : list of float
        3x1 matrix
    Returns
    -------
    res : list of float
        3x1 matrix (A @ b)
    """
    # Row-by-row dot product, written out explicitly for the fixed size.
    return [A[row][0] * b[0] + A[row][1] * b[1] + A[row][2] * b[2]
            for row in range(3)]
def mat33_mat33_mult(A, B):
    """
    Multiply a 3x3 matrix by a 3x3 matrix.

    Arguments
    ---------
    A : list of list of float
        3x3 matrix
    B : list of list of float
        3x3 matrix
    Returns
    -------
    res : list of list of float
        3x3 matrix (A @ B)
    """
    indices = range(3)
    # Entry (i, j) is the dot product of row i of A with column j of B.
    return [[sum(A[i][k] * B[k][j] for k in indices) for j in indices]
            for i in indices]
def mat31_mod(b, m):
    """
    Compute the component-wise modulus of a 3x1 matrix.

    Uses Python's exact integer ``%`` operator instead of the previous
    float-division round trip (``int(b[i] / m) * m`` plus a sign fix-up).
    True division loses precision once ``b[i]`` exceeds 2**53 — which
    happens for products of mrg32k3a state components — whereas ``%`` is
    exact for arbitrary-size integers and already returns a value in
    [0, m) for positive m, so no negative correction is needed.

    Arguments
    ---------
    b : list of float
        3x1 matrix
    m : float
        modulus (positive)
    Returns
    -------
    res : list of float
        3x1 matrix with each entry reduced mod m
    """
    return [int(b[i] % m) for i in range(3)]
def mat33_mod(A, m):
    """
    Compute the entry-wise modulus of a 3x3 matrix.

    Uses Python's exact integer ``%`` operator instead of the previous
    float-division round trip (``int(A[i][j] / m) * m`` plus a sign
    fix-up). True division loses precision once entries exceed 2**53 —
    which happens for products of mrg32k3a state components — whereas
    ``%`` is exact for arbitrary-size integers and already returns a
    value in [0, m) for positive m.

    Arguments
    ---------
    A : list of float
        3x3 matrix
    m : float
        modulus (positive)
    Returns
    -------
    res : list of float
        3x3 matrix with each entry reduced mod m
    """
    return [[int(A[i][j] % m) for j in range(3)] for i in range(3)]
def mat33_mat33_mod(A, B, m):
    """
    Compute moduli of a 3x3 matrix x 3x3 matrix product.

    Arguments
    ---------
    A : list of list of float
        3x3 matrix
    B : list of list of float
        3x3 matrix
    m : float
        modulus
    Returns
    -------
    res : list of list of float
        3x3 matrix ((A @ B) mod m)
    """
    # Multiply first, then reduce every entry mod m.
    return mat33_mod(mat33_mat33_mult(A, B), m)
def mat33_power_mod(A, j, m):
    """
    Compute moduli of a 3x3 matrix power, i.e., (A**j) mod m.

    Use divide-and-conquer algorithm described in L'Ecuyer (1990)
    (binary exponentiation: square the base, multiply into the
    accumulator whenever the current exponent bit is set).

    Arguments
    ---------
    A : list of list of float
        3x3 matrix
    j : int
        exponent (non-negative)
    m : float
        modulus
    Returns
    -------
    res : list of list of float
        3x3 matrix
    """
    # Accumulator starts as the 3x3 identity (A**0).
    B = [[1, 0, 0],
         [0, 1, 0],
         [0, 0, 1]
         ]
    while j > 0:
        if (j % 2 == 1):
            B = mat33_mat33_mod(A, B, m)
        A = mat33_mat33_mod(A, A, m)
        # BUG FIX: halve the exponent with an exact shift rather than
        # int(j / 2); float true division silently loses precision once
        # j exceeds 2**53 (exponents as large as 2**141 are used here —
        # safe so far only because powers of two divide exactly).
        j >>= 1
    res = B
    return res
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,914 | evaz1121/simopt | refs/heads/master | /simopt/directory.py | #!/usr/bin/env python
"""
Summary
-------
Provide dictionary directories listing solvers, problems, and oracles.
Listing
-------
solver_directory : dictionary
problem_directory : dictionary
oracle_directory : dictionary
"""
# import solvers
from solvers.astrodf import ASTRODF
from solvers.randomsearch import RandomSearch
from solvers.simannealing import SANE
# import oracles and problems
from oracles.cntnv import CntNV, CntNVMaxProfit
from oracles.mm1queue import MM1Queue, MM1MinMeanSojournTime
from oracles.facilitysizing import FacilitySize, FacilitySizingTotalCost, FacilitySizingMaxService
from oracles.rmitd import RMITD, RMITDMaxRevenue
from oracles.sscont import SSCont, SSContMinCost
# directory dictionaries
# Directory dictionaries: map string identifiers to the solver, problem,
# and oracle classes imported above.
solver_directory = {
    "ASTRODF": ASTRODF,
    "RNDSRCH": RandomSearch,
    "SANE": SANE,
}
# Problem keys carry a "-<n>" suffix distinguishing problem variants
# built on the same oracle (e.g. FACSIZE-1 vs FACSIZE-2).
problem_directory = {
    "CNTNEWS-1": CntNVMaxProfit,
    "MM1-1": MM1MinMeanSojournTime,
    "FACSIZE-1": FacilitySizingTotalCost,
    "FACSIZE-2": FacilitySizingMaxService,
    "RMITD-1": RMITDMaxRevenue,
    "SSCONT-1": SSContMinCost
}
oracle_directory = {
    "CNTNEWS": CntNV,
    "MM1": MM1Queue,
    "FACSIZE": FacilitySize,
    "RMITD": RMITD,
    "SSCONT": SSCont
}
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,915 | evaz1121/simopt | refs/heads/master | /simopt/rng/mrg32k3a.py | #!/usr/bin/env python
"""
Summary
-------
Provide a subclass of random.Random using mrg32k3a as the generator
with stream/substream/subsubstream support.
Listing
-------
MRG32k3a : class
advance_stream : method
advance_substream : method
advance_subsubstream : method
reset_stream : method
reset_substream : method
reset_subsubstream : method
start_fixed_s_ss_sss : method
"""
# Code largely adopted from PyMOSO repository (https://github.com/pymoso/PyMOSO).
import numpy as np
import random
from math import log, ceil, sqrt, exp
from copy import deepcopy
from .matmodops import mat33_mat31_mult, mat33_mat33_mult, mat31_mod, mat33_mod, mat33_mat33_mod, mat33_power_mod
# Constants used in mrg32k3a and in substream generation.
# P. L'Ecuyer, ``Good Parameter Sets for Combined Multiple Recursive Random Number Generators'',
# Operations Research, 47, 1 (1999), 159--164.
# P. L'Ecuyer, R. Simard, E. J. Chen, and W. D. Kelton,
# ``An Objected-Oriented Random-Number Package with Many Long Streams and Streams'',
# Operations Research, 50, 6 (2002), 1073--1075.
mrgnorm = 2.328306549295727688e-10
mrgm1 = 4294967087
mrgm2 = 4294944443
mrga12 = 1403580
mrga13n = 810728
mrga21 = 527612
mrga23n = 1370589
# One-step recurrence matrices for the two generator components.
A1p0 = [[0, 1, 0],
        [0, 0, 1],
        [-mrga13n, mrga12, 0]
        ]
A2p0 = [[0, 1, 0],
        [0, 0, 1],
        [-mrga23n, 0, mrga21]
        ]
# Precomputed jump matrices (see the commented formulas) used by MRG32k3a
# to advance the state by 2**47, 2**94, and 2**141 steps at once.
# A1p47 = mat33_power_mod(A1p0, 2**47, mrgm1).
A1p47 = [[1362557480, 3230022138, 4278720212],
         [3427386258, 3848976950, 3230022138],
         [2109817045, 2441486578, 3848976950]
         ]
# A2p47 = mat33_power_mod(A2p0, 2**47, mrgm2).
A2p47 = [[2920112852, 1965329198, 1177141043],
         [2135250851, 2920112852, 969184056],
         [296035385, 2135250851, 4267827987]
         ]
# A1p94 = mat33_power_mod(A1p0, 2**94, mrgm1).
A1p94 = [[2873769531, 2081104178, 596284397],
         [4153800443, 1261269623, 2081104178],
         [3967600061, 1830023157, 1261269623]
         ]
# A2p94 = mat33_power_mod(A2p0, 2**94, mrgm2).
A2p94 = [[1347291439, 2050427676, 736113023],
         [4102191254, 1347291439, 878627148],
         [1293500383, 4102191254, 745646810]
         ]
# A1p141 = mat33_power_mod(A1p0, 2**141, mrgm1).
A1p141 = [[3230096243, 2131723358, 3262178024],
          [2882890127, 4088518247, 2131723358],
          [3991553306, 1282224087, 4088518247]
          ]
# A2p141 = mat33_power_mod(A2p0, 2**141, mrgm2).
A2p141 = [[2196438580, 805386227, 4266375092],
          [4124675351, 2196438580, 2527961345],
          [94452540, 4124675351, 2825656399]
          ]
# Constants used in Beasley-Springer-Moro algorithm for approximating
# the inverse cdf of the standard normal distribution:
# bsma/bsmb are numerator/denominator coefficients of the central rational
# approximation; bsmc are the tail polynomial coefficients.
bsma = [2.50662823884, -18.61500062529, 41.39119773534, -25.44106049637]
bsmb = [-8.47351093090, 23.08336743743, -21.06224101826, 3.13082909833]
bsmc = [0.3374754822726147, 0.9761690190917186, 0.1607979714918209, 0.0276438810333863, 0.0038405729373609, 0.0003951896511919, 0.0000321767881768, 0.0000002888167364, 0.0000003960315187]
# Adapted to pure Python from the P. L'Ecuyer code referenced above.
def mrg32k3a(state):
    """
    Generate a random number between 0 and 1 from a given state.

    Parameters
    ----------
    state : tuple of int of length 6
        current state of the generator
    Returns
    -------
    new_state : tuple of int of length 6
        next state of the generator
    u : float
        pseudo uniform random variate
    """
    # Component 1: p1 = (a12 * s[1] - a13n * s[0]) mod m1.
    # Use Python's exact integer ``%`` in place of the former
    # truncate-and-correct float-division sequence; ``%`` is exact for
    # integers of any size and always yields a value in [0, m1).
    p1 = (mrga12 * state[1] - mrga13n * state[0]) % mrgm1
    # Component 2: p2 = (a21 * s[5] - a23n * s[3]) mod m2.
    p2 = (mrga21 * state[5] - mrga23n * state[3]) % mrgm2
    # Combination of the two components into a uniform variate.
    if p1 <= p2:
        u = (p1 - p2 + mrgm1) * mrgnorm
    else:
        u = (p1 - p2) * mrgnorm
    # Each component's state shifts left by one; its new value is appended.
    new_state = (state[1], state[2], int(p1), state[4], state[5], int(p2))
    return new_state, u
def bsm(u):
    """
    Approximate a quantile of the standard normal distribution via
    the Beasley-Springer-Moro algorithm.

    Arguments
    ---------
    u : float
        probability value for the desired quantile (between 0 and 1)
    Returns
    -------
    z : float
        approximate standard normal quantile for probability u
    """
    y = u - 0.5
    if abs(y) < 0.42:
        # Approximate from the center (Beasly-Springer 1977):
        # a rational function of r = y**2, scaled by y.
        r = pow(y, 2)
        r2 = pow(r, 2)
        r3 = pow(r, 3)
        r4 = pow(r, 4)
        asum = sum([bsma[0], bsma[1] * r, bsma[2] * r2, bsma[3] * r3])
        bsum = sum([1, bsmb[0] * r, bsmb[1] * r2, bsmb[2] * r3, bsmb[3] * r4])
        z = y * (asum / bsum)
    else:
        # Approximate from the tails (Moro 1995).
        # Work with the tail probability r and restore the sign at the end.
        if y < 0.0:
            signum = -1
            r = u
        else:
            signum = 1
            r = 1 - u
        # Polynomial in s = log(-log(r)).
        s = log(-log(r))
        s0 = pow(s, 2)
        s1 = pow(s, 3)
        s2 = pow(s, 4)
        s3 = pow(s, 5)
        s4 = pow(s, 6)
        s5 = pow(s, 7)
        s6 = pow(s, 8)
        clst = [bsmc[0], bsmc[1] * s, bsmc[2] * s0, bsmc[3] * s1, bsmc[4] * s2, bsmc[5] * s3, bsmc[6] * s4, bsmc[7] * s5, bsmc[8] * s6]
        t = sum(clst)
        z = signum * t
    return z
class MRG32k3a(random.Random):
    """
    Implements mrg32k3a as the generator for a random.Random object.

    Attributes
    ----------
    _current_state : tuple of int of length 6
        current state of mrg32k3a generator
    ref_seed : tuple of int of length 6
        seed from which to start the generator
        streams/substreams/subsubstreams are referenced w.r.t. ref_seed
    s_ss_sss_index : list of int of length 3
        triplet of the indices of the current stream-substream-subsubstream
    stream_start : list of int of length 6
        state corresponding to the start of the current stream
    substream_start: list of int of length 6
        state corresponding to the start of the current substream
    subsubstream_start: list of int of length 6
        state corresponding to the start of the current subsubstream

    Arguments
    ---------
    ref_seed : tuple of int of length 6 (optional)
        seed from which to start the generator
    s_ss_sss_index : list of int of length 3
        triplet of the indices of the stream-substream-subsubstream to start at

    See also
    --------
    random.Random
    """
    def __init__(self, ref_seed=(12345, 12345, 12345, 12345, 12345, 12345), s_ss_sss_index=None):
        assert(len(ref_seed) == 6)
        self.version = 2
        self.generate = mrg32k3a
        self.ref_seed = ref_seed
        # random.Random.__init__ invokes self.seed(ref_seed), which is
        # overridden below to initialize _current_state.
        super().__init__(ref_seed)
        if s_ss_sss_index is None:
            s_ss_sss_index = [0, 0, 0]
        self.start_fixed_s_ss_sss(s_ss_sss_index)
    def __deepcopy__(self, memo):
        # Deep-copy every attribute so that a copied generator advances
        # independently of the original.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            setattr(result, k, deepcopy(v, memo))
        return result
    def seed(self, new_state):
        """
        Set the state (or seed) of the generator and update the generator state.

        Arguments
        ---------
        new_state : tuple of int of length 6
            new state to which to advance the generator
        """
        assert(len(new_state) == 6)
        self._current_state = new_state
        # Base-class seeding is left disabled (see commented-out call);
        # only the mrg32k3a state is tracked here.
        #super().seed(new_state)
    def getstate(self):
        """
        Return the state of the generator.

        Returns
        -------
        _current_state : tuple of int of length 6
            current state of the generator
        random.Random.getstate() : tuple of int
            Random.getstate output

        See also
        --------
        random.Random
        """
        return self.get_current_state(), super().getstate()
    def setstate(self, state):
        """
        Set the internal state of the generator.

        Arguments
        ---------
        state : tuple
            state[0] is new state for the generator
            state[1] is random.Random.getstate()

        See also
        --------
        random.Random
        """
        self.seed(state[0])
        super().setstate(state[1])
    def random(self):
        """
        Generate a standard uniform variate and advance the generator
        state.

        Returns
        -------
        u : float
            pseudo uniform random variate
        """
        state = self._current_state
        new_state, u = self.generate(state)
        self.seed(new_state)
        return u
    def get_current_state(self):
        """
        Return the current state of the generator.

        Returns
        -------
        _current_state : tuple of int of length 6
            current state of the generator
        """
        return self._current_state
    def normalvariate(self, mu=0, sigma=1):
        """
        Generate a normal random variate.

        Arguments
        ---------
        mu : float
            expected value of the normal distribution from which to
            generate
        sigma : float
            standard deviation of the normal distribution from which to
            generate

        Returns
        -------
        float
            a normal random variate from the specified distribution
        """
        # Inverse-cdf transform of a single uniform via the BSM approximation.
        u = self.random()
        z = bsm(u)
        return mu + sigma*z
    def mvnormalvariate(self, mean_vec, cov, factorized=True):
        """
        Generate a normal random vector.

        Arguments
        ---------
        mean_vec : array
            location parameters of the multivariate normal distribution
            from which to generate
        cov : array
            covariance matrix of the multivariate normal distribution
            from which to generate
        factorized : Bool
            False : need to calculate chol based on covariance
            True : do not need to calculate chol since we already have it

        Returns
        -------
        list of float
            a normal random multivariate from the specified distribution
        """
        n_cols = len(cov)
        if not factorized:
            Chol = np.linalg.cholesky(cov)
        else:
            # cov is used directly as the (presumably Cholesky) factor here
            # — callers must pass a pre-factorized matrix when factorized=True.
            Chol = cov
        observations = [self.normalvariate(0, 1) for _ in range(n_cols)]
        return Chol.dot(observations).transpose() + mean_vec
    def poissonvariate(self, lmbda):
        """
        Generate a poisson random variate.

        Arguments
        ---------
        lmbda : float
            expected value of the poisson distribution from which to
            generate

        Returns
        -------
        float
            a poisson random variate from the specified distribution
        """
        if lmbda < 35:
            # Multiplicative inversion: count uniforms whose running
            # product stays at or above exp(-lmbda).
            n = 0
            p = self.random()
            threshold = exp(-lmbda)
            while p >= threshold:
                u = self.random()
                p = p * u
                n = n + 1
        else:
            # Normal approximation (with continuity correction) for large means.
            z = self.normalvariate()
            n = max(ceil(lmbda + sqrt(lmbda)*z - 0.5), 0)
        return n
    def advance_stream(self):
        """
        Advance the state of the generator to the start of the next stream.
        Streams are of length 2**141.
        """
        state = self.stream_start
        # Split the state into 2 components of length 3.
        st1 = state[0:3]
        st2 = state[3:6]
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(A1p141, st1)
        nst2m = mat33_mat31_mult(A2p141, st2)
        nst1 = mat31_mod(nst1m, mrgm1)
        nst2 = mat31_mod(nst2m, mrgm2)
        nstate = tuple(nst1 + nst2)
        self.seed(nstate)
        # Increment the stream index.
        self.s_ss_sss_index[0] += 1
        # Reset index for substream and subsubstream.
        self.s_ss_sss_index[1] = 0
        self.s_ss_sss_index[2] = 0
        # Update state referencing.
        self.stream_start = nstate
        self.substream_start = nstate
        self.subsubstream_start = nstate
    def advance_substream(self):
        """
        Advance the state of the generator to the start of the next substream.
        Substreams are of length 2**94.
        """
        state = self.substream_start
        # Split the state into 2 components of length 3.
        st1 = state[0:3]
        st2 = state[3:6]
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(A1p94, st1)
        nst2m = mat33_mat31_mult(A2p94, st2)
        nst1 = mat31_mod(nst1m, mrgm1)
        nst2 = mat31_mod(nst2m, mrgm2)
        nstate = tuple(nst1 + nst2)
        self.seed(nstate)
        # Increment the substream index.
        self.s_ss_sss_index[1] += 1
        # Reset index for subsubstream.
        self.s_ss_sss_index[2] = 0
        # Update state referencing.
        self.substream_start = nstate
        self.subsubstream_start = nstate
    def advance_subsubstream(self):
        """
        Advance the state of the generator to the start of the next subsubstream.
        Subsubstreams are of length 2**47.
        """
        state = self.subsubstream_start
        # Split the state into 2 components of length 3.
        st1 = state[0:3]
        st2 = state[3:6]
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(A1p47, st1)
        nst2m = mat33_mat31_mult(A2p47, st2)
        nst1 = mat31_mod(nst1m, mrgm1)
        nst2 = mat31_mod(nst2m, mrgm2)
        nstate = tuple(nst1 + nst2)
        self.seed(nstate)
        # Increment the subsubstream index.
        self.s_ss_sss_index[2] += 1
        # Update state referencing.
        self.subsubstream_start = nstate
    def reset_stream(self):
        """
        Reset the state of the generator to the start of the current stream.
        """
        nstate = self.stream_start
        self.seed(nstate)
        # Update state referencing.
        self.substream_start = nstate
        self.subsubstream_start = nstate
        # Reset index for substream and subsubstream.
        self.s_ss_sss_index[1] = 0
        self.s_ss_sss_index[2] = 0
    def reset_substream(self):
        """
        Reset the state of the generator to the start of the current substream.
        """
        nstate = self.substream_start
        self.seed(nstate)
        # Update state referencing.
        self.subsubstream_start = nstate
        # Reset index for subsubstream.
        self.s_ss_sss_index[2] = 0
    def reset_subsubstream(self):
        """
        Reset the state of the generator to the start of the current subsubstream.
        """
        nstate = self.subsubstream_start
        self.seed(nstate)
    def start_fixed_s_ss_sss(self, s_ss_sss_triplet):
        """
        Set the rng to the start of a specified (stream, substream, subsubstream) triplet.

        Arguments
        ---------
        s_ss_sss_triplet : list of int of length 3
            triplet of the indices of the current stream-substream-subsubstream
        """
        state = self.ref_seed
        # Split the reference seed into 2 components of length 3.
        st1 = state[0:3]
        st2 = state[3:6]
        # Advance to start of specified stream.
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(mat33_power_mod(A1p141, s_ss_sss_triplet[0], mrgm1), st1)
        nst2m = mat33_mat31_mult(mat33_power_mod(A2p141, s_ss_sss_triplet[0], mrgm2), st2)
        st1 = mat31_mod(nst1m, mrgm1)
        st2 = mat31_mod(nst2m, mrgm2)
        self.stream_start = tuple(st1 + st2)
        # Advance to start of specified substream.
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(mat33_power_mod(A1p94, s_ss_sss_triplet[1], mrgm1), st1)
        nst2m = mat33_mat31_mult(mat33_power_mod(A2p94, s_ss_sss_triplet[1], mrgm2), st2)
        st1 = mat31_mod(nst1m, mrgm1)
        st2 = mat31_mod(nst2m, mrgm2)
        self.substream_start = tuple(st1 + st2)
        # Advance to start of specified subsubstream.
        # Efficiently advance state -> A*s % m for both state parts.
        nst1m = mat33_mat31_mult(mat33_power_mod(A1p47, s_ss_sss_triplet[2], mrgm1), st1)
        nst2m = mat33_mat31_mult(mat33_power_mod(A2p47, s_ss_sss_triplet[2], mrgm2), st2)
        st1 = mat31_mod(nst1m, mrgm1)
        st2 = mat31_mod(nst2m, mrgm2)
        self.subsubstream_start = tuple(st1 + st2)
        nstate = tuple(st1 + st2)
        self.seed(nstate)
        # Update index referencing.
        self.s_ss_sss_index = s_ss_sss_triplet
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,916 | evaz1121/simopt | refs/heads/master | /simopt/test/test_mrg32k3a.py | import unittest
from rng.mrg32k3a import *
# Reference jump matrices A1p0**(2**j) mod mrgm1 and A2p0**(2**j) mod mrgm2
# for j = 127 and 76, used to cross-check mat33_power_mod.
A1p127 = [[2427906178, 3580155704, 949770784],
          [226153695, 1230515664, 3580155704],
          [1988835001, 986791581, 1230515664]
          ]
# CONSISTENCY FIX: two entries were float literals (1464411153.0 and
# 32183930.0) — harmless under ==, but inconsistent with every other
# all-int matrix constant in this suite; now uniformly ints.
A2p127 = [[1464411153, 277697599, 1610723613],
          [32183930, 1464411153, 1022607788],
          [2824425944, 32183930, 2093834863]
          ]
A1p76 = [[82758667, 1871391091, 4127413238],
         [3672831523, 69195019, 1871391091],
         [3672091415, 3528743235, 69195019]
         ]
A2p76 = [[1511326704, 3759209742, 1610795712],
         [4292754251, 1511326704, 3889917532],
         [3859662829, 4292754251, 3708466080],
         ]
# Default mrg32k3a reference seed.
seed = (12345, 12345, 12345, 12345, 12345, 12345)
class TestMRG32k3a(unittest.TestCase):
    """Unit tests for the MRG32k3a generator and its matrix jump-ahead helpers.

    The A*p* constants defined above this class are known powers (mod mrgm1 /
    mrgm2) of the MRG32k3a transition matrices; the matrix tests check
    ``mat33_power_mod`` against them.  The state tests verify that each call
    to ``random()`` advances the six-component state by one multiplication
    with A1p0 (first three components, mod mrgm1) and A2p0 (last three,
    mod mrgm2), and that stream/substream/subsubstream navigation lands on
    the expected states.
    """

    def test_A1p127(self):
        self.assertEqual(mat33_power_mod(A1p0, 2**127, mrgm1), A1p127)

    def test_A2p127(self):
        self.assertEqual(mat33_power_mod(A2p0, 2**127, mrgm2), A2p127)

    def test_A1p76(self):
        self.assertEqual(mat33_power_mod(A1p0, 2**76, mrgm1), A1p76)

    def test_A2p76(self):
        self.assertEqual(mat33_power_mod(A2p0, 2**76, mrgm2), A2p76)

    # The p47/p94/p141 expected matrices are module-level constants defined in
    # rng.mrg32k3a (imported via *), not in this test module.
    def test_A1p47(self):
        self.assertEqual(mat33_power_mod(A1p0, 2**47, mrgm1), A1p47)

    def test_A2p47(self):
        self.assertEqual(mat33_power_mod(A2p0, 2**47, mrgm2), A2p47)

    def test_A1p94(self):
        self.assertEqual(mat33_power_mod(A1p0, 2**94, mrgm1), A1p94)

    def test_A2p94(self):
        self.assertEqual(mat33_power_mod(A2p0, 2**94, mrgm2), A2p94)

    def test_A1p141(self):
        self.assertEqual(mat33_power_mod(A1p0, 2**141, mrgm1), A1p141)

    def test_A2p141(self):
        self.assertEqual(mat33_power_mod(A2p0, 2**141, mrgm2), A2p141)

    def test_get_current_state(self):
        # A fresh generator reports the default seed as its current state.
        rng = MRG32k3a()
        self.assertEqual(rng.get_current_state(), seed)

    def test_first_state(self):
        rng = MRG32k3a()
        self.assertEqual(rng._current_state, seed)

    def test_second_state(self):
        # One draw advances each state half by one transition-matrix multiply.
        rng = MRG32k3a()
        rng.random()
        st1 = mat31_mod(mat33_mat31_mult(A1p0, seed[0:3]), mrgm1)
        st2 = mat31_mod(mat33_mat31_mult(A2p0, seed[3:6]), mrgm2)
        self.assertSequenceEqual(rng._current_state, st1 + st2)

    def test_third_state(self):
        # Two draws correspond to multiplying by the squared transition matrices.
        rng = MRG32k3a()
        rng.random()
        rng.random()
        A1sq = mat33_mat33_mult(A1p0, A1p0)
        A2sq = mat33_mat33_mult(A2p0, A2p0)
        st1 = mat31_mod(mat33_mat31_mult(A1sq, seed[0:3]), mrgm1)
        st2 = mat31_mod(mat33_mat31_mult(A2sq, seed[3:6]), mrgm2)
        self.assertSequenceEqual(rng._current_state, st1 + st2)

    def test_hundreth_state(self):
        # 99 draws == transition matrices raised to the 99th power (mod m).
        rng = MRG32k3a()
        for _ in range(99):
            rng.random()
        st1 = mat31_mod(mat33_mat31_mult(mat33_power_mod(A1p0, 99, mrgm1), seed[0:3]), mrgm1)
        st2 = mat31_mod(mat33_mat31_mult(mat33_power_mod(A2p0, 99, mrgm2), seed[3:6]), mrgm2)
        self.assertSequenceEqual(rng._current_state, st1 + st2)

    def test_advance_stream(self):
        # Advancing the stream resets substream/subsubstream indices to 0.
        rng = MRG32k3a(s_ss_sss_index=[0, 1, 1])
        rng.advance_stream()
        rng2 = MRG32k3a(s_ss_sss_index=[1, 0, 0])
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, rng._current_state)
        self.assertEqual(rng.substream_start, rng._current_state)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [1, 0, 0])

    def test_advance_substream(self):
        # Advancing the substream resets only the subsubstream index.
        rng = MRG32k3a(s_ss_sss_index=[0, 0, 1])
        rng.advance_substream()
        rng2 = MRG32k3a(s_ss_sss_index=[0, 1, 0])
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, seed)
        self.assertEqual(rng.substream_start, rng._current_state)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [0, 1, 0])

    def test_advance_subsubstream(self):
        rng = MRG32k3a()
        rng.advance_subsubstream()
        rng2 = MRG32k3a(s_ss_sss_index=[0, 0, 1])
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, seed)
        self.assertEqual(rng.substream_start, seed)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [0, 0, 1])

    def test_reset_stream(self):
        # After drawing, resetting the stream returns to the stream's start.
        rng = MRG32k3a(s_ss_sss_index=[1, 1, 1])
        rng.random()
        rng.reset_stream()
        rng2 = MRG32k3a(s_ss_sss_index=[1, 0, 0])
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, rng._current_state)
        self.assertEqual(rng.substream_start, rng._current_state)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [1, 0, 0])

    def test_reset_substream(self):
        rng = MRG32k3a(s_ss_sss_index=[1, 1, 1])
        rng.random()
        rng.reset_substream()
        rng2 = MRG32k3a(s_ss_sss_index=[1, 1, 0])
        self.assertEqual(rng._current_state, rng2._current_state)
        rng3 = MRG32k3a(s_ss_sss_index=[1, 0, 0])
        self.assertEqual(rng.stream_start, rng3._current_state)
        self.assertEqual(rng.substream_start, rng._current_state)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [1, 1, 0])

    def test_reset_subsubstream(self):
        rng = MRG32k3a(s_ss_sss_index=[1, 1, 1])
        rng.random()
        rng.reset_subsubstream()
        rng2 = MRG32k3a(s_ss_sss_index=[1, 1, 1])
        self.assertEqual(rng._current_state, rng2._current_state)
        rng3 = MRG32k3a(s_ss_sss_index=[1, 0, 0])
        rng4 = MRG32k3a(s_ss_sss_index=[1, 1, 0])
        self.assertEqual(rng.stream_start, rng3._current_state)
        self.assertEqual(rng.substream_start, rng4._current_state)
        self.assertEqual(rng.subsubstream_start, rng._current_state)
        self.assertEqual(rng.s_ss_sss_index, [1, 1, 1])

    def test_init_fixed_s_ss_sss(self):
        # Constructing at an index equals jumping a fresh generator to it.
        rng = MRG32k3a(s_ss_sss_index=[1, 1, 1])
        rng2 = MRG32k3a()
        rng2.start_fixed_s_ss_sss([1, 1, 1])
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, rng2.stream_start)
        self.assertEqual(rng.substream_start, rng2.substream_start)
        self.assertEqual(rng.subsubstream_start, rng2.subsubstream_start)
        self.assertEqual(rng.s_ss_sss_index, rng2.s_ss_sss_index)

    def test_jump_fixed_s_ss_sss(self):
        # A direct jump to [1, 1, 1] equals three incremental advances.
        rng = MRG32k3a()
        rng.start_fixed_s_ss_sss([1, 1, 1])
        rng2 = MRG32k3a()
        rng2.advance_stream()
        rng2.advance_substream()
        rng2.advance_subsubstream()
        self.assertEqual(rng._current_state, rng2._current_state)
        self.assertEqual(rng.stream_start, rng2.stream_start)
        self.assertEqual(rng.substream_start, rng2.substream_start)
        self.assertEqual(rng.subsubstream_start, rng2.subsubstream_start)
        self.assertEqual(rng.s_ss_sss_index, rng2.s_ss_sss_index)
if __name__ == '__main__':
unittest.main() | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,917 | evaz1121/simopt | refs/heads/master | /simopt/timing.py | import cProfile
import pstats
import io
#import run_experiments
# Profile a full run of run_experiments.py and dump the hot-spot report to
# profile_results.txt, sorted by total (own) time per function.
pr = cProfile.Profile()
pr.enable()
# BUG FIX: the original `exec(open("run_experiments.py").read())` leaked the
# file handle (never closed); read via a context manager instead.
with open("run_experiments.py") as script_file:
    script_source = script_file.read()
exec(script_source)  # run the experiment script under the profiler
pr.disable()
# Render the stats into an in-memory buffer, then persist to disk.
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('tottime')
ps.print_stats()
with open('profile_results.txt', 'w+') as f:
    f.write(s.getvalue())
67,918 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_problem.py | import numpy as np
# Demo script: build a problem instance (continuous newsvendor) for
# interactive experimentation. Most of the walkthrough below is kept as
# commented-out example code.
import sys
import os.path as o
# Make the package root importable when running this script from demo/.
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
from rng.mrg32k3a import MRG32k3a
from oracles.cntnv import CntNVMaxProfit
# from oracles.mm1queue import MM1MinMeanSojournTime
# from oracles.facilitysizing import FacilitySizingTotalCost
# from oracles.rmitd import RMITDMaxRevenue
#from oracles.sscont import SSContMinCost
from base import Solution
# Instantiate the newsvendor problem with its default factors.
myproblem = CntNVMaxProfit()
# myproblem = SSContMinCost()
# x = (7, 50)
# mysolution = Solution(x, myproblem)
# # Create and attach rngs to solution
# rng_list = [MRG32k3a(s_ss_sss_index=[0, ss, 0]) for ss in range(myproblem.oracle.n_rngs)]
# # print(rng_list)
# mysolution.attach_rngs(rng_list, copy=False)
# # print(mysolution.rng_list)
# # Test simulate()
# n_reps = 20
# myproblem.simulate(mysolution, m=n_reps)
# print('For ' + str(n_reps) + ' replications:')
# #print('The individual objective estimates are {}'.format(mysolution.objectives[:10]))
# print('The mean objective is {}'.format(mysolution.objectives_mean))
# #print('The stochastic constraint estimates are {}'.format(mysolution.stoch_constraints[:10]))
# #print('The individual gradient estimates are {}'.format(mysolution.objectives_gradients[:10]))
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,919 | evaz1121/simopt | refs/heads/master | /simopt/solvers/astrodf.py | """
Summary
-------
ASTRODF
Based on the sample average approximation, the solver makes the surrogate model within the trust region at each iteration k.
The sample sizes are determined adaptively.
Solve the subproblem and decide whether the algorithm take the candidate solution as next ieration center point or not.
Cannot handle stochastic constraints.
"""
from base import Solver
from numpy.linalg import inv
from numpy.linalg import norm
import numpy as np
import math
import warnings
warnings.filterwarnings("ignore")
class ASTRODF(Solver):
"""
Needed description
Attributes
----------
name : string
name of solver
objective_type : string
description of objective types:
"single" or "multi"
constraint_type : string
description of constraints types:
"unconstrained", "box", "deterministic", "stochastic"
variable_type : string
description of variable types:
"discrete", "continuous", "mixed"
gradient_needed : bool
indicates if gradient of objective function is needed
factors : dict
changeable factors (i.e., parameters) of the solver
specifications : dict
details of each factor (for GUI, data validation, and defaults)
rng_list : list of rng.MRG32k3a objects
list of RNGs used for the solver's internal purposes
Arguments
---------
name : str
user-specified name for solver
fixed_factors : dict
fixed_factors of the solver
See also
--------
base.Solver
"""
    def __init__(self, name="ASTRODF", fixed_factors={}):
        """Initialize the ASTRO-DF solver and declare its tunable factors.

        Parameters
        ----------
        name : str
            User-specified name for the solver.
        fixed_factors : dict
            Factor values that override the defaults below.
            NOTE(review): the mutable default ``{}`` is shared across calls;
            harmless only as long as it is never mutated — matches the other
            solvers in this codebase, so left unchanged here.
        """
        self.name = name
        self.objective_type = "single"
        self.constraint_type = "deterministic"
        self.variable_type = "continuous"
        self.gradient_needed = False
        # Factor specifications: description, type, and default for each
        # tunable parameter (used by the GUI and for validation).
        self.specifications = {
            "crn_across_solns": {
                "description": "Use CRN across solutions?",
                "datatype": bool,
                "default": True
            },
            "delta_max": {
                "description": "maximum value of the radius",
                "datatype": float,
                "default": 200
            },
            "eta_1": {
                "description": "threshhold for success at all",
                "datatype": float,
                "default": 0.1
            },
            "eta_2": {
                "description": "threshhold for good success",
                "datatype": float,
                "default": 0.5
            },
            "gamma_0": {
                "description": "shrinkage/expansion ratio for delta_0 in parameter tuning",
                "datatype": float,
                "default": 0.5
            },
            "gamma_1": {
                "description": "very successful step radius increase",
                "datatype": float,
                "default": 1.25
            },
            "gamma_2": {
                "description": "unsuccessful step radius decrease",
                "datatype": float,
                "default": 0.8
            },
            "w": {
                "description": "decreasing rate for delta in contracation loop",
                "datatype": float,
                "default": 0.9
            },
            "mu": {
                "description": "the constant to make upper bound for delta in contraction loop",
                "datatype": float,
                "default": 100
            },
            "beta": {
                "description": "the constant to make the delta in main loop not too small",
                "datatype": float,
                "default": 50
            },
            "c_lambda": {
                "description": "hyperparameter (exponent) to determine minimum sample size",
                "datatype": float,
                "default": 0.1 ##changed
            },
            "epsilon_lambda": {
                "description": "hyperparameter (coefficient) to determine minimum sample size",
                "datatype": float,
                "default": 0.5
            },
            "kappa": {
                "description": "hyperparameter in adaptive sampling in outer/inner loop",
                "datatype": float,
                "default": 100
            }
        }
        self.check_factor_list = {
            "crn_across_solns": self.check_crn_across_solns,
            # NOTE(review): "sample_size" is not declared in this solver's
            # specifications dict above, so this entry can never match a real
            # factor (and check_sample_size would raise KeyError if invoked).
            # Likely copied from the random-search solver — confirm and remove.
            "sample_size": self.check_sample_size
        }
        super().__init__(fixed_factors)
def check_sample_size(self):
return self.factors["sample_size"] > 0
'''
def check_solver_factors(self):
pass
'''
def standard_basis(self, size, index):
arr = np.zeros(size)
arr[index] = 1.0
return arr
def local_model_evaluate(self, x_k, q):
X = [1]
X = np.append(X, np.array(x_k))
X = np.append(X, np.array(x_k) ** 2)
return np.matmul(X, q)
def samplesize(self, k, sig2, delta):
c_lambda = self.factors["c_lambda"]
epsilon_lambda = self.factors["epsilon_lambda"]
kappa = self.factors["kappa"]
# lambda_k = max(2,(10 + c_lambda) * math.log(k+1, 10) ** (1 + epsilon_lambda))
# lambda_k = max(3,(10 + c_lambda * problem.dim * math.log(problem.dim+0.1, 10)) * math.log(k+1, 10) ** (1 + epsilon_lambda))
lambda_k = (10 + c_lambda) * math.log(k, 10) ** (1 + epsilon_lambda)
# S_k = math.floor(max(3,lambda_k,(lambda_k*sig)/((kappa^2)*delta**(2*(1+1/alpha_k)))))
# S_k = math.floor(max(lambda_k, (lambda_k * sig) / ((kappa ^ 2) * delta ** 4)))
# compute sample size
N_k = math.ceil(max(2, lambda_k, lambda_k * sig2 / ((kappa ^ 2) * delta ** 4)))
return N_k
    def model_construction(self, x_k, delta, k, problem, expended_budget):
        """Build a local quadratic model around ``x_k`` via a contraction loop.

        Repeatedly shrinks the candidate radius ``delta_k = delta * w**(j-1)``,
        simulates the 2d+1 interpolation points with adaptive sampling, fits
        the model, and stops once the model gradient is informative relative
        to the radius. Returns the point estimates, interpolation set, model
        coefficients, gradient, Hessian, final radius, and updated budget.

        NOTE(review): if the fitted gradient is exactly zero, neither break
        condition can fire and the loop never terminates — confirm upstream
        guarantees a nonzero gradient.
        """
        w = self.factors["w"]
        mu = self.factors["mu"]
        beta = self.factors["beta"]
        j = 0
        d = problem.dim
        while True:
            fval = []
            j = j + 1
            # Geometrically contracted candidate radius for this pass.
            delta_k = delta * w ** (j - 1)
            # make the interpolation set
            Y = self.interpolation_points(x_k, delta_k, problem)
            for i in range(2 * d + 1):
                new_solution = self.create_new_solution(Y[i][0], problem)
                # need to check there is existing result
                problem.simulate(new_solution, 1)
                expended_budget += 1
                sample_size = 1
                # Adaptive sampling: keep replicating until the sample size
                # meets the variance-dependent rule in samplesize().
                while True:
                    problem.simulate(new_solution, 1)
                    expended_budget += 1
                    sample_size += 1
                    sig2 = new_solution.objectives_var
                    if sample_size >= self.samplesize(k, sig2, delta_k):
                        break
                # Flip sign so the model is always fit as a minimization.
                fval.append(-1 * problem.minmax[0] * new_solution.objectives_mean)
            # Fit in displacement coordinates: Z is the design centered at the
            # origin (x_k - x_k == 0), paired with the fval sampled at Y.
            Z = self.interpolation_points(np.array(x_k) - np.array(x_k), delta_k, problem)
            # make the model and get the model parameters
            q, grad, Hessian = self.coefficient(Z, fval, problem)
            # Exit once the gradient is either large in absolute terms or
            # large relative to the current radius (delta_k <= mu * ||g||).
            if norm(grad) > 0.1:
                break
            if delta_k <= mu * norm(grad):
                break
        # Clamp the returned radius into [beta * ||g||, delta].
        delta_k = min(max(beta * norm(grad), delta_k), delta)
        return fval, Y, q, grad, Hessian, delta_k, expended_budget
def coefficient(self, Y, fval, problem):
M = []
d = problem.dim
for i in range(0, 2 * d + 1):
M.append(1)
M[i] = np.append(M[i], np.array(Y[i]))
M[i] = np.append(M[i], np.array(Y[i]) ** 2)
q = np.matmul(inv(M), fval)
Hessian = np.diag(q[d + 1:2 * d + 1])
return q, q[1:d + 1], Hessian
    def interpolation_points(self, x_k, delta, problem):
        """Build the 2d+1 coordinate-basis interpolation set around ``x_k``.

        Returns a list ``Y`` of 2d+1 entries: the center followed by the
        pairs ``x_k +/- delta * e_i`` for each coordinate i. Each entry
        carries an extra layer: ``Y[0]`` is ``[x_k]`` and ``Y[0] + delta*e_i``
        broadcasts (list + ndarray) to a (1, d) array, so callers index the
        actual point as ``Y[i][0]``.

        NOTE(review): ``sum(x_k) != 0`` is presumably a cheap test for the
        origin-shifted design used when fitting (x_k - x_k == 0, which must
        stay unclipped) — it would also skip clipping for any point whose
        coordinates sum to zero; confirm intended.
        """
        Y = [[x_k]]
        d = problem.dim
        # Small offset used to keep clipped points strictly inside the box.
        epsilon = 0.01
        for i in range(0, d):
            plus = Y[0] + delta * self.standard_basis(d, i)
            minus = Y[0] - delta * self.standard_basis(d, i)
            if sum(x_k) != 0:
                # block constraints
                if minus[0][i] < problem.lower_bounds[i]:
                    minus[0][i] = problem.lower_bounds[i] + epsilon
                    # Y[0][i] = (minus[0][i]+plus[0][i])/2
                if plus[0][i] > problem.upper_bounds[i]:
                    plus[0][i] = problem.upper_bounds[i] - epsilon
                    # Y[0][i] = (minus[0][i]+plus[0][i])/2
            Y.append(plus)
            Y.append(minus)
        return Y
    def parameter_tuning(self, delta, problem):
        """Run a short ASTRO-DF burn-in (1% of budget) from one initial radius.

        Used by ``solve`` to pick the best of several initial trust-region
        radii. Mirrors the main loop of ``solve`` but without box-bound
        clipping of the candidate point.

        Returns
        -------
        tuple
            (final objective estimate, iteration count, final radius,
            recommended solutions, intermediate budgets, expended budget,
            final incumbent point).

        NOTE(review): if the 1%-budget loop body never executes, ``final_ob``
        is referenced before assignment — confirm budgets are always large
        enough.
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        # default values
        delta_max = self.factors["delta_max"]
        eta_1 = self.factors["eta_1"]
        eta_2 = self.factors["eta_2"]
        gamma_1 = self.factors["gamma_1"]
        gamma_2 = self.factors["gamma_2"]
        k = 0  # iteration number
        # Start with the initial solution
        new_x = problem.factors["initial_solution"]
        new_solution = self.create_new_solution(new_x, problem)
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)
        # Burn-in: spend only 1% of the total simulation budget.
        while expended_budget < problem.factors["budget"] * 0.01:
            k += 1
            fval, Y, q, grad, Hessian, delta_k, expended_budget = self.model_construction(new_x, delta, k, problem, expended_budget)
            # Cauchy reduction: step length along the negative gradient.
            if np.matmul(np.matmul(grad, Hessian), grad) <= 0:
                tau = 1
            else:
                tau = min(1, norm(grad) ** 3 / (delta * np.matmul(np.matmul(grad, Hessian), grad)))
            grad = np.reshape(grad, (1, problem.dim))[0]
            candidate_x = new_x - tau * delta * grad / norm(grad)
            candidate_solution = self.create_new_solution(tuple(candidate_x), problem)
            # adaptive sampling needed
            problem.simulate(candidate_solution, 1)
            expended_budget += 1
            sample_size = 1
            # Adaptive sampling
            while True:
                problem.simulate(candidate_solution, 1)
                expended_budget += 1
                sample_size += 1
                sig2 = candidate_solution.objectives_var
                if sample_size >= self.samplesize(k, sig2, delta_k):
                    break
            # calculate success ratio (objectives negated for minimization)
            fval_tilde = -1 * problem.minmax[0] * candidate_solution.objectives_mean
            # replace the candidate x if the interpolation set has lower objective function value
            if min(fval) < fval_tilde:
                minpos = fval.index(min(fval))
                fval_tilde = min(fval)
                candidate_x = Y[minpos][0]
            # rho = (observed decrease) / (model-predicted decrease);
            # guard against a zero predicted decrease.
            if (self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                    np.array(candidate_x) - np.array(new_x), q)) == 0:
                rho = 0
            else:
                rho = (fval[0] - fval_tilde) / (
                    self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                        candidate_x - new_x, q));
            if rho >= eta_2:  # very successful
                new_x = candidate_x
                final_ob = candidate_solution.objectives_mean
                delta_k = min(gamma_1 * delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            elif rho >= eta_1:  # successful
                new_x = candidate_x
                final_ob = candidate_solution.objectives_mean
                delta_k = min(delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            else:
                delta_k = min(gamma_2 * delta_k, delta_max)
                final_ob = fval[0]
        return final_ob, k, delta_k, recommended_solns, intermediate_budgets, expended_budget, new_x
    def solve(self, problem):
        """
        Run a single macroreplication of a solver on a problem.

        Picks the best of three initial trust-region radii via short
        parameter-tuning runs (1% of budget each), then runs the main
        ASTRO-DF loop with the remaining budget.

        Arguments
        ---------
        problem : Problem object
            simulation-optimization problem to solve
        crn_across_solns : bool
            indicates if CRN are used when simulating different solutions

        Returns
        -------
        recommended_solns : list of Solution objects
            list of solutions recommended throughout the budget
        intermediate_budgets : list of ints
            list of intermediate budgets when recommended solutions changes
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        delta_max = self.factors["delta_max"]
        gamma_0 = self.factors["gamma_0"]
        # Three candidate initial radii: shrunk, nominal, expanded.
        delta_candidate = [gamma_0 * delta_max, delta_max, delta_max / gamma_0]
        #print(delta_candidate)
        # default values
        eta_1 = self.factors["eta_1"]
        eta_2 = self.factors["eta_2"]
        gamma_1 = self.factors["gamma_1"]
        gamma_2 = self.factors["gamma_2"]
        k = 0  # iteration number
        # Start with the initial solution
        new_x = problem.factors["initial_solution"]
        new_solution = self.create_new_solution(new_x, problem)
        recommended_solns.append(new_solution)
        intermediate_budgets.append(expended_budget)
        # Parameter tuning run
        tp_final_ob_pt, k, delta, recommended_solns, intermediate_budgets, expended_budget, new_x = self.parameter_tuning(
            delta_candidate[0], problem)
        # NOTE(review): tp_final_ob_pt is never updated inside this loop, so
        # candidates 2 and 3 are each compared only against candidate 1's
        # objective (not against the current best) — confirm intended.
        for i in range(1, 3):
            final_ob_pt, k_pt, delta_pt, recommended_solns_pt, intermediate_budgets_pt, expended_budget_pt, new_x_pt = self.parameter_tuning(
                delta_candidate[i], problem)
            expended_budget += expended_budget_pt
            if -1 * problem.minmax[0] * final_ob_pt < -1 * problem.minmax[0] * tp_final_ob_pt:
                k = k_pt
                delta = delta_pt
                recommended_solns = recommended_solns_pt
                intermediate_budgets = intermediate_budgets_pt
                new_x = new_x_pt
        # Shift recorded budgets by the two extra tuning runs (2 * 1% budget).
        intermediate_budgets = (
            intermediate_budgets + 2 * np.ones(len(intermediate_budgets)) * problem.factors["budget"] * 0.01).tolist()
        intermediate_budgets[0] = 0
        while expended_budget < problem.factors["budget"]:
            k += 1
            # NOTE(review): delta_k is updated each iteration below, but
            # `delta` (used for the model and the Cauchy step) is never
            # refreshed from it — confirm intended.
            fval, Y, q, grad, Hessian, delta_k, expended_budget = self.model_construction(new_x, delta, k, problem,
                                                                                          expended_budget)
            # Cauchy reduction: step length along the negative gradient.
            if np.matmul(np.matmul(grad, Hessian), grad) <= 0:
                tau = 1
            else:
                tau = min(1, norm(grad) ** 3 / (delta * np.matmul(np.matmul(grad, Hessian), grad)))
            grad = np.reshape(grad, (1, problem.dim))[0]
            candidate_x = new_x - tau * delta * grad / norm(grad)
            # Clip the candidate strictly inside the box constraints.
            for i in range(problem.dim):
                if candidate_x[i] < problem.lower_bounds[i]:
                    candidate_x[i] = problem.lower_bounds[i] + 0.01
                elif candidate_x[i] > problem.upper_bounds[i]:
                    candidate_x[i] = problem.upper_bounds[i] - 0.01
            candidate_solution = self.create_new_solution(tuple(candidate_x), problem)
            # adaptive sampling needed
            problem.simulate(candidate_solution, 1)
            expended_budget += 1
            sample_size = 1
            # Adaptive sampling
            while True:
                problem.simulate(candidate_solution, 1)
                expended_budget += 1
                sample_size += 1
                sig2 = candidate_solution.objectives_var
                if sample_size >= self.samplesize(k, sig2, delta_k):
                    break
            # calculate success ratio (objectives negated for minimization)
            fval_tilde = -1 * problem.minmax[0] * candidate_solution.objectives_mean
            # replace the candidate x if the interpolation set has lower objective function value
            if min(fval) < fval_tilde:
                minpos = fval.index(min(fval))
                fval_tilde = min(fval)
                candidate_x = Y[minpos][0]
            # rho = (observed decrease) / (model-predicted decrease);
            # guard against a zero predicted decrease.
            if (self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                    np.array(candidate_x) - np.array(new_x), q)) == 0:
                rho = 0
            else:
                rho = (fval[0] - fval_tilde) / (
                    self.local_model_evaluate(np.zeros(problem.dim), q) - self.local_model_evaluate(
                        candidate_x - new_x, q));
            if rho >= eta_2:  # very successful
                new_x = candidate_x
                delta_k = min(gamma_1 * delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            elif rho >= eta_1:  # successful
                new_x = candidate_x
                delta_k = min(delta_k, delta_max)
                recommended_solns.append(candidate_solution)
                intermediate_budgets.append(expended_budget)
            else:
                delta_k = min(gamma_2 * delta_k, delta_max)
        return recommended_solns, intermediate_budgets
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,920 | evaz1121/simopt | refs/heads/master | /simopt/timing_bootstrap.py | from wrapper_base import Experiment, read_experiment_results, post_normalize, plot_progress_curves
# Load a previously saved experiment and plot its mean progress curve with
# bootstrapped CIs. The run/post-process steps are kept commented out for
# reference; this script assumes the pickle already exists on disk.
# new_experiment = Experiment(solver_name="RNDSRCH",
#                             problem_name="CNTNEWS-1")
# # Run experiment with M = 100.
# new_experiment.run(n_macroreps=100)
# # Post replicate experiment with N = 100.
# new_experiment.post_replicate(n_postreps=100)
# # Post normalize.
# post_normalize([new_experiment], n_postreps_init_opt=200)
new_experiment = read_experiment_results("experiments/outputs/RNDSRCH_on_CNTNEWS-1.pickle")
# Mean progress curves from all solvers on one problem.
plot_progress_curves(experiments=[new_experiment],
                     plot_type="mean",
                     all_in_one=True,
                     plot_CIs=True,
                     print_max_hw=False
                     )
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,921 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_solver_problem.py | import sys
import os.path as o
import os
# Make the package root importable when running this script from demo/.
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
# os.chdir('../')
from wrapper_base import Experiment, read_experiment_results
# Demo: run random search (fixed sample size 50) on the continuous
# newsvendor problem, then post-replicate the recommended solutions.
solver_name = "RNDSRCH"  # random search solver
problem_name = "CNTNEWS-1"
myexperiment = Experiment(solver_name, problem_name, solver_fixed_factors={"sample_size": 50})
#print(myexperiment.problem.check_problem_factor("initial_solution"))
# 10 macroreplications of the solver on the problem.
myexperiment.run(n_macroreps=10)
#print("Here")
#file_name_path = "experiments/outputs/" + solver_name + "_on_" + problem_name + ".pickle"
#myexperiment = read_experiment_results(file_name_path)
# 200 postreplications per recommended solution; CRN across budget points.
myexperiment.post_replicate(n_postreps=200, crn_across_budget=True, crn_across_macroreps=False)
#print("Now here.")
# myexperiment.plot_progress_curves(plot_type="all", normalize=False)
# myexperiment.plot_progress_curves(plot_type="all", normalize=True)
# #print("Finally here.")
#myexperiment.plot_progress_curves(plot_type="mean", normalize=True, plot_CIs=True)
# # myexperiment.plot_progress_curves(plot_type="quantile", normalize=True)
#myexperiment.plot_solvability_curves(solve_tols=[0.2]) | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,922 | evaz1121/simopt | refs/heads/master | /simopt/solvers/simannealing.py | """
Summary
-------
Simulated Annealing in Noisy Environments (SANE).
"""
import numpy as np
import scipy.stats as ss
from base import Solver, Solution
class SANE(Solver):
"""
Simulated Annealing in Noisy Environments (SANE)
"Simulated Annealing in the Presence of Noise"
Jurgen Branke, Stephan Meisel and Christian Schmidt
Journal of Heuristics (2008) 14: 627--654.
Attributes
----------
name : string
name of solver
objective_type : string
description of objective types:
"single" or "multi"
constraint_type : string
description of constraints types:
"unconstrained", "box", "deterministic", "stochastic"
variable_type : string
description of variable types:
"discrete", "continuous", "mixed"
gradient_needed : bool
indicates if gradient of objective function is needed
factors : dict
changeable factors (i.e., parameters) of the solver
specifications : dict
details of each factor (for GUI, data validation, and defaults)
rng_list : list of rng.MRG32k3a objects
list of RNGs used for the solver's internal purposes
Arguments
---------
name : str
user-specified name for solver
fixed_factors : dict
fixed_factors of the solver
See also
--------
base.Solver
"""
    def __init__(self, name="SANE", fixed_factors={}):
        """Initialize the SANE solver and declare its tunable factors.

        Parameters
        ----------
        name : str
            User-specified name for the solver.
        fixed_factors : dict
            Factor values that override the defaults below.
            NOTE(review): the mutable default ``{}`` is shared across calls;
            harmless only as long as it is never mutated — matches the other
            solvers in this codebase, so left unchanged here.
        """
        self.name = name
        self.objective_type = "single"
        self.constraint_type = "deterministic"
        self.variable_type = "mixed"
        self.gradient_needed = False
        # Factor specifications: description, type, and default for each
        # tunable parameter (used by the GUI and for validation).
        self.specifications = {
            "crn_across_solns": {
                "description": "Use CRN across solutions?",
                "datatype": bool,
                "default": True
            },
            "sampling_variance": {
                "description": "Variance of difference in objective values",
                "datatype": float,
                "default": 100.0
            },
            "init_temp": {
                "description": "Initial temperature",
                "datatype": float,
                "default": 10.0
            },
            "cooling_coeff": {
                # Default shrinks the temperature by 5% every 100 evaluations.
                "description": "Coefficient for geometric cooling temperature schedule",
                "datatype": float,
                "default": 0.95**(1/100)
            }
        }
        self.check_factor_list = {
            "crn_across_solns": self.check_crn_across_solns,
            "sampling_variance": self.check_sampling_variance,
            "init_temp": self.check_init_temp,
            "cooling_coeff": self.check_cooling_coeff
        }
        super().__init__(fixed_factors)
def check_sampling_variance(self):
return self.factors["sample_variance"] > 0
def check_init_temp(self):
return self.factors["init_temp"] > 0
def check_cooling_coeff(self):
return 0 < self.factors["cooling_coeff"] < 1
    def solve(self, problem):
        """
        Run a single macroreplication of a solver on a problem.

        Two regimes, switched by the current temperature (Branke et al. 2008):
        at high temperature, one replication per solution with the
        Ceperley–Dewing acceptance rule; at low temperature, sequential
        sampling until the estimated error probability matches the Glauber
        acceptance probability, then always accept.

        Arguments
        ---------
        problem : Problem object
            simulation-optimization problem to solve

        Returns
        -------
        recommended_solns : list of Solution objects
            list of solutions recommended throughout the budget
        intermediate_budgets : list of ints
            list of intermediate budgets when recommended solutions changes
        """
        recommended_solns = []
        intermediate_budgets = []
        expended_budget = 0
        temperature = self.factors["init_temp"]
        # self.rng_list[0] is unused.
        # Designate random number generator for random sampling.
        find_next_soln_rng = self.rng_list[1]
        # Designate random number generator for switching to new solutions.
        switch_soln_rng = self.rng_list[2]
        # Sequentially generate a random neighboring solution, assess its
        # quality, and switch based on estimated differences and current
        # temperature.
        # TO DO: Double-check how RNGs are to be used to simulate solutions.
        while expended_budget < problem.factors["budget"]:
            if expended_budget == 0:
                # Start at initial solution and record as best.
                current_x = problem.factors["initial_solution"]
                current_solution = self.create_new_solution(current_x, problem)
                recommended_solns.append(current_solution)
                intermediate_budgets.append(expended_budget)
            # High-temperature regime: threshold sqrt(pi*sigma^2/8) from
            # Branke et al. (2008).
            if temperature >= 1./np.sqrt(8.0/(np.pi*self.factors["sampling_variance"])):
                #print("First Case")
                # Simulate one replication of current solution.
                # Fresh sampling, so create new solution objects.
                current_solution = self.create_new_solution(current_x, problem)
                problem.simulate(current_solution, m=1)
                expended_budget += 1
                # Simulate one replication at new neighboring solution
                # Fresh sampling, so create new solution objects.
                new_x = problem.get_random_solution(find_next_soln_rng)
                new_solution = self.create_new_solution(new_x, problem)
                problem.simulate(new_solution, m=1)
                expended_budget += 1
                # Follow Ceperley and Dewing acceptance condition.
                # See Equation (15) on pg. 638 of Branke et al. (2008).
                # NOTE(review): problem.minmax is multiplied directly (no [0]
                # as in other solvers) — presumably broadcasts as a +/-1
                # per-objective vector; confirm for multi-objective problems.
                delta_hat = problem.minmax * (current_solution.objectives_mean - new_solution.objectives_mean)
                if delta_hat <= -0.5*self.factors["sampling_variance"]/temperature:
                    prob_switch = 1
                else:
                    prob_switch = np.exp(-1*(delta_hat/temperature + 0.5*self.factors["sampling_variance"]/temperature**2))
                # Switch to new solution with probability prob_switch
                coin_flip = switch_soln_rng.random()
                if coin_flip < prob_switch:
                    #print("Switched")
                    recommended_solns.append(new_solution)
                    intermediate_budgets.append(expended_budget)
                    current_x = new_x
            else:
                #print("Second Case")
                #print(expended_budget)
                # Create a fresh solution object for current solution
                current_solution = self.create_new_solution(current_x, problem)
                # Identify new neighboring solution to simulate.
                # TO DO: generalize to neighborhood of current solution.
                new_x = problem.get_random_solution(find_next_soln_rng)
                new_solution = self.create_new_solution(new_x, problem)
                # Do sequential sampling until error probability matches Glauber probability
                prob_error = 1
                prob_glauber = 0
                sample_size = 0
                while prob_error > prob_glauber:
                    problem.simulate(current_solution, m=1)
                    expended_budget += 1
                    problem.simulate(new_solution, m=1)
                    expended_budget += 1
                    sample_size += 1
                    # Estimate difference in objective value.
                    delta_hat = problem.minmax * (current_solution.objectives_mean - new_solution.objectives_mean)
                    prob_error = ss.norm.cdf(-np.abs(delta_hat)*np.sqrt(sample_size)/np.sqrt(self.factors["sampling_variance"]))
                    prob_glauber = 1.0/(1.0 + np.exp(np.abs(delta_hat)/temperature))
                #print(expended_budget)
                # Accept new solution.
                recommended_solns.append(new_solution)
                intermediate_budgets.append(expended_budget)
                current_x = new_x
            # Update temperature according to cooling schedule.
            temperature = self.factors["init_temp"]*self.factors["cooling_coeff"]**expended_budget
        return recommended_solns, intermediate_budgets
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,923 | evaz1121/simopt | refs/heads/master | /simopt/run_experiments.py | from wrapper_base import Experiment, plot_area_scatterplots, post_normalize, plot_progress_curves, plot_solvability_cdfs, read_experiment_results, plot_solvability_profiles
from rng.mrg32k3a import MRG32k3a
# 3 versions of random search
# Each variant is distinguished only by its fixed per-solution sample size.
rs_sample_sizes = [10, 50, 100]
# Problem ranges: 5*5 = 25 problem instances
demand_means = [25.0, 50.0, 100.0, 200.0, 400.0] #, 800.0]
lead_means = [1.0, 3.0, 6.0, 9.0, 12.0] #, 15.0]
# default values
# "demand_mean": 100.0
# "lead_mean": 6.0
# "backorder_cost": 4.0
# "holding_cost": 1.0
# "fixed_cost": 36.0
# "variable_cost": 2.0
# # First Section: Running experiments.
# # Loop over problems.
# for dm in demand_means:
# for lm in lead_means:
# oracle_fixed_factors = {"demand_mean": dm,
# "lead_mean": lm
# }
# # Budget = 1000 for (s,S) inventory problem.
# # RS w/ sample size 100 will get through only 10 iterations.
# problem_fixed_factors = {"budget": 1000}
# problem_rename = f"SSCONT-1_dm={dm}_lm={lm}"
# # Temporarily store experiments on the same problem for post-normalization.
# # experiments_same_problem = []
# # Loop over solvers.
# # for rs_ss in rs_sample_sizes:
# # solver_fixed_factors = {"sample_size": rs_ss}
# # solver_rename = f"RNDSRCH_ss={rs_ss}"
# # # Create experiment.
# # new_experiment = Experiment(solver_name="RNDSRCH",
# # problem_name="SSCONT-1",
# # solver_rename=solver_rename,
# # problem_rename=problem_rename,
# # solver_fixed_factors=solver_fixed_factors,
# # problem_fixed_factors=problem_fixed_factors,
# # oracle_fixed_factors=oracle_fixed_factors
# # )
# # # Run experiment with M = 50.
# # new_experiment.run(n_macroreps=10)
# # # Post replicate experiment with N = 100.
# # new_experiment.post_replicate(n_postreps=100)
# # experiments_same_problem.append(new_experiment)
# # Run ASTRO-DF. (COMMENTED OUT)
# solver_fixed_factors = {"delta_max": 200.0}
# new_experiment = Experiment(solver_name="ASTRODF",
# problem_name="SSCONT-1",
# problem_rename=problem_rename,
# solver_fixed_factors=solver_fixed_factors,
# problem_fixed_factors=problem_fixed_factors,
# oracle_fixed_factors=oracle_fixed_factors
# )
# # Run experiment with M = 10.
# new_experiment.run(n_macroreps=10)
# # Post replicate experiment with N = 100.
# new_experiment.post_replicate(n_postreps=100)
# # experiments_same_problem.append(new_experiment)
# # # Post-normalize experiments with L = 200.
# # # Provide NO proxies for f(x0), f(x*), or f(x).
# # post_normalize(experiments=experiments_same_problem, n_postreps_init_opt=200)
# # STOPPING POINT.
# # If experiments have been run, comment out the First Section.
# Second Section: Plotting.
# For plotting, "experiments" will be a list of list of Experiment objects.
# outer list - indexed by solver
# inner list - index by problem
# experiments[solver_index][problem_index] -> Experiment object.
experiments = []
# Load every saved (random-search variant, problem) experiment pickle.
for sample_size in rs_sample_sizes:
    solver_rename = f"RNDSRCH_ss={sample_size}"
    experiments_same_solver = []
    for demand_mean in demand_means:
        for lead_mean in lead_means:
            problem_rename = f"SSCONT-1_dm={demand_mean}_lm={lead_mean}"
            file_name = f"{solver_rename}_on_{problem_rename}"
            # Load experiment from its pickle on disk.
            new_experiment = read_experiment_results(f"experiments/outputs/{file_name}.pickle")
            # Rename problem and solver to produce nicer plot labels.
            new_experiment.solver.name = f"Random Search {sample_size}"
            new_experiment.problem.name = fr"SSCONT-1 with $\mu_D={round(demand_mean)}$ and $\mu_L={round(lead_mean)}$"
            experiments_same_solver.append(new_experiment)
    experiments.append(experiments_same_solver)
# Load ASTRO-DF results for every problem instance and append them as one
# more "solver" row of the experiments grid.
# FIX: solver_rename was a placeholder-free f-string (f"ASTRODF"); the f
# prefix was meaningless, so it is a plain string literal now.
solver_rename = "ASTRODF"
experiments_same_solver = []
for dm in demand_means:
    for lm in lead_means:
        problem_rename = f"SSCONT-1_dm={dm}_lm={lm}"
        file_name = f"{solver_rename}_on_{problem_rename}"
        # Load experiment from its pickle on disk.
        new_experiment = read_experiment_results(f"experiments/outputs/{file_name}.pickle")
        # Rename problem and solver to produce nicer plot labels.
        new_experiment.solver.name = "ASTRO-DF"
        new_experiment.problem.name = fr"SSCONT-1 with $\mu_D={round(dm)}$ and $\mu_L={round(lm)}$"
        experiments_same_solver.append(new_experiment)
experiments.append(experiments_same_solver)
# Plotting
n_solvers = len(experiments)
n_problems = len(experiments[0])
# # Post-normalize to incorporate ASTRO-DF results
# for problem_idx in range(n_problems):
# experiments_same_problem = [experiments[solver_idx][problem_idx] for solver_idx in range(n_solvers)]
# post_normalize(experiments=experiments_same_problem, n_postreps_init_opt=200)
# # All progress curves for one experiment.
# plot_progress_curves([experiments[0][0], experiments[3][0]], plot_type="all", all_in_one=True)
# All solvers' progress curves, overlaid, on problem index 0.
plot_progress_curves([experiments[solver_idx][0] for solver_idx in range(n_solvers)], plot_type="all", all_in_one=True)
# All solvers' progress curves, overlaid, on problem index 22.
plot_progress_curves([experiments[solver_idx][22] for solver_idx in range(n_solvers)], plot_type="all", all_in_one=True)
# # All progress curves for one experiment.
# plot_progress_curves([experiments[0][22], experiments[3][22]], plot_type="all", all_in_one=True)
# # Mean progress curves from all solvers on one problem.
# plot_progress_curves(experiments=[experiments[solver_idx][0] for solver_idx in range(n_solvers)],
# plot_type="mean",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False
# )
# # Mean progress curves from all solvers on one problem.
# plot_progress_curves(experiments=[experiments[solver_idx][22] for solver_idx in range(n_solvers)],
# plot_type="mean",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False
# )
# # Plot 0.9-quantile progress curves from all solvers on one problem.
# plot_progress_curves(experiments=[experiments[solver_idx][0] for solver_idx in range(n_solvers)],
# plot_type="quantile",
# beta=0.9,
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False
# )
# Plot 0.9-quantile progress curves from all solvers on problem index 22.
plot_progress_curves(experiments=[experiments[solver_idx][22] for solver_idx in range(n_solvers)],
                     plot_type="quantile",
                     beta=0.9,
                     all_in_one=True,
                     plot_CIs=True,
                     print_max_hw=False
                     )
# # Plot cdf of 0.2-solve times for all solvers on one problem.
# plot_solvability_cdfs(experiments=[experiments[solver_idx][0] for solver_idx in range(n_solvers)],
# solve_tol=0.2,
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False
# )
# # Plot cdf of 0.2-solve times for all solvers on one problem.
# plot_solvability_cdfs(experiments=[experiments[solver_idx][22] for solver_idx in range(n_solvers)],
# solve_tol=0.2,
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False
# )
# # Plot area scatterplots of all solvers on all problems.
# plot_area_scatterplots(experiments=experiments,
# all_in_one=True,
# plot_CIs=False,
# print_max_hw=False
# )
# # Plot cdf 0.1-solvability profiles of all solvers on all problems.
# plot_solvability_profiles(experiments=experiments,
# plot_type="cdf_solvability",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False,
# solve_tol=0.1
# )
# # Plot 0.5-quantile 0.1-solvability profiles of all solvers on all problems.
# plot_solvability_profiles(experiments=experiments,
# plot_type="quantile_solvability",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False,
# solve_tol=0.1,
# beta=0.5
# )
# # Plot difference of cdf 0.1-solvability profiles of all solvers on all problems.
# # Reference solver = ASTRO-DF.
# plot_solvability_profiles(experiments=experiments,
# plot_type="diff_cdf_solvability",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False,
# solve_tol=0.1,
# ref_solver="ASTRO-DF"
# )
# # Plot difference of 0.5-quantile 0.1-solvability profiles of all solvers on all problems.
# # Reference solver = ASTRO-DF.
# plot_solvability_profiles(experiments=experiments,
# plot_type="diff_quantile_solvability",
# all_in_one=True,
# plot_CIs=True,
# print_max_hw=False,
# solve_tol=0.1,
# beta=0.5,
# ref_solver="ASTRO-DF"
# )
| {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,924 | evaz1121/simopt | refs/heads/master | /simopt/demo/demo_multiple_solvers.py | import sys
import os.path as o
import os
sys.path.append(o.abspath(o.join(o.dirname(sys.modules[__name__].__file__), "..")))
from wrapper_base import MetaExperiment
# Cross every solver with every problem; check_compatibility() reports any
# solver/problem pairs that cannot be run together.
mymetaexperiment = MetaExperiment(solver_names=["RNDSRCH", "ASTRODF"], problem_names=["CNTNEWS-1", "MM1-1"]) #, fixed_factors_filename="all_factors")
print(mymetaexperiment.check_compatibility())
# Run 2 macroreplications of each pairing, then post-process:
# 200 postreplications per budget point, CRN across budgets only.
mymetaexperiment.run(n_macroreps=2)
mymetaexperiment.post_replicate(n_postreps=200, crn_across_budget=True, crn_across_macroreps=False)
mymetaexperiment.post_normalize(n_postreps_init_opt=200, crn_across_init_opt=True)
#mymetaexperiment.plot_solvability_profiles(solve_tol=0.1, beta=0.5, ref_solver="RNDSRCH30")
#mymetaexperiment.plot_area_scatterplot(plot_CIs=False, all_in_one=False)
#mymetaexperiment.plot_progress_curves(plot_type="quantile", beta=0.90, normalize=True)
#mymetaexperiment.plot_solvability_curves(solve_tols=[0.1, 0.2]) | {"/simopt/rng/__init__.py": ["/simopt/rng/mrg32k3a.py"], "/simopt/rng/mrg32k3a.py": ["/simopt/rng/matmodops.py"]} |
67,931 | welld7/db_quality | refs/heads/master | /db_test.py | #from random import randint
import pytest
from db_quality_main import *
@pytest.fixture()
def handle_connection():
    """Yield a connection to the database under test; always close it after."""
    conn = create_connection(database2)
    yield conn
    conn.close()
@pytest.mark.sanity_db_exists
@pytest.mark.db_exists
def test_check_status_table_exists(handle_connection):
    """The check_status table must be present in the database under test."""
    cursor = handle_connection.cursor()
    cursor.execute(''' SELECT * FROM sqlite_master WHERE name ='check_status' and type='table'; ''')
    assert cursor.fetchone()
@pytest.mark.sanity_db_exists
@pytest.mark.db_exists
def test_check_object_table_exists(handle_connection):
    """The check_object table must be present in the database under test."""
    cursor = handle_connection.cursor()
    cursor.execute(''' SELECT * FROM sqlite_master WHERE name ='check_object' and type='table'; ''')
    assert cursor.fetchone()
@pytest.mark.sanity_db_exists
@pytest.mark.db_exists
def test_number_of_load_dates(handle_connection):
    """check_status must hold exactly one summary row per distinct load date."""
    cursor = handle_connection.cursor()
    cursor.execute('''SELECT count(*) from check_status;''')
    status_row_count = cursor.fetchone()[0]
    cursor.execute('''SELECT count( DISTINCT load_date) from check_object;''')
    distinct_date_count = cursor.fetchone()[0]
    assert status_row_count == distinct_date_count
@pytest.mark.db_exists
def test_load_dates_set(handle_connection):
    """The sets of load dates in check_status and check_object must agree."""
    cursor = handle_connection.cursor()
    cursor.execute('''SELECT load_date from check_status;''')
    status_dates = set(cursor.fetchall())
    cursor.execute('''SELECT DISTINCT load_date from check_object;''')
    object_dates = set(cursor.fetchall())
    assert status_dates == object_dates
#TODO: debug unique&date
@pytest.mark.db_exists
#@pytest.mark.skip
def test_whole_db_consistency(handle_connection):
    """For every load date, recompute the day's aggregates straight from
    check_object and compare them with the stored check_status row."""
    conn = handle_connection
    #print_table(conn)
    cur = conn.cursor()
    # if performance becomes critical, we can create one complex query
    cur.execute(''' SELECT DISTINCT load_date from check_object ;''')
    all_ld_dates = cur.fetchall()
    row_in_check_object_table_list = []
    rows_status_list=[]
    for row in all_ld_dates:
        ld_date = row[0]
        next_day = get_next_day(ld_date)
        #debug TODO: delete it
        cur.execute(''' SELECT * from check_object WHERE load_date>=? AND load_date<?;''',
                    (ld_date, next_day))
        rows = cur.fetchall()
        for row2 in rows:
            print(row2)
        # Recomputed-from-scratch aggregates for this day.
        row_in_check_object_table =\
            calculate_status_values_in_check_object_table(conn, ld_date, next_day)
        row_in_check_object_table_list.append(row_in_check_object_table)
        print("calculated:", row_in_check_object_table)
        # Stored aggregates (assumes at most one check_status row per day).
        cur.execute(''' SELECT * from check_status WHERE load_date>=? AND load_date<?;''',
                    (ld_date, next_day))
        rows_status = cur.fetchone()
        rows_status_list.append(rows_status)
        print("check_status:", rows_status)
        print("-------------------------------------------------------------------")
    #TODO: more elaborate element-wise comparison (for better failure messages)
    assert row_in_check_object_table_list == rows_status_list
#only some subset of the DB (useful for huge DBs)
@pytest.mark.db_exists
@pytest.mark.skip
def test_subset_db_consistency(handle_connection):
    """Placeholder: like test_whole_db_consistency but on a sampled subset of
    load dates, for databases too large to scan fully."""
    pass
#FIXME: Changing the same DB in this test
@pytest.mark.db_exists
@pytest.mark.parametrize("day_input", ['2217-01-05', '3017-01-05'])
@pytest.mark.parametrize("int_input", [2, -200])
def test_add_new_day_to_db(handle_connection, day_input, int_input):
    """Insert three rows with int values 0, n, 2n for a far-future day and
    check the recorded int average equals n; then clean up.

    NOTE(review): this mutates the shared database, and the cleanup deletes
    are skipped if the assert fails — see the FIXME above.
    """
    conn = handle_connection
    row1 = (day_input, randint(0, 1000000), 0, 2.0, "hi", '2013-01-05')
    rowid1 = insert_new_row(conn, row1)
    row2 = (day_input, randint(0, 1000000), int_input, 2.0, "hi", '2013-01-05')
    rowid2 = insert_new_row(conn, row2)
    row3 = (day_input, randint(0, 1000000), 2*int_input, 2.0, "hi", '2013-01-05')
    rowid3 = insert_new_row(conn, row3)
    rowid_inserted = add_day_status_row(conn, day_input)
    assert int_input == get_int_avg_in_status_table_by_rowid(conn, rowid_inserted)
    delete_check_object_row_by_rowid(conn, rowid1)
    delete_check_object_row_by_rowid(conn, rowid2)
    delete_check_object_row_by_rowid(conn, rowid3)
    delete_check_status_row_by_rowid(conn, rowid_inserted)
| {"/db_test.py": ["/db_quality_main.py"], "/generate_db.py": ["/db_quality_main.py"], "/sanity_test.py": ["/db_quality_main.py"]} |
67,932 | welld7/db_quality | refs/heads/master | /generate_db.py | from db_quality_main import *
from string import ascii_lowercase
#from pairing import pair, depair
# Value range for generated ints/floats.
MIN = -1000000
MAX = 1000000
# Random string length (0 produces None) and upper bound for generated ids.
MAX_STR_LENGTH = 10
MAX_ID = 1000000
# How many past days to generate, and how many check_object rows per day.
GENERATE_DAYS=10
ROWS_PER_DAY=3
def pairing_function(a, b):
    """Standard Cantor pairing: a bijection from pairs of non-negative
    integers to a single integer, used as a hashable key for (id, int) pairs.

    BUG FIX: the old lambda computed ((a+b)*(a+b+1)+b)/2, which is not the
    Cantor pairing (the +b belongs after the halving) and returned floats.
    Only injectivity matters to callers, so correcting it preserves counts.
    """
    return (a + b) * (a + b + 1) // 2 + b
def get_prev_day_raw(date_raw):
    """Return the day before *date_raw* (a date/datetime object)."""
    return date_raw - datetime.timedelta(days=1)
def generate_for_ld_date(ld_date, rows_per_day, conn=None):
    """Generate *rows_per_day* synthetic check_object rows for *ld_date* and
    return the matching check_status tuple.

    BUG FIX: the body used a name ``conn`` that was never defined in this
    scope (it was only a local of main()), so calling this function raised
    NameError. ``conn`` is now an optional parameter — existing two-argument
    callers keep working because a connection is created on demand.

    :param ld_date: load date string "YYYY-MM-DD"
    :param rows_per_day: number of rows to insert
    :param conn: open DB connection; opened from database2 when None
    :return: (ld_date, non_unique, count, null_count, z0_count,
              int_avg, float_avg, date_avg)
    """
    if conn is None:
        conn = create_connection(database2)
    # Pick the target averages first, then generate values symmetric around
    # them so the per-day aggregates are known exactly.
    avg_int = randint(MIN, MAX)
    avg_float = uniform(MIN, MAX)
    avg_date_raw = datetime.datetime(randint(1970, 2100), randint(1, 12),
                                     randint(1, 28))  # day <= 28 is valid in every month
    avg_date_str = datetime.datetime.strftime(avg_date_raw, "%Y-%m-%d")
    print("generate for", ld_date)
    null_cnt = 0
    z0_cnt = 0
    id_int_all_pairs_set = set()
    non_unique_pair_set = set()
    # just one pair counter for current ld_date for all pairs per day
    for n_th_in_day in range(rows_per_day):
        # Values avg-k+1, avg-k+3, ... average out to exactly avg.
        current_int = avg_int - rows_per_day + n_th_in_day * 2 + 1
        current_float = avg_float - rows_per_day + n_th_in_day * 2 + 1
        # count zero number (int+float)
        z0_cnt += 1 if current_int == 0 else 0
        z0_cnt += 1 if current_float == 0 else 0
        # Generate a lower-case random string; a random length of 0 yields
        # None, which is counted as a NULL.
        length = randint(0, MAX_STR_LENGTH)
        if length == 0:
            current_string = None
            null_cnt += 1
        else:
            current_string = ''.join(choice(ascii_lowercase)
                                     for _ in range(length))
        row_id = randint(0, MAX_ID)
        pair = pairing_function(row_id, current_int)
        if pair not in id_int_all_pairs_set:
            # 1. new pair => record it in the set of all pairs
            id_int_all_pairs_set.add(pair)
        else:
            # 2. seen again => it is a non-unique (id, int) combination
            non_unique_pair_set.add(pair)
        # TODO: more sophisticated algorithm for date generation
        row = (ld_date, row_id, current_int,
               current_float, current_string, avg_date_str)
        print("insert", row)
        insert_new_row(conn, row)
    print("insert", len(non_unique_pair_set))
    return (ld_date, len(non_unique_pair_set), rows_per_day, null_cnt,
            z0_cnt, avg_int, avg_float, avg_date_str)
def generate_db(conn, generate_days, rows_per_day):
    """Populate check_object/check_status with synthetic data for the
    *generate_days* days preceding today.

    NOTE(review): `conn` is not forwarded to generate_for_ld_date, which
    resolves a name `conn` at call time — verify it is in scope when run.
    """
    today = datetime.date.today()
    date = today
    for _ in range(generate_days):
        date = get_prev_day_raw(date)
        ld_date = datetime.datetime.strftime(date, "%Y-%m-%d")
        # row2 is the precomputed status tuple for the day just generated.
        row2 = generate_for_ld_date(ld_date, rows_per_day)
        insert_new_row_status(conn, row2)
def main():
    """Recreate both tables from scratch and fill them with generated data."""
    conn = create_connection(database2)
    cur = conn.cursor()
    create_table(conn, sql_create_main_table)
    create_table(conn, sql_create_status_table)
    # Clear any leftovers from a previous generation run.
    cur.execute(''' delete from check_status; ''')
    cur.execute(''' delete from check_object; ''')
    conn.commit()
    # For performance reasons: disable fsync during bulk generation.
    sql = ''' PRAGMA synchronous = 0; '''
    cur.execute(sql)
    conn.commit()
    generate_db(conn, generate_days=GENERATE_DAYS, rows_per_day=ROWS_PER_DAY )
    print_table(conn)
    conn.close()

if __name__ == '__main__':
    main()
| {"/db_test.py": ["/db_quality_main.py"], "/generate_db.py": ["/db_quality_main.py"], "/sanity_test.py": ["/db_quality_main.py"]} |
67,933 | welld7/db_quality | refs/heads/master | /db_quality_main.py | import sqlite3
from random import randint, uniform, choice
import datetime
database = "pythonsqlite.db"
database_tmp = "pythonsqlite_tmp.db"
database2 = "pythonsqlite_backup_12.db"
sql_create_main_table = """ CREATE TABLE IF NOT EXISTS check_object (
load_date date,
id integer,
int_value integer,
float_value float,
char_value varchar(10),
date_value date
); """
sql_create_status_table = """ CREATE TABLE IF NOT EXISTS check_status (
load_date date,
non_unique_id_int integer,
count integer,
null_count integer,
z0_count int,
int_avg float,
float_avg float,
date_avg float
); """
def create_connection(db):
    """Create a connection to the SQLite database *db*.

    :param db: database file path (or ":memory:")
    :return: Connection object, or None if the database could not be opened

    BUG FIX: the previous ``except Exception`` silently swallowed *every*
    error (including programming errors such as a bad argument type); only
    database errors are absorbed now.
    """
    try:
        conn = sqlite3.connect(db)
    except sqlite3.Error:
        conn = None
    return conn
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on *conn*.

    :param conn: open database connection
    :param create_table_sql: a CREATE TABLE statement
    :return: the cursor that ran the statement, or None if the SQL failed

    BUG FIX: narrowed the former blanket ``except Exception`` to
    ``sqlite3.Error`` so that non-database bugs (e.g. conn being None)
    surface instead of silently returning None.
    """
    try:
        cur = conn.cursor()
        cur.execute(create_table_sql)
    except sqlite3.Error:
        cur = None
    return cur
def insert_new_row_status(conn, row):
    """Insert one aggregate row into check_status and commit.

    :param conn: open database connection
    :param row: 8-tuple matching check_status's column order
    :return: rowid of the inserted row
    """
    cursor = conn.cursor()
    sql = ''' INSERT INTO check_status(load_date,non_unique_id_int,count,null_count,z0_count,int_avg,float_avg,date_avg)
              VALUES(?,?,?,?,?,?,?,?); '''
    cursor.execute(sql, row)
    conn.commit()
    return cursor.lastrowid
def insert_new_row(conn, row):
    """Insert one data row into check_object and commit.

    :param conn: open database connection
    :param row: 6-tuple matching check_object's column order
    :return: rowid of the inserted row
    """
    cursor = conn.cursor()
    sql = ''' INSERT INTO check_object(load_date,id,int_value,float_value,char_value,date_value)
              VALUES(?,?,?,?,?,?); '''
    cursor.execute(sql, row)
    conn.commit()
    return cursor.lastrowid
def get_count(conn, day, next_day):
    """Count check_object rows whose load_date is in [day, next_day).

    :param conn: open database connection
    :param day: inclusive lower bound ("YYYY-MM-DD")
    :param next_day: exclusive upper bound ("YYYY-MM-DD")
    :return: number of matching rows
    """
    cursor = conn.cursor()
    sql = ''' SELECT count(*) from check_object WHERE load_date>=? AND load_date<?;'''
    cursor.execute(sql, (day, next_day))
    return cursor.fetchone()[0]
def get_null_count(conn, day, next_day):
    """Count NULL values across rows with load_date in [day, next_day).

    Checks every column except ``id``. Returns None (SQL NULL, not 0) when
    no rows fall in the range, since SUM over an empty set is NULL.

    BUG FIX: the result alias used to be ``TotalNotNullCount`` although the
    expression counts NULLs; the alias and docstring are corrected (the
    returned value is unchanged).
    """
    sql = ''' SELECT SUM(CASE WHEN load_date IS NULL THEN 1 ELSE 0 END
              + CASE WHEN int_value IS NULL THEN 1 ELSE 0 END
              + CASE WHEN float_value IS NULL THEN 1 ELSE 0 END
              + CASE WHEN char_value IS NULL THEN 1 ELSE 0 END
              + CASE WHEN date_value IS NULL THEN 1 ELSE 0 END) AS TotalNullCount
              FROM check_object
              WHERE load_date>=? AND load_date<?;'''
    cur = conn.cursor()
    cur.execute(sql, (day, next_day))
    count = cur.fetchone()[0]
    return count
def get_z0_count(conn, day, next_day):
    """Count zero values in the id, int_value and float_value columns for
    rows with load_date in [day, next_day). Returns None if no rows match."""
    cursor = conn.cursor()
    sql = ''' SELECT SUM(CASE WHEN id = 0 THEN 1 ELSE 0 END
              + CASE WHEN int_value = 0 THEN 1 ELSE 0 END
              + CASE WHEN float_value = 0 THEN 1 ELSE 0 END)
              FROM check_object
              WHERE load_date>=? AND load_date<?;'''
    cursor.execute(sql, (day, next_day))
    return cursor.fetchone()[0]
def get_int_avg(conn, day, next_day):
    """Average of int_value over rows with load_date in [day, next_day).
    Returns None if no rows match."""
    cursor = conn.cursor()
    sql = ''' SELECT AVG(int_value)
              FROM check_object
              WHERE load_date>=? AND load_date<?;'''
    cursor.execute(sql, (day, next_day))
    return cursor.fetchone()[0]
def get_float_avg(conn, day, next_day):
    """Average of float_value over rows with load_date in [day, next_day).
    Returns None if no rows match."""
    cursor = conn.cursor()
    sql = ''' SELECT AVG(float_value)
              FROM check_object
              WHERE load_date>=? AND load_date<?;'''
    cursor.execute(sql, (day, next_day))
    return cursor.fetchone()[0]
def get_date_avg(conn, day, next_day):
    """Average of date_value over rows with load_date in [day, next_day).

    The dates are coerced to integers via CAST before averaging, so in
    SQLite a value like '2020-01-01' contributes its leading digits (2020).
    Returns None if no rows match.
    """
    cursor = conn.cursor()
    sql = ''' SELECT CAST(AVG(CAST(date_value AS INT)) AS DATETIME)
              FROM check_object
              WHERE load_date>=? AND load_date<?;'''
    cursor.execute(sql, (day, next_day))
    return cursor.fetchone()[0]
def count_non_unique_id_int(conn, day, next_day):
    """Count (id, int_value) combinations that occur MORE THAN ONCE among
    rows with load_date in [day, next_day).

    BUG FIX: the old query counted DISTINCT combinations, so a day with N
    unique rows reported N instead of 0 — contradicting both the function
    name and the generator, which records only pairs seen at least twice.
    Each duplicated combination is counted once, regardless of how many
    times it repeats.
    """
    sql = ''' SELECT count(*) FROM (SELECT id, int_value
              FROM check_object
              WHERE load_date>=? AND load_date<?
              GROUP BY id, int_value
              HAVING count(*) > 1);'''
    cur = conn.cursor()
    cur.execute(sql, (day, next_day))
    count = cur.fetchone()[0]
    return count
def get_int_avg_in_status_table_by_rowid(conn, rowid):
    """Return int_avg from the check_status row with the given rowid.

    BUG FIX: replaced the `rowid=? OR rowid=?` / (rowid, rowid) workaround
    with a plain single-element parameter tuple.
    """
    cur = conn.cursor()
    cur.execute('SELECT int_avg FROM check_status WHERE rowid=?;', (rowid,))
    return cur.fetchone()[0]
def get_float_avg_in_status_table_by_rowid(conn, rowid):
    """Return float_avg from the check_status row with the given rowid.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('SELECT float_avg FROM check_status WHERE rowid=?;', (rowid,))
    return cur.fetchone()[0]
def get_z0_count_in_status_table_by_rowid(conn, rowid):
    """Return z0_count from the check_status row with the given rowid.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('SELECT z0_count FROM check_status WHERE rowid=?;', (rowid,))
    return cur.fetchone()[0]
def get_null_count_in_status_table_by_rowid(conn, rowid):
    """Return null_count from the check_status row with the given rowid.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('SELECT null_count FROM check_status WHERE rowid=?;', (rowid,))
    return cur.fetchone()[0]
def get_date_avg_in_status_table_by_rowid(conn, rowid):
    """Return date_avg from the check_status row with the given rowid.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('SELECT date_avg FROM check_status WHERE rowid=?;', (rowid,))
    return cur.fetchone()[0]
def get_non_unique_id_int_in_status_table_by_rowid(conn, rowid):
    """Return non_unique_id_int from the check_status row with the given rowid.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('SELECT non_unique_id_int FROM check_status WHERE rowid=?;',
                (rowid,))
    return cur.fetchone()[0]
def delete_check_object_row_by_rowid(conn, rowid):
    """Delete the check_object row with the given rowid and commit.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('DELETE FROM check_object WHERE rowid=?;', (rowid,))
    conn.commit()
def delete_check_status_row_by_rowid(conn, rowid):
    """Delete the check_status row with the given rowid and commit.

    BUG FIX: single `rowid=?` with a one-element tuple instead of the
    duplicated-parameter workaround.
    """
    cur = conn.cursor()
    cur.execute('DELETE FROM check_status WHERE rowid=?;', (rowid,))
    conn.commit()
def print_table(conn):
    """Dump both tables to stdout for debugging.

    :param conn: open database connection
    :return: None
    """
    # All data rows, newest load date first.
    sql = ''' SELECT * from check_object ORDER BY load_date DESC;'''
    cur = conn.cursor()
    cur.execute(sql)
    rows = cur.fetchall()
    print("check_object:")
    for row in rows:
        print(row)
    # All aggregate rows, followed by their count.
    sql = ''' SELECT * from check_status;'''
    cur.execute(sql)
    rows = cur.fetchall()
    print("check_status:")
    for row in rows:
        print(row)
    cur.execute('''SELECT count(*) from check_status;''')
    count = cur.fetchone()[0]
    print(count)
def drop_object_table(conn):
    """Drop the check_object table; a no-op when it does not exist."""
    conn.cursor().execute(''' DROP TABLE IF EXISTS check_object; ''')
def drop_status_table(conn):
    """Drop the check_status table; a no-op when it does not exist."""
    conn.cursor().execute(''' DROP TABLE IF EXISTS check_status; ''')
def get_next_day(load_data):
    """Return the day after *load_data* ("YYYY-MM-DD") as a string."""
    parsed = datetime.datetime.strptime(load_data, "%Y-%m-%d")
    return (parsed + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
def calculate_status_values_in_check_object_table(conn, load_data, next_day):
    """Recompute, directly from check_object, the aggregate tuple that the
    check_status table should hold for *load_data* (rows in the half-open
    range [load_data, next_day)). The tuple matches check_status's columns."""
    return (
        load_data,
        count_non_unique_id_int(conn, load_data, next_day),
        get_count(conn, load_data, next_day),
        get_null_count(conn, load_data, next_day),
        get_z0_count(conn, load_data, next_day),
        get_int_avg(conn, load_data, next_day),
        get_float_avg(conn, load_data, next_day),
        get_date_avg(conn, load_data, next_day),
    )
# next day is for performance
def add_day_status_row(conn, load_data):
    """Compute the aggregates for *load_data* from check_object and store
    them as a new check_status row.

    :param conn: open database connection
    :param load_data: load date string "YYYY-MM-DD"
    :return: rowid of the inserted check_status row
    """
    # next_day bounds the scan to one day's rows (performance).
    next_day = get_next_day(load_data)
    row = calculate_status_values_in_check_object_table(conn, load_data, next_day)
    rowid = insert_new_row_status(conn, row)
    conn.commit()
    #print_table(conn)
    return rowid
# 1. ID is intentionally non-unique. So ID is NOT a primary key
# 2. We won't calculate avg for VARCHAR as we don't know how to interpret it
def main():
    """Smoke-test entry point: create the tables, insert two sample rows for
    one load date, and record that day's aggregates."""
    # create a database connection
    conn = create_connection(database)
    if conn is not None:
        # create projects table
        # create_table(conn, sql_create_projects_table)
        # create tasks table
        #create_table(conn, sql_create_tasks_table)
        create_table(conn, sql_create_main_table)
        load_data = '2015-02-09'
        row = (load_data, randint(0, 1000000), 1, 2.0, "hi", '2013-01-05')
        insert_new_row(conn, row)
        # Second row has a NULL int_value to exercise the NULL counting.
        row = (load_data, randint(0, 1000000), None, 10, "1hi", '2017-01-05')
        insert_new_row(conn, row)
        create_table(conn, sql_create_status_table)
        add_day_status_row(conn, load_data)
        conn.close()
    else:
        print("Error! cannot create the database connection.")

if __name__ == '__main__':
    main()
| {"/db_test.py": ["/db_quality_main.py"], "/generate_db.py": ["/db_quality_main.py"], "/sanity_test.py": ["/db_quality_main.py"]} |
67,934 | welld7/db_quality | refs/heads/master | /sanity_test.py | #from random import randint
import pytest
from db_quality_main import *
# Shared fixture data: every sanity test writes its rows under load_date1.
load_date1 = '2017-01-05'
some_date2 = '2013-01-05'
some_sting = "hi"  # NOTE(review): name has a typo ("sting"); kept as-is since tests use it.
@pytest.fixture()
def handle_connection():
    """Yield a connection to the scratch test database; close it afterwards."""
    conn = create_connection(database_tmp)
    yield conn
    conn.close()
@pytest.fixture()
def handle_connection_and_drop():
    """Yield a connection to the scratch DB with freshly emptied tables;
    drop both tables on teardown so each test starts from nothing."""
    conn = create_connection(database_tmp)
    # id:non-unique
    create_table(conn, sql_create_main_table)
    create_table(conn, sql_create_status_table)
    conn.commit()
    cur = conn.cursor()
    #delete rows in case something has already been there
    cur.execute(''' delete from check_status; ''')
    cur.execute(''' delete from check_object; ''')
    conn.commit()
    yield conn
    drop_object_table(conn)
    drop_status_table(conn)
    conn.close()
@pytest.mark.sanity
@pytest.mark.parametrize("int_input", [3, 18, 1029])
def test_avg_int_one_date(handle_connection_and_drop, int_input):
    """Rows with int values 0, n, 2n must yield a recorded int average of n."""
    conn = handle_connection_and_drop
    row1 = (load_date1, randint(0, 1000000), 0, 2.0, some_sting, some_date2)
    insert_new_row(conn, row1)
    row2 = (load_date1, randint(0, 1000000), int_input, 2.0, some_sting, some_date2)
    insert_new_row(conn, row2)
    row3 = (load_date1, randint(0, 1000000), 2*int_input, 2.0, some_sting, some_date2)
    insert_new_row(conn, row3)
    # we don't know what's going on in add_day_status_row, so we'll check
    # the avg value in db by id
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    assert int_input == get_int_avg_in_status_table_by_rowid(conn, rowid_inserted)
@pytest.mark.sanity
@pytest.mark.parametrize("int_input", [1,2])
def test_avg_int_different_load_dates(handle_connection_and_drop, int_input):
    """A large row under a DIFFERENT load date must not skew the day's average."""
    conn = handle_connection_and_drop
    row1 = (load_date1, randint(0, 1000000), 0, 2.0, some_sting, some_date2)
    insert_new_row(conn, row1)
    row2 = (load_date1, randint(0, 1000000), int_input, 2.0, some_sting, some_date2)
    insert_new_row(conn, row2)
    row3 = (load_date1, randint(0, 1000000), 2 * int_input, 2.0, some_sting, some_date2)
    insert_new_row(conn, row3)
    # Outlier row on the previous day — should be excluded from the average.
    row4 = ('2017-01-04', randint(0, 1000000), 1000000, 2.0, some_sting, some_date2)
    insert_new_row(conn, row4)
    #the avg_int isn't changed by the other day row
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    assert int_input == get_int_avg_in_status_table_by_rowid(conn, rowid_inserted)
@pytest.mark.sanity
@pytest.mark.parametrize("float_input", [-1., 1.7976931348623157e+30, 1029.])
def test_avg_float_one_date(handle_connection_and_drop, float_input):
    """Rows with float values 0, f, 2f must yield a recorded float average of f."""
    conn = handle_connection_and_drop
    row1 = (load_date1, randint(0, 1000000), 0, 0, some_sting, some_date2)
    insert_new_row(conn, row1)
    row2 = (load_date1, randint(0, 1000000), 0, float_input, some_sting, some_date2)
    insert_new_row(conn, row2)
    row3 = (load_date1, randint(0, 1000000), 0, 2*float_input, some_sting, some_date2)
    insert_new_row(conn, row3)
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    assert float_input == get_float_avg_in_status_table_by_rowid(conn, rowid_inserted)
#TODO
@pytest.mark.sanity
@pytest.mark.skip
def test_avg_float_different_load_dates(handle_connection_and_drop, float_input):
    """Placeholder — NOTE(review): float_input has no parametrize decorator,
    so this will error as a missing fixture if the skip is ever removed."""
    pass
@pytest.mark.sanity
@pytest.mark.parametrize("date", ['2020-12-1', '1970-2-28', '1000-10-10'])
def test_avg_date_one_ld_date(handle_connection_and_drop, date):
    """Three rows with the same date must average back to that date's year
    (only the year portion is compared — see the FIXME below)."""
    conn = handle_connection_and_drop
    row1 = (load_date1, randint(0, 1000000), 0, 2.0, some_sting, date)
    insert_new_row(conn, row1)
    row2 = (load_date1, randint(0, 1000000), 0, 2.0, some_sting, date)
    insert_new_row(conn, row2)
    row3 = (load_date1, randint(0, 1000000), 0, 2.0, some_sting, date)
    insert_new_row(conn, row3)
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    #FIXME int() is a simplification
    assert int(date[:4]) == int(get_date_avg_in_status_table_by_rowid(conn, rowid_inserted))
@pytest.mark.sanity
@pytest.mark.parametrize("number_of_rows", [0, 1, 300])
def test_count(handle_connection_and_drop, number_of_rows):
    """get_count must report exactly the number of rows inserted for the day."""
    conn = handle_connection_and_drop
    # NOTE(review): if the fixture returns None this test silently passes
    # without asserting anything — consider asserting conn is not None.
    if conn is not None:
        day = load_date1
        for _ in range(number_of_rows):
            row = (day, 0, 0, 0, "", day)
            insert_new_row(conn, row)
        assert number_of_rows == get_count(conn, day, get_next_day(day))
@pytest.mark.sanity
@pytest.mark.parametrize("rows_x3_input", [2, 118])
def test_z0_count_one_date(handle_connection_and_drop, rows_x3_input):
    """Each iteration inserts 3 zero values (int 0 + float 0, then int 0),
    so the recorded zero count must be 3 * rows_x3_input."""
    conn = handle_connection_and_drop
    for _ in range (rows_x3_input):
        row1 = (load_date1, randint(0, 1000000), 0, 0, some_sting, some_date2)
        insert_new_row(conn, row1)
        row2 = (load_date1, randint(0, 1000000), 0, 1.0, some_sting, some_date2)
        insert_new_row(conn, row2)
    rowid_inserted = add_day_status_row(conn, load_date1)
    assert rows_x3_input * 3 == get_z0_count_in_status_table_by_rowid(conn, rowid_inserted)
@pytest.mark.sanity
@pytest.mark.parametrize("rows_x3_input", [12, 99])
def test_null_count_one_date(handle_connection_and_drop, rows_x3_input):
    """Each iteration inserts 3 NULLs (one char, then char + date), so the
    recorded NULL count must be 3 * rows_x3_input."""
    conn = handle_connection_and_drop
    for _ in range (rows_x3_input):
        row1 = (load_date1, randint(0, 1000000), 0, 0, None, some_date2)
        insert_new_row(conn, row1)
        row2 = (load_date1, randint(0, 1000000), 0, 0, None, None)
        insert_new_row(conn, row2)
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    assert rows_x3_input * 3 == get_null_count_in_status_table_by_rowid(conn, rowid_inserted)
@pytest.mark.sanity
def test_non_unique_one_date(handle_connection_and_drop):
    """Two rows sharing the same (id, int_value) pair must be reported as
    exactly one non-unique combination for the day.

    BUG FIX: the function previously declared an ``int_input`` parameter
    without any matching @pytest.mark.parametrize, so pytest errored with
    "fixture 'int_input' not found"; the unused parameter is removed.
    """
    conn = handle_connection_and_drop
    row1 = (load_date1, 0, 0, 2.0, some_sting, some_date2)
    insert_new_row(conn, row1)
    row2 = (load_date1, 0, 0, 2.0, some_sting, some_date2)
    insert_new_row(conn, row2)
    # we don't know what's going on in add_day_status_row, so we'll check
    # the stored value in the db by the returned rowid
    rowid_inserted = add_day_status_row(conn, load_date1)
    #print_table(conn)
    assert 1 == get_non_unique_id_int_in_status_table_by_rowid(conn, rowid_inserted)
#TODO: add more tests for a different ld_date | {"/db_test.py": ["/db_quality_main.py"], "/generate_db.py": ["/db_quality_main.py"], "/sanity_test.py": ["/db_quality_main.py"]} |
67,969 | alperencesur/itucsdb1943 | refs/heads/master | /classes/veteriner.py | class Veteriner:
    def __init__ (self, vetid, address, district, serviceRate, priceRate, telephone, overallScore, vetName, voteNum, cityName):
        """Plain data holder for one veterinarian record; all arguments are
        stored unchanged on the instance."""
        self.vetid = vetid
        self.address = address
        self.district = district
        self.serviceRate = serviceRate
        self.priceRate = priceRate
        self.telephone = telephone
        self.overallScore = overallScore
        self.vetName = vetName
        self.voteNum = voteNum
        self.cityName = cityName
67,970 | alperencesur/itucsdb1943 | refs/heads/master | /classes/Users.py | import psycopg2 as dbapi2
from flask import current_app
from flask_login import UserMixin
url = "postgres://rgkksygg:BO8pGAZa6BqFR84mF43EMNNljm3jRnM5@rogue.db.elephantsql.com:5432/rgkksygg"
class Users(UserMixin):
    """Flask-Login user model backed by the USERS and SOCIALMEDIA tables."""

    def __init__(self, id, name, surname, username, isVet, password, facebookLink, twitterLink, youtubeLink, instagramLink, websiteLink, registerTime, photoURL):
        self.id = id
        self.name = name
        self.surname = surname
        self.email = username
        # BUG FIX: the isVet argument used to be ignored (self.isVet was
        # hard-coded to False), so vet accounts loaded from the database
        # always lost their vet status.
        self.isVet = isVet
        self.photoURL = photoURL
        self.password = password
        self.isLogin = True
        self.facebookLink = facebookLink
        self.twitterLink = twitterLink
        self.youtubeLink = youtubeLink
        self.instagramLink = instagramLink
        self.websiteLink = websiteLink
        self.registerTime = registerTime
        self.isAdmin = False

    def createUser(self, name, surname, email, photoURL, password, facebookLink, twitterLink, youtubeLink, instagramLink, websiteLink, registerTime, isVet):
        """Overwrite this object's fields with the given account data.

        NOTE(review): the trailing SELECT on POST looks like leftover debug
        code — it does not persist the user; verify before relying on it.
        """
        self.name = name
        self.surname = surname
        self.email = email
        self.photoURL = photoURL
        self.password = password
        self.facebookLink = facebookLink
        self.twitterLink = twitterLink
        self.youtubeLink = youtubeLink
        self.instagramLink = instagramLink
        self.websiteLink = websiteLink
        self.registerTime = registerTime
        self.isLogin = True
        if isVet == True:
            self.isVet = True
        else:
            self.isVet = False
        with dbapi2.connect(url) as connection:
            cursor = connection.cursor()
            statement = """SELECT * FROM POST """
            cursor.execute(statement)
            print(cursor.fetchone())

    @property
    def is_authenticated(self):
        # Every constructed Users object represents an authenticated account.
        return True

    @property
    def is_anonymous(self):
        return False

    @property
    def is_active(self):
        return True
def get_user(id):
    """Load a Users object by e-mail address; return None if no such user.

    SECURITY FIX: the e-mail comes straight from the login form and was
    previously interpolated into the SQL via str.format — a textbook SQL
    injection. Both queries are now parameterized.
    """
    with dbapi2.connect(url) as connection:
        cursor = connection.cursor()
        cursor.execute("SELECT PASSWORD FROM USERS WHERE EMAIL = %s", (id,))
        db = cursor.fetchone()
        if db is None:
            return None
        password = db[0]
        cursor.execute(
            "select userid,name,surname,email,isvet,facebook,twitter,youtube,instagram,website,registerdate,photo "
            "from users left join socialmedia on users.userid = socialmedia.ownerid where email = %s",
            (id,))
        db2 = cursor.fetchone()
        return Users(db2[0], db2[1], db2[2], db2[3], db2[4], password,
                     db2[5], db2[6], db2[7], db2[8], db2[9], db2[10], db2[11])
67,971 | alperencesur/itucsdb1943 | refs/heads/master | /classes/Database.py | # coding=utf-8
import os
from classes.post import Post
from classes.comment import Comment
from classes.foundation import Foundation
from classes.blog import Blog
from classes.notices import Notice
import psycopg2 as dbapi2
from classes.veteriner import Veteriner
from classes.rate import *
from classes.foundationcontact import *
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
from classes.Notification import *
from classes.Profile import *
from flask import session
try:
from urllib.parse import urlparse as up
except ImportError:
from urlparse import urlparse as up
url = "postgres://rgkksygg:BO8pGAZa6BqFR84mF43EMNNljm3jRnM5@rogue.db.elephantsql.com:5432/rgkksygg"
class Database:
    def __init__(self, url):
        """Store the connection URL and set up in-memory bookkeeping maps."""
        self.url = url
        self.posts = {}
        self.last_post_key = 0
        self.foundations = {}
        self._last_foundation_key = 0
        self.blogs = {}
        # NOTE(review): both _last_blog_key and last_blog_key are initialized;
        # one is probably redundant — verify call sites before removing either.
        self._last_blog_key = 0
        self.last_blog_key = 0
def add_post(self, post):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
if post.posttag is None and post.description is None:
query = """INSERT INTO Post(USERID, POSTDATE, PHOTOURL, TITLE ) VALUES ('{0}','{1}','{2}','{3}' );""".format(post.userid,post.postdate,post.photo, post.title)
elif post.posttag is None:
query = """INSERT INTO Post(USERID, POSTDATE, PHOTOURL, DESCRIPTION, TITLE ) VALUES ('{0}','{1}','{2}','{3}', '{4}' );""".format(post.userid,post.postdate,post.photo, post.description, post.title)
elif post.description is None:
query = """INSERT INTO Post(USERID, POSTDATE, PHOTOURL, TITLE, POSTTAG ) VALUES ('{0}','{1}','{2}','{3}', '{4}' );""".format(post.userid,post.postdate,post.photo, post.title, post.posttag)
else:
query = """INSERT INTO Post(USERID, POSTDATE, PHOTOURL, DESCRIPTION, TITLE, POSTTAG ) VALUES ('{0}','{1}','{2}','{3}', '{4}', '{5}' );""".format(post.userid,post.postdate,post.photo, post.description, post.title, post.posttag)
cursor.execute(query)
connection.commit()
statement = """ SELECT POSTID FROM POST WHERE ( USERID = %s) AND (PHOTOURL = %s) AND (TITLE = %s) AND (POSTDATE = %s) """
cursor.execute(statement, (post.userid, post.photo, post.title, post.postdate))
obj = cursor.fetchone()
post_key = obj[0]
# self.last_post_key += 1
# self.posts[self.last_post_key] = post
return post_key
def delete_notifications(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM NOTIFICATION WHERE OWNERID = '{0}' OR USERID = '{0}'""".format(userid)
cursor.execute(statement)
def delete_notices(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM NOTICE WHERE USERID = '{0}'""".format(userid)
cursor.execute(statement)
def delete_socialMedia(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM SOCIALMEDIA WHERE OWNERID = '{0}' """.format(userid)
cursor.execute(statement)
def get_post(self,post_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """ SELECT * FROM POST WHERE POSTID = '{0}' """.format(post_key)
cursor.execute(query)
postid,userid,postdate,photourl,description,title,posttag = cursor.fetchone()
post = Post(postid, userid, postdate, photourl, title, description = description, posttag = posttag)
return post
return None
def delete_user_comments(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM COMMENT WHERE USERID = '{0}' """.format(userid)
cursor.execute(statement)
def delete_user_likes(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM LIKES WHERE WHOLIKED = '{0}' """.format(userid)
cursor.execute(statement)
def delete_user_rating(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM RATING WHERE USERID = '{0}' """.format(userid)
cursor.execute(statement)
def delete_post(self,userid):
with dbapi2.connect(self.url) as connection:
posts = []
cursor = connection.cursor()
statement = """SELECT POSTID FROM POST WHERE USERID = '{0}'""".format(userid)
cursor.execute(statement)
for postid in cursor:
self.delete_patigram(postid)
def get_posts(self):
posts = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT * FROM POST ORDER BY POSTDATE"""
cursor.execute(query)
for postid,userid,postdate,photourl,description,title,posttag in cursor:
posts.append((postid , Post(postid, userid, postdate, photourl, title, description = description, posttag = posttag)))
return posts
def delete_patigram(self,postid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ delete from comment
WHERE POSTID = %s;
DELETE FROM LIKES
WHERE POSTID = %s;
DELETE FROM POST
WHERE POSTID = %s;"""
cursor.execute(statement, (postid,postid,postid))
def update_patigram(self,postid,title,description):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ UPDATE POST
SET TITLE = %s,
DESCRIPTION = %s
WHERE (POSTID = %s);"""
cursor.execute(statement,(title, description, postid))
def get_post_user(self,post_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT USERID FROM POST
WHERE (POSTID = %s)"""
cursor.execute(statement,(post_key,))
user_ = cursor.fetchone()
user_ = user_[0]
return user_
def patigram_add_like(self, post_key, userid, date_time):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """INSERT INTO LIKES (POSTID, WHOLIKED, DATE)
VALUES(%s, %s, %s);"""
cursor.execute(statement, (post_key, userid, date_time))
def patigram_get_like_num(self, postid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ SELECT COUNT(POSTID) FROM LIKES
WHERE POSTID = %s;"""
cursor.execute(statement,(postid,))
likeN = cursor.fetchone()
likeNum = likeN[0]
like = int(likeNum)
return like
def patigram_delete_like(self,postid,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ DELETE FROM LIKES
WHERE(POSTID = %s) AND (WHOLIKED = %s);"""
cursor.execute(statement,(postid,userid))
def patigram_is_user_liked(self, postid, userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT DATE FROM LIKES
WHERE (WHOLIKED = %s) AND (POSTID = %s)"""
cursor.execute(statement,(userid, postid))
date = cursor.fetchone()
# date = date[0]
print(date)
if date is None:
return 0
else:
return 1
def get_notices(self,Lost):
notices = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """select noticeid,notice.userid,users.name,users.surname,animaltype,age,strain,gender,photourl,islost,description,contact,date,place from notice left join users on users.userid = notice.userid ORDER BY DATE"""
cursor.execute(query)
for noticeID,userID,name,surname,animalType,age,strain,gender,photoURL,isLost,description,contact,date,place in cursor:
if isLost == Lost:
notices.append((noticeID,Notice(noticeID,userID,name,surname,animalType,age,strain,gender,photoURL,isLost,description,contact,date,place)))
return notices
def get_notice(self,noticeID):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """select noticeid,notice.userid,users.name,users.surname,animaltype,age,strain,gender,photourl,islost,description,contact,date,place from notice left join users on users.userid = notice.userid where noticeid = '{0}'""".format(noticeID)
cursor.execute(query)
noticeID,userID,name,surname,animalType,age,strain,gender,photoURL,isLost,description,contact,date,place = cursor.fetchone()
notice = Notice(noticeID,userID,name,surname,animalType,age,strain,gender,photoURL,isLost,description,contact,date,place)
return notice
def get_notifications(self):
notifications = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT NOTIFICATION.NOTIFICATIONID,USERS.NAME, USERS.SURNAME, NOTIFICATION.POSTTYPE, NOTIFICATION.NOTIFICATIONTIME, NOTIFICATION.ISSEEN,NOTIFICATION.CONTENT, NOTIFICATION.NOTTYPE,NOTIFICATION.TITLE FROM NOTIFICATION LEFT JOIN USERS ON NOTIFICATION.USERID = USERS.USERID WHERE NOTIFICATION.OWNERID = {0} ORDER BY NOTIFICATIONTIME""".format(session['user_id'])
cursor.execute(query)
for notificationID,name,surname,postType,notificationTime,isSeen,content,notType,title in cursor:
if postType == 1: #Patigram
if notType == 3:
title = ""
else:
if notType == 0:
description = """Your Patigram Post named "{0}" is liked by {1} {2}.""".format(title,name,surname)
elif notType == 1:
description = """Your Patigram Post named "{0}" is commented by {1} {2}.""".format(title,name,surname)
elif notType == 2:
description = """Your Patigram Post named "{0}" is shared successfully.""".format(title)
else:
description = """Your Patigram Post named "{0}" is deleted successfully.""".format(title)
if postType == 3: #Notice
description = """Your Notice named "{0}" is shared successfully""".format(title)
if postType == 0: #Blog
if notType == 0:
description = """Your blog named "{0}" is liked by {1} {2}.""".format(title,name,surname)
elif notType == 2:
description = """Your blog named "{0}" is shared successfully.""".format(title)
print(description)
notifications.append((notificationID,Notificition(notificationID,name,surname,title,notType,notificationTime,isSeen,postType,description,content)))
return notifications
def add_comment(self,Comment):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """INSERT INTO COMMENT(POSTID, USERID, DATE, COMMENT, POSTTYPE) VALUES (%s, %s, %s, %s, %s);"""
cursor.execute(statement, (Comment.postid, Comment.userid, Comment.date, Comment.comment, Comment.posttype))
def add_notification(self,postType,postTitle,notType,userID,ownerID,content,time):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ INSERT INTO NOTIFICATION(TITLE,NOTIFICATIONTIME,USERID,OWNERID,POSTTYPE,NOTTYPE,CONTENT)
VALUES(%s,%s,%s,%s,%s,%s,%s)"""
cursor.execute(statement,(postTitle,time,userID,ownerID,postType,notType,content))
def add_notice(self,title,place,animalType,gender,strain,age,photoUrl,isLost,contact,date,userID):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ INSERT INTO NOTICE(USERID,ANIMALTYPE,AGE,STRAIN,GENDER,PHOTOURL,ISLOST,DESCRIPTION,CONTACT,DATE,PLACE)
VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
cursor.execute(statement,(userID,animalType,age,strain,gender,photoUrl,isLost,title,contact,date,place))
self.add_notification(3,title,2,userID,userID,"",date)
def get_comments(self, posttype, postid):
comments = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT USERS.NAME, COMMENT.USERID,USERS.SURNAME,COMMENT.COMMENT FROM COMMENT JOIN USERS
ON (COMMENT.USERID = USERS.USERID)
WHERE (POSTTYPE = %s) AND (POSTID = %s)
ORDER BY COMMENTID DESC;"""
cursor.execute(statement,(posttype,postid))
connection.commit()
for name, userid, surname, comment in cursor:
comments.append({"name": name, "userid":userid, "surname": surname, "comment": comment})
return comments
def add_foundation(self, foundation):
with dbapi2.connect(self.url) as connection:
cursor = connection. cursor()
statement = """INSERT INTO FOUNDATIONCONTACT ( FACEBOOK, TWITTER, INSTAGRAM, WEBSITE)
VALUES ( %s,%s,%s,%s);
"""
cursor.execute(statement, (foundation.facebook, foundation.twitter, foundation.instagram, foundation.website))
query = """SELECT FOUNDID FROM FOUNDATIONCONTACT WHERE (FACEBOOK = %s)"""
cursor.execute(query, (foundation.facebook,))
nowid = cursor.fetchone()
nowid = nowid[0]
query = """INSERT INTO FOUNDATION (FOUNDID,PHOTO, DONATIONURL, ABOUT, FOUNDNAME, ADDRESS)
VALUES (%s, %s,%s,%s,%s,%s);
"""
cursor.execute(query, (nowid, foundation.photo, foundation.donationurl, foundation.about, foundation.foundname, foundation.address))
foundation_key = cursor.lastrowid
return foundation_key
def delete_foundation(self, foundation_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """DELETE FROM FOUNDATION WHERE FOUNDID = '{0}'""".format(foundation_key)
cursor.execute(query)
connection.commit()
def get_foundation(self, foundation_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT FOUNDATION.FOUNDID, PHOTO, DONATIONURL, ABOUT, FOUNDNAME, ADDRESS, FACEBOOK, TWITTER, INSTAGRAM, WEBSITE FROM FOUNDATION LEFT JOIN FOUNDATIONCONTACT ON (FOUNDATION.FOUNDID = FOUNDATIONCONTACT.FOUNDID) WHERE (FOUNDATION.FOUNDID = %s)"""
cursor.execute(query, (foundation_key,))
foundid, photo, donationurl, about, foundname, address, facebook, twitter, instagram, website = cursor.fetchone()
foundation = Foundation(foundid, photo, donationurl, about, foundname, address, facebook, twitter, instagram, website)
return foundation
return None
def get_foundations(self):
foundations = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT FOUNDATION.FOUNDID, PHOTO, DONATIONURL, ABOUT, FOUNDNAME, ADDRESS, FOUNDATIONCONTACT.FACEBOOK, FOUNDATIONCONTACT.TWITTER, FOUNDATIONCONTACT.INSTAGRAM, FOUNDATIONCONTACT.WEBSITE FROM FOUNDATION LEFT JOIN FOUNDATIONCONTACT
ON (Foundation.FOUNDID = FoundationContact.FOUNDID) """
cursor.execute(query)
connection.commit()
for foundid, photo, donationurl, about, foundname, address, facebook, twitter, instagram, website in cursor:
foundations.append((foundid, Foundation(foundid, photo, donationurl, about, foundname, address, facebook, twitter, instagram,website)))
return foundations
def update_foundation(self, foundid, about, donationurl):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE FOUNDATION
SET ABOUT = %s,
DONATIONURL = %s
WHERE (FOUNDID =%s);
"""
cursor.execute(statement, (about,donationurl, foundid))
connection.commit()
def add_blog(self, blog):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """INSERT INTO BLOG (USERID, BLOGTAG, TITLE, TEXT, LIKENUMBER, DISLIKENUMBER, PHOTO,POSTDATE) VALUES (%s,%s,%s,%s,%s,%s,%s,%s);"""
cursor.execute(query, (blog.userid, blog.blogtag, blog.title, blog.text, blog.likeNum, blog.dislikeNum, blog.photo, blog.postdate))
connection.commit()
blog_key = cursor.lastrowid
return blog_key
def delete_blog(self, blog_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """DELETE FROM BLOG WHERE BLOGID = '{0}' """.format(blog_key)
cursor.execute(query)
connection.commit()
def blog_like(self, blog_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE BLOG
SET LIKENUMBER = LIKENUMBER + 1
WHERE (BLOGID=%s)"""
cursor.execute(statement, (blog_key,))
connection.commit()
def blog_dislike(self, blog_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE BLOG
SET DISLIKENUMBER = DISLIKENUMBER + 1
WHERE (BLOGID=%s)"""
cursor.execute(statement, (blog_key,))
connection.commit()
def get_cats(self):
catblogs = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM BLOG WHERE BLOG.BLOGTAG = 'Cat'"""
cursor.execute(statement)
for blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate in cursor:
catblogs.append((blogid, Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)))
return catblogs
def get_dogs(self):
dogblogs = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM BLOG WHERE BLOG.BLOGTAG = 'Dog'"""
cursor.execute(statement)
for blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate in cursor:
dogblogs.append((blogid, Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)))
return dogblogs
def get_birds(self):
birdblogs = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM BLOG WHERE BLOG.BLOGTAG = 'Bird'"""
cursor.execute(statement)
for blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate in cursor:
birdblogs.append((blogid, Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)))
return birdblogs
def get_other(self):
otherblogs = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM BLOG WHERE BLOG.BLOGTAG = 'Other'"""
cursor.execute(statement)
for blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate in cursor:
otherblogs.append((blogid, Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)))
return otherblogs
def get_blog(self, blog_key):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT * FROM BLOG WHERE BLOGID = '{0}' """.format(blog_key)
cursor.execute(query)
blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate = cursor.fetchone()
blog = Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)
return blog
return None
def get_blogs(self):
blogs = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
query = """SELECT * FROM BLOG ORDER BY POSTDATE"""
cursor.execute(query)
for blogid,userid,blogtag,title,text,likeNum,dislikeNum,photo,postdate in cursor:
blogs.append((blogid, Blog(blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate)))
return blogs
def update_blog(self, blogid, title, blogtag, text):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE BLOG
SET TITLE = %s,
BLOGTAG = %s,
TEXT = %s
WHERE BLOGID = %s;"""
cursor.execute(statement, (title, blogtag, text, blogid))
def create_initial_vets(self):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Çeliktepe mah. Münir Kemal cd. no:38', 'Kağıthane', '02425676755', 'Çeliktepe Pati Veteriner', 34 );
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Cikcilli mah. Gümüşler cd. no:52', 'Alanya', '02125152610', 'Cikcilli Veteriner', 7);
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Gürsel mah. Komşu cd. no:95','Kağıthane', '02127656578', 'Patisever Veteriner', 34);
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Yıldız mah. Abdülhamit cd. no:39', 'Beşiktaş','02128979908', 'Yıldız Veteriner', 34);
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Saray mah. Mehmet Çavuş sk. no:10','Alanya', '024253979828','Alaiye Veteriner', 7);
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Kırcalı mah. Şehzade sk. no:33', 'Merkez', '03585698005', 'Şehzade Pati Veteriner', 5 );
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Hastane mah. Düzaltı cd. no:2', 'Meram', '06473849516', 'Meram Patileri Veteriner', 42);
INSERT INTO Vet(ADDRESS, DISTRICT, TELEPHONE, VETNAME, CITYID) VALUES ('Merkez mah. Kaptan Ali cd. no:61','Ortahisar', '06147904544', 'Mavi Bordo Veteriner', 61);"""
cursor.execute(statement)
connection.commit()
def create_initial_cities(self):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """INSERT INTO CITY VALUES(1, 'Adana');
INSERT INTO CITY VALUES(7, 'Antalya');
INSERT INTO CITY VALUES(34, 'İstanbul');
INSERT INTO CITY VALUES(35, 'İzmir');
INSERT INTO CITY VALUES(5, 'Amasya');
INSERT INTO CITY VALUES(61, 'Trabzon');
INSERT INTO CITY VALUES(43, 'Kütahya');
INSERT INTO CITY VALUES(42, 'Konya');
INSERT INTO CITY VALUES(6, 'Ankara');
INSERT INTO CITY VALUES(10, 'Bursa');"""
cursor.execute(statement)
connection.commit()
def get_vet_cities(self):
with dbapi2.connect(self.url) as connection:
cities = []
cursor = connection.cursor()
statement = """SELECT DISTINCT CITY.CITYID, CITY.CITYNAME FROM VET LEFT JOIN CITY
ON (VET.CITYID = CITY.CITYID)
ORDER BY CITY.CITYID ASC;"""
cursor.execute(statement)
connection.commit()
for cityid, city_name in cursor:
cities.append((cityid, city_name))
return cities
def get_cityname(self, cityid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT CITYNAME FROM CITY
WHERE (CITYID = %s)"""
cursor.execute(statement, (cityid,))
connection.commit()
city_name = cursor.fetchone()
city_name = city_name[0]
return city_name
def get_user_detail(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """select name,surname,email,isvet,facebook,twitter,youtube,instagram,website,registerdate,photo from users left join socialmedia on users.userid = socialmedia.ownerid where userid = '{0}'""".format(userid)
cursor.execute(statement)
db = cursor.fetchone()
user = Profile(db[0],db[1],db[2],db[3],db[4],db[5],db[6],db[7],db[8],db[9],db[10])
return user
def notification_seen(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE NOTIFICATION SET ISSEEN = 1 WHERE OWNERID = '{0}' """.format(userid)
cursor.execute(statement)
def update_user_photo(self,userid,url):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE USERS SET PHOTO = '{0}' WHERE USERID = '{1}' """.format(url,userid)
cursor.execute(statement)
def update_notice(self,noticeid,title,date):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE NOTICE SET DESCRIPTION = '{0}', DATE = '{1}' WHERE NOTICEID = '{2}' """.format(title,date,noticeid)
cursor.execute(statement)
def delete_vet(self,vet_id):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ delete from rating where (vetid = %s);
delete from vet where (vetid = %s);"""
cursor.execute(statement, (vet_id, vet_id))
connection.commit()
def get_vets(self):
with dbapi2.connect(self.url) as connection:
vets = []
cursor = connection.cursor()
statement = """ SELECT VETID,DISTRICT,VETNAME, OVERALLSCORE, VOTENUM, CITY.CITYNAME FROM VET LEFT JOIN CITY
ON (VET.CITYID = CITY.CITYID)"""
cursor.execute(statement)
connection.commit()
for vetid,district, vetname, score, votenum, cityname in cursor:
vets.append({ "vetid":vetid, "vetname":vetname, "district": district, "cityname":cityname,"score": score, "votenum":votenum})
return vets
def get_selected_vets(self, selectedid):
with dbapi2.connect(self.url) as connection:
vets = []
cursor = connection.cursor()
statement = """ SELECT VETID,DISTRICT,VETNAME, OVERALLSCORE, VOTENUM, CITY.CITYNAME FROM VET LEFT JOIN CITY
ON (VET.CITYID = CITY.CITYID)
WHERE ( VET.CITYID = %s) """
cursor.execute(statement,(selectedid,))
connection.commit()
for vetid,district, vetname, score, votenum, cityname in cursor:
vets.append({ "vetid":vetid, "vetname":vetname, "district": district, "cityname":cityname,"score": score, "votenum":votenum})
return vets
def get_vet(self, vetid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ SELECT VETID,ADDRESS,DISTRICT,SERVICERATE, PRICERATE, TELEPHONE, OVERALLSCORE, VETNAME, VOTENUM, CITYNAME
FROM VET LEFT JOIN CITY
ON(VET.CITYID = CITY.CITYID)
WHERE (VETID = %s)"""
cursor.execute(statement,(vetid,))
connection.commit()
vetid, address, district, servicerate, pricerate, telephone, overallscore, vetname, votenum, cityname = cursor.fetchone()
print("oddddd %s",cityname)
vet = Veteriner(vetid, address, district, servicerate, pricerate, telephone, overallscore, vetname, votenum, cityname)
print(vet.vetName)
return vet
return None
def delete_rate(self, userid, vetid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """ SELECT OVERALLSCORE, PRICERATE, SERVICERATE FROM RATING
WHERE (USERID = %s) AND (VETID = %s)"""
cursor.execute(statement,(userid, vetid))
overall, price, service = cursor.fetchone()
statement = """DELETE FROM RATING
WHERE (USERID = %s) AND (VETID = %s);"""
cursor.execute(statement,(userid, vetid))
statement=""" SELECT VOTENUM FROM VET
WHERE (VETID = %s)"""
cursor.execute(statement,(vetid,))
voteN = cursor.fetchone()
vote = voteN[0]
vot = int(vote)
print(vot)
if vot is 1:
statement = """ UPDATE VET
SET OVERALLSCORE = 0,
PRICERATE = 0,
SERVICERATE = 0,
VOTENUM = 0
WHERE (VETID = %s);"""
cursor.execute(statement,(vetid,))
else:
statement = """ UPDATE Vet
SET OVERALLSCORE = ((OVERALLSCORE * VOTENUM) - %s) / (VOTENUM-1),
PRICERATE = ((PRICERATE * VOTENUM) - %s) / (VOTENUM-1),
SERVICERATE = ((SERVICERATE * Vet.VOTENUM) - %s) / (VOTENUM-1),
VOTENUM = VOTENUM - 1
WHERE (VETID = %s);"""
cursor.execute(statement, (overall, price, service, vetid))
def add_rate(self, rate):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT * FROM RATING
WHERE (USERID = %s) AND (VETID = %s)"""
cursor.execute(statement,(rate.userid, rate.vetid))
if cursor.fetchone() is not None:
print("none değilmiş")
self.delete_rate(rate.userid, rate.vetid)
statement = """INSERT INTO Rating(USERID, VETID, OVERALLSCORE, PRICERATE, SERVICERATE, COMMENT, DATE, TITLE)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s);"""
cursor.execute(statement,(rate.userid, rate.vetid, rate.overallScore, rate.priceRate, rate.serviceRate, rate.comment, rate.date, rate.title))
#Scores must be updated
statement = """ UPDATE Vet
SET OVERALLSCORE = ((OVERALLSCORE * VOTENUM) + (%s)) / (VOTENUM+1),
PRICERATE = ((PRICERATE * VOTENUM) + (%s)) / (VOTENUM+1),
SERVICERATE = ((SERVICERATE * VOTENUM) + (%s)) / (VOTENUM+1),
VOTENUM = VOTENUM + 1
WHERE (VETID = %s);"""
cursor.execute(statement, (rate.overallScore, rate.priceRate, rate.serviceRate, rate.vetid))
def get_user_name(self, userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT name, surname FROM USERS
WHERE (USERID = %s)"""
cursor.execute(statement, (userid,))
name, surname = cursor.fetchone()
user_ = name + " " + surname
return user_
def get_rates(self,vetid):
rates = []
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """SELECT rating.userid, rateid, name, surname, vetid, overallScore, priceRate, serviceRate, comment, date, title FROM RATING LEFT JOIN USERS
ON (RATING.USERID = USERS.USERID)
WHERE (VETID = %s)"""
cursor.execute(statement, (vetid,))
for userid, rateid, name, surname, vetid, overallScore, priceRate, serviceRate, comment, date, title in cursor:
user = name + " " + surname
rates.append((userid, (Rate(rateid, user, vetid, overallScore, priceRate, serviceRate, comment, title, date))))
return rates
def update_rating(self,vetid,userid,comment,date):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """UPDATE RATING
SET COMMENT = %s,
DATE = %s
WHERE (USERID = %s) AND (VETID = %s);"""
cursor.execute(statement,(comment, date, userid, vetid))
connection.commit()
def delete_user(self,userid):
with dbapi2.connect(self.url) as connection:
cursor = connection.cursor()
statement = """DELETE FROM USERS WHERE USERID = '{0}'""".format(userid)
cursor.execute(statement)
if __name__ == "__main__":
    # Smoke-test the database connection when run as a script.
    # Fixes: the original did os.environ[<full url>] -- it used the URL
    # itself as the environment-variable NAME, guaranteeing a KeyError --
    # and then called up.urlparse(...) although `up` IS urlparse (imported
    # via `from urllib.parse import urlparse as up`), an AttributeError.
    # Read the conventional DATABASE_URL variable, falling back to the
    # module-level url constant.
    print("geldik buralara3")
    url = up(os.environ.get("DATABASE_URL", url))
    conn = dbapi2.connect(database=url.path[1:],
                          user=url.username,
                          password=url.password,
                          host=url.hostname,
                          port=url.port
                          )
67,972 | alperencesur/itucsdb1943 | refs/heads/master | /server.py | # coding=utf-8
import os
import sys
from datetime import datetime as dt
from os.path import dirname, join, realpath
import psycopg2 as dbapi2
from flask import (Blueprint, Flask, current_app, flash, redirect,
render_template, request, session, url_for)
from flask_login import (LoginManager, current_user, login_required,
login_user, logout_user)
from passlib.apps import custom_app_context as pwd_context
from passlib.hash import pbkdf2_sha256 as hasher
from werkzeug.utils import secure_filename
from classes.comment import *
from classes.Database import Database
from classes.forms import *
from classes.post import *
from classes.rate import *
from classes.Users import *
from views import site
from datetime import datetime as dt
from datetime import datetime
from classes.blog import *
from classes.foundation import *
from classes.foundationcontact import *
try:
from urllib.parse import urlparse as up
except ImportError:
from urlparse import urlparse as up
now = datetime.now()
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
#For uploading photo
UPLOAD_FOLDER = join(dirname(realpath(__file__)), 'static/patigram')
ALLOWED_EXTENSIONS = { 'png', 'jpg', 'jpeg', 'gif'}
UPLOAD_FOLDER_NOTICE = join(dirname(realpath(__file__)), 'static/notice')
UPLOAD_FOLDER_BLOG = join(dirname(realpath(__file__)), 'static/blog')
UPLOAD_FOLDER_FOUNDATION = join(dirname(realpath(__file__)), 'static/foundation')
app = Flask(__name__)
# NOTE(review): hard-coded session secret -- should come from configuration.
app.secret_key = 'super secret key'
app.register_blueprint(site)
# Upload destinations for the photo-upload features (patigram/notice/blog/foundation).
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['UPLOAD_FOLDER_NOTICE'] = UPLOAD_FOLDER_NOTICE
app.config['UPLOAD_FOLDER_BLOG'] = UPLOAD_FOLDER_BLOG
app.config['UPLOAD_FOLDER_FOUNDATION'] = UPLOAD_FOLDER_FOUNDATION
app.app_context()
lm = LoginManager()
@lm.user_loader
def load_user(id):
    """Flask-Login callback: resolve a stored user id back to a user object."""
    return get_user(id)
# NOTE(review): database credentials are hard-coded here (and in
# classes/Database.py) -- move them to environment/config.
url = "postgres://rgkksygg:BO8pGAZa6BqFR84mF43EMNNljm3jRnM5@rogue.db.elephantsql.com:5432/rgkksygg"
db = Database(url)
app.config["db"] = db
@app.route("/delete")
def delete_user():
    """Delete the logged-in user's account and everything it owns.

    Child rows (notifications, notices, social media, comments, likes,
    posts, ratings) are removed before the USERS row itself -- presumably
    to satisfy foreign-key constraints (TODO confirm against the schema).
    Finally the session is logged out and the browser redirected.
    """
    db.delete_notifications(session['user_id'])
    db.delete_notices(session['user_id'])
    db.delete_socialMedia(session['user_id'])
    db.delete_user_comments(session['user_id'])
    db.delete_user_likes(session['user_id'])
    db.delete_post(session['user_id'])
    db.delete_user_rating(session['user_id'])
    db.delete_user(session['user_id'])
    session['logged_in'] = False
    next_page = request.args.get("next", url_for("home_page"))
    return redirect(next_page)
@app.route("/")
def home_page():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/login", methods=['GET','POST'])
def login_page():
    """Render the login form (GET) or authenticate the posted credentials
    (POST) and start a session."""
    if request.method == "GET":
        return render_template("login.html")
    else:
        form = request.form
        username = form['username']
        password = form['password']
        user = get_user(username)
        if user is not None:
            # passlib verify(secret, hash): check the submitted password
            # against the stored pbkdf2-sha256 hash.
            if hasher.verify(password, user.password):
                session['logged_in'] = True
                session['user_id'] = user.id
                # NOTE(review): leftover debug print of the user id.
                print(session['user_id'])
                flash("You have logged in.")
                next_page = request.args.get("next", url_for("home_page"))
                return redirect(next_page)
            else:
                print("you cant logged")
                flash("You cant logged in.")
                return render_template("login.html",message="You entered wrong password! Try Again")
        else:
            return render_template("login.html",message="User cannot be found. If you don't have an account, you can register")
@app.route("/register", methods=['GET','POST'])
def register_page():
    """Render the registration form (GET) or create the user plus its
    social-media row (POST), then redirect to the login page.

    Fixes:
    * the email lookup and the SOCIALMEDIA insert are parameterized (they
      were str.format-ed into the SQL -> injectable);
    * the bare ``except:`` around the user insert now catches Exception
      only, so SystemExit/KeyboardInterrupt are not swallowed;
    * the register date uses the current time instead of the module-level
      ``now`` that was frozen at server start.
    """
    if request.method == "GET":
        return render_template("register.html")
    form = request.form
    name = form['name']
    surname = form['surname']
    email = form['email']
    password = form['password']
    hashed = hasher.hash(password)  # pbkdf2-sha256; never store plaintext
    facebook = form['facebook']
    twitter = form['twitter']
    instagram = form['instagram']
    youtube = form['youtube']
    website = form['website']
    # Checkbox field: present in the form only when ticked.
    isVet = 1 if form.get('isVet') else 0
    photoUrl = form['ck2']
    registerTime = dt.now().strftime("%d/%m/%y %H:%M:%S")
    with dbapi2.connect(url) as connection:
        cursor = connection.cursor()
        try:
            statement = """INSERT INTO Users(NAME, SURNAME, EMAIL,ISVET,PASSWORD,PHOTO,REGISTERDATE)
                        VALUES (%s,%s,%s,%s,%s,%s,%s); """
            cursor.execute(statement, (name, surname, email, isVet, hashed, photoUrl, registerTime))
        except Exception:
            # Most likely a unique violation on EMAIL.
            return render_template("register.html",message = "The email address is already used!")
    with dbapi2.connect(url) as connection:
        cursor = connection.cursor()
        cursor.execute(""" SELECT USERID FROM USERS WHERE EMAIL = %s """, (email,))
        userid = cursor.fetchone()[0]
    with dbapi2.connect(url) as connection:
        cursor = connection.cursor()
        cursor.execute(
            """ INSERT INTO SOCIALMEDIA(OWNERID,FACEBOOK,TWITTER,INSTAGRAM,YOUTUBE,WEBSITE)
                    VALUES(%s,%s,%s,%s,%s,%s) """,
            (userid, facebook, twitter, instagram, youtube, website))
    return redirect(url_for("login_page"))
@app.route("/logout")
def logout_page():
session['logged_in'] = False
next_page = request.args.get("next", url_for("home_page"))
return redirect(next_page)
@app.route("/post")
def post_page():
return "Post page"
@app.route("/profile")
def profile_page():
user = db.get_user_detail(session['user_id'])
return render_template("profile.html",user = user)
@app.route("/profile/<int:userid>")
def other_profile_page(userid):
user = db.get_user_detail(userid)
return render_template("othersProfile.html",user = user)
@app.route("/blog", methods=["GET", "POST"])
def blog_page():
db = current_app.config["db"]
if request.method == "GET":
blogs = db.get_blogs()
return render_template("blog/blog.html", blogs=sorted(blogs))
else:
if "all" in request.form:
blogs = db.get_blogs()
elif "cat" in request.form:
print("burda")
blogs = db.get_cats()
elif "dog" in request.form:
blogs = db.get_dogs()
elif "bird" in request.form:
blogs = db.get_birds()
elif "other" in request.form:
blogs = db.get_other()
return render_template("blog/blog.html", blogs=sorted(blogs))
@app.route("/blog/like/<int:blog_key>")
def blog_like(blog_key):
db = current_app.config["db"]
db.blog_like(blog_key)
return redirect(url_for("blog_info_page",blog_key = blog_key))
@app.route("/blog/dislike/<int:blog_key>")
def blog_dislike(blog_key):
db = current_app.config["db"]
db.blog_dislike(blog_key)
return redirect(url_for("blog_info_page", blog_key = blog_key))
@app.route("/blog/edit/<int:blog_key>", methods=["GET", "POST"])
def blog_edit(blog_key):
if request.method == "GET":
return render_template("blog/blogedit.html")
else:
db = current_app.config["db"]
old_blog = db.get_blog(blog_key)
form_titlerr = request.form.get("title", "").strip()
# if len(form_titlerr) == 0 and "title" in request.form:
# return render_template("blog/blogedit.html", error=1)
form_title = request.form["title"]
form_blogtag = request.form["blogtag"]
form_text = request.form["text"]
if "title" in request.form and "blogtag" in request.form and "text" in request.form:
db.update_blog(blog_key, request.form["title"], request.form["blogtag"], request.form["text"])
elif "title" in request.form and "blogtag" in request.form:
db.update_blog(blog_key, request.form["title"], request.form["blogtag"], old_blog.text)
elif "title" in request.form and "text" in request.form:
db.update_blog(blog_key, request.form["title"], old_blog.blogtag, request.form["text"])
elif "blogtag" in request.form and "text" in request.form:
db.update_blog(blog_key, old_blog.title, request.form["blogtag"], request.form["text"])
elif "title" in request.form:
db.update_blog(blog_key, request.form["title"], old_blog.blogtag, old_blog.text)
elif "blogtag" in request.form:
db.update_blog(blog_key, old_blog.title, request.form["blogtag"], old_blog.text)
elif "text" in request.form:
db.update_blog(blog_key, old_blog.title, old_blog.blogtag, request.form["text"])
return redirect(url_for("blog_info_page", blog_key = blog_key))
@app.route("/blog/<int:blog_key>")
def blog_info_page(blog_key):
db = current_app.config["db"]
blog = db.get_blog(blog_key)
return render_template("blog/bloginfo.html", blog=blog)
@app.route("/blog/blogadd", methods=["GET","POST"])
def blog_add_page():
if request.method == "GET":
values = {"title":"", "text": ""}
return render_template("blog/blogadd.html", values=values)
else:
valid = validate_blog_form(request.form)
if not valid:
return render_template("blog/blogadd.html", values = request.form)
title = request.form.data['title']
text = request.form.data["text"]
date_time = now.strftime("%d/%m/%y %H:%M:%S")
blogtag = request.form["tag"]
file = request.files["image"]
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER_BLOG'], filename))
photo = filename
user_id = 1
blog_id = 4
likeNum = 0
dislikeNum = 0
blog = Blog(blog_id, user_id, blogtag, title, text, likeNum, dislikeNum, photo,date_time)
db = current_app.config["db"]
blog_key = db.add_blog(blog)
return redirect(url_for("blog_page", blog_key = blog_key))
def validate_blog_form(form):
    """Validate a blog form in place.

    Attaches ``form.data`` (cleaned values) and ``form.errors`` (error
    messages keyed by field name) to the form object and returns True
    when there are no errors.
    """
    form.data = {}
    form.errors = {}
    form_title = form.get("title", "").strip()
    if len(form_title) == 0:
        form.errors["title"] = "Title can not be blank."
    else:
        form.data["title"] = form_title
    # Default to "" so a missing "text" field is reported as blank instead
    # of raising TypeError on len(None), as the original did.
    form_text = form.get("text", "")
    if len(form_text) == 0:
        form.errors["text"] = "Text can not be blank."
    else:
        form.data["text"] = form_text
    return len(form.errors) == 0
@app.route("/blog/blogdelete", methods = ["GET", "POST"])
def blogs_delete():
db = current_app.config["db"]
if request.method == "GET":
blogs = db.get_blogs()
return render_template("/blog/blogdelete.html", blogs=sorted(blogs))
else:
form_blog_keys = request.form.getlist("blog_keys")
for form_blog_key in form_blog_keys:
db.delete_blog(int(form_blog_key))
return redirect(url_for("blog_page"))
@app.route("/findvet", methods=["GET", "POST"])
def findVet_page():
print("buraya girdin")
db = current_app.config["db"]
# db.create_initial_cities()
# db.create_initial_vets() # This function should be used after deleting all vets
now_user = session['user_id']
if request.method == "GET":
vets = db.get_vets()
#db.create_initial_vets()
for vet in vets:
print(vet["cityname"])
score = vet["score"]
score = score * 20
vet["score"] = score
cities = db.get_vet_cities()
return render_template("findVet/findVet.html", vets=vets,cities=cities if cities else None, now_user = now_user)
else:
form_id = request.form["city_select"]
if form_id == "0":
vets = db.get_vets()
selected_city = 0
else:
selected_city = db.get_cityname(form_id)
vets = db.get_selected_vets(form_id)
for vet in vets:
print(vet["cityname"])
score = vet["score"]
score = score * 20
score = int(score)
vet["score"] = score
cities = db.get_vet_cities()
return render_template("findVet/findVet.html", vets=vets, cities=cities if cities else None, selected_city=selected_city, now_user = now_user)
@app.route("/findVet/<int:vet_key>", methods=["GET","POST"])
def vet_custom_page(vet_key):
db = current_app.config["db"]
if request.method == "POST":
form_comment = request.form["comment"]
now_id = request.form["add"]
now_id = session['user_id']
date_time = now.strftime("%d/%m/%y %H:%M:%S")
db.update_rating(vet_key, now_id, form_comment,date_time)
now_user = session['user_id']
vet = db.get_vet(vet_key)
vet.overallScore = int(vet.overallScore)
vet.priceRate = int(vet.priceRate)
vet.serviceRate = int(vet.serviceRate)
# print(vet.vetName)
rates = db.get_rates(vet_key)
return render_template("findVet/vet_custom_page.html", vet=vet,rates=rates, now_user = now_user)
@app.route("/findVet/delete/<int:vet_id>")
def delete_vet(vet_id):
db = current_app.config["db"]
db.delete_vet(vet_id)
return redirect(url_for("findVet_page"))
@app.route("/findVet/evaluation/<int:vet_key>",methods=["GET","POST"])
def vet_evaluation_page(vet_key):
db = current_app.config["db"]
if request.method == "GET":
vet = db.get_vet(vet_key)
vet.overallScore = int(vet.overallScore)
vet.priceRate = int(vet.priceRate)
vet.serviceRate = int(vet.serviceRate)
return render_template("findVet/vet_evaluation_page.html", vet=vet)
else:
form_title = request.form["title"]
form_comment = request.form["comment"]
form_overall = request.form["overallScore"]
form_price = request.form["priceRate"]
form_service = request.form["serviceRate"]
date_time = now.strftime("%d/%m/%y %H:%M:%S")
vetid = vet_key
userid = session['user_id']
rateid = 1 # Just for errors, not real value. Sql will give real rateid
new_rate = Rate(rateid, userid, vetid, form_overall, form_price, form_service, form_comment, form_title, date_time)
db.add_rate(new_rate)
vet = db.get_vet(vet_key)
print(vet.overallScore)
vet.overallScore = int(vet.overallScore)
vet.priceRate = int(vet.priceRate)
vet.serviceRate = int(vet.serviceRate)
rates = db.get_rates(vet_key)
return redirect(url_for("vet_custom_page",vet_key=vet_key))
@app.route("/foundation")
def foundation_page():
db = current_app.config["db"]
foundations = db.get_foundations()
return render_template("foundation/foundation.html", foundations= (foundations))
@app.route("/foundation/edit/")
def foundation_edit():
db = current_app.config["db"]
foundations = db.get_foundations()
return render_template("foundation/foundationedit.html", foundations=sorted(foundations))
@app.route("/foundation/foundationedit/<int:foundation_key>", methods=["GET","POST"])
def foundation_update(foundation_key):
if request.method == "GET":
return render_template("foundation/foundationupdate.html")
else:
db = current_app.config["db"]
old_foundation = db.get_foundation(foundation_key)
#form_about = request.form["about"]
#form_donationurl = request.form["donationurl"]
if "about" in request.form and "donationurl" in request.form:
db.update_foundation(foundation_key, request.form["about"], request.form["donationurl"])
elif "about" in request.form:
db.update_foundation(foundation_key, request.form["about"], old_foundation.donationurl)
elif "donationurl" in request.form:
db.update_foundation(foundation_key, old_foundation.about, request.form["donationurl"])
return redirect(url_for("foundation_update", foundation_key = foundation_key))
@app.route("/foundation/foundationadd", methods=["GET","POST"])
def foundation_add_page():
if request.method == "GET":
values = {"foundname":"", "about":""}
return render_template("foundation/foundationadd.html", values=values)
else:
valid = validate_foundation_form(request.form)
if not valid:
return render_template("foundation/foundationadd.html", values =request.form)
foundname = request.form["foundname"]
donationurl = request.form["donationurl"]
file = request.files["image"]
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER_FOUNDATION'], filename))
photo = filename
about = request.form["about"]
address = request.form["address"]
facebook =request.form["facebook"]
twitter = request.form["twitter"]
instagram = request.form["instagram"]
website =request.form["website"]
foundid = 4
foundation = Foundation(foundid, photo, donationurl, about, foundname, address, facebook, twitter, instagram,website)
db = current_app.config["db"]
foundation_key = db.add_foundation(foundation)
return redirect(url_for("foundation_page", foundation_key = foundation_key))
def validate_foundation_form(form):
    """Validate a foundation form in place.

    Attaches ``form.data`` (cleaned values) and ``form.errors`` (error
    messages keyed by field name) to the form object and returns True
    when there are no errors.
    """
    form.data = {}
    form.errors = {}
    form_foundname = form.get("foundname", "").strip()
    if len(form_foundname) == 0:
        form.errors["foundname"] = "Foundation name can not be blank."
    else:
        form.data["foundname"] = form_foundname
    # Default to "" so a missing "about" field is reported as blank instead
    # of raising TypeError on len(None), as the original did.
    form_about = form.get("about", "")
    if len(form_about) == 0:
        form.errors["about"] = "About can not be blank."
    else:
        form.data["about"] = form_about
    return len(form.errors) == 0
@app.route("/foundation/foundationdelete", methods=["GET", "POST"])
def foundation_delete():
db = current_app.config["db"]
if request.method == "GET":
foundations = db.get_foundations()
return render_template("/foundation/foundationdelete.html", foundations=(foundations))
else:
form_foundation_keys = request.form.getlist("foundation_keys")
for form_foundation_key in form_foundation_keys:
db.delete_foundation(int(form_foundation_key))
return redirect(url_for("foundation_page"))
@app.route("/notice/lost")
def notice_page():
db = current_app.config["db"]
notices = db.get_notices(1)
return render_template("notices.html",notices = sorted(notices, reverse=True),header="Lost Pet Notices")
@app.route("/notice/owner")
def owner_notice_page():
db = current_app.config["db"]
notices = db.get_notices(0)
return render_template("notices.html",notices = sorted(notices, reverse=True),header="Find Owner Notices")
@app.route("/notice/<int:noticeID>")
def noticeDetail_page(noticeID):
db = current_app.config["db"]
notice = db.get_notice(noticeID)
print(notice.photoURL)
return render_template("noticeDetail.html",notice=notice)
@app.route("/notice/edit/<int:noticeid>",methods=["GET", "POST"])
def notice_edit_page(noticeid):
if request.method == "GET":
return render_template("noticeEdit.html")
else:
form = request.form
title = form['name']
date_time = now.strftime("%d/%m/%y %H:%M:%S")
print(date_time)
db.update_notice(noticeid,title,date_time)
next_page = request.args.get("next", url_for("notice_page"))
return redirect(next_page)
@app.route("/notice/add", methods=['GET','POST'])
def noticeAdd_page():
if request.method == "GET":
return render_template("noticeAdd.html")
else:
errors = {}
form = request.form
file = request.files["image"]
if file.filename == '':
errors["file"] = "An image is necessary for notice, please give one."
return render_template("patigram/patigramAdd.html", errors=errors)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER_NOTICE'], filename))
form_photo = filename
photoUrl = "../static/notice/" + form_photo
title = form['title']
animalType = form['animalType']
place = form['place']
gender = form['gender']
strain = form['strain']
age = request.form.get('age')
agee = int(age)
isLost = request.form['tag']
date_time = now.strftime("%d/%m/%y %H:%M:%S")
contact = form['phone']
db.add_notice(title,place,animalType,gender,strain,agee,photoUrl,isLost,contact,date_time,session['user_id'])
return render_template("noticeAdd.html")
@app.route("/forum")
def forum_page():
return "Forum page"
@app.route("/forum/add")
def forum_add_page():
return "Forum add page"
@app.route("/patigram/like/<int:post_key>")
def patigram_like(post_key):
db =current_app.config["db"]
userid = session['user_id']
date_time = now.strftime("%d/%m/%y %H:%M:%S")
db.patigram_add_like(post_key, userid, date_time)
post = db.get_post(post_key)
db.add_notification(1,post.title,0,userid,post.userid,"",date_time)
return redirect(url_for("patigram_page"))
@app.route("/patigram/likedel/<int:post_key>")
def patigram_delete_like(post_key):
db = current_app.config["db"]
userid = session['user_id']
db.patigram_delete_like(post_key, userid)
return redirect(url_for("patigram_page"))
@app.route("/patigram/<int:post_key>")
def patigram_custom_page(post_key):
db = current_app.config["db"]
post = db.get_post(post_key)
patigram_post_type = 0
likenum = db.patigram_get_like_num( post_key)
now_user = session['user_id']
post_user = db.get_post_user(post_key)
is_user_post = 2
#now_user = now_user[0]
print(type(now_user))
print(type(post_user))
if int(now_user) == post_user:
is_user_post = 1
else:
is_user_post = 0
print(is_user_post)
post.userid = db.get_user_name(post.userid)
comments = db.get_comments(patigram_post_type,post_key)
if post is None:
abort(404) #This should be defined
userid = post_user
return render_template("patigram/patigram_custom.html", post=post, comments = comments, is_user_post = is_user_post, likenum = likenum, userid=userid)
@app.route("/patigram", methods=["GET", "POST"])
def patigram_page():
patigrams = []
if request.method == "GET":
db = current_app.config["db"]
posts = db.get_posts()
userNow = session['user_id']
for postkey,post in posts:
post.userid = db.get_user_name(post.userid)
isliked = db.patigram_is_user_liked(post.postid, userNow)
print(isliked)
getlike = db.patigram_get_like_num(post.postid)
patigrams.append((postkey,(post,getlike,isliked)))
return render_template("patigram/patigram.html", patigrams=sorted(patigrams, reverse=True))
else:
form_comment = request.form["comment"]
userid = session['user_id']
commentid = 1 # Just for errors
form_postid = request.form["add"]
date_time = now.strftime("%d/%m/%y %H:%M:%S")
post_type = 0
db = current_app.config["db"]
db.add_comment(Comment(commentid, form_postid, userid, date_time, form_comment, post_type))
post = db.get_post(form_postid)
db.add_notification(1,post.title,1,userid,post.userid,form_comment,date_time)
return redirect(url_for("patigram_custom_page", post_key=form_postid))
@app.route("/patigram/delete/<int:post_key>")
def patigram_delete(post_key):
db = current_app.config["db"]
post = db.get_post(post_key)
db.delete_patigram(post_key)
userid = session['user_id']
date_time = now.strftime("%d/%m/%y %H:%M:%S")
db.add_notification(1,post.title,3,userid,userid,"",date_time)
return redirect(url_for("patigram_page"))
@app.route("/patigram/update/<int:post_key>", methods =["GET", "POST"])
def patigram_update(post_key):
if request.method == "GET":
return render_template("patigram/patigramUpdate.html", post_key = post_key)
else:
db = current_app.config["db"]
old_post = db.get_post(post_key)
# is_title = request.form["title"]
# is_desc = request.form["description"]
form_titlerr = request.form.get("title_sentence", "").strip()
if len(form_titlerr) == 0 and "title" in request.form:
return render_template("patigram/patigramUpdate.html", error = 1)
form_title = request.form["title_sentence"]
form_description = request.form["description_sentence"]
if "title" in request.form and "description" in request.form:
db.update_patigram(post_key, request.form["title_sentence"], request.form["description_sentence"])
print("a")
elif "title" in request.form:
db.update_patigram(post_key, request.form["title_sentence"], old_post.description)
print("ab")
elif "description" in request.form:
db.update_patigram(post_key, old_post.title, request.form["description_sentence"])
print("abc")
return redirect(url_for("patigram_custom_page", post_key = post_key))
# Checking extensions of loaded file
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/patigram/add", methods=["GET","POST"])
def patigram_add_page():
if request.method == "GET":
return render_template("patigram/patigramAdd.html")
else:
errors = {}
file = request.files["image"]
if file.filename == '':
errors["file"] = "An image is necessary for patigram post, please give one."
return render_template("patigram/patigramAdd.html", errors=errors)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
form_photo = filename
form_title = request.form.get("title", "").strip()
if len(form_title) == 0:
errors["title"] = "You should give a title to patigram post, please give one."
return render_template("patigram/patigramAdd.html", errors=errors)
form_title = request.form['title']
form_description = request.form["description"]
form_tag = request.form["tag"]
date_time = now.strftime("%d/%m/%y %H:%M:%S")
user_id = session['user_id']
post_id = 3 # I don't use it, just for errors
post = Post(post_id,user_id,date_time,form_photo,form_title,description=form_description if form_description else None, posttag=form_tag if form_tag else None)
db = current_app.config["db"]
post_key = db.add_post(post)
db.add_notification(1,form_title,2,user_id,user_id,"",date_time)
return redirect(url_for("patigram_custom_page", post_key=post_key))
@app.route("/notifications")
def notifications_page():
notifications = db.get_notifications()
db.notification_seen(session['user_id'])
return render_template("notifications.html",notifications = sorted(notifications, reverse=True))
@app.route("/avatar", methods=["GET","POST"])
def change_avatar():
if request.method == "GET":
return render_template("avatarChange.html")
else:
form = request.form
photoUrl = form['ck2']
print(photoUrl)
db.update_user_photo(session['user_id'],photoUrl)
return redirect(url_for("profile_page"))
@app.route("/notification/add")
def notification_add_page():
return "not add"
if __name__ == "__main__":
app.secret_key = 'super secret key'
app.run(debug=True)
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,973 | alperencesur/itucsdb1943 | refs/heads/master | /classes/Profile.py | class Profile():
def __init__(self,name,surname,username,isVet,facebookLink,twitterLink,youtubeLink,instagramLink,websiteLink,registerTime,photoURL):
        """Plain data holder for a user's profile page.

        NOTE(review): the ``username`` argument is stored as ``self.email``
        — presumably the login name *is* the email address; confirm at the
        callers that build Profile objects.
        """
        self.name = name
        self.surname = surname
        self.email = username
        self.isVet = isVet
        self.photoURL = photoURL
        self.facebookLink = facebookLink
        self.twitterLink = twitterLink
        self.youtubeLink = youtubeLink
        self.instagramLink = instagramLink
        self.websiteLink = websiteLink
        self.registerTime = registerTime
67,974 | alperencesur/itucsdb1943 | refs/heads/master | /classes/Notification.py | class Notificition:
    def __init__(self,notificationID,userName,userSurname,postTitle,notificationType,notificationTime,isSeen,postType,description,content):
        """Plain data holder for a user notification.

        NOTE(review): the attribute name ``notificitionID`` misspells
        "notification" — kept as-is because template/DB code may rely on it.
        """
        self.notificitionID = notificationID
        self.userName = userName
        self.userSurname = userSurname
        self.postTitle = postTitle
        self.notificationType = notificationType  # 0: like, 1: comment, 2: added, 3: deleted
        self.notificationTime = notificationTime
        self.isSeen = isSeen
        self.postType = postType  # 0: blog, 1: patigram, 2: forum, 3: notice
        self.description = description
        self.content = content
67,975 | alperencesur/itucsdb1943 | refs/heads/master | /classes/post.py | class Post:
    def __init__(self, postid, userid, postdate, photo, title, description=None, posttag=None):
        """Plain data holder for a patigram post.

        ``description`` and ``posttag`` are optional and default to None.
        """
        self.postid = postid
        self.userid = userid
        self.postdate = postdate
        self.photo = photo
        self.description = description
        self.title = title
        self.posttag = posttag
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,976 | alperencesur/itucsdb1943 | refs/heads/master | /classes/__init__.py |
import psycopg2 as dbapi2

# Schema DDL for every table.  Several statements contained SQL syntax
# errors that would abort initialization under PostgreSQL; fixed here:
#   * "IF NOT EXIST" -> "IF NOT EXISTS" (also added to Users for
#     consistency / idempotency)
#   * a stray ")" after the CITY table definition
#   * Foundation.FOUNDID used invalid "INTEGER FOREIGN KEY REFERENCES"
#     column syntax and misspelled "FoundationContact"
#   * a trailing comma before ")" in the Notification table
CREATE_QUERIES = [
    """
    CREATE TABLE IF NOT EXISTS Users (
    USERID SERIAL PRIMARY KEY,
    NAME VARCHAR(40) NOT NULL,
    SURNAME VARCHAR(40) NOT NULL,
    EMAIL VARCHAR(80),
    ISVET INTEGER NOT NULL,
    PHOTO VARCHAR(255),
    PASSWORD VARCHAR(120),
    REGISTERDATE VARCHAR(40)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Post (
    POSTID SERIAL PRIMARY KEY,
    USERID INTEGER REFERENCES Users (USERID),
    POSTDATE VARCHAR(24),
    PHOTOURL VARCHAR(255) NOT NULL,
    DESCRIPTION VARCHAR(255),
    TITLE VARCHAR(27) NOT NULL,
    POSTTAG VARCHAR(20)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Likes(
    LIKEID SERIAL PRIMARY KEY,
    POSTID INTEGER REFERENCES Post (POSTID),
    WHOLIKED INTEGER REFERENCES Users (USERID),
    DATE VARCHAR(24)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS CITY(
    CITYID INTEGER PRIMARY KEY,
    CITYNAME VARCHAR(30)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Vet(
    VETID SERIAL PRIMARY KEY,
    ADDRESS VARCHAR(255) NOT NULL,
    DISTRICT VARCHAR(20) NOT NULL,
    SERVICERATE FLOAT DEFAULT 0.0,
    PRICERATE FLOAT DEFAULT 0.0,
    TELEPHONE VARCHAR(15) NOT NULL UNIQUE,
    OVERALLSCORE FLOAT DEFAULT 0.0,
    VETNAME VARCHAR(50) NOT NULL,
    CITYID INTEGER REFERENCES CITY(CITYID),
    VOTENUM INTEGER DEFAULT 0,
    CHECK (((SERVICERATE >= 0.0) AND (SERVICERATE <= 10.0)) AND ((PRICERATE >= 0.0) AND (PRICERATE <= 10.0)) AND ((OVERALLSCORE >= 0.0) AND (OVERALLSCORE <= 10.0)))
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Rating(
    RATEID SERIAL PRIMARY KEY,
    USERID INTEGER REFERENCES USERS(USERID),
    VETID INTEGER REFERENCES VET(VETID),
    OVERALLSCORE INTEGER NOT NULL,
    PRICERATE INTEGER NOT NULL,
    SERVICERATE INTEGER NOT NULL,
    COMMENT VARCHAR(255),
    DATE VARCHAR(24) NOT NULL,
    TITLE VARCHAR(50) NOT NULL
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Blog (
    BLOGID SERIAL PRIMARY KEY,
    USERID INTEGER REFERENCES Users (USERID),
    BLOGTAG VARCHAR(20),
    TITLE VARCHAR(100) NOT NULL,
    TEXT VARCHAR(255) NOT NULL,
    LIKENUMBER INTEGER DEFAULT 0,
    DISLIKENUMBER INTEGER DEFAULT 0,
    POSTDATE DATE
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Notice (
    NOTICEID SERIAL PRIMARY KEY,
    USERID INTEGER REFERENCES Users (USERID),
    ANIMALTYPE VARCHAR(10),
    AGE INTEGER NOT NULL,
    STRAIN VARCHAR(20),
    GENDER VARCHAR(10),
    PHOTOURL VARCHAR(255),
    ISLOST INTEGER NOT NULL,
    DESCRIPTION VARCHAR(255),
    CONTACT VARCHAR(100),
    DATE VARCHAR(100),
    PLACE VARCHAR(80)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS FoundationContact(
    FOUNDID SERIAL PRIMARY KEY,
    FACEBOOK VARCHAR(255),
    TWITTER VARCHAR(255),
    INSTAGRAM VARCHAR(255),
    YOUTUBE VARCHAR(255),
    WEBSITE VARCHAR(255)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Foundation (
    FOUNDID INTEGER REFERENCES FoundationContact(FOUNDID),
    PHOTO VARCHAR(255),
    DONATIONURL VARCHAR(255),
    ABOUT VARCHAR(255) NOT NULL,
    FOUNDNAME VARCHAR(50) NOT NULL,
    ADDRESS VARCHAR(100),
    PRIMARY KEY(FOUNDID)
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Notification(
    NOTIFICATIONID SERIAL PRIMARY KEY,
    POSTID INTEGER NOT NULL,
    USERID INTEGER REFERENCES USERS(USERID),
    OWNERID INTEGER REFERENCES USERS(USERID),
    CONTENT VARCHAR(200),
    POSTTYPE INTEGER NOT NULL,
    NOTIFICATIONTIME VARCHAR(20) NOT NULL,
    NOTTYPE INTEGER NOT NULL,
    ISSEEN INTEGER DEFAULT 0
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS Comment(
    COMMENTID SERIAL PRIMARY KEY,
    POSTID INTEGER NOT NULL REFERENCES Post (POSTID),
    USERID INTEGER REFERENCES Users (USERID),
    DATE VARCHAR(24),
    COMMENT VARCHAR(70),
    POSTTYPE INTEGER NOT NULL
    )
    """,
    """
    CREATE TABLE IF NOT EXISTS SocialMedia(
    OWNERID INTEGER REFERENCES Users(USERID),
    FACEBOOK VARCHAR(255),
    TWITTER VARCHAR(255),
    INSTAGRAM VARCHAR(255),
    YOUTUBE VARCHAR(255),
    WEBSITE VARCHAR(255),
    PRIMARY KEY (OWNERID)
    )
    """
]

# def initialize(url):
#     with dbapi2.connect(url) as connection:
#         cursor = connection.cursor()
#         for statement in CREATE_QUERIES:
#             cursor.execute(statement)
#         cursor.close()
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,977 | alperencesur/itucsdb1943 | refs/heads/master | /classes/rate.py | class Rate:
    def __init__(self, rateid, userid, vetid, overallScore, priceRate, serviceRate, comment, title, date):
        """Plain data holder for one user's rating of a vet."""
        self.rateid = rateid
        self.userid = userid
        self.vetid = vetid
        self.overallScore = overallScore
        self.priceRate = priceRate
        self.serviceRate = serviceRate
        self.comment = comment
        self.title = title
        self.date = date
67,978 | alperencesur/itucsdb1943 | refs/heads/master | /classes/blog.py | class Blog:
    def __init__(self, blogid, userid, blogtag, title, text, likeNum, dislikeNum, photo,postdate):
        """Plain data holder for a blog post."""
        self.blogid = blogid
        self.userid = userid
        self.blogtag = blogtag
        self.title = title
        self.text = text
        self.likeNum = likeNum
        self.dislikeNum = dislikeNum
        self.photo = photo
        self.postdate = postdate
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,979 | alperencesur/itucsdb1943 | refs/heads/master | /views.py | from flask import Blueprint, render_template , redirect , current_app,url_for
from flask import request,flash,session,abort
from datetime import datetime as dt
from flask_login import LoginManager,login_user,login_required,current_user
from datetime import datetime
now = datetime.now()
from flask_login import logout_user
from passlib.apps import custom_app_context as pwd_context
import psycopg2 as dbapi2
from passlib.hash import pbkdf2_sha256 as hasher
from classes.Users import *
site = Blueprint('site', __name__)
url = "postgres://rgkksygg:BO8pGAZa6BqFR84mF43EMNNljm3jRnM5@rogue.db.elephantsql.com:5432/rgkksygg"
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,980 | alperencesur/itucsdb1943 | refs/heads/master | /classes/notices.py | class Notice:
def __init__(self,noticeID,userID,name,surname,animalType,age,strain,gender,photoURL,isLost,description,contact,date,place):
self.noticeID = noticeID
self.name = name
self.surname = surname
self.userID = userID
self.animalType = animalType
self.age = age
self.strain = strain
self.gender = gender
self.photoURL = photoURL
self.isLost = isLost
self.description = description
self.contact = contact
self.date = date
self.place = place | {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,981 | alperencesur/itucsdb1943 | refs/heads/master | /classes/comment.py | class Comment:
def __init__(self, commentid, postid, userid, date, comment, posttype ):
self.commentid = commentid
self.postid = postid
self.userid = userid
self.date = date
self.comment = comment
self.posttype = posttype #patigram icin 0 olacak, bu objeyi baska kullanacak olan ne icin hangi degeri alacagini belirtsin!
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,982 | alperencesur/itucsdb1943 | refs/heads/master | /classes/foundationcontact.py | class FoundationContact():
def __init__(self, foundid, facebook, twitter, instagram, youtube, website):
self.foundid = foundid
self.facebook = facebook
self.twitter = twitter
self.instagram = instagram
self.youtube = youtube
self.website = website | {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,983 | alperencesur/itucsdb1943 | refs/heads/master | /classes/forms.py | from flask_wtf import FlaskForm
from wtforms import FileField, SubmitField, FormField, PasswordField, StringField, TextAreaField, SelectField, RadioField, FloatField, IntegerField
from wtforms.validators import DataRequired, NumberRange, Length, Regexp
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()]) | {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,984 | alperencesur/itucsdb1943 | refs/heads/master | /classes/foundation.py | class Foundation():
def __init__(self, foundid, photo, donationurl, about, foundname, address, facebook, twitter,instagram, website):
self.foundid = foundid
self.photo = photo
self.donationurl = donationurl
self.about = about
self.foundname = foundname
self.address = address
self.facebook = facebook
self.twitter = twitter
self.instagram = instagram
self.website = website | {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
67,985 | alperencesur/itucsdb1943 | refs/heads/master | /dbinit.py | import os
import sys
from flask_login import (LoginManager, current_user, login_required,
login_user, logout_user)
import sys
from datetime import datetime as dt
from os.path import dirname, join, realpath
import psycopg2 as dbapi2
from flask import (Blueprint, Flask, current_app, flash, redirect,
render_template, request, session, url_for)
from flask_login import (LoginManager, current_user, login_required,
login_user, logout_user)
from passlib.apps import custom_app_context as pwd_context
from passlib.hash import pbkdf2_sha256 as hasher
from werkzeug.utils import secure_filename
from classes.comment import *
from classes.Database import Database
from classes.forms import *
from classes.post import *
from classes.rate import *
from classes.Users import *
from views import site
from datetime import datetime as dt
from datetime import datetime
try:
from urllib.parse import urlparse as up
except ImportError:
from urlparse import urlparse as up
now = datetime.now()
app = Flask(__name__)
from flask import (Blueprint, Flask, current_app, flash, redirect,
render_template, request, session, url_for)
import psycopg2 as dbapi2
import server
lm = LoginManager()
INIT_STATEMENTS = [
"CREATE TABLE IF NOT EXISTS DUMMY (NUM INTEGER)",
"INSERT INTO DUMMY VALUES (42)",
]
def initialize(url):
with dbapi2.connect(url) as connection:
cursor = connection.cursor()
for statement in INIT_STATEMENTS:
cursor.execute(statement)
cursor.close()
if __name__ == "__main__":
url = os.getenv("DATABASE_URL")
app.secret_key = 'super secret key'
lm.init_app(app)
lm.login_view = "login_page"
if url is None:
sys.exit(1)
initialize(url)
| {"/classes/Database.py": ["/classes/post.py", "/classes/comment.py", "/classes/foundation.py", "/classes/blog.py", "/classes/notices.py", "/classes/veteriner.py", "/classes/rate.py", "/classes/foundationcontact.py", "/classes/Notification.py", "/classes/Profile.py"], "/server.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/classes/blog.py", "/classes/foundation.py", "/classes/foundationcontact.py"], "/views.py": ["/classes/Users.py"], "/dbinit.py": ["/classes/comment.py", "/classes/Database.py", "/classes/forms.py", "/classes/post.py", "/classes/rate.py", "/classes/Users.py", "/views.py", "/server.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.