index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
16,763
|
red23495/random_util
|
refs/heads/master
|
/Python/Algebra/binary_exponentiation.py
|
def mod_pow(base, power, mod=None):
    """
    Implements divide-and-conquer binary exponentiation.
    Complexity: O(log power)
    source: https://cp-algorithms.com/algebra/binary-exp.html
    Params:
    @base: base number which needs to be multiplied
    @power: non-negative integer exponent
    @mod: optional modulus with which the result should be reduced
    returns: (base^power)%mod when mod is given, else base^power
    """
    if mod:
        base %= mod
    # Seed with 1 % mod so that power == 0 with mod == 1 correctly
    # yields 0, matching the builtin pow(base, 0, 1).
    result = 1 % mod if mod else 1
    while power > 0:
        if power & 1:  # lowest bit set -> multiply this square in
            result *= base
            if mod:
                result %= mod
        base *= base  # square for the next bit
        if mod:
            base %= mod
        power //= 2
    return result
|
{"/Python/Algebra/test/test_binary_exponentiation.py": ["/Python/Algebra/binary_exponentiation.py"]}
|
16,815
|
MydonSolutions/SNAPpyPackets
|
refs/heads/master
|
/send_udp.py
|
import socket
from SNAPPacket import SNAPPacket
import time
# Number of frequency channels carried per packet (drives payload size).
channels = 256
def alternate(i, evenVal, oddVal):
    """Return ``evenVal`` when ``i`` is even, otherwise ``oddVal``."""
    if i % 2 == 0:
        return evenVal
    return oddVal
def int8Comp4(i, realMul, realDiv, realBias, imagMul, imagDiv, imagBias):
    """Derive a 3-bit (mod 8) sample value from index ``i``.

    Even indices yield the scaled/biased real term, odd indices the
    imaginary term; selection is delegated to ``alternate``.
    """
    real_term = (int((realMul * i) / realDiv) + realBias) % 8
    imag_term = (int((imagMul * i) / imagDiv) + imagBias) % 8
    return alternate(i, real_term, imag_term)
def createPacket(fengId, nchan, schan, real, sampleNumber=0):
    # Build one SNAP voltage packet: firmware version 0, voltage type,
    # nchan channels starting at schan, for F-engine fengId.
    # Payload: 16*2*2 (= 64) 4-bit sample values per channel; even sample
    # indices carry `real`, odd indices a channel-derived value.
    # NOTE(review): SNAPPacket packs 2 sample values per payload byte, so
    # nchan*64 values -> nchan*32 payload bytes — confirm against SNAPPacket.
    return SNAPPacket(
        0, #fwVersion
        True, #packetType is voltage
        nchan, #channels
        schan, #channelNum
        fengId, #fEngineId
        sampleNumber, #sampleNumber
        [alternate(i, real, chanI + 8*(chanI%2))#conjugate every second channel
            for chanI in range(nchan) for i in range(16*2*2)]
    )
# Pre-build the packets once; the send loop then only rewrites each
# packet's sample number and re-serialises.
cachedSampleIs = 1
packets = [createPacket(fengI, channels, channels*strmI, sampleI, sampleI*16)
    for sampleI in range(cachedSampleIs) for strmI in range(1) for fengI in range(1)]
UDP_IP = "10.11.1.156"
UDP_PORT = 4015
MESSAGE = packets[0].packet()  # NOTE(review): never used below — dead assignment?
print("UDP target IP: %s" % UDP_IP)
print("UDP target port: %s" % UDP_PORT)
print(len(packets), "different packets.")
sock = socket.socket(socket.AF_INET, # Internet
    socket.SOCK_DGRAM) # UDP
start = time.time()
# Interactive send loop: any answer containing 'y' transmits 16384 bursts
# of all cached packets, advancing each packet's sample counter by
# 16*cachedSampleIs so timestamps stay monotonic across bursts.
while('y' in input("Send Block? ")):
    for pktI in range(0, 16384):
        for packet in packets:
            sock.sendto(packet.packet(), (UDP_IP, UDP_PORT))
            # sock.sendto(b'hello', (UDP_IP, UDP_PORT))
            packet.update(packetNumber=packet.packetNumber+(16*cachedSampleIs))
        time.sleep(0.00001)
print(time.time() - start)
|
{"/send_udp.py": ["/SNAPPacket.py"], "/receive_udp.py": ["/SNAPPacket.py"]}
|
16,816
|
MydonSolutions/SNAPpyPackets
|
refs/heads/master
|
/receive_udp.py
|
#https://wiki.python.org/moin/UdpCommunication
import socket
from SNAPPacket import SNAPPacket
# Listen on all interfaces for SNAP voltage packets and print each
# packet's decoded header as it arrives.
UDP_IP = "0.0.0.0"
UDP_PORT = 4015
sock = socket.socket(socket.AF_INET, # Internet
    socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
while True:
    # 16-byte header + up to 8192 payload bytes per datagram
    data, addr = sock.recvfrom(8192+16) # buffer size is 8192 bytes
    # print("received message: %s" % data)
    SNAPPacket(packetBytes = data).print(True)
    # if "n" in input("Continue (Y/n)? "):
    # break
|
{"/send_udp.py": ["/SNAPPacket.py"], "/receive_udp.py": ["/SNAPPacket.py"]}
|
16,817
|
MydonSolutions/SNAPpyPackets
|
refs/heads/master
|
/SNAPPacket.py
|
import numpy as np

# Bit masks used when packing header fields into the raw byte array.
# (The import above was previously buried between the constants; imports
# belong at the top of the module.)
mask4bits = ((1 << 4) -1)
mask8bits = ((1 << 8) -1)
mask16bits = ((1 << 16) -1)
mask64bits = ((1 << 64) -1)
class SNAPPacket(object):
"""
ATA SNAP Firmware Manual, Release 2.0.0
---------------------------------------
Section 2.3.2 "Output Data Formats: Voltage Packets", pg 5
https://github.com/realtimeradio/ata_snap/blob/nov-observing/docs/manual.pdf
struct voltage_packet {
uint8_t version;
uint8_t type;
uint16_t n_chans;
uint16_t chan;
uint16_t feng_id
uint64_t timestamp;
complex4 data[n_chans, 16, 2] // 4-bit real + 4-bit imaginary
};
• version; Firmware version: Bit [7] is always 1 for Voltage packets. The remaining bits contain a
compile-time defined firmware version, represented in the form bit[6].bits[5:3].bits[2:0]. This document
refers to firmware version 2.0.0.
• type; Packet type: Bit [0] is 1 if the axes of data payload are in order [slowest to fastest] channel x time x
polarization. This is currently the only supported mode. Bit [1] is 0 if the data payload comprises 4+4 bit
complex integers. This is currently the only supported mode.
• n_chans; Number of Channels: Indicates the number of frequency channels present in the payload of
this data packet.
• chan; Channel number: The index of the first channel present in this packet. For example, a channel
number c implies the packet contains channels c to c + n_chans - 1.
• feng_id; Antenna ID: A runtime configurable ID which uniquely associates a packet with a particular
SNAP board.
• timestamp; Sample number: The index of the first time sample present in this packet. For example, a
sample number 𝑠 implies the packet contains samples 𝑠 to 𝑠 + 15. Sample number can be referred to GPS
time through knowledge of the system sampling
"""
def __init__(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None,
packetBytes: bytearray = None,
byteorder: str = 'big'
):
self.bytearr = bytearray(8192+16)
self.payloadbytes = -1
if packetBytes is not None:
self.setHeader(
int.from_bytes(packetBytes[0:1], byteorder=byteorder),
int.from_bytes(packetBytes[1:2], byteorder=byteorder),
int.from_bytes(packetBytes[2:4], byteorder=byteorder),
int.from_bytes(packetBytes[4:6], byteorder=byteorder),
int.from_bytes(packetBytes[6:8], byteorder=byteorder),
int.from_bytes(packetBytes[8:16], byteorder=byteorder)
)
self.setSampleBytes(packetBytes[16:])
else:
if not self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber):
exit()
if not self.setSamples(samples):
exit()
def setHeader(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
update: bool = False
):
notAllArgs = False
if fwVersion is not None:
self.fwVersion = fwVersion & mask8bits
self.bytearr[0] = self.fwVersion
else:
notAllArgs = True
if packetType is not None:
self.packetType = (3 if packetType else 0) & mask8bits
self.bytearr[1] = self.packetType
else:
notAllArgs = True
if channels is not None:
self.channels = channels & mask16bits
self.bytearr[2] = (self.channels >> 8) & mask8bits
self.bytearr[3] = self.channels & mask8bits
else:
notAllArgs = True
if channelNum is not None:
self.channelNum = channelNum & mask16bits
self.bytearr[4] = (self.channelNum >> 8) & mask8bits
self.bytearr[5] = self.channelNum & mask8bits
else:
notAllArgs = True
if fEngineId is not None:
self.fEngineId = fEngineId & mask16bits
self.bytearr[6] = (self.fEngineId >> 8) & mask8bits
self.bytearr[7] = self.fEngineId & mask8bits
else:
notAllArgs = True
if packetNumber is not None:
self.packetNumber = packetNumber & mask64bits
self.bytearr[ 8] = (self.packetNumber >> 56) & mask8bits
self.bytearr[ 9] = (self.packetNumber >> 48) & mask8bits
self.bytearr[10] = (self.packetNumber >> 40) & mask8bits
self.bytearr[11] = (self.packetNumber >> 32) & mask8bits
self.bytearr[12] = (self.packetNumber >> 24) & mask8bits
self.bytearr[13] = (self.packetNumber >> 16) & mask8bits
self.bytearr[14] = (self.packetNumber >> 8) & mask8bits
self.bytearr[15] = self.packetNumber & mask8bits
else:
notAllArgs = True
self.payloadbytes = self.channels * 2 * 16
if notAllArgs and not update:
print("Please provide all of the header's arguments.");
self.payloadbytes = -1
return False
return True
def setSamples(self, samples):
if len(samples)/2 != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)/2
))
return False
for sampleI in range(self.payloadbytes):
self.bytearr[16+sampleI] = ((samples[2*sampleI] & mask4bits) << 4) + (samples[2*sampleI+1] & mask4bits)
return True
def setSampleBytes(self, samples):
if len(samples) != self.payloadbytes:
print("Header inferred payload byte size {} differs from samples length {}\n".format(
self.payloadbytes, len(samples)
))
return False
self.bytearr[16:self.payloadbytes] = samples
return True
def packet(self):
return self.bytearr[:16+self.payloadbytes]
def print(self, headerOnly=False):
if headerOnly:
print(self.headerStr())
else:
print(self.str())
def twosCompliment(self, value, bits):
return value if value < (1<<(bits-1)) else (value % (1<<(bits-1))) - (1<<(bits-1))
def str(self):
return """{}
\rSamples (0x): {}""".format(self.headerStr(),
[complex(self.twosCompliment(i>>4, 4) , self.twosCompliment(i & mask4bits, 4))
for i in self.bytearr[16:self.payloadbytes]])
def headerStr(self):
return """Firmware Version: {}
\rPacket type: {}
\rNumber of Channels: {}
\rChannel number: {}
\rAntenna ID: {}
\rPacket number: {}
\rPayload bytes: {}""".format(self.fwVersion,
self.packetType,
self.channels,
self.channelNum,
self.fEngineId,
self.packetNumber,
self.payloadbytes)
def update(self,
fwVersion: int = None,
packetType: bool = None,
channels: int = None,
channelNum: int = None,
fEngineId: int = None,
packetNumber: int = None,
samples: [int] = None
):
self.setHeader(fwVersion, packetType, channels, channelNum, fEngineId, packetNumber, update=True)
if samples is not None:
self.setSamples(samples)
if __name__ == '__main__':
    # Round-trip self-test: build a packet from fields, reparse its
    # serialised bytes, and confirm both serialisations match.
    testPacket = SNAPPacket(
        0,              # fwVersion
        True,           # voltage packet
        2,              # channels
        2,              # channelNum
        0,              # fEngineId
        3735928559,     # packetNumber (0xDEADBEEF)
        # 2 channels * 16 times * 2 pols * 2 nibbles = 128 sample values
        # -> 64 payload bytes. FIX: the old literal 16*2*2 (= 64) was half
        # the length required by channels=2, so setSamples failed and the
        # demo exited before printing anything.
        [i % 16 for i in range(2*16*2*2)]
    )
    testPacket.print()
    testPacketBytes = testPacket.packet()
    dupPacket = SNAPPacket(packetBytes=testPacketBytes)
    dupPacket.print()
    dupPacketBytes = dupPacket.packet()
    print(testPacketBytes)
    print(dupPacketBytes)
|
{"/send_udp.py": ["/SNAPPacket.py"], "/receive_udp.py": ["/SNAPPacket.py"]}
|
16,840
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/models.py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.constraints import UniqueConstraint
# Create your models here.
class Doctor(models.Model):
    """A veterinary doctor's profile, linked one-to-one with an auth User."""
    location_choice = (
        ('kothrud', 'Kothrud'),
        ('swargate', 'Swargate'),
        ('wakad', 'Wakad'),
        ('aundh', 'Aundh'),
        ('katraj', 'Katraj'),
    )
    user = models.OneToOneField(User, related_name='doctor', null=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=256)
    location = models.CharField(max_length=20, choices=location_choice, default='kothrud')
    number = models.BigIntegerField(null=True)
    date = models.DateField(default=timezone.now)
    def __str__(self):
        # NOTE(review): user is nullable; this raises if user is None.
        return '{} {}'.format(self.user.first_name, self.user.last_name)
class Customer(models.Model):
    """A pet owner's profile, linked one-to-one with an auth User."""
    location_choice = (
        ('kothrud', 'Kothrud'),
        ('swargate', 'Swargate'),
        ('wakad', 'Wakad'),
        ('aundh', 'Aundh'),
        ('katraj', 'Katraj'),
    )
    user = models.OneToOneField(User, related_name='profile', null=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=100, null=True)
    address = models.CharField(max_length=256, null=True)
    location = models.CharField(max_length=20, choices=location_choice, default='kothrud')
    number = models.BigIntegerField(null=True)
    date = models.DateField(default=timezone.now)
    def __str__(self):
        return str(self.name)
class Appointment(models.Model):
    """An appointment booked by a user with a doctor for a date + timeslot."""
    TIMESLOT_LIST = (
        ('09:00 – 10:00','09:00 – 10:00'),
        ('10:00 – 11:00','10:00 – 11:00'),
        ('11:00 – 12:00','11:00 – 12:00'),
        ('12:00 – 13:00','12:00 – 13:00'),
        ('13:00 – 14:00','13:00 – 14:00'),
        ('14:00 – 15:00','14:00 – 15:00'),
        ('15:00 – 16:00','15:00 – 16:00'),
        ('16:00 – 17:00','16:00 – 17:00'),
        ('17:00 – 18:00','17:00 – 18:00')
    )
    # FIX: corrected the misspelled display label 'Cancle' -> 'Cancelled'.
    # Stored values (1/2/3) are unchanged, so existing rows are unaffected.
    STATUS_CHOICE = ((1,'Pending'),(2,'Closed'),(3,'Cancelled'))
    doctor = models.ForeignKey(Doctor, on_delete=models.CASCADE)
    date = models.DateField()
    timeslot = models.CharField(max_length=20, choices=TIMESLOT_LIST)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    status = models.SmallIntegerField(choices=STATUS_CHOICE,default=1)
    today = models.BooleanField(default=False)
    class Meta:
        # At most one booking per doctor per date per timeslot.
        unique_together = ['doctor','date','timeslot']
    def __str__(self):
        return str('Appointment ID-{} on date {}'.format(self.id,self.date))
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,841
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0009_auto_20210202_1301.py
|
# Generated by Django 3.1.3 on 2021-02-02 07:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): alters Appointment.timeslot.

    NOTE(review): declares an IntegerField whose choices are time-range
    strings — inconsistent, and superseded by 0010/0011. Applied
    migrations must not be hand-edited.
    """
    dependencies = [
        ('testapp', '0008_auto_20210201_1716'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='timeslot',
            field=models.IntegerField(choices=[('09:00 – 10:00', '09:00 – 10:00'), ('10:00 – 11:00', '10:00 – 11:00'), ('11:00 – 12:00', '11:00 – 12:00'), ('12:00 – 13:00', '12:00 – 13:00'), ('13:00 – 14:00', '13:00 – 14:00'), ('14:00 – 15:00', '14:00 – 15:00'), ('15:00 – 16:00', '15:00 – 16:00'), ('16:00 – 17:00', '16:00 – 17:00'), ('17:00 – 18:00', '17:00 – 18:00')]),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,842
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0010_auto_20210202_1303.py
|
# Generated by Django 3.1.3 on 2021-02-02 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): Appointment.timeslot becomes an
    IntegerField with index-based choices (later reverted to CharField
    in 0011). Applied migrations must not be hand-edited."""
    dependencies = [
        ('testapp', '0009_auto_20210202_1301'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='timeslot',
            field=models.IntegerField(choices=[(0, '09:00 – 10:00'), (1, '10:00 – 11:00'), (2, '11:00 – 12:00'), (3, '12:00 – 13:00'), (4, '13:00 – 14:00'), (5, '14:00 – 15:00'), (6, '15:00 – 16:00'), (7, '16:00 – 17:00'), (8, '17:00 – 18:00')]),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,843
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/decorators.py
|
from django.http import HttpResponse
from django.shortcuts import redirect
def authenticaton(view_func):
    """Decorator for login/registration pages: users who are already
    authenticated are redirected to 'userhome'; anonymous users get the
    wrapped view. (Name spelling kept — it is imported elsewhere.)
    """
    def _wrapped(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return view_func(request, *args, **kwargs)
        return redirect('userhome')
    return _wrapped
def allowed_users(allowed_rolls=()):
    """Decorator factory: run the view only when the user's first group
    name is in ``allowed_rolls``; otherwise return an error response.

    FIX: the default was a mutable list ([]) — a classic shared-mutable-
    default pitfall. An empty tuple supports the same membership test and
    existing callers that pass a list are unaffected.
    """
    def decorator(view_func):
        def wraper_func(request,*args,**kwargs):
            # Only the user's FIRST group is considered for authorisation.
            group = None
            if request.user.groups.exists():
                group = request.user.groups.all()[0].name
            if group in allowed_rolls:
                return view_func(request,*args,**kwargs)
            else:
                return HttpResponse('You are not authorised to view this page')
        return wraper_func
    return decorator
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,844
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0003_auto_20210130_2238.py
|
# Generated by Django 3.1.3 on 2021-01-30 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): widens Doctor.number to a nullable
    BigIntegerField. Applied migrations must not be hand-edited."""
    dependencies = [
        ('testapp', '0002_appointment_doctor'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='number',
            field=models.BigIntegerField(null=True),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,845
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/admin.py
|
from django.contrib import admin
from testapp.models import Customer,Doctor,Appointment
# Register your models here.
# Expose the app's models in the Django admin (same order as before).
for _model in (Customer, Doctor, Appointment):
    admin.site.register(_model)
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,846
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0016_customer_name.py
|
# Generated by Django 3.1.3 on 2021-02-14 15:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): re-adds Customer.name (removed in
    0015) as a nullable CharField. Applied migrations must not be
    hand-edited."""
    dependencies = [
        ('testapp', '0015_auto_20210214_2039'),
    ]
    operations = [
        migrations.AddField(
            model_name='customer',
            name='name',
            field=models.CharField(max_length=100, null=True),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,847
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0002_appointment_doctor.py
|
# Generated by Django 3.1.3 on 2021-01-30 16:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): creates the Doctor and Appointment
    tables. Later migrations (0003+) amend these definitions; applied
    migrations must not be hand-edited."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('testapp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('address', models.CharField(max_length=256)),
                ('location', models.CharField(choices=[('kothrud', 'Kothrud'), ('swargate', 'Swargate'), ('wakad', 'Wakad'), ('aundh', 'Aundh'), ('katraj', 'Katraj')], default='kothrud', max_length=20)),
                ('number', models.IntegerField()),
                ('email', models.EmailField(max_length=100)),
                ('date', models.DateField(default=django.utils.timezone.now)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='doctor', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Appointment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('timeslot', models.IntegerField(choices=[(0, '09:00 – 10:00'), (1, '10:00 – 11:00'), (2, '11:00 – 12:00'), (3, '12:00 – 13:00'), (4, '13:00 – 14:00'), (5, '14:00 – 15:00'), (6, '15:00 – 16:00'), (7, '16:00 – 17:00'), (8, '17:00 – 18:00')])),
                ('doctor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.doctor')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.customer')),
            ],
            options={
                'unique_together': {('doctor', 'date', 'timeslot')},
            },
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,848
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0015_auto_20210214_2039.py
|
# Generated by Django 3.1.3 on 2021-02-14 15:09
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): drops Customer.email and
    Customer.name (name is re-added in 0016). Applied migrations must
    not be hand-edited."""
    dependencies = [
        ('testapp', '0014_auto_20210209_0032'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='customer',
            name='email',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='name',
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,849
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/forms.py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Customer,Appointment
from datetime import date
'''from datetimewidget.widgets import DateTimeWidget'''
class CreateUserForm(UserCreationForm):
    """Sign-up form: standard credentials plus first/last name."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2','first_name','last_name']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to every widget.
        for form_field in self.fields.values():
            form_field.widget.attrs = {'class': 'form-control'}
class UpdateUserForm(forms.ModelForm):
    """Profile-edit form for the built-in User's names and email."""
    class Meta:
        model = User
        fields = ['first_name','last_name','email']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to every widget.
        for form_field in self.fields.values():
            form_field.widget.attrs = {'class': 'form-control'}
class CustomerForm(forms.ModelForm):
    """Edit form for the Customer profile's contact details."""
    class Meta:
        model = Customer
        fields = ['number','address','location']
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to every widget.
        for form_field in self.fields.values():
            form_field.widget.attrs = {'class': 'form-control'}
class DateInput(forms.DateInput):
    # Renders as an HTML5 <input type="date"> picker instead of plain text.
    input_type = 'date'
class AppointmentForm(forms.ModelForm):
    """Booking form; the date must be strictly in the future."""
    class Meta:
        model = Appointment
        fields = ['doctor','date','timeslot']
        widgets = {'date':DateInput()}
    def clean_date(self):
        # Reject today and past dates; accept tomorrow onwards.
        day = self.cleaned_data['date']
        if day > date.today():
            return day
        raise forms.ValidationError('Date should be upcoming (tomorrow or later)')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to every widget.
        for form_field in self.fields.values():
            form_field.widget.attrs = {'class': 'form-control'}
class UpdateAppointmentForm(forms.ModelForm):
    """Reschedule form; same future-date rule as AppointmentForm."""
    class Meta:
        model = Appointment
        fields = ['date','timeslot','doctor']
        widgets = {'date':DateInput()}
    def clean_date(self):
        # Reject today and past dates; accept tomorrow onwards.
        day = self.cleaned_data['date']
        if day > date.today():
            return day
        raise forms.ValidationError('Date should be upcoming (tomorrow or later)')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap's form-control class to every widget.
        for form_field in self.fields.values():
            form_field.widget.attrs = {'class': 'form-control'}
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,850
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0011_auto_20210202_2101.py
|
# Generated by Django 3.1.3 on 2021-02-02 15:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): Appointment.timeslot back to a
    CharField (note the \\xa0 non-breaking spaces in the choice strings)
    and Appointment.user pointed at auth.User. Applied migrations must
    not be hand-edited."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('testapp', '0010_auto_20210202_1303'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='timeslot',
            field=models.CharField(choices=[('09:00\xa0–\xa010:00', '09:00\xa0–\xa010:00'), ('10:00\xa0–\xa011:00', '10:00\xa0–\xa011:00'), ('11:00\xa0–\xa012:00', '11:00\xa0–\xa012:00'), ('12:00\xa0–\xa013:00', '12:00\xa0–\xa013:00'), ('13:00\xa0–\xa014:00', '13:00\xa0–\xa014:00'), ('14:00\xa0–\xa015:00', '14:00\xa0–\xa015:00'), ('15:00\xa0–\xa016:00', '15:00\xa0–\xa016:00'), ('16:00\xa0–\xa017:00', '16:00\xa0–\xa017:00'), ('17:00\xa0–\xa018:00', '17:00\xa0–\xa018:00')], max_length=20),
        ),
        migrations.AlterField(
            model_name='appointment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,851
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/views.py
|
from django.shortcuts import render,redirect
from testapp.forms import CreateUserForm,CustomerForm,AppointmentForm,UpdateAppointmentForm,UpdateUserForm
from django.shortcuts import get_object_or_404
from testapp.models import Appointment
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from .decorators import authenticaton,allowed_users
from django.dispatch import receiver
from testapp.models import Customer
from django.contrib.auth.models import Group
from datetime import date
# Create your views here.
# Customer views
@authenticaton
def homeview(request):
    # Public landing page; already-logged-in users are redirected to
    # 'userhome' by the @authenticaton decorator.
    return render(request,'testapp/home.html')
@authenticaton
def registration(request):
    """Create a new user account, then send the visitor to the login page.

    On invalid POST the bound form (with its errors) is re-rendered.
    """
    form = CreateUserForm()
    if request.method == 'POST':
        form = CreateUserForm(request.POST)
        if form.is_valid():
            form.save()  # unused local binding `user` removed
            username = form.cleaned_data.get('username')
            messages.success(request, 'Account was created for ' + username)
            return redirect ('login')
    return render(request,'testapp/registration.html',{'form':form})
@authenticaton
def loginpage(request):
    """Authenticate a username/password POST and start a session.

    FIX: ``authenticate`` was being handed the ``redirect`` function as
    its request argument; it must receive the current ``request``.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request,user)
            return redirect('userhome')
        else:
            messages.info(request,'Username or password is incorrect')
    return render(request,'testapp/login.html')
def logoutuser(request):
    # End the session and return to the public landing page.
    logout(request)
    return redirect('home')
@login_required(login_url='login')
def userhome(request):
    # Dashboard for an authenticated user.
    return render(request,'testapp/userhome.html')
@login_required(login_url='login')
def appointmentview(request):
    # Book a new appointment for the logged-in user.
    if request.method == 'POST':
        form = AppointmentForm(request.POST)
        if form.is_valid():
            # Attach the current user before persisting.
            form = form.save(commit=False)
            form.user = request.user
            form.save()
            return redirect('userhome')
    if request.method == 'GET':
        form = AppointmentForm()
    # On invalid POST, the bound form (with errors) falls through here
    # and is re-rendered.
    return render (request,'testapp/appointment.html',{'form':form})
@login_required(login_url='login')
def displayinfo(request):
    # Show the current user's customer profile.
    # NOTE(review): Customer.objects.get raises DoesNotExist if no profile
    # exists for this user — confirm a profile is always created at signup.
    current_id = request.user.id
    info = Customer.objects.get(user_id=current_id)
    return render (request,'testapp/displayinfo.html',{'info':info})
@login_required(login_url='login')
def display_history(request):
    # All appointments for the current user — as the booking customer
    # (`form`) and, when the user is a doctor, as the booked doctor
    # (`formdoc`) — newest first.
    form = Appointment.objects.filter(user_id = request.user.id).order_by('-date')
    formdoc = Appointment.objects.filter(doctor__user_id = request.user.id).order_by('-date')
    return render(request,'testapp/display_history.html',{'form':form,'formdoc':formdoc})
@login_required(login_url='login')
def display_appointment(request,id):
    # Detail view for a single appointment id (queryset of 0 or 1 rows).
    # NOTE(review): no ownership check — any logged-in user can view any id.
    form = Appointment.objects.filter(id=id)
    return render(request,'testapp/display_allhistory.html',{'form':form})
@login_required(login_url='login')
def upcoming_appointment(request):
    # Pending (status=1) appointments from today onward, both for the
    # user as customer and for the user as doctor.
    form = Appointment.objects.filter(user_id = request.user.id, date__gte=date.today(),status=1).order_by('date','timeslot')
    docform = Appointment.objects.filter(doctor__user_id = request.user.id, date__gte=date.today(),status=1).order_by('date','timeslot')
    return render(request,'testapp/upcoming_appointment.html',{'form':form,'docform':docform})
@login_required(login_url='login')
def update_appointment(request,id):
    # Edit an appointment; 404s unless it belongs to the current user.
    update = get_object_or_404(Appointment,id=id,user_id=request.user.id)
    if request.method=='POST':
        form = UpdateAppointmentForm(request.POST,instance=update)
        if form.is_valid():
            form = form.save(commit=False)
            form.user = request.user
            form.save()
            return redirect('upcoming')
    else:
        form = UpdateAppointmentForm(instance=update)
    # Invalid POSTs fall through and re-render the bound form with errors.
    return render(request,'testapp/updateappointment.html',{'form':form})
@login_required(login_url='login')
def change_status(request,id):
    # Cancel a pending appointment (status 1 -> 3).
    # NOTE(review): no ownership check on `id` — confirm this is intended.
    status = get_object_or_404(Appointment,id=id)
    if status.status == 1:
        status.status = 3
    status.save()
    return redirect('userhome')
@login_required(login_url='login')
def close_status(request,id):
    # Close a pending appointment (status 1 -> 2).
    # NOTE(review): no ownership check on `id` — confirm this is intended.
    status = get_object_or_404(Appointment,id=id)
    if status.status == 1:
        status.status = 2
    status.save()
    return redirect('userhome')
@login_required(login_url='login')
def update_customerform(request):
    # Edit the Customer profile and the built-in User record together;
    # both forms must validate before either is saved.
    update_profile_customer = get_object_or_404(Customer,user_id=request.user.id)
    update_profile_user = get_object_or_404(User,id=request.user.id)
    if request.method == 'POST':
        form1 = CustomerForm(request.POST,instance=update_profile_customer)
        form2 = UpdateUserForm(request.POST,instance=update_profile_user)
        if form1.is_valid() and form2.is_valid():
            profile1 = form1.save(commit=False)
            profile1.save()
            profile2 = form2.save(commit=False)
            profile2.save()
            return redirect ('userhome')
    else:
        form1 = CustomerForm(instance=update_profile_customer)
        form2 = UpdateUserForm(instance=update_profile_user)
    return render (request,'testapp/updatecuform.html',{'form1':form1,'form2':form2})
@login_required(login_url='login')
def delete_user(request,id):
    # Delete the *current* user's account.
    # NOTE(review): the `id` URL parameter is ignored — deletion always
    # targets request.user. That prevents deleting other accounts, but
    # the route argument is misleading; confirm intent.
    user = User.objects.get(id=request.user.id)
    user.delete()
    return redirect('home')
# Doctor views
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,852
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0004_auto_20210201_1458.py
|
# Generated by Django 3.1.3 on 2021-02-01 09:28
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): alters Appointment.patient.

    NOTE(review): the default is the User *class* itself, not a pk or a
    callable returning one — looks wrong, but this migration is applied
    history and must not be hand-edited (the field is removed in 0006).
    """
    dependencies = [
        ('testapp', '0003_auto_20210130_2238'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='patient',
            field=models.ForeignKey(default=django.contrib.auth.models.User, on_delete=django.db.models.deletion.CASCADE, to='testapp.customer'),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,853
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/projectwork/urls.py
|
"""projectwork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from testapp import views
# Route table: admin, public pages (home/register/login), then the
# login-protected profile and appointment-management views from testapp.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.homeview,name='home'),
    path('register/',views.registration,name='register'),
    path('login/',views.loginpage,name='login'),
    path('userhome',views.userhome,name='userhome'),
    path('logout/',views.logoutuser,name='logout'),
    path('updateform/',views.update_customerform,name='updateform'),
    path('deleteuser/<int:id>/',views.delete_user,name='deleteuser'),
    path('displayinfo/',views.displayinfo,name='displayinfo'),
    path('appointment/',views.appointmentview,name='appointment'),
    path('history/',views.display_history,name='history'),
    path('allhistory/<int:id>/',views.display_appointment,name='allhistory'),
    path('upcoming',views.upcoming_appointment,name='upcoming'),
    path('updateappointment/<int:id>/',views.update_appointment,name='updateappointment'),
    path('status/<int:id>/',views.change_status,name='statuschange'),
    path('clstatus/<int:id>/',views.close_status,name='closestatus'),
]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,854
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0012_appointment_status.py
|
# Generated by Django 3.1.3 on 2021-02-05 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): adds Appointment.status as a
    boolean (later replaced by a SmallIntegerField in the model).
    Applied migrations must not be hand-edited."""
    dependencies = [
        ('testapp', '0011_auto_20210202_2101'),
    ]
    operations = [
        migrations.AddField(
            model_name='appointment',
            name='status',
            field=models.BooleanField(choices=[(False, 'Pending'), (True, 'Closed')], default=False),
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,855
|
sumitnicmar/PetCover
|
refs/heads/main
|
/projectwork/testapp/migrations/0006_auto_20210201_1633.py
|
# Generated by Django 3.1.3 on 2021-02-01 11:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.3): replaces Appointment.patient with
    a direct FK to auth.User (one-off default pk 2 used for backfill).
    Applied migrations must not be hand-edited."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('testapp', '0005_auto_20210201_1557'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='appointment',
            name='patient',
        ),
        migrations.AddField(
            model_name='appointment',
            name='user',
            field=models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
            preserve_default=False,
        ),
    ]
|
{"/projectwork/testapp/forms.py": ["/projectwork/testapp/models.py"], "/projectwork/testapp/views.py": ["/projectwork/testapp/decorators.py"]}
|
16,860
|
VelDia/Cult_game
|
refs/heads/master
|
/Tutorial.py
|
# Tutorial part
from Characters import People, NPC, MainCharacter, Character
from character_data import*
#from random import randint, choice
#================================================
# Tutorial NPC
#================================================
npc1 = NPC('Andrew', 27, 1, 50, 0)
npc2 = NPC('Bob', 32, 1, 50, 0)
npc3 = NPC('Alina', 20, 0, 50, 0)
#================================================
#================================================
#================================================
# First visit
#================================================
'''
def FirstVisit():
MainCharacter.greeting()
npc1.greeting()
while True:
answear = bool(input('Enter your action'))
if answear == 'q':
answear = input('Enter your last action')
if answear == '0':
MainCharacter.finish_phrase(True)
npc1.farewell(answear)
if answear == '1':
MainCharacter.finish_phrase(False)
npc1.farewell(answear)
break
else:
if answear == '0':
MainCharacter.recruitment(True)
npc1.phrase(answear)
if answear == '1':
MainCharacter.recruitment(False)
npc1.phrase(answear)
if npc1.motivation > 90:
print('Perfect you have first member in your cult!')
else:
print('Damn! He\'s so strong try harder!!!')
#FirstVisit()
'''
def Visit():
    """Run one full recruitment conversation between the player and npc1.

    Flow: greeting, then three prompts (soft recruitment, hard recruitment,
    closing phrase), then an outcome based on npc1's final motivation:
      * <= 50   -> failure, no believer gained
      * 51..89  -> a new believer
      * >= 90   -> a "worshipper" (also counted via add_believer here)

    Side effects: prints all dialogue, mutates npc1.motivation and
    MainCharacter.believers (both module-level objects from Characters.py).
    """
    MainCharacter.greeting()
    npc1.greeting()
    npc1.showStats()
    # Each recruitment() call returns True when the player picked the
    # "bad" phrase of the offered pair; the NPC reacts accordingly.
    bad1 = MainCharacter.recruitment(characterRecruitmentSoft,characterRecruitmentSoftBad)
    npc1.phrase(bad1)
    npc1.showStats()
    bad2 = MainCharacter.recruitment(characterRecruitmentHard, characterRecruitmentHardBad)
    npc1.phrase(bad2)
    npc1.showStats()
    bad3 = MainCharacter.recruitment(characterFinishPhrase, characterFinishPhraseBad)
    npc1.farewell(bad3)
    npc1.showStats()
    if npc1.motivation <= 50:
        print('You lost... The person didn`t believe you.')
    elif npc1.motivation > 50 and npc1.motivation < 90:
        print('Congrats, you`ve got a new believer.')
        MainCharacter.add_believer()
    elif npc1.motivation >= 90:
        print('Heh, you were good enough to get a worshipper.')
        MainCharacter.add_believer()
    print('The amount of believers:', MainCharacter.believers)
'''
def SecondVisit():
MainCharacter.greeting()
npc2.greeting()
while True:
answear = input('Enter your action')
if answear == 'q':
answear = input('Enter your last action')
if answear == '0':
MainCharacter.finish_phrase(1)
npc2.farewell(answear)
if answear == '1':
MainCharacter.finish_phrase(0)
npc2.farewell(answear)
break
else:
if answear == '0':
MainCharacter.recruitment(1)
npc2.phrase(answear)
if answear == '1':
MainCharacter.recruitment(0)
npc2.phrase(answear)
if npc2.motivation > 90:
print('Perfect you have first member in your cult!')
else:
print('Damn! He\'s so strong try harder!!!')
SecondVisit()
def ThirdVisit():
MainCharacter.greeting()
npc3.greeting()
while True:
answear = input('Enter your action')
if answear == 'q':
answear = input('Enter your last action')
if answear == '0':
MainCharacter.finish_phrase(1)
npc3.farewell(answear)
if answear == '1':
MainCharacter.finish_phrase(0)
npc3.farewell(answear)
break
else:
if answear == '0':
MainCharacter.recruitment(1)
npc3.phrase(answear)
if answear == '1':
MainCharacter.recruitment(0)
npc3.phrase(answear)
if npc3.motivation > 90:
print('Perfect you have first member in your cult!')
else:
print('Damn! He\'s so strong try harder!!!')
ThirdVisit()
'''
Visit()
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,861
|
VelDia/Cult_game
|
refs/heads/master
|
/character_data.py
|
# File with Сharacter phrases
characterGreeting = ['Oh, hi', 'What\'s up?', 'With God to you I come']
characterRecruitmentSoft = ['Lets talk about religion', 'Wanna see something cool?', 'Would you mind to go to our church?']
characterRecruitmentSoftBad = ['I need people for making cult', 'Join our religion', 'Hey, I see you\'re rich, enough would ya join our religion']
characterRecruitmentHard = ['Okey, so you have to visit our church', 'You`ll find all answers in our church', 'Join our church to feel happy']
characterRecruitmentHardBad = ['You must come']
characterFinishPhrase = ['We will wait for ya!', 'God will be proud of you']
characterFinishPhraseBad = ['We will wait for your money!', 'God in need of you and $$$', 'Hope you will come with somebody to our religion']
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,862
|
VelDia/Cult_game
|
refs/heads/master
|
/Characters.py
|
# Main Character logic
# And NPC logic
from random import choice, randint
#from Tutorial import Visit
from character_data import characterGreeting
from npc_data import*
import time, threading
#================================================
# Main Character logic
#================================================
class People:
    """Base class for every person in the game (player, NPCs, worshippers).

    Attributes:
        name    -- display name
        gender  -- 0 = female, 1 = male
        age     -- age in years
        picture -- index of the portrait to display
    """

    def __init__(self, name, gender, age, picture):
        self.name = name
        self.gender = gender
        self.age = age
        self.picture = picture

    def GetName(self):
        """Prompt the player for a character name and return it."""
        Name = input("Enter character name: ")
        return Name

    def GetGender(self):
        """Prompt for a gender code and return 'female' or 'male'.

        Accepts 0 (female), 1 (male) or 9 (pick at random).  Bug fix: any
        other value used to print the error and then crash with an
        UnboundLocalError because ``strGender`` was never assigned; now we
        re-prompt until a valid code is entered.
        """
        while True:
            Gender = int(input("Enter your gender: (\"0\" for female, \"1\" for male, \"9\" for a random gender) "))
            if Gender == 9:
                Gender = randint(0, 1)
            if Gender == 0:
                return 'female'
            elif Gender == 1:
                return 'male'
            print('Error: wrong gender initialization...')

    def get_picture(self, age, gender):
        """Describe (and eventually load) a portrait for the given age/gender.

        Currently prints placeholders only; the TODOs mark where the real
        picture lookup should be plugged in.
        """
        if gender == 0:
            print("female")  # TODO: pick from the female portrait folder
        elif gender == 1:
            print("male")    # TODO: pick from the male portrait folder
        if age < 30:
            print("young person")          # TODO: attach young-person picture
        elif age >= 30 and age <= 50:
            print("middle-aged person")    # TODO: attach middle-aged picture
        else:
            print("old person")            # TODO: attach old-person picture
class Character(People):
    """The player character.

    Attributes:
        money     -- amount of money owned (starts at 0)
        charisma  -- persuasion stat
        energy    -- action resource (starts at 100)
        level     -- progression stage the player has reached
        believers -- number of people recruited into the cult
    """

    # Class-level defaults; every instance shadows them in __init__.
    believers = 0
    level = 0
    money = 0

    def __init__(self, name, age, gender, money, charisma, energy, level, believers, picture):
        People.__init__(self, name, gender, age, picture)
        self.money = money
        self.charisma = charisma
        self.energy = energy
        self.level = level
        self.believers = believers
        self.picture = picture

    def greeting(self):
        """Print a random greeting line."""
        print(choice(characterGreeting))

    def recruitment(self, ListOfPhrases, ListOfPhrasesBad):
        """Offer a good/bad phrase pair; return True if the bad one was picked.

        Bug fix: on any answer other than 1 or 2 the old code printed
        "Something went wrong try again" but then crashed with an
        UnboundLocalError on ``bad`` — it never actually retried.  Now we
        loop until the player enters 1 or 2.
        """
        print('1.',choice(ListOfPhrases))
        print('2.',choice(ListOfPhrasesBad))
        while True:
            answ = int(input('Choose the answer: '))
            if answ == 1:
                return False
            elif answ == 2:
                return True
            print('Something went wrong try again')

    def add_believer(self):
        """Increase the follower count by one."""
        self.believers += 1

    def level_up(self):
        """Advance to the next progression stage."""
        self.level += 1
#================================================
# Creating Main Character
#================================================
'''
def aTutorial():
global MainCharacter
MainCharacter = Character(Character.GetName(Character), 21, Character.GetGender(Character), 0, 100, 0, 0, 0, Character.get_picture(Character, 21, Character.GetGender(Character)))
Visit()
'''
def SkipTutorial():
# actions with MainCharacter characteristics
# Add soon
pass
#================================================
#================================================
MainCharacter = Character('Default name', 21, 0, 0, 50, 100, 1, 0, 0)
#================================================
# NPC logic
#================================================
class NPC(People):
    """A non-player character the player tries to recruit.

    [name]       - NPC name
    [age]        - NPC age (18 to 80)
    [gender]     - 0 = female, 1 = male
    [motivation] - willingness to listen to the Main Character
    [picture]    - index of the portrait shown for this NPC
    """

    def __init__(self, name, age, gender, motivation, picture):
        People.__init__(self, name, gender, age, picture)
        self.motivation = motivation

    def greeting(self):
        """Print a random greeting line followed by the NPC's name."""
        line = choice(npcGreeting)
        print(line + ' I\'m ' + self.name)

    def phrase(self, bad):
        """React to a recruitment phrase; motivation shifts by 5-10 points."""
        if bad == True:
            print(choice(npcAnswearBad))
            shift = -randint(5, 10)   # TODO: smarter random parameters
        else:
            print(choice(npcAnswear))
            shift = randint(5, 10)    # TODO: smarter random parameters
        self.motivation += shift

    def farewell(self, bad):
        """React to the closing phrase; motivation shifts by 1-5 points."""
        if bad == True:
            print(choice(npcFarewellBad))
            shift = -randint(1, 5)    # TODO: smarter random parameters
        else:
            print(choice(npcFarewell))
            shift = randint(1, 5)     # TODO: smarter random parameters
        self.motivation += shift

    def showStats(self):
        """Dump name, age and current motivation to stdout."""
        for label, value in (('Name:', self.name),
                             ('Age:', self.age),
                             ('Motivation:', self.motivation)):
            print(label, value)
# Need to dislocate it
def SmartRandom(level):  # For creating an appropriately difficult NPC
    """
    SmartRandom

    Generates NPC stats scaled to the requested difficulty relative to
    MainCharacter.charisma (a module-level global).

    Args:
        level (int): 0 to 4 (easy, medium, hard, extreme, unreal)

    Returns:
        tuple: (name, age, gender, charisma, motivation) for a new NPC

    Raises:
        ValueError: if level is outside 0..4.  Bug fix: the old code fell
        through all elif branches and crashed with an UnboundLocalError
        on MIN_charisma instead.
    """
    if level == 0:  # Easy — charisma floor is absolute zero, not relative
        MIN_charisma = 0
        MAX_charisma = MainCharacter.charisma + 2
        MIN_motivation = 50
        MAX_motivation = 70
    elif level == 1:  # Medium
        MIN_charisma = MainCharacter.charisma
        MAX_charisma = MainCharacter.charisma + 5
        MIN_motivation = 50
        MAX_motivation = 60
    elif level == 2:  # Hard
        MIN_charisma = MainCharacter.charisma + 5
        MAX_charisma = MainCharacter.charisma + 15
        MIN_motivation = 30
        MAX_motivation = 50
    elif level == 3:  # Extreme
        MIN_charisma = MainCharacter.charisma + 15
        MAX_charisma = MainCharacter.charisma + 35
        MIN_motivation = 20
        MAX_motivation = 40
    elif level == 4:  # Unreal
        MIN_charisma = MainCharacter.charisma + 35
        MAX_charisma = MainCharacter.charisma + 65
        MIN_motivation = 0
        MAX_motivation = 30
    else:
        raise ValueError("level must be in 0..4, got %r" % (level,))
    age = randint(18, 80)
    gender = randint(0, 1)
    charisma = randint(MIN_charisma, MAX_charisma)
    motivation = randint(MIN_motivation, MAX_motivation)
    if gender == 0:
        name = choice(npcFemaleNames)
    else:
        name = choice(npcMaleNames)
    return name, age, gender, charisma, motivation
class worshipper(People):
    """A devoted follower who recruits new believers on his own.

    [charisma]           - coefficient of recruitment success (1 to 10)
    [newcomers_per_time] - people recruited per tick (initially 1 to 10)
    [all_newcomers]      - running total of people ever recruited
    """
    def __init__(self, name, age, gender, charisma, motivation, picture, newcomers_per_time, all_newcomers = 0):
        # NOTE(review): `motivation` is accepted but never stored — confirm
        # whether it should be kept, like NPC.motivation is.
        People.__init__(self, name, gender, age, picture)
        self.charisma = charisma
        self.newcomers_per_time = newcomers_per_time
        self.all_newcomers = all_newcomers
    def autoRecruitment(self):
        """Recruit charisma * newcomers_per_time people, then reschedule itself.

        WARNING: re-arms a threading.Timer every 10 seconds forever; there
        is no handle kept, so the loop cannot be cancelled once started.
        """
        self.all_newcomers += self.charisma * self.newcomers_per_time
        print(str(self.name) + ' has already recrited ' + str(int(self.all_newcomers)) + ' believers and has just added +' + str(self.newcomers_per_time * self.charisma))
        threading.Timer(10, self.autoRecruitment).start()
#aTutorial()
#w1 = worshipper('John', 21, 1, 7, 100, 0, 1)
#w1.autoRecruitment()
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,863
|
VelDia/Cult_game
|
refs/heads/master
|
/npc_data.py
|
# File with NPC phrases
npcMaleNames = ['John', 'Boris', 'Andrew', 'Emmet', 'Eliot', 'Bob', 'Steve', 'Tony', 'Michel', 'Peter', 'Frank', 'George']
npcFemaleNames = ['Lara', 'Emma', 'Natasha', 'Scarlet', 'Helen', 'Linda', 'Rachel', 'Tina', 'Sofi', 'Clara', 'Eleonor', 'Sonya']
npcGreeting = ['Hello', 'Main respecturen', 'Hi MF!', 'Oh, hi']
npcAnswear = ['Yes', 'Why not, yeah', 'Ok', 'Hell yeah']
npcAnswearBad = ['No', 'No! Never', 'Emmm, no', 'What?!']
npcFarewell = ['Have a nice day!', 'Thanks, good bye', 'Bye', "Good bye"]
npcFarewellBad = ['Get hell outa here!', '*Door slaps*', 'Fuck off!']
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,864
|
VelDia/Cult_game
|
refs/heads/master
|
/Web_GUI_test.py
|
import eel
import pickle
fileName = 'data'
fileExistsFlag = False
eel.init('web')
try:
infile = open(fileName,'rb')
user_info = pickle.load(infile)
infile.close()
print(user_info)
fileExistsFlag = True
except IOError:
print("Enter data please")
@eel.expose
def get_info(name, gender, age):
    """Persist the player's name/gender/age to the pickle save file.

    Called from the JS front-end via eel.  An empty name or age falls back
    to 'User' / 18; gender "Male" maps to 1, anything else to 0.  The
    tuple (name, age, gender_code) is pickled to the module-level
    ``fileName`` path.
    """
    if str(name) == '' or age == '':
        name = 'User'
        age = 18
    age = int(age)
    if gender == "Male":
        user_info = (str(name), age, 1)
    else:
        user_info = (str(name), age, 0)
    # Bug fix: use a context manager so the file handle is closed even if
    # pickle.dump raises (the old open/close pair leaked on error).
    with open(fileName, 'wb') as outfile:
        pickle.dump(user_info, outfile)
    print("Name = " + name)
    print("Gender = " + gender)
    print("Age = " + str(age))
try:
if fileExistsFlag == True:
eel.start('html/main.html', host='localhost', size=(500, 500))
else:
eel.start('html/start_page.html', host='localhost', size=(500, 500))
except:
print("End of program")
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,865
|
VelDia/Cult_game
|
refs/heads/master
|
/Perlin_noise/Playing_perlin.py
|
# Render 2D Perlin-noise frames to grayscale PNGs.
from perlin import PerlinNoiseFactory
import PIL.Image
size = 800       # output image is size x size pixels
res = 40         # pixels per noise-space unit
frames = 1       # number of animation frames to render
frameres = 5     # frames per time unit in noise space
space_range = size//res
frame_range = frames//frameres  # NOTE(review): 1//5 == 0 — confirm the intended
                                # time tiling; 0 may disable the time dimension
pnf = PerlinNoiseFactory(3, octaves=3, tile=(space_range, space_range, frame_range))
for t in range(frames):
    img = PIL.Image.new('L', (size, size))
    for x in range(size):
        for y in range(size):
            n = pnf(x/res, y/res, t/frameres)
            # map noise from [-1, 1] to [0, 255] with rounding
            img.putpixel((x, y), int((n + 1) / 2 * 255 + 0.5))
            print( int((n + 1) / 2 * 255 + 0.5))
    img.save("noiseframe{:03d}.png".format(t))
    print(t)
|
{"/Tutorial.py": ["/Characters.py", "/character_data.py"], "/Characters.py": ["/character_data.py", "/npc_data.py"]}
|
16,866
|
hitswint/TemperatureMonitor
|
refs/heads/master
|
/TM/urls.py
|
from django.conf.urls import url
from TM.views import index, index_plot
# from swint.models import Article, Category
urlpatterns = [
url(r'^$', index, name='index-view'),
url(r'^plot$', index_plot, name='index-plot-view'),
]
|
{"/TM/urls.py": ["/TM/views.py"], "/TM/views.py": ["/TM/models.py", "/TM/gl.py"]}
|
16,867
|
hitswint/TemperatureMonitor
|
refs/heads/master
|
/TM/models.py
|
from django.db import models
# * ArticleSwint
class Temperature(models.Model):
    """A single temperature reading reported by the sensor client."""
    # time: auto-filled creation timestamp; value: reported temperature text
    time = models.DateTimeField(verbose_name=u"时间", auto_now_add=True)
    value = models.TextField(verbose_name=u"温度")
    class Meta():
        ordering = [
            'time',
        ]
    def __unicode__(self):
        # Bug fix: the model has no `title` field, so `return self.title`
        # raised AttributeError whenever the object was stringified
        # (admin, shell, logging).  Show the reading instead.
        return str(self.value)
    __str__ = __unicode__
# Create your models here.
|
{"/TM/urls.py": ["/TM/views.py"], "/TM/views.py": ["/TM/models.py", "/TM/gl.py"]}
|
16,868
|
hitswint/TemperatureMonitor
|
refs/heads/master
|
/TM/views.py
|
# from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponse
# from django.views.generic import ListView
# from django.conf import settings
from TM.models import Temperature
# import logging
from django.views.decorators.csrf import csrf_exempt
import sqlite3
# 查看tables:apt安装sqlite3,然后sqlite3 db.sqlite3,输入.tables。
import matplotlib.pyplot as plt
# import os
import TM.gl as gl
from channels import Group
# matplotlib.use('Agg')
# import matplotlib
# logger = logging.getLogger(__name__)
# Create your views here.
@csrf_exempt
def index(request):
    """Main endpoint shared by the sensor client and the browser UI.

    POST (sensor device): stores the submitted ``temperature_data`` and
    replies with the current on/off flag so the device can sync its state.
    GET with ``on_off_button`` (browser): updates the global on/off flag,
    broadcasts it to the 'default' channels group, and returns it.
    Plain GET: renders the index page with the current flag.
    """
    if request.method == 'POST':
        add = Temperature(value=request.POST.get("temperature_data", ""))
        add.save()  # save() is required, otherwise nothing reaches the database
        return HttpResponse(gl.ON_OFF)
    else:
        on_off_value = request.GET.get('on_off_button')
        if on_off_value:
            gl.ON_OFF = int(on_off_value)
            # Channel('websocket.receive').send({'text': str(gl.ON_OFF)})
            Group('default').send({'text': str(gl.ON_OFF)})
            return HttpResponse(gl.ON_OFF)
        # temperature_list = Temperature.objects.all()
        return render_to_response('TM/index.html', {'on_off': gl.ON_OFF})
def index_plot(request):
    """Render the latest 500 readings to a PNG and return its path.

    Reads straight from the sqlite file (see the .tables note at the top
    of this module), plots row id vs. temperature with matplotlib, saves
    the figure to a static file and returns the file path as the HTTP
    response body (the front-end uses it as an image URL).
    """
    # 从sqlite中获取数据 (fetch the data from sqlite)
    conn = sqlite3.connect('db.sqlite3')
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM TM_Temperature")
        data = cur.fetchall()
    finally:
        # Bug fix: the connection was never closed, leaking one database
        # handle per request.
        conn.close()
    data_0 = [int(row[0]) for row in data][-500:]
    data_2 = [float(row[2]) for row in data][-500:]
    plot_file = 'static/TM/plot.png'
    fig1, ax1 = plt.subplots(figsize=(8, 4), dpi=98)
    ax1.set_title(u'房间温度', fontproperties='KaiTi')
    ax1.set_xlabel(u'时间(小时)', fontproperties='KaiTi')
    ax1.set_ylabel(u'温度(\u2103)', fontproperties='KaiTi')
    plt.ylim(-30, 30)
    ax1.plot(
        data_0,
        data_2, )
    fig1.savefig(plot_file)
    plt.close(fig1)  # release the figure so repeated requests don't accumulate
    # temperature_list = Temperature.objects.all()
    return HttpResponse(plot_file)
# * Base_Mixin
# class Base_Mixin(object):
# """Basic mix class."""
# def get_context_data(self, *args, **kwargs):
# context = super(Base_Mixin, self).get_context_data(**kwargs)
# try:
# # 网站标题等内容
# context['website_title'] = settings.WEBSITE_TITLE
# except Exception:
# logger.error(u'[BaseMixin]加载基本信息出错')
# return context
# * Index_View
# class Index_View(Base_Mixin, ListView):
# """view for index.html"""
# model = Temperature
# # 或者
# # queryset = Temperature.objects.all()
# template_name = 'TM/index.html'
# context_object_name = 'temperature_list'
# # def get(self, request, *args, **kwargs):
# # article_id = self.kwargs.get('id')
# # # 如果ip不存在就把文章的浏览次数+1。
# # if ip not in visited_ips:
# # try:
# # article = self.queryset.get(id=article_id)
# # except ArticleSwint.DoesNotExist:
# # logger.error(u'[ArticleView]访问不存在的文章:[%s]' % article_id)
# # raise Http404
# # else:
# # article.view_times += 1
# # article.save()
# # visited_ips.append(ip)
# # # 更新缓存
# # cache.set(article_id, visited_ips, 15 * 60)
# # return super(Article_View, self).get(request, *args, **kwargs)
# @csrf_exempt
# def post(self, request, *args, **kwargs):
# add = Temperature(value=request.POST)
# add.save() # 不save无法保存到数据库
# # 或者
# # Temperature.objects.create(value=request.POST)
# kwargs['Temp'] = request.POST + 1
# return super(Index_View, self).post(request, *args, **kwargs)
|
{"/TM/urls.py": ["/TM/views.py"], "/TM/views.py": ["/TM/models.py", "/TM/gl.py"]}
|
16,869
|
hitswint/TemperatureMonitor
|
refs/heads/master
|
/TM/gl.py
|
ON_OFF = 0
|
{"/TM/urls.py": ["/TM/views.py"], "/TM/views.py": ["/TM/models.py", "/TM/gl.py"]}
|
16,870
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/models.py
|
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as layers
from AutoconLayer import AutoconLayer
def get_bartimaeus(sequence_length, rec_units, drop1, dense_units, drop2):
    """Build an LSTM classifier: LSTM -> dropout -> tanh dense -> dropout -> softmax.

    Expects input of shape [sequence_length, 19] (presumably the 19 sensor
    channels — confirm against the feature extractor) and outputs a 22-way
    softmax over the movement classes.

    Args:
        sequence_length: timesteps per input sequence.
        rec_units: LSTM hidden units.
        drop1, drop2: dropout rates after the LSTM / dense layers.
        dense_units: width of the intermediate dense layer.

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(layers.LSTM(rec_units, input_shape=[sequence_length,19]))
    model.add(layers.Dropout(drop1))
    model.add(layers.Dense(dense_units, activation='tanh', kernel_initializer=tf.keras.initializers.lecun_normal()))
    model.add(layers.Dropout(drop2))
    model.add(layers.Dense(22, activation='softmax'))
    return model
def get_rnn(sequence_length, rec_units, drop1, dense_units, drop2):
model = tf.keras.Sequential()
model.add(layers.SimpleRNN(rec_units, input_shape=[sequence_length, 19]))
model.add(layers.Dropout(drop1))
model.add(layers.Dense(dense_units, activation='tanh', kernel_initializer=tf.keras.initializers.lecun_normal()))
model.add(layers.Dropout(drop2))
model.add(layers.Dense(22, activation='softmax'))
return model
def get_dwarf(sequence_length, rec_units, drop):
model = tf.keras.Sequential()
model.add(layers.LSTM(rec_units, input_shape=[sequence_length,19]))
model.add(layers.Dropout(drop))
model.add(layers.Dense(22, activation='softmax'))
return model
def get_nathanael(sequence_length):
model = tf.keras.Sequential()
model.add(layers.LSTM(60, input_shape=[sequence_length,19]))
model.add(layers.Dropout(0.5))
#model.add(LSTM(50))
model.add(layers.Dense(32, activation='tanh'))
#model.add(layers.Dropout(0.8))
model.add(layers.Dense(22, activation='softmax'))
return model
def get_ptolemaeus(sequence_length):
model = tf.keras.Sequential()
model.add(layers.LSTM(60, input_shape=[sequence_length,19]))
model.add(layers.Dropout(0.8))
#model.add(LSTM(50))
model.add(layers.Dense(32, activation='tanh'))
model.add(layers.Dropout(0.8))
model.add(layers.Dense(22, activation='softmax'))
return model
def get_grindelwald(sequence_length):
model = tf.keras.Sequential()
model.add(layers.LSTM(80, input_shape=[sequence_length, 19]))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(64, activation='relu', kernel_initializer=tf.keras.initializers.he_normal()))
model.add(layers.Dense(22, activation='softmax'))
return model
def get_autoconceptor(sequence_length):
model = tf.keras.Sequential()
model.add(AutoconLayer(output_dim=50, alpha=200, lam=0.001, batchsize=32, layer_norm=True, reuse=None))
model.add(layers.Dense(32, activation='tanh'))
#model.add(layers.Dropout(0.8))
model.add(layers.Dense(22, activation='softmax'))
return model
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,871
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/utils.py
|
import sys
import os
import csv
import itertools
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy import signal
from scipy import ndimage
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
classes = ['run', 'walk', 'stand', 'sit', 'sit-to-stand', 'stand-to-sit',
'stair-up', 'stair-down', 'jump-one-leg', 'jump-two-leg', 'curve-left-step',
'curve-right-step', 'curve-left-spin-Lfirst', 'curve-left-spin-Rfirst',
'curve-right-spin-Lfirst', 'curve-right-spin-Rfirst', 'lateral-shuffle-left',
'lateral-shuffle-right','v-cut-left-Lfirst', 'v-cut-left-Rfirst', 'v-cut-right-Lfirst', 'v-cut-right-Rfirst']
sensors = ['EMG1', 'EMG2', 'EMG3', 'EMG4', 'Microphone', 'ACC upper X', 'ACC upper Y','ACC upper Z', 'Goniometer X',
'ACC lower X', 'ACC lower Y', 'ACC lower Z', 'Goniometer Y', 'Gyro upper X', 'Gyro upper Y', 'Gyro upper Z',
'Gyro lower X', 'Gyro lower Y', 'Gyro lower Z']
variance_sensors = ['EMG1', 'EMG2', 'EMG3', 'EMG4', 'Microphone']
smooth_sensors = ['ACC upper X', 'ACC upper Y','ACC upper Z', 'Goniometer X','ACC lower X', 'ACC lower Y',
'ACC lower Z', 'Goniometer Y', 'Gyro upper X', 'Gyro upper Y', 'Gyro upper Z', 'Gyro lower X',
'Gyro lower Y', 'Gyro lower Z']
data_path = "/Users/thomasklein/Projects/BremenBigDataChallenge2019/bbdc_2019_Bewegungsdaten/"
# --------- utility functions ---------- #
def smooth(data, windowsize, std):
    """
    Smoothes a 1d-data array with a normalized Gaussian kernel.

    Args:
        data: 1d numpy array.
        windowsize: kernel length in samples.
        std: Gaussian sigma.

    Returns:
        'valid'-mode convolution, length len(data) - windowsize + 1.
    """
    # Fix: scipy.signal.gaussian was deprecated and removed in SciPy >= 1.13;
    # use the canonical location and fall back for old installations.
    try:
        kernel = signal.windows.gaussian(windowsize, std=std)
    except AttributeError:
        kernel = signal.gaussian(windowsize, std=std)
    kernel /= np.sum(kernel)  # normalize so smoothing preserves the mean level
    return np.convolve(data, kernel, 'valid')
def variance_filter(data, windowsize):
    """
    Calculates the local spread of a signal by evaluating a sliding window.

    Args:
        data: 1d numpy array.
        windowsize: window length; each output covers data[i-half:i+half).

    Returns:
        1d array of length len(data) - 2 * (windowsize // 2).

    Note: despite the name, this computes the standard deviation (np.std),
    not the variance — kept as-is because callers depend on that scale.
    """
    half = windowsize//2
    # Bug fix: the output was sized len(data) - windowsize, but the loop
    # produces len(data) - 2*half values, which raised an IndexError for
    # every odd windowsize.  Sizing by 2*half matches the loop exactly and
    # is identical to the old behavior for even window sizes.
    res = np.zeros(data.shape[0] - 2 * half)
    for i in range(half,len(data)-half):
        res[i-half] = np.std(data[i-half:i+half])
    return res
def sample(data, num_samples):
    """
    Picks num_samples evenly spaced elements from a 1d-signal.

    Positions are int-truncated points on [0, len(data) - 1], so the first
    and last element are always included when num_samples >= 2.
    """
    idx = np.linspace(0, data.shape[0] - 1, num_samples).astype(int)
    return data[idx]
def recurrent_feature_extractor(data, num_samples):
    """
    Extracts features from a 19-dimensional sequence.

    data = 2d-numpy array of shape [timesteps, sensors]
    num_samples = how many samples to extract per channel

    Returns a list of 19 1d-arrays: variance features for the EMG /
    microphone channels first, then smoothed features for the motion
    sensors — each z-normalized and subsampled to num_samples points.
    Raises ValueError if any extracted feature contains NaN.
    """
    def smooth_extractor(data, num_samples):
        """
        Samples a signal after smoothing it.
        data = 1d-numpy array of length timestep
        num_samples = how many samples to extract
        """
        smoothed = smooth(data,500,200)
        sstd = np.std(smoothed)
        if sstd == 0:
            sstd = 0.00001  # guard against division by zero on flat signals
        smoothed = (smoothed - np.mean(smoothed))/sstd
        return sample(smoothed, num_samples)
    def variance_extractor(data, num_samples):
        """
        Samples the local variance of a signal.
        data = 1d-numpy array of length timesteps
        num_samples = how many samples to extract
        """
        var_data = smooth(variance_filter(data,windowsize=200), windowsize=5, std=0.8)
        vstd = np.std(var_data)
        if vstd == 0:
            vstd = 0.00001  # guard against division by zero
        var_data = (var_data - np.mean(var_data))/vstd
        return sample(var_data, num_samples)
    features = []
    for sensor in variance_sensors:
        features.append(variance_extractor(data[:,sensors.index(sensor)], num_samples))
    if(np.isnan(np.array(features)).any()):
        raise ValueError("Error in variance")
    for sensor in smooth_sensors:
        features.append(smooth_extractor(data[:,sensors.index(sensor)], num_samples))
    if(np.isnan(np.array(features)).any()):
        raise ValueError("Error in smooth")
    return features
def threaded_recurrent_feature_extractor(data, num_samples):
    """
    Thread-pooled version of recurrent_feature_extractor.

    data = 2d-numpy array of shape [timesteps, sensors]
    num_samples = how many samples to extract per channel

    Returns variance features (EMG/microphone channels) followed by
    smoothed features (motion sensors), matching the order produced by
    recurrent_feature_extractor.
    """
    # Bug fix: this function previously referenced `variance_extractor`
    # and `smooth_extractor` as free names, but those helpers only exist
    # nested inside recurrent_feature_extractor — calling this function
    # raised NameError.  The helpers are defined locally here.
    def smooth_extractor(channel, n):
        # Smooth, z-normalize (guarding zero std), then subsample.
        smoothed = smooth(channel, 500, 200)
        sstd = np.std(smoothed)
        if sstd == 0:
            sstd = 0.00001
        smoothed = (smoothed - np.mean(smoothed)) / sstd
        return sample(smoothed, n)

    def variance_extractor(channel, n):
        # Local-variance track, lightly smoothed, z-normalized, subsampled.
        var_data = smooth(variance_filter(channel, windowsize=200), windowsize=5, std=0.8)
        vstd = np.std(var_data)
        if vstd == 0:
            vstd = 0.00001
        var_data = (var_data - np.mean(var_data)) / vstd
        return sample(var_data, n)

    pool = ThreadPool(8)
    variance_sequences = [data[:, sensors.index(s)] for s in variance_sensors]
    smooth_sequences = [data[:, sensors.index(s)] for s in smooth_sensors]
    var_results = pool.starmap(variance_extractor, zip(variance_sequences, itertools.repeat(num_samples)))
    if(np.isnan(np.array(var_results)).any()):
        raise ValueError("NaN after variance feature extraction")
    smo_results = pool.starmap(smooth_extractor, zip(smooth_sequences, itertools.repeat(num_samples)))
    if(np.isnan(np.array(smo_results)).any()):
        raise ValueError("NaN after smoothing variance extraction")
    pool.close()
    pool.join()
    return var_results + smo_results
def old_feature_extractor(data, num_samples):
"""
I wrote a new version of this, but apparently the extracted features were worse, so...
"""
def old_variance_extractor(data, num_samples):
"""
Samples the local variance of a signal.
Differences: variance-data is smoothed, and it is not normalized to the mean, only divided by the max
data = 1d-numpy array of length timesteps
num_samples = how many samples to extract
"""
var_data = smooth(variance_filter(data,windowsize=100), windowsize=100, std=25)
vmax = np.max(var_data)
if(vmax == 0):
vmax = 0.00001
var_data = var_data/vmax
return sample(var_data, num_samples)
def old_smooth_extractor(data, num_samples):
"""
Samples a signal after smoothing it.
data = 1d-numpy array of length timestep
num_samples = how many samples to extract
"""
smoothed = smooth(data,200,50)
smax = np.max(smoothed)
if smax == 0:
smax = 0.00001
normalized = smoothed/smax
return sample(normalized, num_samples)
features = []
for sensor in variance_sensors:
features.append(old_variance_extractor(data[:,sensors.index(sensor)], num_samples))
if(np.isnan(np.array(features)).any()):
raise ValueError("Error in variance")
for sensor in smooth_sensors:
features.append(old_smooth_extractor(data[:,sensors.index(sensor)], num_samples))
if(np.isnan(np.array(features)).any()):
raise ValueError("Error in smooth")
return features
def split_dataset(file, train_name, test_name, percentage=10):
    """
    Splits the csv that lists the original dataset into train and test csvs.

    Args:
        file: path of the original csv.
        train_name / test_name: output names (written as <name>.csv).
        percentage: 1/percentage of the rows go into the test set.

    Writes train_name.csv and test_name.csv; together they contain every
    original row exactly once.
    """
    df = pd.read_csv(file)
    headers = list(df)
    files = df.values
    # Bug fix: np.random.randint samples WITH replacement, so the old code
    # could place duplicate rows in the test set, and train + test then no
    # longer partitioned the data.  choice(..., replace=False) is exact.
    indices = np.random.choice(files.shape[0], size=files.shape[0] // percentage, replace=False)
    testset = np.take(files, indices, axis=0)
    files = np.delete(files, indices, axis=0)
    odf = pd.DataFrame(files)
    odf.columns = headers
    odf.to_csv(train_name+".csv")
    tdf = pd.DataFrame(testset)
    tdf.columns = headers
    tdf.to_csv(test_name+".csv")
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def serialize_example(featurelist, label, id, subject):
"""
Creates a tf.Example message from the list of features and the label, where
every element in the featurelist is actually a sequence=ndarray
"""
feature = {}
for i in range(len(featurelist)):
feature['feature'+str(i)] = tf.train.Feature(float_list=tf.train.FloatList(value=list(featurelist[i])))
#_float_feature(featurelist[i])
feature['label'] = _int64_feature(label)
feature['subject'] = _int64_feature(subject)
feature['id'] = _int64_feature(id)
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def dataset_creator(in_file, outfile, feature_extractor, *args):
"""
Creates a dataset (i.e. outfile.tfrecords) from in_file.csv
"""
df = pd.read_csv(in_file)
id = 0
with tf.python_io.TFRecordWriter(outfile+".tfrecords") as writer:
for index, row in df.iterrows():
if(index % 100 == 0):
print("Digesting",row['Datafile'])
if(row['Label'] in classes):
path = row['Datafile']
data = pd.read_csv(data_path+path).values
label = classes.index(row['Label'])
subject = int(row['Subject'][-2:])
extracted_featurelist = feature_extractor(data, args[0])
serialized_example = serialize_example(extracted_featurelist, label, id, subject)
id = id + 1
writer.write(serialized_example)
else:
print(row['Label'],"not in known classes!")
def challenge_dataset_creator(in_file, outfile, feature_extractor, *args):
"""
Creates a dataset (i.e. outfile.tfrecords) from in_file.csv
"""
df = pd.read_csv(in_file)
id = 0
with tf.python_io.TFRecordWriter(outfile+".tfrecords") as writer:
for index, row in df.iterrows():
if(index % 100 == 0):
print("Digesting",row['Datafile'])
path = row['Datafile']
data = pd.read_csv(data_path+path).values
subject = int(row['Subject'][-2:])
extracted_featurelist = feature_extractor(data, args[0])
serialized_example = serialize_example(extracted_featurelist, 0, id, subject)
id = id + 1
writer.write(serialized_example)
def read_recurrent_dataset(path,
sequence_length,
batchsize,
filter_subjects=None,
filter_ids=None,
id_mode=None,
sub_mode=None,
training=True,):
"""
mode = whether to only yield elements that are in the lists or whether to ignore elements that are in the list
"""
if not id_mode is None and not id_mode in ['include', 'exclude']:
raise ValueError("Mode unknwon: Has to be 'include' or 'exclude'")
if not sub_mode is None and not sub_mode in ['include', 'exclude']:
raise ValueError("Mode unknwon: Has to be 'include' or 'exclude'")
if not filter_subjects is None:
filter_subjects_tensor = tf.constant(filter_subjects, dtype=tf.int64)
if not filter_ids is None:
filter_ids_tensor = tf.constant(filter_ids, dtype=tf.int64)
features = {}
for i in range(19):
features['feature'+str(i)] = tf.FixedLenFeature([sequence_length], tf.float32, default_value=np.zeros((sequence_length)))
features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)
features['subject'] = tf.FixedLenFeature([], tf.int64, default_value=0)
features['id'] = tf.FixedLenFeature([], tf.int64, default_value=0)
# global_means = tf.constant(np.load("global_means.npy"), dtype=tf.float32)
# global_vars = tf.constant(np.load("global_vars.npy"), dtype=tf.float32)
def _parse_function(example_proto):
parsed_features = tf.parse_single_example(example_proto, features)
data = []
for i in range(19):
data.append(parsed_features['feature'+str(i)])
data = tf.reshape(data, (sequence_length,19))
# # data 80, 19
# data = data - global_means
# data = data / global_vars
return data, tf.one_hot(parsed_features['label'],22)
def _filter_by_subjects(example_proto):
parsed_features = tf.parse_single_example(example_proto, features)
subject = parsed_features['subject']
if(sub_mode == 'exclude'):
#return not subject in filter_subjects
return tf.logical_not(tf.reduce_any(tf.equal(subject,filter_subjects_tensor), axis=0))
else:
#return subject in filter_subjects
return tf.reduce_any(tf.equal(subject,filter_subjects_tensor), axis=0)
def _filter_by_ids(example_proto):
parsed_features = tf.parse_single_example(example_proto, features)
id = parsed_features['id']
if(id_mode == 'exclude'):
#return not id in filter_ids
return tf.logical_not(tf.reduce_any(tf.equal(id,filter_ids_tensor), axis=0))
else:
# mode == include, return id in filter_ids
return tf.reduce_any(tf.equal(id,filter_ids_tensor), axis=0)
def _noise(example_proto):
parsed_features = tf.parse_single_example(example_proto, features)
data = []
for i in range(19):
data.append(parsed_features['feature'+str(i)])
data = tf.reshape(data, (sequence_length,19))
# data is of shape samples x sensors
means, vars = tf.nn.moments(data, axes=[0])
new_data = []
for i in range(19):
noise = tf.random.normal(shape=(sequence_length,1),
mean=0.0,
stddev=vars[i]*3.0,
dtype=tf.float32)
#print("noise:",noise)
#print("data:",data[:,i])
new_data.append(tf.reshape(data[:,i], [sequence_length, 1]) + noise)
#print("result:", tf.reshape(data[:,i], [sequence_length, 1]) + noise)
data = tf.stack(new_data, axis=1)
#print(data)
return tf.reshape(data, [sequence_length, 19]), tf.one_hot(parsed_features['label'],22)
dataset = tf.data.TFRecordDataset(path)
if not filter_subjects is None:
dataset = dataset.filter(_filter_by_subjects)
if not filter_ids is None:
dataset = dataset.filter(_filter_by_ids)
if training:
dataset=dataset.map(_noise)
else:
dataset = dataset.map(_parse_function)
dataset.shuffle(1000)
dataset = dataset.batch(batchsize, drop_remainder=training)
#dataset = dataset.prefetch(1)
dataset = dataset.repeat()
return dataset
def get_partial_mean(data):
    """Return (per-column mean of *data*, number of rows) for weighted pooling."""
    row_count = data.shape[0]
    column_means = np.mean(data, axis=0)
    return column_means, row_count
def get_partial_variance(data):
    """Return (per-column np.std of *data*, number of rows).

    NOTE(review): despite the name, this computes the standard deviation
    (np.std), not the variance — kept as-is for behavioral compatibility.
    """
    row_count = data.shape[0]
    column_stds = np.std(data, axis=0)
    return column_stds, row_count
def global_info(directory):
    """Compute dataset-wide per-column statistics and save them as .npy files.

    Recursively scans *directory* for ``*.csv`` files (skipping any path
    containing ``challenge.csv`` or ``train.csv``), computes each file's
    per-column mean and standard deviation, and saves the row-count-weighted
    averages to ``global_means.npy`` and ``global_vars.npy`` in the current
    working directory.

    NOTE(review): ``global_vars.npy`` actually holds a weighted average of
    per-file standard deviations, not a pooled variance — preserved for
    backward compatibility with the files other code loads.

    Improvements over the original: renamed ``vars`` (shadowed the builtin),
    guard-clause filtering instead of a negated condition, single read per file.
    """
    from pathlib import Path
    meanlist = []
    stdlist = []
    weightlist = []
    for filename in Path(directory).glob('**/*.csv'):
        # skip the label files; only raw sensor csvs contribute statistics
        if "challenge.csv" in str(filename) or "train.csv" in str(filename):
            continue
        data = pd.read_csv(filename).values
        meanlist.append(np.mean(data, axis=0))
        stdlist.append(np.std(data, axis=0))
        weightlist.append(data.shape[0])
    means = np.array(meanlist)
    stds = np.array(stdlist)
    # weight each file's statistics by its share of the total row count
    weights = np.array(weightlist) / np.sum(weightlist)
    weighted_means = (means.T * weights).T
    weighted_stds = (stds.T * weights).T
    print(weighted_means.shape)
    print(weighted_stds.shape)
    np.save("global_means.npy", np.sum(weighted_means, axis=0))
    np.save("global_vars.npy", np.sum(weighted_stds, axis=0))
if __name__ == "__main__":
    # Build the train and challenge TFRecord files from the raw BBDC csvs.
    # dataset_creator / challenge_dataset_creator / recurrent_feature_extractor
    # and data_path are defined earlier in this module (outside this view).
    #global_info("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bbdc_2019_Bewegungsdaten/")
    dataset_creator(data_path+"train.csv",
                    "./data/sparse/rawdata", recurrent_feature_extractor, 80)
    challenge_dataset_creator(data_path+"challenge.csv",
                              "./data/sparse/rawchallenge", recurrent_feature_extractor, 80)
    # Manual smoke-test of the tf.data pipeline, kept for reference:
    # tf.enable_eager_execution()
    # np.random.seed(42)
    # #indices = np.random.randint(0, 6384, 638)
    # indices = np.arange(0,1)
    # print(indices)
    # ds = read_recurrent_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords", 30, 1, filter_ids=indices, mode='include', training=True)
    # res = ds.take(1)
    # for r in res:
    #     print(r)
    # ds = read_recurrent_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords", 30, 1, filter_ids=indices, mode='include', training=True)
    # res = ds.take(1)
    # for r in res:
    #     print(r)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,872
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/abgabe.py
|
import numpy as np
import tensorflow as tf
import pandas as pd
from utils import read_recurrent_dataset
from tensorflow.keras import layers
# Inference script: loads a trained Keras model and writes a BBDC submission csv.
name = "shorty_mit0806_full"  # model identifier, used to locate the .h5 file
sequence_length = 30  # timesteps per example; must match the trained model
# Index in this list == integer class label produced by the network.
classes = ['run', 'walk', 'stand', 'sit', 'sit-to-stand', 'stand-to-sit',
           'stair-up', 'stair-down', 'jump-one-leg', 'jump-two-leg', 'curve-left-step',
           'curve-right-step', 'curve-left-spin-Lfirst', 'curve-left-spin-Rfirst',
           'curve-right-spin-Lfirst', 'curve-right-spin-Rfirst', 'lateral-shuffle-left',
           'lateral-shuffle-right','v-cut-left-Lfirst', 'v-cut-left-Rfirst', 'v-cut-right-Lfirst', 'v-cut-right-Rfirst']
# NOTE(review): utils.read_recurrent_dataset appears to take id_mode=/sub_mode=
# keyword args now; verify `mode=None` still matches its signature.
dataset = read_recurrent_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawchallenge.tfrecords",
                                 sequence_length,
                                 1,
                                 filter_subjects=None,
                                 filter_ids=None,
                                 mode=None,
                                 training=False)
model = tf.keras.models.load_model("/Users/thomasklein/Projects/BremenBigDataChallenge2019/models/model_archive/new_shit/"+name+"/"+name+".h5")
results = model.predict(dataset, steps=1738)#should be of size examples,22
#print(results)
predictions = np.argmax(results, axis=1) # should be of shape num_samples
#print(predictions)
df = pd.read_csv("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/challenge.csv")
# map integer predictions back to label strings for the submission file
predicted_labels = [classes[int(x)] for x in predictions]
#for index, row in df.iterrows():
df['Label'] = predicted_labels
df.to_csv("abgabe_"+name+".csv", index=False)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,873
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/autoconceptor.py
|
"""
The Autoconceptor, adapted from Jaeger 2017, and the DynStateTuple that
is used to store the conceptor matrix.
"""
import numpy as np
import collections
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.python.layers import base as base_layer
# following the design of LSTM state tuples
_DynStateTuple = collections.namedtuple("DynStateTyple", ["C", "h"])


class DynStateTuple(_DynStateTuple):
    """State tuple `(C, h)` for RNN cells that carry a conceptor matrix.

    C is the conceptor matrix, h the hidden state of the RNN.
    Modeled on LSTMStateTuple in tensorflow/python/ops/rnn_cell_impl.py.
    """
    __slots__ = ()

    @property
    def dtype(self):
        matrix_dtype = self.C.dtype
        state_dtype = self.h.dtype
        if matrix_dtype != state_dtype:
            raise TypeError("Matrix and internal state should agree on type: %s vs %s" %
                            (str(matrix_dtype), str(state_dtype)))
        return matrix_dtype
class Autoconceptor(tf.nn.rnn_cell.BasicRNNCell):
    """
    Autoconceptor RNN cell, adapted from Jaeger 2017.

    Maintains, per batch element, a conceptor matrix C that is updated each
    step and multiplied into the hidden state (see call()).
    """

    def __init__(self, num_units, alpha, lam, batchsize,
                 activation=tf.nn.tanh, reuse=None, layer_norm=False, dtype=tf.float32,
                 initializer=None):
        """
        Args:
            num_units = hidden state size of RNN cell
            alpha = alpha for autoconceptor, used to calculate aperture as alpha**-2
            lam = lambda for autoconceptor, scales conceptor-matrix
            batchsize = number of training examples per batch (we need this to allocate memory properly)
            activation = which nonlinearity to use (tanh works best, relu only with layer norm)
            reuse = whether to reuse variables, just leave this as None
            layer_norm = whether to apply layer normalization, not necessary if using tanh
            initializer = which initializer to use for the weight matrix, good idea is to use init_ops.constant_initializer(0.05 * np.identity(num_units))
        """
        super(Autoconceptor, self).__init__(num_units=num_units, activation=activation, reuse=reuse)
        self.num_units = num_units
        self.c_lambda = tf.constant(lam, name="lambda")  # scales the C-matrix update
        self.batchsize = batchsize
        self.conceptor_built = False
        self.layer_norm = layer_norm
        self._activation = activation
        # aperture enters the update rule as alpha**(-2)
        self.aperture_fact = tf.constant(alpha**(-2), name="aperture")
        # NOTE(review): _state_size is a concrete zero-state tuple of tensors,
        # not static sizes — confirm downstream RNN wrappers accept this.
        self._state_size = self.zero_state(batchsize, dtype)
        self.initializer = initializer or init_ops.constant_initializer(0.05 * np.identity(num_units))
        #no idea what this does, to be honest
        self.input_spec = base_layer.InputSpec(ndim=2)

    # these two properties are necessary to pass assert_like_rnn_cell test in static_rnn and dynamic_rnn
    @property
    def state_size(self):
        return self._state_size

    @property
    def output_size(self):
        return self.num_units

    def zero_state(self, batch_size, dtype):
        """
        Returns the zero state for the autoconceptor cell.

        batch_size = the number of elements per batch
        dtype = the dtype to be used, stick with tf.float32

        The zero state is a DynStateTuple consisting of a C-matrix filled with zeros,
        shape [batchsize, num_units, num_units] and a zero-filled hidden state of
        shape [batchsize, num_units]
        """
        return DynStateTuple(C=tf.zeros([batch_size, self.num_units, self.num_units], dtype=dtype),
                             h=tf.zeros([batch_size, self.num_units], dtype=dtype))

    def build(self, inputs_shape):
        """
        Builds the cell by defining variables (input weights/bias and the
        recurrent weight matrix). Overrides method from super-class.
        """
        print("inputs shape at autoconceptor: ", inputs_shape) # None, 80, 19
        if inputs_shape[2] is None:
            raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                             % inputs_shape)
        input_dim = inputs_shape[2]
        self.W_in = self.add_variable(
            "W_in",
            shape=[input_dim, self.num_units],
            initializer=init_ops.random_normal_initializer(),
            dtype=self.dtype)
        self.b_in = self.add_variable(
            "b_in",
            shape=[self.num_units],
            initializer= init_ops.zeros_initializer(),
            dtype=self.dtype)
        self.W = self.add_variable(
            "W",
            shape=[self.num_units, self.num_units],
            initializer=self.initializer,
            dtype=self.dtype)
        #tf.get_variable("gamma", shape=shape, initializer=gamma_init)
        # tf.get_variable("beta", shape=shape, initializer=beta_init)
        self.built = True

    # def _norm(self, inp, scope="layer_norm"):
    #     """
    #     Performs layer normalization on the hidden state.
    #     inp = the input to be normalized
    #     scope = name for the variable scope, just leave as default
    #     Returns inp normalized by learned parameters gamma and beta
    #     """
    #     #shape = inp.get_shape()[-1:]
    #     #gamma_init = init_ops.constant_initializer(1)
    #     #beta_init = init_ops.constant_initializer(1)
    #     #with tf.variable_scope(scope):
    #     #    tf.get_variable("gamma", shape=shape, initializer=gamma_init)
    #     #    tf.get_variable("beta", shape=shape, initializer=beta_init)
    #     normalized = layers.layer_norm(inp)
    #     return normalized

    def call(self, inputs, h):
        """
        Performs one step of this Autoconceptor Cell.

        inputs = the input batch, shape [batchsize, input_dim]
        h = the DynStateTuple containing the preceding state

        Returns output, state
        where output = output at this time step
              state  = new hidden state and C-matrix as DynStateTuple
        """
        print("inputs in call, should be 32x19:",inputs)
        C, state = h
        print("C in call, should be 32x50x50:", C)
        print("State in call, should be 32x50, I guess:", h)
        # so far, standard RNN logic
        state = self._activation(
            (tf.matmul(inputs, self.W_in) + self.b_in) + (tf.matmul(state, self.W))
        )
        # if layer norm is activated, normalize layer output as explained in Ba et al. 2016
        if(self.layer_norm):
            state = layers.layer_norm(state)#self._norm(state)
        # lift the state to rank 3 so it can be matmul'ed with the per-example C
        state = tf.reshape(state, [-1, 1, self.num_units])
        # updating C following update rule presented by Jaeger
        C = C + self.c_lambda * ( tf.matmul(tf.transpose((state - tf.matmul(state, C)), [0,2,1]), state) - tf.scalar_mul(self.aperture_fact,C) )
        # multiplying state with C
        state = tf.matmul(state, C)
        # Reshapes necessary for std. matrix multiplication, where one matrix
        # for all elements in batch vs. fast-weights matrix -> different for every
        # element!
        state = tf.reshape(state, [-1, self.num_units])
        return state, DynStateTuple(C, state)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,874
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/lstm_network.py
|
import numpy as np
import tensorflow as tf
from utils import *
from tensorflow.keras import layers
from models import *
# Training script: fits a recurrent model on the BBDC movement TFRecords,
# excluding a random id subset (used as validation) and two held-out subjects.
name = "BigRandomAvoider"  # run identifier, used for log/checkpoint paths
sequence_length = 80
batchsize = 32
data = "/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords"
model_path = "/Users/thomasklein/Projects/BremenBigDataChallenge2019/models/"
archive_path = model_path + "model_archive/new_shit/"
modus = 'train'

model = get_bartimaeus(sequence_length, rec_units=128, drop1=0.6, dense_units=64, drop2=0.4)#get_dwarf(sequence_length, rec_units=19, drop=0.35)
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

if modus == 'train':
    # NOTE(review): seed is commented out, so the train/validation id split
    # differs between runs — confirm this is intentional.
    #np.random.seed(42)
    indices = np.random.randint(0, 6384, 638)
    avoided_subjects = [6,7]  # subjects excluded from both splits
    dataset = read_recurrent_dataset(data, sequence_length, batchsize, filter_ids=indices, filter_subjects=avoided_subjects, id_mode='exclude', sub_mode='exclude', training=True)
    validation_set = read_recurrent_dataset(data, sequence_length, batchsize, filter_ids=indices, filter_subjects=avoided_subjects, id_mode='include', sub_mode='exclude', training=False)
# elif modus == 'subject_train':
#     indices = [18,19]
#     dataset = read_recurrent_dataset(data, sequence_length, batchsize, filter_subjects=indices, mode='exclude', training=True)
#     validation_set = read_recurrent_dataset(data, sequence_length, batchsize, filter_subjects=indices, mode='include', training=False)
# elif modus == 'full':
#     dataset = read_recurrent_dataset(data, sequence_length, batchsize, training=True)
#     validation_set = read_recurrent_dataset(data, sequence_length, batchsize, training=False)

callbacks = [
    # Write TensorBoard logs to `./logs` directory
    tf.keras.callbacks.TensorBoard(log_dir=archive_path+name+"_"+modus),
    tf.keras.callbacks.ModelCheckpoint(filepath=archive_path+name+"_"+modus+"/"+name+"_"+modus+".h5",
                                       save_best_only=True,
                                       period=1)
]
model.fit(x=dataset,
          epochs=150,
          steps_per_epoch=6384//batchsize,
          validation_data=validation_set,
          validation_steps=638//batchsize,
          callbacks = callbacks)
#tf.keras.models.save_model(model,archive_path+name+"_"+modus+"/"+name+"_"+modus+".h5",overwrite=False)
print("Mission accomplished.")
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,875
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/network.py
|
import numpy as np
import tensorflow as tf
from tfrecord_converter import read_dataset
from tensorflow.keras import layers
# Baseline dense classifier trained on flat 30-dim precomputed feature vectors.
model = tf.keras.Sequential()
model.add(layers.Dense(64, activation='tanh', input_shape=(30,))) #set value to shape-1
model.add(layers.Dense(64, activation='tanh'))
model.add(layers.Dense(22, activation='softmax'))  # 22 movement classes
model.compile(optimizer=tf.train.AdamOptimizer(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

dataset = read_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/cafeteria/minifeat.tfrecords")
testset = read_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/cafeteria/minifeat_test.tfrecords")

#val_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))
#val_dataset = val_dataset.batch(32).repeat()

callbacks = [
    # Write TensorBoard logs to `./logs` directory
    tf.keras.callbacks.TensorBoard(log_dir='./cafeteria/logs/minifeat2')
]
# dataset repeats indefinitely, so steps_per_epoch defines one epoch
model.fit(x=dataset,
          epochs=3000,
          steps_per_epoch=6384//32,
          validation_data=testset,
          validation_steps=100,
          callbacks = callbacks)
model.save_weights('./cafeteria/weights/minifeatmodel2')
#model.load_weights('./weights/my_model')
result = model.predict(dataset, steps=1)
print(result)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,876
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/legacy/tfrecord_converter.py
|
import numpy as np
import pandas as pd
import tensorflow as tf
#tf.enable_eager_execution()
def _float_feature(value):
    """Returns a float_list from a float / double (single-element list)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint (single-element list)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def convert_csv_to_tfrecords(file, out_path="features.tfrecords"):
    """Serialize a feature csv into a TFRecord file.

    Each csv row becomes one tf.train.Example with float features
    'feature0'..'featureN-1' (all columns but the last) and an int64 'label'
    from the last column.

    Args:
        file: path of the csv to convert (read via pandas).
        out_path: destination TFRecord path. Defaults to the previously
            hard-coded "features.tfrecords", so existing callers are unaffected.
    """
    csv = pd.read_csv(file).values

    def serialize_example(featurelist):
        """Build one serialized tf.train.Example from a csv row."""
        # Create a dictionary mapping the feature name to the type of list
        feature = {}
        for i in range(csv.shape[1]-1):
            feature['feature'+str(i)] = _float_feature(featurelist[i])
        feature['label'] = _int64_feature(int(featurelist[-1]))
        example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
        return example_proto.SerializeToString()

    with tf.python_io.TFRecordWriter(out_path) as writer:
        for row in csv:
            example = serialize_example(row)
            writer.write(example)
def read_dataset(training_path):
    """Build a shuffled, batched (32, drop remainder), repeating tf.data
    pipeline over flat 30-float feature records with a one-hot 22-class label."""
    # set values to shape-1
    features = {}
    for i in range(30):
        features['feature'+str(i)] = tf.FixedLenFeature([], tf.float32, default_value=0)
    features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)

    def _parse_function(example_proto):
        # Deserialize one Example into (feature list, one-hot label).
        parsed_features = tf.parse_single_example(example_proto, features)
        data = []
        for i in range(30):
            data.append(parsed_features['feature'+str(i)])
        return data, tf.one_hot(parsed_features['label'],22)

    dataset = tf.data.TFRecordDataset(training_path)
    dataset = dataset.map(_parse_function)
    dataset = dataset.shuffle(1000)
    dataset = dataset.batch(32, drop_remainder=True)
    dataset = dataset.prefetch(1)
    dataset = dataset.repeat()
    return dataset
def read_recurrent_dataset(path):
    """Legacy pipeline: parse 19 sensor channels of fixed length 100 into
    (sequence_length, 19) tensors with a one-hot 22-class label; shuffled,
    batched (32, drop remainder), repeating."""
    sequence_length = 100
    features = {}
    for i in range(19):
        features['feature'+str(i)] = tf.FixedLenFeature([sequence_length], tf.float32, default_value=np.zeros((sequence_length)))
    features['label'] = tf.FixedLenFeature([], tf.int64, default_value=0)

    def _parse_function(example_proto):
        # Stack the 19 per-channel vectors and reshape to (time, channels).
        parsed_features = tf.parse_single_example(example_proto, features)
        data = []
        for i in range(19):
            data.append(parsed_features['feature'+str(i)])
        return tf.reshape(data, (sequence_length,19)), tf.one_hot(parsed_features['label'],22)

    dataset = tf.data.TFRecordDataset(path)
    dataset = dataset.map(_parse_function)
    dataset = dataset.shuffle(1000)
    dataset = dataset.batch(32, drop_remainder=True)
    dataset = dataset.prefetch(1)
    dataset = dataset.repeat()
    return dataset
if __name__ == "__main__":
    # Smoke-test entry point; the actual conversion calls are kept commented out.
    print(tf.__version__)
    #convert_csv_to_tfrecords("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/smallfeatures.csv")
    # tf.enable_eager_execution()
    # res = read_recurrent_dataset("/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/recurrent/rec_features.tfrecords")
    # for val in res.take(1):
    #     print(val)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,877
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/extend_training.py
|
import numpy as np
import tensorflow as tf
from utils import *
from tensorflow.keras import layers
# Continue training a previously saved model for another 50 epochs.
name = "big_ohne_r2"  # model identifier inside the archive directory
sequence_length = 80
data = "/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/data/sparse/rawdata.tfrecords"
model_path = "/Users/thomasklein/Projects/BremenBigDataChallenge2019/bigdatachallenge/models/"
archive_path = model_path + "model_archive/"

# presumably the saved .h5 carries the compile state — explicit compile kept commented
model = tf.keras.models.load_model(archive_path+name+"/"+name+".h5")
# model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])

# Deterministic id split: the same 638 ids are held out for validation.
np.random.seed(42)
indices = np.random.randint(0, 6384, 638)
# NOTE(review): utils.read_recurrent_dataset now appears to take
# id_mode=/sub_mode= keywords (not mode=) — verify these calls still match.
dataset = read_recurrent_dataset(data, sequence_length, filter_ids=indices, mode='exclude')
validation_set = read_recurrent_dataset(data, sequence_length, filter_ids=indices, mode='include')

callbacks = [
    # Write TensorBoard logs to `./logs` directory
    tf.keras.callbacks.TensorBoard(log_dir=archive_path+name),
    tf.keras.callbacks.ModelCheckpoint(filepath=model_path+"checkpoints/"+name+".ckpt",
                                       save_best_only=True,
                                       period=20)
]
model.fit(x=dataset,
          epochs=50,
          steps_per_epoch=6384//32,
          validation_data=validation_set,
          validation_steps=638//32,
          callbacks = callbacks)
tf.keras.models.save_model(model,archive_path+name+".h5",overwrite=True)
print("Mission accomplished.")
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,878
|
thoklei/bigdatachallenge
|
refs/heads/master
|
/AutoconLayer.py
|
import collections
import tensorflow as tf
import numpy as np
import tensorflow.keras.layers as layers
from tensorflow.python.ops import init_ops
from autoconceptor import Autoconceptor
class AutoconLayer(layers.RNN):
    """Keras RNN layer wrapping an Autoconceptor cell.

    alpha / lam / batchsize are forwarded unchanged to the Autoconceptor;
    output_dim is the cell's hidden-state size.
    """

    def __init__(self, output_dim, alpha, lam, batchsize, activation=tf.nn.tanh, layer_norm=False, reuse=None, **kwargs):
        self.output_dim = output_dim
        # BUG FIX: the caller-supplied `activation` was silently ignored and
        # tf.nn.tanh hard-coded. Forward it instead; the default preserves
        # the old behaviour for existing callers.
        self._cell = Autoconceptor(output_dim, alpha, lam, batchsize,
                                   activation=activation, reuse=reuse, layer_norm=layer_norm,
                                   dtype=tf.float32, initializer=None)
        super(AutoconLayer, self).__init__(cell=self._cell, **kwargs)

    def build(self, input_shape):
        print("input shape:", input_shape)
        # Make sure to call the `build` method at the end
        self._cell.build(input_shape)
        #super(AutoconLayer, self).build(input_shape)

    def compute_output_shape(self, input_shape):
        # Output keeps all leading dims; the last dim becomes output_dim.
        shape = tf.TensorShape(input_shape).as_list()
        shape[-1] = self.output_dim
        return tf.TensorShape(shape)

    def get_config(self):
        # NOTE(review): alpha/lam/batchsize are not serialized here, so
        # from_config(**config) cannot fully rebuild the layer — confirm
        # before relying on model round-tripping.
        base_config = super(AutoconLayer, self).get_config()
        base_config['output_dim'] = self.output_dim
        return base_config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
|
{"/models.py": ["/AutoconLayer.py"], "/abgabe.py": ["/utils.py"], "/lstm_network.py": ["/utils.py", "/models.py"], "/extend_training.py": ["/utils.py"], "/AutoconLayer.py": ["/autoconceptor.py"]}
|
16,947
|
Abuelodelanada/hume
|
refs/heads/master
|
/humetools.py
|
#!/usr/bin/env python3
import sys
import argparse
class NotImplementedAction(argparse.Action):
    """Argparse action for options whose behaviour is not implemented yet.

    Lets you declare future CLI options now; selecting one aborts the
    program with an explanatory message.

    Usage:
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument("--missing",
                            help="This will do something in the future",
                            action=NotImplementedAction)

    GIST URL: https://gist.github.com/buanzo/2a004348340ef79b0139ab38f719cf1e
    """

    def __call__(self, parser, namespace, values, option_string=None):
        sys.exit('Argument "{}" still not implemented.'.format(option_string))
|
{"/hume.py": ["/humetools.py"]}
|
16,948
|
Abuelodelanada/hume
|
refs/heads/master
|
/humed.py
|
#!/usr/bin/python3
import zmq
import json
import pidfile
import sqlite3
import datetime
# import systemd.daemon
from pprint import pprint
DEBUG = True  # TODO: set to False :P
# hume is VERY related to logs
# better /var/humed/humed.sqlite3 ?
DBPATH = '/var/log/humed.sqlite3'  # production location of the sqlite spool
if DEBUG:
    # during development keep the spool database in the working directory
    DBPATH = './humed.sqlite3'
class Humed():
    """zmq -> sqlite spooler for hume messages.

    Binds a PULL socket on config['listen_url'], json-decodes each incoming
    hume and appends it (unsent) to the sqlite `transfers` table at DBPATH.
    """

    def __init__(self,
                 listen_url='tcp://127.0.0.1:198'):
        self.config = {}
        self.config['listen_url'] = listen_url
        if self.prepare_db() is False:
            # BUG FIX: this module never imports `sys`, so the original
            # sys.exit(...) raised NameError. SystemExit exits identically.
            raise SystemExit('Humed: Error preparing database')

    def prepare_db(self):
        """Connect to DBPATH and ensure the transfers table exists.

        Returns True on success, False if table creation fails.
        NOTE(review): a failed connect only prints — self.conn stays unset
        and the next line raises AttributeError; confirm intended.
        """
        print('Preparing DB')
        try:
            self.conn = sqlite3.connect(DBPATH)
        except Exception as ex:
            print(ex)
            print('Humed: cannot connect to sqlite3 on "{}"'.format(DBPATH))
        self.cursor = self.conn.cursor()
        try:
            sql = '''CREATE TABLE IF NOT EXISTS
                     transfers (ts timestamp, sent boolean, hume text)'''
            print(sql)
            self.cursor.execute(sql)
            self.conn.commit()
        except Exception as ex:
            print(ex)
            return(False)
        print('DB OK')
        return(True)

    def transfer_ok(self, rowid):
        """Mark spool row `rowid` as sent. Returns True/False."""
        try:
            sql = 'UPDATE transfers SET sent=1 WHERE rowid=?'
            self.cursor.execute(sql, (rowid,))
            self.conn.commit()
        except Exception as ex:
            print(ex)
            return(False)
        return(True)

    def add_transfer(self, hume):
        """json-encode `hume` and insert it as unsent; returns rowid or None."""
        try:
            hume = json.dumps(hume)
        except Exception as ex:
            print('Humed - add_transfer() json dumps exception:')
            print(ex)
            return(None)  # FIX: should we exit?
        try:
            now = datetime.datetime.now()
            sql = 'INSERT INTO transfers(ts, sent, hume) VALUES (?,?,?)'
            self.cursor.execute(sql, (now, 0, hume,))
            self.conn.commit()
        except Exception as ex:
            print('Humed: add_transfer() Exception:')
            print(ex)
            return(None)
        return(self.cursor.lastrowid)

    def list_transfers(self, pending=False):
        """Return spooled rows (rowid first); only unsent ones if pending."""
        if pending is True:
            sql = 'SELECT rowid,* FROM transfers WHERE sent = 0'
        else:
            sql = 'SELECT rowid,* FROM transfers'
        lista = []
        rows = []
        try:
            self.cursor.execute(sql)
            rows = self.cursor.fetchall()
        except Exception as ex:
            print(ex)
        for row in rows:
            lista.append(row)
        return(lista)

    def processs_transfers(self):
        # NOTE(review): method name has a typo; kept — callers may rely on it.
        # TODO: The master/slave thingy...
        pendientes = self.list_transfers(pending=True)
        pprint(pendientes)
        for item in pendientes:
            # TODO: send to master-hume
            print(item)
            # if sent ok then:
            # self.transfer_ok(archivo=archivo)
            # if error return(False)
        return(True)

    def run(self):
        """Main loop: bind the PULL socket and spool every received hume."""
        # TODO: 1 - Initiate process-pending-humes-thread
        #       2 - Bind and Initiate loop
        sock = zmq.Context().socket(zmq.PULL)
        sock.bind(self.config['listen_url'])
        # 2a - Await hume message over zmp
        while True:
            hume = {}
            try:
                hume = json.loads(sock.recv())
            except Exception as ex:
                print(ex)
                print('Cannot json-loads the received message. Mmmmm...')
            # 2b - insert it into transfers
            self.add_transfer(hume)
            # pprint(self.list_transfers(pending=True))
            # TODO: 2c - log errors and rowids
        # TODO: deal with exits/breaks
def main():
    """Daemon entry point: acquire the pidfile, then start the Humed loop."""
    print('Starting process')
    try:
        # NOTE(review): the pidfile context only wraps the print, so the
        # lock is released immediately — confirm this is intended.
        with pidfile.PIDFile():
            print('Process started')
    except pidfile.AlreadyRunningError:
        print('Already running.')
        print('Exiting')
        # BUG FIX: `sys` is not imported in this module; sys.exit(1) raised
        # NameError. SystemExit(1) exits with the same status code.
        raise SystemExit(1)
    # Initialize Stuff
    print('initializing hume daemon')
    humed = Humed()
    # TODO: Tell systemd we are ready
    # systemd.daemon.notify('READY=1')
    print('Ready. serving...')
    humed.run()


if __name__ == '__main__':
    # TODO: Add argparse and have master and slave modes
    main()
|
{"/hume.py": ["/humetools.py"]}
|
16,949
|
Abuelodelanada/hume
|
refs/heads/master
|
/hume.py
|
#!/usr/bin/env python3
import os
import zmq
import sys
import stat
import psutil
import argparse
import json
from pprint import pprint
from humetools import NotImplementedAction
class Hume():
    """Builds a hume status message from parsed CLI args and pushes it to
    the humed daemon over zmq."""

    def __init__(self, args):
        # self.config = {'url': 'ipc:///tmp/hume.sock'}
        self.config = {'url': 'tcp://127.0.0.1:198'}
        # args
        self.args = args
        # Prepare object to send
        # Might end up moving some of this stuff around
        # But I like focusing blocks to be developed
        # in such a way that the code can grow organically
        # and be coder-assistive
        self.reqObj = {}
        # To store information related to how hume was executed
        self.reqObj['process'] = {}
        # Hume-specific information
        self.reqObj['hume'] = {}
        self.reqObj['hume']['level'] = args.level
        self.reqObj['hume']['msg'] = args.msg
        self.reqObj['hume']['tags'] = args.tags
        self.reqObj['hume']['task'] = args.task
        if self.args.append_pstree:
            self.reqObj['process']['tree'] = self.get_pstree()
        ln = self.get_lineno()
        if ln is not None:
            self.reqObj['process']['line_number'] = ln
        del ln
        # drop the 'process' section entirely when nothing was collected
        if (len(self.reqObj['process']) == 0):
            del(self.reqObj['process'])
        # TODO: process args and add items to reqObj
        pprint(self.args)
        #
        pprint(self.reqObj)
        print(self.config['url'])
        if self.config['url'].startswith('ipc://'):
            # BUG FIX: was `config['url']` (undefined name -> NameError);
            # must read the instance config.
            if self.test_unix_socket(self.config['url']):
                print('socket ok')
            else:
                print('socket not writable or other error')
                sys.exit(1)

    def test_unix_socket(self, url):
        """Return True iff `url` (ipc://...) points at a writable unix socket."""
        path = url.replace('ipc://', '')
        if not os.path.exists(path):
            return(False)
        mode = os.stat(path).st_mode
        isSocket = stat.S_ISSOCK(mode)
        if not isSocket:
            return(False)
        if os.access(path, os.W_OK):
            # OK, it's an actual socket we can write to
            return(True)
        return(False)

    def send(self, encrypt_to=None):
        """PUSH the hume (json) to the configured humed endpoint."""
        # TODO: If we were to encrypt, we would encapsulate
        # self.reqObj to a special structure:
        # {'payload': ENCRYPTED_ASCII_ARMORED_CONTENT,
        #  'encrypted': True}
        # or something like that
        print('connect')
        if encrypt_to is None:
            HUME = self.reqObj
        else:
            # BUG FIX: was `self.encrypt(gpg_encrypt_to)` — undefined name.
            # NOTE(review): encrypt() is not defined on this class yet, and
            # HUME is not actually what gets sent below — confirm intent.
            HUME = self.encrypt(encrypt_to)
        # The abstraction level of zeromq does not allow to
        # simply check for correctly sent messages. We should wait for a REPly
        # FIX: see if we can make REP/REQ work as required
        sock = zmq.Context().socket(zmq.PUSH)
        sock.setsockopt(zmq.SNDTIMEO, 5)
        sock.setsockopt(zmq.LINGER, 5)
        try:
            sock.connect(self.config['url'])
        except zmq.ZMQError as exc:
            print(exc)
            sys.exit(2)
        print('send_string')
        try:
            x = sock.send_string(json.dumps(self.reqObj))
        except zmq.ZMQError as exc:
            msg = "\033[1;33mEXCEPTION:\033[0;37m{}"
            print(msg.format(exc))
            sys.exit(3)
        print(x)
        print('fin')
        return(None)

    def get_pstree(self):  # FIX: make better version
        """Walk up the process tree, collecting pid/cmdline per ancestor."""
        ps_tree = []
        h = 0
        me = psutil.Process()
        parent = psutil.Process(me.ppid())
        while parent.ppid() != 0:
            ps_tree.append({'pid': parent.pid,
                            'cmdline': parent.cmdline(),
                            'order': h})
            parent = psutil.Process(parent.ppid())
            h = h+1
        return(ps_tree)

    def get_caller(self):
        """Return the grandparent process cmdline (the script calling hume)."""
        me = psutil.Process()
        parent = psutil.Process(me.ppid())
        grandparent = psutil.Process(parent.ppid())
        return(grandparent.cmdline())

    def get_lineno(self):
        """Return the shell's $LINENO if exported, else None."""
        try:
            return(os.environ['LINENO'])
        except Exception:
            # TODO: add stderr warning about no LINENO
            return(None)
def run():
    """CLI entry point: parse arguments and send one hume update."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-L", "--level",
                        choices=['ok', 'warn', 'error', 'info', 'debug'],
                        default="info",
                        help="Level of update to send, defaults to 'info'")
    parser.add_argument("-c", "--cmd",
                        choices=['counter-start',
                                 'counter-pause',
                                 'counter-stop',
                                 'counter-reset'],
                        default='',
                        required=False,
                        help="[OPTIONAL] Command to attach to the update.")
    parser.add_argument("-m", "--msg",
                        required=True,
                        help="[REQUIRED] Message to include with this update")
    parser.add_argument("-t", "--task",
                        required=False,
                        default='',
                        help="[OPTIONAL] Task name, for example BACKUPTASK.")
    parser.add_argument('-a', '--append-pstree',
                        action='store_true',
                        help="Append process calling tree")
    parser.add_argument('-T', '--tags',
                        type=lambda arg: arg.split(','),
                        help="Comma-separated list of tags")
    # NOTE: NotImplementedAction aborts the program if -e is used.
    parser.add_argument('-e', '--encrypt-to',
                        default=None,
                        action=NotImplementedAction,
                        dest='encrypt_to',
                        help="[OPTIONAL] Encrypt to this gpg pubkey id")
    args = parser.parse_args()
    Hume(args).send(encrypt_to=args.encrypt_to)


if __name__ == '__main__':
    run()
    sys.exit(0)
|
{"/hume.py": ["/humetools.py"]}
|
16,950
|
aybry/trussDFEM
|
refs/heads/master
|
/testModels.py
|
import numpy as np
from plotsAndTables import nodeTableFunc, elemTableFunc
# NODES
# node nr | x-coord | y-coord
# ELEMENTS
# element nr | start node | end node
# BEARINGS
# bearing type | node | orientation
# LOADS
# load nr | node | orientation | magnitude
def truss1():
    """Test model: 8 nodes, 13 elements, 2 bearings, 3 loads (see column
    legend at the top of this file)."""
    nodes = np.array([[1,0.,0.], [2,12.,0.], [3,2*12.,0.], [4,3*12.,0.], [5,4*12.,0.],
                      [6,12.,-9.], [7,2*12.,-9.], [8,3*12.,-9.]])
    nodeTable = nodeTableFunc(nodes)
    elements = np.array([[1,1,2], [2,2,3], [3,3,4], [4,4,5], [5,1,6], [6,6,7], [7,7,8], [8,8,5],
                         [9,2,6], [10,3,7], [11,4,8], [12,6,3], [13,3,8]])
    elemTable = elemTableFunc(elements)
    bearings = np.array([[1,1,1], [2,5,1]])
    loads = np.array([[1,2,3,100000], [2,3,3,200000], [3,4,3,300000]])
    E = 2e11; A = 0.01; poiss = 0.3
    return [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss]
def truss2():
    """Test model: 6 nodes, 8 elements, 2 bearings, 1 load."""
    nodes = np.array([[1,0,0], [2,12,6], [3,0,4], [4,4,4.666667], [5,9,5.5], [6,7,3.5]])
    nodeTable = nodeTableFunc(nodes)
    elements = np.array([[1,1,6], [2,6,2], [3,2,5], [4,5,6], [5,6,4], [6,4,3], [7,5,4], [8,4,1]])
    elemTable = elemTableFunc(elements)
    bearings = np.array([[1,1,4], [1,3,4]])
    loads = np.array([[1,2,3,50000]])
    E = 2e11; A = 0.01; poiss = 0.3
    return [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss]
def truss3():
    """Test model: 15 nodes, 26 elements, 2 bearings, 3 loads."""
    nodes = np.array([[1,0,0], [2,2,0], [3,4,0], [4,6,0], [5,1,0], [6,5,0], [7,3,2],
                      [8,2.5,1], [9,3.5,1], [10,1.5,1], [11,4.5,1], [12,0.75,0.5],
                      [13,2.25,1.5], [14,3.75,1.5], [15,5.25,0.5]]); nodeTable = nodeTableFunc(nodes)
    elements = np.array([[1,1,5], [2,5,2], [3,2,8], [4,8,7], [5,7,9], [6,9,3], [7,3,6], [8,6,4],
                         [9,1,12], [10,10,13], [11,7,14], [12,11,15], [13,5,10], [14,10,2], [15,10,8],
                         [16,9,11], [17,11,3], [18,11,6], [19,12,10], [20,13,7], [21,14,11], [22,15,4],
                         [23,5,12], [24,8,13], [25,9,14], [26,6,15]]); elemTable = elemTableFunc(elements)
    bearings = np.array([[1,1,1], [1,4,1]])
    loads = np.array([[1,7,3,500000], [2,10,3,500000], [3,11,3,500000]])
    E = 2e11; A = 0.1; poiss = 0.3
    return [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss]
def truss4(): # 6 nodes, fixed/roller bearings
    """Demo model 4: six-node truss with one fixed and one roller bearing.

    Returns [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss].
    """
    # node nr | x | y
    nodes = np.array([
        [1, 0., 0.], [2, 4., 0.], [3, 2., 1.],
        [4, 0., 2.], [5, 4., 2.], [6, 2., 3.],
    ])
    # element nr | start node | end node
    elements = np.array([
        [1, 1, 3], [2, 1, 4], [3, 3, 2], [4, 2, 5], [5, 5, 6],
        [6, 6, 3], [7, 6, 4], [8, 4, 3], [9, 5, 3],
    ])
    bearings = np.array([[1, 1, 1], [2, 2, 1]])  # fixed at node 1, roller at node 2
    loads = np.array([[1, 3, 3, 100000]])        # 100 kN downwards at node 3
    E = 210e9
    A = 0.001
    poiss = 0.3
    return [nodes, elements, bearings, loads,
            nodeTableFunc(nodes), elemTableFunc(elements), A, E, poiss]
def truss5():
    """Demo model 5: five-node truss in imperial units (compare the Duke handout).

    Returns [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss].
    """
    # node nr | x | y (inches: spans of 16 ft and 32 ft, 12 ft rise)
    nodes = np.array([
        [1, 0., 0.], [2, 16.*12, 0.], [3, 16.*12, 12.*12],
        [4, 32.*12, 0.], [5, 32.*12, 12.*12],
    ])
    # element nr | start node | end node
    elements = np.array([
        [1, 1, 3], [2, 1, 2], [3, 2, 3], [4, 3, 5],
        [5, 3, 4], [6, 2, 5], [7, 2, 4], [8, 4, 5],
    ])
    bearings = np.array([[1, 1, 1], [1, 4, 1]])      # fixed bearings at nodes 1 and 4
    loads = np.array([[1, 2, 3, 100], [2, 5, 4, 50]])
    # imperial units; compare: http://people.duke.edu/~hpgavin/cee421/truss-method.pdf
    E = 3e4
    A = 10
    poiss = 0.3
    return [nodes, elements, bearings, loads,
            nodeTableFunc(nodes), elemTableFunc(elements), A, E, poiss]
|
{"/testModels.py": ["/plotsAndTables.py"], "/convert.py": ["/testModels.py"], "/solver.py": ["/plotsAndTables.py"], "/trussDFEM.py": ["/testModels.py", "/plotsAndTables.py", "/solver.py"]}
|
16,951
|
aybry/trussDFEM
|
refs/heads/master
|
/convert.py
|
"""Export the five preset truss models to test_models.json.

Fixes over the previous revision:
- bearings/loads JSON fragments were never closed (malformed output);
- the load magnitude (column 3) was silently dropped;
- json.dumps() was called with no arguments (TypeError);
- getattr(next_func,) was called with a single argument (TypeError);
- numpy scalars are now converted to plain int/float so they serialize.
"""
import json

import testModels


def _model_to_dict(nodes, elements, bearings, loads):
    """Convert the numpy rows of one truss model into JSON-serializable dicts."""
    return {
        'nodes': {str(int(r[0])): [float(r[1]), float(r[2])] for r in nodes},
        'elements': {str(int(r[0])): [int(r[1]), int(r[2])] for r in elements},
        'bearings': {str(int(r[0])): [int(r[1]), int(r[2])] for r in bearings},
        # load row: nr | node | direction | magnitude — keep all three payload fields.
        'loads': {str(int(r[0])): [int(r[1]), int(r[2]), float(r[3])] for r in loads},
    }


all_models = {}
for i in range(1, 6):
    func_name = 'truss' + str(i)
    # Each trussN() returns [nodes, elements, bearings, loads, tables..., A, E, poiss];
    # only the first four arrays are serialized.
    nodes, elements, bearings, loads = getattr(testModels, func_name)()[:4]
    model = _model_to_dict(nodes, elements, bearings, loads)
    all_models[func_name] = model
    print(json.dumps(model, indent=2))

with open('test_models.json', 'w') as f:
    json.dump(all_models, f, indent=2)
|
{"/testModels.py": ["/plotsAndTables.py"], "/convert.py": ["/testModels.py"], "/solver.py": ["/plotsAndTables.py"], "/trussDFEM.py": ["/testModels.py", "/plotsAndTables.py", "/solver.py"]}
|
16,952
|
aybry/trussDFEM
|
refs/heads/master
|
/plotsAndTables.py
|
import numpy as np
import matplotlib.pyplot as plt
from terminaltables import SingleTable as SingTab
def updatePlot(nodes, nodesNew, elements, bearings, loads, fromSolver):
    """Plot the truss (nodes, elements, bearings, loads) on one matplotlib axes.

    Parameters:
    nodes      -- original node rows (node nr | x | y)
    nodesNew   -- deformed node rows; call sites pass nodes itself when fromSolver == 0
    elements   -- element rows (element nr | start node | end node)
    bearings   -- bearing rows (type | node | orientation)
    loads      -- load rows (load nr | node | orientation | magnitude)
    fromSolver -- 1: draw the original geometry faded plus the deformed geometry;
                  0: draw only the original geometry
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    alphaNew = 1
    if fromSolver == 1:
        alphaOrig = 0.2 # if coming from solver, fade the original truss to alpha = 0.2
        ax.plot(nodesNew[:,1], nodesNew[:,2], 'ok', markersize = 5) # plot nodes
    elif fromSolver == 0:
        alphaOrig = 1
        ax.plot(nodes[:,1], nodes[:,2], 'ok', markersize = 5, alpha = alphaOrig) # plot nodes
    if nodes[0,0] == 0:
        # A zero in the first node-number slot means the placeholder row was never filled.
        print ('\n At least add some nodes...')
        return
    # Collect the coordinates of the bearing nodes to derive a plot scaling factor.
    maxes = np.zeros((2*np.shape(bearings)[0]))
    for i in range(0, np.shape(bearings)[0]):
        # NOTE(review): for i == 0 this writes maxes[-1] (the last slot). Every slot
        # still ends up filled, so max(maxes) is unaffected, but the indexing looks
        # like an off-by-one — confirm intent.
        maxes[2*i-1] = nodes[int(bearings[i,1]-1),1]
        maxes[2*i] = nodes[int(bearings[i,1]-1),2]
    factor = max(maxes)/4 # multiplication factor for resizing
    if elements[0,0] != 0:
        for i in range(0, np.shape(elements)[0]): # plot elements (lines between connected nodes)
            pair = np.array([elements[i,1], elements[i,2]]).astype(int)
            ax.plot(nodes[pair-1, 1], nodes[pair-1, 2], 'k', alpha = alphaOrig)
        if fromSolver == 1:
            for i in range(0, np.shape(elements)[0]): # plot elements (lines between connected nodes)
                pair = np.array([elements[i,1], elements[i,2]]).astype(int)
                ax.plot(nodesNew[pair-1, 1], nodesNew[pair-1, 2], 'k', alpha = alphaNew)
    if bearings[0,0] != 0:
        for i in range(0, np.shape(bearings)[0]):
            try:
                # Orientation code 1..4 mapped to quarter-turn rotation angles.
                sinVal = np.sin((np.pi)*(bearings[i,2]-1)/2)
                cosVal = np.cos((np.pi)*(bearings[i,2]-1)/2)
            except IndexError: # if no bearings have been defined
                break
            plotBearing(bearings[i,0], nodes[int(bearings[i,1]-1),1],
                        nodes[int(bearings[i,1]-1),2], bearings[i,2],
                        ax, sinVal, cosVal, alphaOrig, factor)
        if fromSolver == 1:
            for i in range(0, np.shape(bearings)[0]):
                try:
                    sinVal = np.sin((np.pi)*(bearings[i,2]-1)/2)
                    cosVal = np.cos((np.pi)*(bearings[i,2]-1)/2)
                except IndexError: # if no bearings have been defined
                    break
                plotBearing(bearings[i,0], nodesNew[int(bearings[i,1]-1),1],
                            nodesNew[int(bearings[i,1]-1),2], bearings[i,2],
                            ax, sinVal, cosVal, alphaNew, factor)
    if loads[0,0] != 0:
        for i in range(0, np.shape(loads)[0]):
            try:
                sinVal = np.sin((np.pi)*(loads[i,2]-1)/2)
                cosVal = np.cos((np.pi)*(loads[i,2]-1)/2)
            except IndexError: # if no loads have been defined
                break
            plotLoad(nodesNew[int(loads[i,1]-1),1], nodesNew[int(loads[i,1]-1),2], sinVal, cosVal, ax, factor)
    if np.shape(nodes)[0] >= 2:
        plotAnnotations(nodesNew, elements, loads, ax, fromSolver)
    # Pad the axes limits by half the scaling factor so symbols are not clipped.
    xleft, xright = (min(nodesNew[:,1])-factor*0.5, max(nodesNew[:,1])+factor*0.5)
    ybottom, ytop = (min(nodesNew[:,2])-factor*0.5, max(nodesNew[:,2])+factor*0.5)
    ax.set_xlim(xleft, xright)
    ax.set_ylim(ybottom, ytop)
    ax.set_aspect('equal')
    plt.show()
    if fromSolver == 0:
        print ('\nClose plot to continue!')
    elif fromSolver == 1:
        print ('\nGoodbye!')
    return
def plotAnnotations(nodes, elements, loads, ax, fromSolver):
    """Annotate element numbers (blue) and load magnitudes (red) on the axes.

    `nodes` here is whichever coordinate set was actually plotted (the deformed
    coordinates when fromSolver == 1).
    """
    # nodeLabels is currently unused: the node-label annotation loop below is commented out.
    nodeLabels = [nodes[i,0].astype(int) for i in range(np.shape(nodes)[0])]
    loadLabels = [repr(loads[i,3]) + ' N' for i in range(np.shape(loads)[0])]
    # for label, x, y in zip(nodeLabels, nodes[:,1], nodes[:,2]):
    #     ax.annotate(
    #         label,
    #         xy=(x, y), xytext=(-3, 15),
    #         textcoords = 'offset points', ha = 'right', va = 'bottom',
    #         arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3, rad=0.3'))
    # if fromSolver == 0:
    elemLabels = [elements[i,0].astype(int) for i in range(np.shape(elements)[0])]
    for i in range(0, np.shape(elements)[0]):
        # Anchor each element label at the midpoint of the element.
        x1 = nodes[int(elements[i,1]-1),1]
        y1 = nodes[int(elements[i,1]-1),2]
        x2 = nodes[int(elements[i,2]-1),1]
        y2 = nodes[int(elements[i,2]-1),2]
        ax.annotate(
            elemLabels[i],
            xy=(0.5*(x2+x1), 0.5*(y2+y1)), xytext=(-3, 15), color = 'b',
            textcoords = 'offset points', ha = 'right', #va = 'bottom',
            arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3, rad=0.3'))
    if loads[0,0] != 0:
        for label, x, y in zip(loadLabels,
                               nodes[(loads[:,1]-1).astype(int),1],
                               nodes[(loads[:,1]-1).astype(int),2]):
            ax.annotate(
                label,
                xy=(x, y), xytext=(5, -5), color = 'r',
                textcoords = 'offset points', ha = 'left', va = 'top')
def plotBearing(bearType, bearX, bearY, orientation, ax, sinVal, cosVal, alpha, factor):
    """Draw one bearing symbol (triangle, base line, hatching) at (bearX, bearY).

    bearType 1 draws a fixed bearing (hatching directly under the base line);
    bearType 2 draws a roller (extra base line with the hatching below it).
    sinVal/cosVal encode the orientation rotation; factor scales the symbol size.
    """
    sinVal *= factor
    cosVal *= factor
    # Template coordinates of the symbol in its local (un-rotated) frame.
    xTriangle = [-0.12,0,0.12] # coordinates for main triangle shape
    yTriangle = [-0.18,0,-0.18]
    xBase1 = [-0.16,0.16]
    yBase1 = [-0.18, -0.18]
    xBase2 = [-0.16,0.16]
    yBase2 = [-0.2, -0.2]
    # Rotate the template by the orientation angle and translate it to the node.
    ax.plot(bearX + np.multiply(cosVal, xTriangle) - np.multiply(sinVal, yTriangle),
            bearY + np.multiply(sinVal, xTriangle) + np.multiply(cosVal, yTriangle), alpha = alpha, color = 'k')
    ax.plot(bearX + np.multiply(cosVal, xBase1) - np.multiply(sinVal, yBase1),
            bearY + np.multiply(sinVal, xBase1) + np.multiply(cosVal, yBase1), alpha = alpha, color = 'k')
    ax.set_aspect('equal')
    if bearType == 1:
        # Fixed bearing: hatch strokes directly under the first base line.
        for i in np.arange(0,1,0.1):
            ax.plot(bearX + np.multiply(cosVal, [0.16-(i)*0.32, 0.16-(i+0.1)*0.32]) - np.multiply(sinVal, [-0.18, -0.2]),
                    bearY + np.multiply(sinVal, [0.16-(i)*0.32, 0.16-(i+0.1)*0.32]) + np.multiply(cosVal, [-0.18, -0.2]),
                    alpha = alpha, color = 'k')
    elif bearType == 2:
        # Roller bearing: second base line, then the hatching below it.
        ax.plot(bearX + np.multiply(cosVal, xBase2) - np.multiply(sinVal, yBase2),
                bearY + np.multiply(sinVal, xBase2) + np.multiply(cosVal, yBase2),
                alpha = alpha, color = 'k')
        for i in np.arange(0,1,0.1):
            ax.plot(bearX + np.multiply(cosVal, [0.16-(i)*0.32, 0.16-(i+0.1)*0.32]) - np.multiply(sinVal, [-0.2, -0.22]),
                    bearY + np.multiply(sinVal, [0.16-(i)*0.32, 0.16-(i+0.1)*0.32]) + np.multiply(cosVal, [-0.2, -0.22]),
                    alpha = alpha, color = 'k')
    return
def plotLoad(xLoad, yLoad, sinVal, cosVal, ax, factor):
    """Draw a red load arrow (marker, stem and head) at (xLoad, yLoad).

    sinVal/cosVal encode the arrow direction; factor scales the arrow size.
    """
    sinVal *= 0.5*factor
    cosVal *= 0.5*factor
    # Arrow template in its local frame: the stem pulls (0,0) upwards for direction 1.
    stem = (np.array([0, 0]), np.array([0, 0.4]))
    head = (np.array([-0.05, 0, 0.05]), np.array([0.32, 0.4, 0.32]))
    ax.plot(xLoad, yLoad, 'or')
    for xs, ys in (stem, head):
        # Rotate the template and translate it to the load's node.
        ax.plot(xLoad + cosVal * xs - sinVal * ys,
                yLoad + sinVal * xs + cosVal * ys, 'r')
    return
def nodeTableFunc(nodes):
    """Build terminaltables-style row data for the node listing.

    Returns a list of rows: a header followed by [node nr, "(x, y)"] per node.
    Previously the rows were also stashed on the SingleTable *class* as
    table_data (mutating third-party state); every caller uses the return
    value, so that side effect has been dropped.
    """
    rows = [['Node Nr', '(x, y)']]
    for row in nodes:
        rows.append([int(row[0]), '(' + str(row[1]) + ', ' + str(row[2]) + ')'])
    return rows
def elemTableFunc(elements):
    """Build terminaltables-style row data for the element listing.

    Returns a header row followed by [element nr, start node, end node] rows.
    The previous revision also mutated SingleTable.table_data at class level;
    callers only use the return value, so that side effect has been dropped.
    """
    rows = [['Element Nr', 'From', 'To']]
    for row in elements:
        rows.append([int(row[0]), int(row[1]), int(row[2])])
    return rows
def stressTableFunc(elements, strain, stress):
    """Build terminaltables-style row data for the strain/stress listing.

    strain and stress are (elemNum, 1) arrays. Values are rescaled for display:
    strain shown in 1e-6 units, stress in 1e+6 units (MPa for SI inputs).
    The previous revision also mutated SingleTable.table_data at class level;
    callers only use the return value, so that side effect has been dropped.
    """
    stressAdj = stress/10**6
    strainAdj = strain*10**6
    rows = [['Element Nr', 'Strain', 'S11 Stress']]
    for i in range(len(elements)):
        rows.append([int(elements[i,0]),
                     str(round(strainAdj[i,0], 3)) + 'E-06',
                     str(round(stressAdj[i,0], 3)) + 'E+06'])
    return rows
|
{"/testModels.py": ["/plotsAndTables.py"], "/convert.py": ["/testModels.py"], "/solver.py": ["/plotsAndTables.py"], "/trussDFEM.py": ["/testModels.py", "/plotsAndTables.py", "/solver.py"]}
|
16,953
|
aybry/trussDFEM
|
refs/heads/master
|
/solver.py
|
import numpy as np
from numpy.linalg import solve as npsolve
from terminaltables import SingleTable as SingTab
from plotsAndTables import *
def solver(nodes, elements, bearings, loads, nodeTable, elemTable, A, E):
    """Assemble and solve the 2-D truss FE system, then report the results.

    Builds each element's stiffness matrix from its direction cosines,
    assembles the global stiffness matrix, applies bearings via a large
    penalty stiffness, solves K U = F for nodal displacements, and finally
    prints stresses and plots the deformed truss.

    nodeTable/elemTable are accepted for signature compatibility but unused
    here (the table printing lines below are commented out).
    """
    np.set_printoptions(precision = 4) # how many decimal places
    print ('\n -------------\
\n -- Results --\
\n -------------')
    # print AscTab(nodeTable).table
    # print AscTab(elemTable).table
    Eprint = E/(10**9)
    print ('\nYoung\'s modulus: E = ' + str(Eprint) + 'e9')
    print ('Cross-sectional area: A =', A)
    elemNum = np.shape(elements)[0]
    nodeNum = np.shape(nodes)[0]
    Kl = np.zeros((4, 4, elemNum)) # initialise local (element) stiffness matrices
    lengths = np.zeros((elemNum,1)) # initialise vector of element lengths for stress()
    F = np.zeros(2*nodeNum) # initialise load vector
    U = np.zeros(2*nodeNum) # initialise displacement vector
    K = np.zeros((2*nodeNum, 2*nodeNum)) # initialise global stiffness matrix
    for i in range(0, elemNum):
        # Endpoint coordinates of element i (node numbers are 1-based).
        x1 = nodes[(elements[i,1]-1).astype(int),1]
        y1 = nodes[(elements[i,1]-1).astype(int),2]
        x2 = nodes[(elements[i,2]-1).astype(int),1]
        y2 = nodes[(elements[i,2]-1).astype(int),2]
        elemLength = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
        lengths[i,0] = elemLength
        # Direction cosines of the element axis.
        c = (x2 - x1) / elemLength
        s = (y2 - y1) / elemLength
        # element stiffness matrix (per unit EA; scaled by E*A after assembly)
        Kl[:,:,i] = [[c**2, c*s, -c**2, -c*s],
                     [c*s, s**2, -c*s, -s**2],
                     [-c**2, -c*s, c**2, c*s],
                     [-c*s, -s**2, c*s, s**2]]
        Kl[:,:,i] = np.multiply(1/elemLength, Kl[:,:,i])
        node1 = elements[i,1]
        node2 = elements[i,2]
        first = (2*node1 - 2).astype(int) # separating nodes into X, Y DOF indices
        second = (2*node1 - 1).astype(int)
        third = (2*node2 - 2).astype(int)
        fourth = (2*node2 - 1).astype(int)
        # global stiffness matrix: scatter-add the element's 2x2 DOF sub-blocks
        K[first:second+1, first:second+1] += Kl[0:2,0:2,i]
        K[first:second+1, third:fourth+1] += Kl[0:2,2:4,i]
        K[third:fourth+1, first:second+1] += Kl[2:4,0:2,i]
        K[third:fourth+1, third:fourth+1] += Kl[2:4,2:4,i]
    for i in range(0, np.shape(bearings)[0]):
        # Apply bearings with the penalty method: a huge diagonal stiffness
        # effectively locks the DOF while keeping the system solvable.
        # this way leaves room to solve for reaction forces at a later date!
        node = bearings[i,1]
        first = (2*node - 2).astype(int) # X DOF index
        second = (2*node - 1).astype(int) # Y DOF index
        if bearings[i,0] == 1:
            # Fixed bearing: lock both X and Y.
            K[first, first] += 1e32
            K[second, second] += 1e32
        elif bearings[i,0] == 2:
            # Roller: lock only one DOF, chosen by the orientation code's parity.
            K[first, first] += (bearings[i,2] + 1) % 2 * 1e30 # if odd, add 1e30
            K[second, second] += (bearings[i,2]) % 2 * 1e30 # if even, add 1e30
    for i in range(0, np.shape(loads)[0]):
        # establish load vector: direction code 1..4 -> quarter-turn angle
        node = loads[i,1]
        first = (2*node - 2).astype(int) # X DOF index
        second = (2*node - 1).astype(int) # Y DOF index
        sinVal = np.sin((np.pi)*(loads[i,2]-1)/2)
        cosVal = np.cos((np.pi)*(loads[i,2]-1)/2)
        F[first] = - sinVal * loads[i,3]
        F[second] = cosVal * loads[i,3]
    K = np.multiply(E*A, K) # finalise stiffness matrix with E*A
    U = npsolve(K, F) # solve for displacements
    for i in range(0, np.shape(U)[0]):
        if np.abs(U[i]) < 1e-12:
            U[i] = 0 # change ~0 to =0 for clarity
        if np.abs(F[i]) < 1e-12:
            F[i] = 0
    multiplier = 100
    # Copy the node arrays so the originals stay untouched for plotting.
    nodesNew = np.zeros(np.shape(nodes)) + nodes
    nodesNewPlot = np.zeros(np.shape(nodes)) + nodes
    for i in range(0, np.shape(U)[0]):
        nodesNew[i//2, i%2+1] += U[i] # new coordinates of nodes after deformation
        nodesNewPlot[i//2, i%2+1] += U[i] * multiplier # exaggerated deformations for plotting
    # print '\nForce vector F = \n', F
    # print '\nGlobal stiffness matrix U = \n', K
    # print '\nDisplacement vector U =\n', U
    print ('\nGraph displacement scale factor: ', multiplier)
    stress(nodesNew, lengths, elements, bearings, loads, E)
    updatePlot(nodes, nodesNewPlot, elements, bearings, loads, 1) # 1: coming from solver (plots original + deformed)
    return
def stress(nodesNew, lengths, elements, bearings, loads, E):
    """Compute per-element strain and S11 stress after deformation and print them.

    Parameters:
    nodesNew -- deformed node rows (node nr | x | y)
    lengths  -- original element lengths, shape (elemNum, 1)
    elements -- element rows (nr | start node | end node)
    bearings, loads -- unused here; kept for signature compatibility with solver()
    E        -- Young's modulus

    Changes: removed the dead zero-initialisations of the strain/stress arrays
    (they were fully replaced below) and renamed the locals so they no longer
    shadow this function's own name.
    """
    elemNum = np.shape(elements)[0]
    lengthsNew = np.zeros((elemNum, 1))  # deformed element lengths
    for i in range(elemNum):
        xN1 = nodesNew[(elements[i,1]-1).astype(int), 1]
        yN1 = nodesNew[(elements[i,1]-1).astype(int), 2]
        xN2 = nodesNew[(elements[i,2]-1).astype(int), 1]
        yN2 = nodesNew[(elements[i,2]-1).astype(int), 2]
        lengthsNew[i] = np.sqrt((xN2 - xN1)**2 + (yN2 - yN1)**2)
    # strain = change in length / original length; stress = E * strain
    strainVec = (lengthsNew - lengths) / lengths
    stressVec = np.multiply(E, strainVec)
    stressTable = SingTab(stressTableFunc(elements, strainVec, stressVec), title = "Strain and Stress")
    stressTable.justify_columns[1] = stressTable.justify_columns[2] = 'right'
    print ("\n")
    print(stressTable.table)
    return
|
{"/testModels.py": ["/plotsAndTables.py"], "/convert.py": ["/testModels.py"], "/solver.py": ["/plotsAndTables.py"], "/trussDFEM.py": ["/testModels.py", "/plotsAndTables.py", "/solver.py"]}
|
16,954
|
aybry/trussDFEM
|
refs/heads/master
|
/trussDFEM.py
|
# Program for calculating the deformation of a simple truss via FEM
import numpy as np
import matplotlib.pyplot as plt
import os
from terminaltables import SingleTable as SingTab
from testModels import *
from plotsAndTables import *
from solver import *
def main():
    """Interactive entry point: build (or load) a truss model, then solve it.

    Loops over a numeric menu until option 7 is chosen, then prints the node
    and element tables and hands the model to solver().
    """
    clearScreen = os.system('cls')
    # NOTE(review): the bare 'clearScreen' expressions below are no-ops — the
    # screen was only cleared once by os.system above, and 'cls' is Windows-only.
    clearScreen
    print('\n -------------------------------- \
\n -- Truss Deformation with FEM -- \
\n -------------------------------- \
\n Author: Samuel Bryson')
    # Menu state and empty placeholder arrays (a leading zero row marks "empty").
    solve = 0; n = 0; choice = 0; nodeCount = 0
    elemCount = 0; bearCount = 0; loadCount = 0
    nodes = np.zeros((1,3)); elements = np.zeros((1,3))
    bearings = np.zeros((1,3)); loads = np.zeros((1,4))
    E = 210e9; A = 0.01; poiss = 0.3 # default values (vary depending on truss model)
    while solve != 1: # main loop
        clearScreen
        try:
            # clearScreen
            choice = float(input('\nOptions: \n 1 - Add node \n 2 - Add element \
\n 3 - Add bearing \n 4 - Add load \n 5 - Update plot \n 6 - Edit model\
\n 7 - Solve model \n 8 - Truss 1 \n 9 - Truss 2 \n 10 - Truss 3 \
\n 11 - Truss 4 \n 12 - Truss 5 \n\n Selection: '))
        except ValueError:
            # NOTE(review): message is stale — valid options actually run 1 to 12.
            print ('\nPlease enter a number between 1 to 8!')
        if choice == 1:
            [nodes, nodeTable, nodeCount] = addNode(nodes, nodeCount)
        elif choice == 2:
            # NOTE(review): nodeTable is unbound here if no node was added first.
            [elements, elemTable, elemCount] = addElement(elements, nodes, nodeTable, elemCount)
            elements = elements.astype(int) # consists only of element nr, node 1, node 2
        elif choice == 3:
            [bearings, bearCount] = addBearing(bearings, nodes, bearCount, nodeTable)
        elif choice == 4:
            [loads, loadCount] = addLoad(loads, nodes, loadCount, nodeTable)
        elif choice == 5:
            updatePlot(nodes, nodes, elements, bearings, loads, 0) # 0: not coming from solver (only plots basic structure)
        elif choice == 6:
            # NOTE(review): six positional arguments are passed here — confirm
            # that editTruss's signature accepts them.
            [E, A] = editTruss(nodes, nodeCount, nodeTable, elements, elemCount, elemTable)
        elif choice == 7:
            solve = 1
        elif choice == 8:
            [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss] = truss1()
            print ('\n Using values for truss 1.')
        elif choice == 9:
            [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss] = truss2()
            print ('\n Using values for truss 2.')
        elif choice == 10:
            [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss] = truss3()
            print ('\n Using values for truss 3.')
        elif choice == 11:
            [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss] = truss4()
            print ('\n Using values for truss 4.')
        elif choice == 12:
            [nodes, elements, bearings, loads, nodeTable, elemTable, A, E, poiss] = truss5()
            print ('\n Using values for truss 5.')
        choice = 0 # otherwise simply pressing "enter" without number input assumes previous option
    print("\n")
    # NOTE(review): nodeTable/elemTable are unbound if the user solves before
    # adding anything or loading a preset truss — NameError here in that case.
    print (SingTab(nodeTable, title = "Nodes").table)
    print (SingTab(elemTable, title = "Elements").table)
    solver(nodes, elements, bearings, loads, nodeTable, elemTable, A, E)
def addNode(nodes, nodeCount):
    """Prompt the user for x/y coordinates and append a new node row.

    Returns the updated [nodes, nodeTable, nodeCount].
    """
    fresh = np.zeros((1, 3))  # node nr | x-coord | y-coord
    fresh[0, 0] = nodeCount + 1
    print ('\nEnter x: ')
    fresh[0, 1] = getNumber()
    print ('\nEnter y: ')
    fresh[0, 2] = getNumber()
    # The very first node replaces the all-zero placeholder row.
    nodes = fresh if nodeCount == 0 else np.vstack((nodes, fresh))
    nodeTable = nodeTableFunc(nodes)
    nodeCount += 1
    print (SingTab(nodeTable, title = "Nodes").table)
    return [nodes, nodeTable, nodeCount]
def addElement(elements, nodes, nodeTable, elemCount):
    """Prompt for two node numbers and append a connecting element row.

    Returns the updated [elements, elemTable, elemCount].
    """
    print (SingTab(nodeTable, title = "Nodes").table)
    fresh = np.zeros((1, 3))  # element nr | start node | end node
    fresh[0, 0] = elemCount + 1
    print ('\n Which nodes would you like to connect?\n\n Enter first node: ')
    fresh[0, 1] = getNumber()
    print ('\n Enter second node: ')
    fresh[0, 2] = getNumber()
    # The very first element replaces the all-zero placeholder row.
    elements = fresh if elemCount == 0 else np.vstack((elements, fresh))
    elemTable = elemTableFunc(elements)
    elemCount += 1
    print (SingTab(elemTable, title = "Elements").table)
    return [elements, elemTable, elemCount]
def addBearing(bearings, nodes, bearCount, nodeTable):
    """Prompt for a bearing (type, node, orientation) and append it.

    Row format: bearing type (1 fixed, 2 roller) | node | orientation code,
    matching the 3-column bearings arrays used everywhere else.
    Returns the updated [bearings, bearCount].
    """
    # Bug fix: was np.zeros((1,4)) — a 4-column row cannot be vstacked onto the
    # 3-column bearings arrays of the preset truss models (and the fourth
    # column was never used anywhere).
    newBear = np.zeros((1,3)) # bearing type | node | orientation
    print (' Fixed bearing (1) or roller (2)?')
    newBear[0,0] = getNumber()
    print (SingTab(nodeTable, title = "Nodes").table)
    print (' \nPlace bearing at which node?')
    newBear[0,1] = getNumber()
    if newBear[0,0] == 1:
        print (' Is bearing at bottom (1) or top (2) of truss?')
        bearSet = getNumber()
        if bearSet == 1:
            newBear[0,2] = 1 # pointing upwards
        elif bearSet == 2:
            newBear[0,2] = 3 # downwards
    elif newBear[0,0] == 2:
        print (' Block vertical (1) or horizontal (2) movement?')
        bearSet = getNumber()
        if bearSet == 1:
            print (' Is bearing at bottom (1) or top (2) of truss?')
            bearSet = getNumber()
            if bearSet == 1:
                newBear[0,2] = 1 # upwards
            elif bearSet == 2:
                newBear[0,2] = 3 # downwards
        elif bearSet == 2:
            print (' Is bearing at left (1) or right (2) of truss?')
            bearSet = getNumber()
            if bearSet == 1:
                newBear[0,2] = 4 # pointing right
            elif bearSet == 2:
                newBear[0,2] = 2 # pointing left
    # The very first bearing replaces the all-zero placeholder row.
    if bearCount == 0:
        bearings = newBear
    else:
        bearings = np.vstack((bearings, newBear))
    bearCount += 1
    return [bearings, bearCount]
def addLoad(loads, nodes, loadCount, nodeTable):
    """Prompt for a load (node, direction, magnitude) and append it.

    Returns the updated [loads, loadCount].
    """
    fresh = np.zeros((1, 4))  # load nr | node | orientation | magnitude
    print ('\n List of nodes: \n', SingTab(nodeTable).table)
    fresh[0, 0] = loadCount + 1
    print ('\n To which node is the load attached? Enter node number: ')
    fresh[0, 1] = getNumber()
    print ('\n Enter load direction (up (1), left (2), down (3), right (4)):' )
    fresh[0, 2] = getNumber()
    print ('\n Enter magnitude: ')
    fresh[0, 3] = getNumber()
    # The very first load replaces the all-zero placeholder row.
    loads = fresh if loadCount == 0 else np.vstack((loads, fresh))
    loadCount += 1
    return [loads, loadCount]
def getNumber():
    """Keep prompting until the user types something parseable as a float."""
    while True:
        try:
            return float(input(' '))
        except ValueError:
            print ('Please enter a number!')
def editTruss(*args):
    """Prompt for new material properties and return (E, A).

    Bug fix: the previous signature declared four parameters
    (nodes, nodeCount, elements, elemCount), but main() calls this with six
    arguments, which raised a TypeError. None of the arguments were ever
    used, so a variadic signature keeps every existing caller working.
    """
    print ('Enter Young\'s modulus E')
    E = getNumber()
    print ('Enter beam cross-sectional area A:')
    A = getNumber()
    return E, A
# Script entry point: launch the interactive truss builder/solver.
if __name__ == '__main__':
    main()
|
{"/testModels.py": ["/plotsAndTables.py"], "/convert.py": ["/testModels.py"], "/solver.py": ["/plotsAndTables.py"], "/trussDFEM.py": ["/testModels.py", "/plotsAndTables.py", "/solver.py"]}
|
16,966
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/forms.py
|
from django import forms
from .models import Files
# ModelForm for uploading a video: Meta links back to the Files model, so
# field changes belong in blog/models.py.
class FileForm(forms.ModelForm):
    class Meta:
        model = Files
        fields = ('description', 'video', )
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,967
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/admin.py
|
from django.contrib import admin
from .models import Post
from .models import Video
# Register your models here.
# Make the Post and Video models editable through the Django admin site.
admin.site.register(Post)
admin.site.register(Video)
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,968
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/migrations/0004_auto_20180225_2048.py
|
# Generated by Django 2.0 on 2018-02-26 01:48
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the title and urlembed fields from Video."""

    dependencies = [
        ('blog', '0003_auto_20180225_1152'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='video',
            name='title',
        ),
        migrations.RemoveField(
            model_name='video',
            name='urlembed',
        ),
    ]
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,969
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/views.py
|
#Remeber that these functions have to be impoted, check docs!
from django.shortcuts import render
from django.shortcuts import redirect
#Reference the Post model from earlier
from .models import Post
from .models import Video
from .forms import FileForm
# Create your views here.
def index(request):
    """Render the blog landing page with posts ordered newest-first."""
    context = {'posts': Post.objects.order_by('-date_pub')}
    return render(request, 'blog/index.html', context)
#Views for tube extension
def tube(request):
    """Render the video listing page with videos ordered newest-first."""
    context = {'videos': Video.objects.order_by('-date_pub')}
    return render(request, 'blog/tube.html', context)
#This will be moved to another view to merga this with a page.
def model_form_upload(request):
    """Show the upload form; on a valid POST save the file and redirect to 'tube'."""
    if request.method != 'POST':
        # Plain GET: render an unbound form.
        return render(request, 'blog/model_form_upload.html', {'form': FileForm()})
    form = FileForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('tube')
    # Invalid submission: re-render with the bound form so errors are shown.
    return render(request, 'blog/model_form_upload.html', {'form': form})
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,970
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/questions.py
|
from discord.ext import commands
from random import choice, shuffle
import aiohttp
import functools
import asyncio
class query:
def __init__(self,bot):
self.bot = bot
@commands.group(name="question", no_pm=True,pass_context=True)
async def getquestion(self,ctx):
"""Built in Helpdesk style ticketing syste for questions"""
await self.bot.send_cmd_help(ctx)
@getquestion.command(pass_context=True, name="question")
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,971
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/migrations/0005_auto_20180225_2056.py
|
# Generated by Django 2.0 on 2018-02-26 01:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-adds Video.title and Video.urlembed (removed in 0004)."""

    dependencies = [
        ('blog', '0004_auto_20180225_2048'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='title',
            field=models.CharField(default='', max_length=250),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='video',
            name='urlembed',
            field=models.URLField(default=''),
            preserve_default=False,
        ),
    ]
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,972
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/models.py
|
from django.db import models
from django.utils import timezone
#new models forms.
# Create your models here.
# New models here
# Post Model for Blog not Tube
class Post(models.Model):
    """A blog article (used by the index view, not the tube pages)."""
    # Headline shown in listings; body holds the article text.
    title = models.CharField(max_length=250)
    body = models.TextField()
    # Publication timestamp; defaults to the creation time.
    date_pub = models.DateTimeField(default=timezone.now)

    def __str__(self):
        return '<Title: {}, ID: {}>'.format(self.title, self.id)
#For tube posting videos from admin console.
class Video(models.Model):
    """A video entry posted from the admin console for the tube pages."""
    title = models.CharField(max_length=255)
    slug = models.SlugField(unique = True,help_text="Url slugs")
    video_posts = models.TextField()
    # Publication timestamp; defaults to the creation time.
    date_pub = models.DateTimeField(default=timezone.now)
    allow_comments = models.BooleanField(default=False)

    def __str__(self):
        # Bug fix: the format string was missing the closing '>' ('<ID: {}'),
        # inconsistent with Post.__str__'s bracketed style.
        return '<ID: {}>'.format(self.id)
#For Uploading stuff
class Files(models.Model):
    """An uploaded video file with a short description (see FileForm in forms.py)."""
    description = models.CharField(max_length=255)
    # Uploaded files are stored under MEDIA_ROOT/videos/.
    video = models.FileField(upload_to='videos/')
    uploaded_at = models.DateTimeField(auto_now_add=True)
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
16,973
|
MCMXCIII/Django-Blog
|
refs/heads/master
|
/blog/migrations/0007_auto_20180226_1222.py
|
# Generated by Django 2.0 on 2018-02-26 17:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds allow_comments, slug and title fields to Video."""

    dependencies = [
        ('blog', '0006_auto_20180225_2057'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='allow_comments',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='video',
            name='slug',
            field=models.SlugField(default='', help_text='Url slugs', unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='video',
            name='title',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
    ]
|
{"/blog/forms.py": ["/blog/models.py"], "/blog/admin.py": ["/blog/models.py"], "/blog/views.py": ["/blog/models.py", "/blog/forms.py"]}
|
17,005
|
rakesh329/Simulating-a-disease-spread-and-visualizing-curve-using-python
|
refs/heads/main
|
/Social_connections.py
|
"""
Written by : Rakesh Namballa
Started date : 23/05/2020
Last edited: 08/06/2020
Description: " Representing social connections "
In this code we create objects for all the people and add friends to each person depending on the sample txt file.
The sample file consists of all the person names along with their respective friends.
"""
class Person:
    """A member of the social network with a list of direct friends."""

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name
        self.friend_list = []  # Person objects this person is connected to

    def add_friend(self, friend_person):
        """Register another Person object as a friend."""
        self.friend_list.append(friend_person)

    def get_name(self):
        """Full name as 'first last'."""
        return "{} {}".format(self.first_name, self.last_name)

    def get_friends(self):
        """The list of this person's friend objects."""
        return self.friend_list
def load_people():
    """Read sample_set.txt and return a list of Person objects with friends wired up.

    Each line has the form "First Last: Friend One, Friend Two, ...". A first
    pass creates every Person; a second pass resolves the friend names to the
    already-created objects (friends may appear before their own line).

    Improvement: the file is now opened with a context manager so it is closed
    even if a line fails to parse (previously f.close() was skipped on error).
    """
    all_personObjects = {}  # "First Last" -> Person object
    all_lines = []
    with open("sample_set.txt", 'r') as f:
        for raw in f:
            line = raw.rstrip()          # strip the trailing newline
            all_lines.append(line)
            full_name = line.split(": ")[0]
            parts = full_name.split(" ") # first and last name tokens
            all_personObjects[full_name] = Person(parts[0], parts[1])
    for line in all_lines:
        full_name, friends = line.split(": ")
        for friend in friends.split(", "):
            # Resolve each friend name to its Person object.
            all_personObjects[full_name].add_friend(all_personObjects[friend])
    return list(all_personObjects.values())
# Build the social graph from sample_set.txt when run directly.
if __name__ == '__main__':
    load_people()
|
{"/Visualise_curve.py": ["/Simulate_disease_spread.py"], "/Simulate_disease_spread.py": ["/Social_connections.py"]}
|
17,006
|
rakesh329/Simulating-a-disease-spread-and-visualizing-curve-using-python
|
refs/heads/main
|
/Visualise_curve.py
|
"""
Written by : Rakesh Namballa
Started date : 23/05/2020
Last edited: 08/06/2020
Description: " Visualise the curve "
In this code we create a count vs days graph and visualise the graph
"""
"""
Test scenario A: An uncontained outbreak
Number of days: 30
Meeting probability: 0.6
Patient zero health: 25 health points
In this case, as patient zero's health is low, he transmits a higher viral load to the friends he meets, and the
probability of meeting is also high.
Though running through multiple case the spread for the first few days start slowly but as patients meet regularly the
viral load gets infected to multiple people fast and at the end of 30 days all the patients are completely effected with
virus.
------------------------------------------------------------------------------------------------------------------------
Test scenario B: an unpredictable situation
Number of days: 60
Meeting probability: 0.25
Patient zero health: 49 health points
This is an unpredictable case where the patient zero health has a mild symptoms and the meeting probability also less.
* In few cases on first day virus spreads for few people and from second day all patients gets recovers after sleep.
* In few cases it keeps on increasing and many patients gets infected.
------------------------------------------------------------------------------------------------------------------------
Test scenario C: flattening the curve
Number of days: 90
Meeting probability: 0.18
Patient zero health: 40 health points
In this case, as the meeting probability is low and patient zero's health is less affected, the increase in the virus
is slower.
* In few cases the patient recovers after few days
* In few cases patients gets infected but increases slowly
"""
from matplotlib import pyplot as plt # import libraries to plot a graph
from Simulate_disease_spread import run_simulation
def visual_curve(days, meeting_probability, patient_zero_health):
    """Run the spread simulation and plot the per-day count curve."""
    counts = run_simulation(days, meeting_probability, patient_zero_health)
    day_axis = list(range(1, days + 1))
    plt.plot(day_axis, counts)
    plt.title("Virus spread simulation")
    plt.xlabel("Days")
    plt.ylabel("Count")
    plt.show()
if __name__ == '__main__':
    # Gather the three simulation parameters interactively, then plot the curve.
    days = int(input("Enter number of days:"))
    meeting_probability = float(input("Enter meeting probability:"))
    patient_zero_health = int(input("Enter patient zero health:"))
    visual_curve(days, meeting_probability, patient_zero_health)
|
{"/Visualise_curve.py": ["/Simulate_disease_spread.py"], "/Simulate_disease_spread.py": ["/Social_connections.py"]}
|
17,007
|
rakesh329/Simulating-a-disease-spread-and-visualizing-curve-using-python
|
refs/heads/main
|
/Simulate_disease_spread.py
|
"""
Written by : Rakesh Namballa
Started date : 26/05/2020
Last edited: 08/06/2020
Description: " Simulate disease spread "
In this code we create objects for all the patients and add friends to each person depending on the sample txt file.
We run a simulation to check the no:of effected people and return a count for each day.
"""
import random
from Social_connections import Person
class Patient(Person):
    """A Person with health points, used by the disease-spread simulation.

    Health is clamped to [0, 100]; a patient below 50 points is contagious.
    """

    def __init__(self, first_name, last_name, health):
        # Person.__init__ already sets the names and an empty friend list;
        # the previous revision redundantly re-assigned all three here.
        super().__init__(first_name, last_name)
        self.health = set_health_boundary(health)  # starting health points

    def get_health(self):
        """Current health points, rounded to the nearest integer."""
        return round(self.health)

    def set_health(self, new_health):
        """Overwrite health, rounded and clamped to [0, 100]."""
        self.health = set_health_boundary(round(new_health))

    def is_contagious(self):
        """A patient below 50 health points can spread the virus."""
        return self.health < 50

    def infect(self, viral_load):
        """Reduce health by viral_load scaled by the patient's current condition.

        Severely ill patients (<= 29) barely worsen (factor 0.1), sick patients
        (< 50) take the full load (1.0), healthy patients take double (2.0).
        """
        if self.health <= 29:
            factor = 0.1
        elif self.health < 50:
            factor = 1.0
        else:
            factor = 2.0
        self.health = set_health_boundary(round(self.health - factor * viral_load))

    def sleep(self):
        """Recover 5 health points overnight (still clamped to 100)."""
        self.health = set_health_boundary(self.health + 5)
def set_health_boundary(value):
    """Clamp a health value into the inclusive range [0, 100]."""
    return max(0, min(100, value))
def run_simulation(days, meeting_probability, patient_zero_health):
    """Simulate the spread for `days` days; return, per day, the number of
    patients that were contagious at the end of that day.

    meeting_probability is the chance (0..1) that two friends meet on a
    given day; patient_zero_health is the health assigned to the first
    patient read from the file (all others start at 75).
    """
    all_PatientObjects = load_patients(75)  # Set all the patients with initial health by calling load_patient()
    all_PatientObjects[0].set_health(patient_zero_health)  # Specify the zero patient health
    events = ["meet", "don'tmeet"]  # List for the use in random.choices()
    not_meeting_probability = round(1 - meeting_probability, 2)
    day_count = []
    for i in range(0, days):
        for patient in all_PatientObjects:
            for friend in patient.get_friends():  # Gets all the friend for that patient object
                # random.choices() returns a list with randomly selected from the specified sequence
                toMeet = random.choices(events, weights=[meeting_probability, not_meeting_probability])[0]
                if toMeet == "meet":
                    # Checks if both patient and meeting friend are contagious
                    if friend.is_contagious() and patient.is_contagious():
                        friend_health = friend.get_health()
                        # from given viral load formula
                        friend_viral_load = (((friend_health - 25) * (friend_health - 25)) / 62) + 5
                        person_health = patient.get_health()
                        person_viral_load = (((person_health - 25) * (person_health - 25)) / 62) + 5
                        friend.infect(person_viral_load)
                        patient.infect(friend_viral_load)
                    # Enter the condition if only friend is contagious
                    elif friend.is_contagious():
                        friend_health = friend.get_health()
                        friend_viral_load = (((friend_health - 25) * (friend_health - 25)) / 62) + 5
                        patient.infect(friend_viral_load)
                    # Enter the condition if only patient is contagious
                    elif patient.is_contagious():
                        person_health = patient.get_health()
                        person_viral_load = (((person_health - 25) * (person_health - 25)) / 62) + 5
                        friend.infect(person_viral_load)
        count = 0
        for patient in all_PatientObjects:
            if patient.is_contagious():
                count = count + 1  # count for no:of infected patients each day
            patient.sleep()  # everyone recovers a little overnight
        day_count.append(count)
    return day_count  # return the list of day_count
def load_patients(initial_health):
    """Build Patient objects from "a2_sample_set.txt" and wire friendships.

    Each line has the form "First Last: Friend One, Friend Two". Every
    patient starts with `initial_health` health points.

    Returns a list of all Patient objects.

    Fix: the file is now opened with a `with` block, so the handle is
    closed even if a malformed line raises while parsing (the original
    only reached f.close() on success).
    """
    all_PatientObjects = {}  # Each patient name and object address stored as a Key:Value pairs.
    all_lines = []
    with open("a2_sample_set.txt", 'r') as f:
        for line in f:
            line = line.rstrip()  # Removes the trailing newline from each line
            all_lines.append(line)  # Append each line into a list
            patient_name = line.split(": ")  # Separate the patient with friends
            patient = Patient(patient_name[0].split(" ")[0], patient_name[0].split(" ")[1], initial_health)
            all_PatientObjects[patient_name[0]] = patient  # Patient is added into the dictionary along with object address
    # Second pass: every name is now known, so friendships can be linked.
    for line in all_lines:
        patient_name = line.split(": ")  # Separate the friends with patient
        friend_names = patient_name[1].split(", ")  # Separate all the friends
        for friend in friend_names:
            all_PatientObjects[patient_name[0]].add_friend(all_PatientObjects[friend])
    return list(all_PatientObjects.values())  # return list of all Patient objects from the file records
if __name__ == '__main__':
    # Quick manual check: 40 days, guaranteed meetings, patient zero almost dead.
    run_simulation(40, 1, 1)
|
{"/Visualise_curve.py": ["/Simulate_disease_spread.py"], "/Simulate_disease_spread.py": ["/Social_connections.py"]}
|
17,035
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/puntiDiArticolazione.py
|
'''
C'è una certa parentela tra i punti di articolazione e i ponti. Se {u, v} è un ponte tale che u ha grado almeno 2, allora
u è un punto di articolazione. Però se u è un punto di articolazione, non è detto che qualche arco incidente in u sia
un ponte.
Punti di articolazione
Vediamo allora come trovare i punti di articolazione. Facciamo una DFS di un grafo non diretto e connesso G,
partendo da un nodo u. Come possiamo riconoscere se u è un punto di articolazione? Chiaramente, una
condizione sufficiente affinché non lo sia è che la rimozione di u non sconnetta l'albero di visita (se la rimozione non
sconnette l'albero di visita a maggior ragione non sconnette G). È anche una condizione necessaria perché u non
sia un punto di articolazione? In altri termini, se la rimozione di u sconnette l'albero di visita, sconnette anche il
grafo?
Iniziamo con la radice u dell'albero della DFS. Se la rimozione di u sconnette l'albero, allora u ha almeno due
sottoalberi figli. Se eliminiamo u i sottografi relativi a questi sottoalberi saranno connessi solo se ci sono archi tra di
essi. Ma non ci possono essere tali archi perchè non sarebbero archi all'indietro. Quindi, se la rimozione di u
sconnette l'albero di visita, sconnette anche il grafo. Ne segue che la radice della DFS è un punto di articolazione
se e solo se ha almeno due figli. Vediamo ora gli altri nodi. Se un nodo v è un punto di articolazione, la sua
rimozione necessariamente sconnette almeno un sottoalbero S della DFS da v. Nel senso che i nodi di S non sono
più raggiungibili da u, nel grafo senza v. Questo accade se e solo se non ci sono archi all'indietro da nodi di S a
antenati di v. Quindi, un nodo v, diverso dalla radice della DFS, è un punto di articolazione se e solo se esiste un
sottoalbero della DFS da v che non ha archi all'indietro verso antenati di v.
Possiamo incorporare queste osservazioni in un algoritmo per trovare i punti di articolazione. Modifichiamo la DFS
per mantenere i tempi di inizio visita dei nodi in un array tt . Inoltre, per determinare le condizioni circa gli archi
all'indietro dei sottoalberi, facciamo sì che la procedura modificata di visita DFS da v ritorni il minimo tempo di inizio
visita tra quelli di tutti i nodi toccati durante la DFS da v. Così, un nodo v è un punto di articolazione se e solo se
esiste un figlio w di v per cui il valore b ritornato dalla visita modificata da w soddisfa b >= tt[v] .
'''
def dfsPA(G, u, tt, C, A):
    """DFS visit from u that collects articulation points into the set A.

    tt holds discovery times (0 = not yet visited), C is a one-element
    visit counter. Returns the smallest discovery time reachable from
    u's subtree through back edges.
    """
    C[0] += 1
    tt[u] = C[0]
    low = C[0]
    child_count = 0
    for w in G[u]:
        if tt[w] == 0:
            child_count += 1
            sub_low = dfsPA(G, w, tt, C, A)
            # A non-root node is an articulation point when some child
            # subtree has no back edge above it.
            if tt[u] > 1 and sub_low >= tt[u]:
                A.add(u)
            low = min(low, sub_low)
        else:
            low = min(low, tt[w])
    # The DFS root is an articulation point iff it has two or more children.
    if tt[u] == 1 and child_count >= 2:
        A.add(u)
    return low
def trovaPuntiArticolazione(G):
    """Return the set of articulation points of the connected graph G."""
    discovery = [0] * len(G)  # discovery times, 0 = unvisited
    counter = [0]             # shared visit counter
    points = set()            # articulation points found so far
    dfsPA(G, 0, discovery, counter, points)
    return points
def dfsPAAtoB(G, a, b, tt, C, A):
    """Variant of the articulation-point DFS intended to collect into A
    the nodes critical for reaching b from a.

    tt[v] is a two-element list, but only tt[v][0] (the discovery time)
    is ever written here.
    NOTE(review): because tt[x][1] stays 0 for every node, the test
    `tt[b][1] <= tt[a][1]` below always compares 0 <= 0 and is trivially
    true — confirm what the second component was meant to record.
    """
    C[0] += 1
    tt[a][0] = C[0]
    back = C[0]
    children = 0  # counted but never used in this variant
    for adjacent in G[a]:
        if tt[adjacent][0] == 0:
            children += 1
            bc = dfsPAAtoB(G, adjacent, b, tt, C, A)
            # a is flagged when some child subtree has no back edge above
            # it and b was discovered after a.
            if tt[a][0] > 1 and bc >= tt[a][0] and tt[b][0] > tt[a][0] and tt[b][1] <= tt[a][1]:
                A.add(a)
            back = min(back, bc)
        else:
            back = min(back, tt[adjacent][0])
    return back
def trovaPuntiCriticiAtoB(G, a, b):
    """Collect the nodes flagged as critical for reaching b from a."""
    discovery = [[0, 0] for _ in G]  # per-node [start, end] time slots
    counter = [0]                    # shared visit counter
    critical = set()
    dfsPAAtoB(G, a, b, discovery, counter, critical)
    return critical
# Sample undirected cyclic graph used to exercise both searches below.
grfNonDirCicl = {
    0: [1, 7],
    1: [0, 2],
    2: [1, 3],
    3: [2, 4, 7],
    4: [3, 5, 6],
    5: [4, 6],
    6: [4, 5],
    7: [0, 3, 8],
    8: [7],
}
print(trovaPuntiArticolazione(grfNonDirCicl))  # articulation points of the sample graph
print(trovaPuntiCriticiAtoB(grfNonDirCicl, 0, 4))  # critical nodes for reaching 4 from 0
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,036
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/forteConnessione.py
|
import simpleStackQueue
'''
Discussione dell'esercizio [sensi unici]
La rete viaria della cittadina può essere rappresentata facilmente tramite un grafo diretto in cui i nodi sono gli
incroci di due o più strade e ogni arco corrisponde a una strada (tra due incroci). Allora la proprietà che vorrebbe il
sindaco equivale alla forte connessione del grafo. Si osservi che non fa differenza se due punti A e B che si
vogliono connettere sono sugli incroci o sulle strade.
Come possiamo verificare se un grafo è fortemente connesso? Un modo semplice consiste nel fare una visita a
partire da ogni nodo del grafo. Se tutte le visite raggiungono tutti i nodi del grafo, allora il grafo è fortemente
connesso. Altrimenti non lo è. Ma questo algoritmo è piuttosto inefficiente, infatti richiede n visite e quindi tempo
O(n(n + m)).
Ma non è necessario effettuare così tante visite, cioè una per ogni nodo del grafo. Se un grafo G è fortemente
connesso, fissando un nodo u, sappiamo che tutti i nodi di G sono raggiungibili da u e che da ogni nodo si può
raggiungere u. Inoltre la validità di questa proprietà per uno specifico nodo, implica che il grafo è fortemente
connesso. Infatti, dati due qualsiasi nodi v e w, si può ottenere un cammino che va da v a w concatenando un
cammino da v a u con uno da u a w e tali cammini esistono in virtù della proprietà suddetta; in modo simmetrico si
ottiene un cammino da w a v. Riassumendo abbiamo un algoritmo per verificare se un grafo G è fortemente
connesso: facciamo una visita da un nodo fissato u per verificare che tutti i nodi sono raggiungibili da u e poi, per
verificare che da ogni nodo u è raggiungibile, basta fare una visita da u del grafo trasposto.
'''
def dfs(G, u, VIS):
    """Recursive DFS from u; marks every reached node with 0 in VIS
    (unreached entries keep their -1)."""
    VIS[u] = 0
    for neighbour in G[u]:
        if VIS[neighbour] == -1:
            dfs(G, neighbour, VIS)
def checkForteConnessione(G):
    """Check strong connectivity of the directed graph G.

    Every node must be reachable from node 0 both in G and in its
    transpose. Returns 0 when strongly connected, -1 otherwise.
    """
    VIS = [-1 for _ in G]
    dfs(G, 0, VIS)  # any start node works; 0 is used by convention
    if -1 in VIS:
        print("Non è possibile raggiungere tutti i nodi da 0")
        return -1  # some node is unreachable from 0
    # Build the transposed graph (every edge reversed).
    Gt = {node: [] for node in G}
    for node in G:
        for adj in G[node]:
            Gt[adj].append(node)
    # Reset the visit marks and repeat the search on the transpose.
    for node in G:
        VIS[node] = -1
    dfs(Gt, 0, VIS)
    if -1 in VIS:
        print("Non è possibile raggiungere 0 da tutti i nodi")
        return -1  # some node cannot reach 0
    print("Il grafo è fortemente connesso")
    return 0
'''Algoritmo di Tarjan'''
def DFS_SCCTarj(G, node, CC, S, c, nc):
    """Recursive step of Tarjan's strongly-connected-components algorithm.

    CC[v] holds: 0 if v is unvisited, a negative value (minus the
    discovery time) while v's component is still undetermined, and a
    positive component index once assigned. S is the node stack, c the
    visit counter and nc the component counter (both one-element lists).
    Returns the lowest discovery time reachable from `node`.
    """
    c[0] += 1
    CC[node] = -c[0]  # discovery time stored negated to mark "component not yet assigned"
    S.push(node)
    back = c[0]
    for adjacent in G[node]:
        if CC[adjacent] == 0:
            b = DFS_SCCTarj(G, adjacent, CC, S, c, nc)
            back = min(back, b)
        elif CC[adjacent] < 0:  # adjacent's component has not been determined yet
            back = min(back, -CC[adjacent])
    if back == -CC[node]:  # node is a component root
        nc[0] += 1
        # Pop the stack down to `node`: those nodes form one component.
        w = S.pop()
        CC[w] = nc[0]
        while w != node:
            w = S.pop()
            CC[w] = nc[0]
    return back
def SCCTarj(G):
    """Tarjan's algorithm: return CC where CC[v] is the index (>= 1) of
    the strongly connected component containing node v."""
    CC = [0] * len(G)    # 0 = component not yet assigned
    comp_counter = [0]   # number of components found so far
    visit_counter = [0]  # number of nodes visited so far
    stack = simpleStackQueue.Stack()
    for node in G:
        if CC[node] == 0:
            DFS_SCCTarj(G, node, CC, stack, visit_counter, comp_counter)
    return CC
def DFSNumVis(G, u):
    """Return how many nodes of G are reachable from u (u included)."""
    reached = set()

    def explore(node):
        reached.add(node)
        for adj in G[node]:
            if adj not in reached:
                explore(adj)

    explore(u)
    return len(reached)
# broadcast-nodes exercise
def DFS_BroadcastNodes(G):
    """Return the nodes that can broadcast to the whole graph: the members
    of the highest-numbered SCC, provided one of them reaches every node
    (otherwise an empty list)."""
    CC = SCCTarj(G)
    # Pick any node u belonging to the component with the highest index
    # (the last component closed by Tarjan's algorithm).
    best_comp = 0
    u = 0
    for node in G:
        if CC[node] > best_comp:
            best_comp = CC[node]
            u = node
    reachable = DFSNumVis(G, u)
    print(reachable)
    broadcasters = []
    if reachable == len(G):
        broadcasters = [node for node in G if CC[node] == best_comp]
    return broadcasters
# Strongly connected: cycle 0 -> 1 -> 2 -> 0 with the appendix 2 <-> 3.
grafoFortConn = {
    0: [1],
    1: [2],
    2: [0, 3],
    3: [2]
}
# Not strongly connected: node 1 has no outgoing edges.
grafoNonFortConn1 = {
    0: [1, 2, 3],
    1: [],
    2: [1],
    3: [2]
}
# Not strongly connected: nodes 2 and 3 are unreachable from 0.
grafoNonFortConn2 = {
    0: [1],
    1: [0],
    2: [0],
    3: [0, 2]
}
#checkForteConnessione(grafoFortConn)
#checkForteConnessione(grafoNonFortConn1)
#checkForteConnessione(grafoNonFortConn2)
print(SCCTarj(grafoFortConn))
print(SCCTarj(grafoNonFortConn1))
print(SCCTarj(grafoNonFortConn2))
print(DFS_BroadcastNodes(grafoNonFortConn2))
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,037
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/eseBacktracking.py
|
def printSottSeqCresc(n, X, sol, u, z, check, k):
    """Backtracking enumeration that prints, via `sol`, selections from
    the 0/1 sequence X (positions not taken are marked -1).

    Parameters as used by the recursion:
      n     -- length of X
      k     -- index of the position currently being decided
      z     -- count of 0s taken so far
      u     -- count of 1s taken so far
      check -- 1 once a 1 has been taken (blocks taking further 0s)
    NOTE(review): the guard `z >= (u + 1)` admits a 1 only while the
    zeros taken would still strictly outnumber the ones — presumably a
    prefix constraint from the exercise statement; confirm against it.
    """
    if k == n:
        print(sol)
        return
    # Option 1: skip position k entirely.
    sol[k] = -1
    printSottSeqCresc(n, X, sol, u, z, check, k+1)
    if X[k] == 0 and check == 0:
        # Option 2: take a 0 (only allowed before any 1 has been taken).
        sol[k] = 0
        printSottSeqCresc(n, X, sol, u, z+1, 0, k+1)
    elif X[k] == 1 and z >= (u+1):
        # Option 3: take a 1 while the prefix constraint still holds.
        sol[k] = 1
        printSottSeqCresc(n, X, sol, u+1, z, 1, k + 1)
# Demo inputs for the enumeration above; -1 in sol marks "not selected".
X = [1, 1, 0, 0]
X2 = [0, 1, 0, 1]
sol = [-1 for _ in X]
sol2 = [-1 for _ in X2]
printSottSeqCresc(len(X), X, sol, 0, 0, 0, 0)
print("\n\n")
printSottSeqCresc(len(X2), X2, sol2, 0, 0, 0, 0)
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,038
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/BFS.py
|
import collections
import math
def BFS(G, u):
    """Breadth-first search from u.

    Returns (P, DIST): P[v] is v's parent in the BFS tree (P[u] == u,
    -1 if unreachable) and DIST[v] is the edge distance from u
    (-1 if unreachable).
    """
    P = [-1] * len(G)
    DIST = [-1] * len(G)
    P[u] = u  # the root is its own parent
    DIST[u] = 0
    queue = collections.deque([u])
    while queue:
        node = queue.popleft()
        for adj in G[node]:
            if P[adj] == -1:  # first time this node is reached
                P[adj] = node
                DIST[adj] = DIST[node] + 1
                queue.append(adj)
    return P, DIST
'''
Il vettore dei padri rappresenta l'albero di visita della BFS e quindi per ogni nodo v contiene un cammino di lunghezza
minima da u a v. Per ricostruire un tale cammino basta partire da v e percorrerlo di padre in padre tramite P fino alla
radice u.
'''
def pathMinDist(G, u, v):
    """Return a deque holding a shortest path from u to v, or math.inf
    (after printing a message) when v is unreachable from u."""
    parents = BFS(G, u)[0]
    if parents[v] == -1:
        print("v non è raggiungibile a partire da u, la loro distanza è inf")
        return math.inf
    # Walk from v back to the root u through the parent array.
    path = collections.deque()
    path.appendleft(v)
    while v != u:
        v = parents[v]
        path.appendleft(v)
    return path
def calcolaNCamminiMinimi(G, u):
    """Return M where M[v] is the number of distinct shortest paths from
    u to v (M[u] == 1; 0 for unreachable nodes)."""
    DIST = [-1] * len(G)
    M = [0] * len(G)
    DIST[u] = 0
    M[u] = 1
    queue = collections.deque([u])
    while queue:
        node = queue.popleft()
        for adj in G[node]:
            if DIST[adj] == -1:
                # First time reached: inherits the path count of `node`.
                DIST[adj] = DIST[node] + 1
                M[adj] = M[node]
                queue.append(adj)
            elif DIST[adj] == DIST[node] + 1:
                # Another shortest route arrives here: accumulate.
                M[adj] += M[node]
    return M
# Small directed graphs used by the demo calls below.
grafetto = {
    0: [1, 3, 5],
    1: [2],
    2: [4],
    3: [5],
    4: [0],
    5: [4]
}
graf = {
    0: [2, 3, 4],
    1: [4],
    2: [5],
    3: [5],
    4: [5],
    5: [6, 7],
    6: [1],
    7: [1]
}
graf2 = {
    0: [3, 6, 7],
    1: [5],
    2: [4],
    3: [],
    4: [0, 3],
    5: [0, 2, 7],
    6: [],
    7: [2]
}
#C = BFS(grafetto, 0)
#print("PADRI: "+str(C[0])+"\nDISTANZE: "+str(C[1]))
#print(pathMinDist(grafetto, 0, 4))
#print(calcolaNCamminiMinimi(grafetto, 0))
#print(calcolaNCamminiMinimi(graf, 0))
# Print the BFS tree (parent array) and the distances from node 0.
C = BFS(graf2, 0)
print("PADRI: "+str(C[0])+"\nDISTANZE: "+str(C[1]))
def bfsRecArchi(G, u, contatori, c):
    """BFS from u that classifies edges while building the visit tree.

    contatori[0] counts tree edges; contatori[1] counts every traversal
    of an edge towards an already-visited node (back/cross edges).
    `c` seeds the visit-order numbering. Returns (P, DIST) as a plain BFS.
    """
    VIS = [-1] * len(G)
    c += 1
    VIS[u] = c  # positive visit-order number once visited
    P = [-1] * len(G)
    DIST = [-1] * len(G)
    P[u] = u  # root of the BFS tree
    DIST[u] = 0
    queue = collections.deque([u])
    while queue:
        node = queue.popleft()
        for adj in G[node]:
            if VIS[adj] == -1:
                contatori[0] += 1  # tree edge
                c += 1
                VIS[adj] = c
                P[adj] = node
                DIST[adj] = DIST[node] + 1
                queue.append(adj)
            elif VIS[adj] > 0:
                contatori[1] += 1  # back or cross edge
    return P, DIST
def calcolaArchiBFS(G, u):
    """Run the edge-classifying BFS from u and print the edge counts.

    Bug fix: the start node `u` was previously ignored (the inner call
    always started from node 0); it is now passed through.
    """
    contatori = [0, 0]  # [tree edges, back/cross edges]
    bfsRecArchi(G, u, contatori, 0)
    print("n.archi albero: "+str(contatori[0]))
    print("n. archi all'indietro o di attraversamento: " + str(contatori[1]))
calcolaArchiBFS(graf2, 0)
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,039
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/esercizioDfsTrovaPonti2.py
|
'''
Discussione dell'esercizio [strade critiche]
Possiamo rappresentare la rete viaria con un grafo G non diretto in cui i nodi sono gli incroci e due nodi sono
collegati da un arco se c'è una strada che collega i corrispondenti incroci. Per ipotesi G è un grafo connesso. Una
strada critica corrisponde a un ponte del grafo G. Per trovare tutti i ponti, un algoritmo molto semplice consiste
nell'esaminare ogni arco {u, v} considerando il grafo G' che si ottiene rimuovendo l'arco da G e controllare se G' è
connesso (se non lo è, l'arco è un ponte, altrimenti non è un ponte). Ma l'algoritmo è molto inefficiente, infatti
richiede m visite di grafi che sono quasi uguali a G e quindi ha complessità O(m(n + m)).
Possiamo fare di meglio?
Possiamo tentare di usare una DFS opportunamente modificata per trovare i ponti.
Supponiamo di fare una DFS, a partire da un nodo qualsiasi, del nostro grafo connesso G.
Sappiamo che tutti gli archi saranno classificati o come archi dell'albero della DFS o come archi all'indietro.
Un arco all'indietro può essere un ponte? No, perché sappiamo che ogni arco all'indietro appartiene
ad almeno un ciclo e un ponte non può far parte di cicli.
Quindi rimangono solamente gli archi dell'albero. Sia {u, v} un arco dell'albero e supponiamo,
senza perdita di generalità, che u sia il padre di v.
Sia Tree(v) l'insieme dei nodi del sottoalbero della DFS da v. Se c'è un arco all'indietro da un nodo di
Tree(v) verso u o un antenato di u, allora l'arco {u, v} non è un ponte (perchè c'è un ciclo che contiene l'arco).
Viceversa, se non c'è un arco all'indietro da Tree(v) a u o un antenato di u? Supponiamo per assurdo che esista un
cammino che collega u e v e che non contiene l'arco {u, v}. Allora sia z il primo nodo del cammino (percorrendolo
da v verso u) che non è in Tree(v). E sia w il predecessore di z, sempre nel cammino, quindi w è in Tree(v). Ne segue
che {w, z} è un arco da Tree(v) a un nodo fuori di Tree(v) per cui non può essere un arco dell'albero e deve
necessariamente essere un arco all'indietro, in contraddizione con l'ipotesi che tali archi non ci sono.
Quindi per determinare se un arco è un ponte basterà controllare che sia un arco dell'albero della DFS e che non ci
siano archi all'indietro dal sottoalbero di un estremo dell'arco all'altro estremo o un suo antenato. Per fare questo
controllo facciamo in modo che la funzione che esegue la DFS da u ritorni il minimo tra il tempo d'inizio visita di u e
il tempo di inizio visita dei nodi antenati di u relativi agli archi all'indietro da Tree(u). Inoltre dobbiamo passargli
anche il padre di u per evitare che scambi l'arco tra u e il padre di u per un arco all'indietro.
'''
def dfsPonti(G, u, z, tt, C, Pt):
    """DFS from u (reached via parent z) that appends every bridge of the
    connected undirected graph G to Pt as a two-node set.

    tt holds discovery times (0 = unvisited); C is a one-element visit
    counter. Returns the lowest discovery time reachable from u's
    subtree through back edges.
    """
    C[0] += 1
    tt[u] = C[0]
    low = C[0]
    for w in G[u]:
        if tt[w] == 0:
            child_low = dfsPonti(G, w, u, tt, C, Pt)
            # No back edge from w's subtree climbs above u: {u, w} is a bridge.
            if child_low > tt[u]:
                Pt.append({u, w})
            low = min(low, child_low)
        elif w != z:  # ignore the edge back to the DFS parent
            low = min(low, tt[w])
    return low
def trovaPonti(G):
    """Return the list of bridges of the connected undirected graph G,
    each represented as a two-node set."""
    discovery = [0 for _ in G]  # discovery times, 0 = unvisited
    counter = [0]               # shared visit counter
    bridges = []
    dfsPonti(G, 0, 0, discovery, counter, bridges)
    return bridges
# Undirected connected graph used to exercise the bridge search.
grfNonDirCicl = {
    0: [1, 7],
    1: [0, 2],
    2: [1, 3],
    3: [2, 4, 7],
    4: [3, 5, 6],
    5: [4, 6],
    6: [4, 5],
    7: [0, 3, 8],
    8: [7],
}
# NOTE(review): grfDirCicl is a *directed* graph, while the bridge
# algorithm above assumes an undirected one — interpret the second
# call's output with care.
grfDirCicl = {
    0: [1],
    1: [2],
    2: [3, 9],
    3: [4, 5],
    4: [6],
    5: [6],
    6: [7],
    7: [8],
    8: [6],
    9: []
}
print(trovaPonti(grfNonDirCicl))
print(trovaPonti(grfDirCicl))
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,040
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/DFS.py
|
import simpleStackQueue
def dfs(G, u):
    """Iterative DFS from u using an explicit stack; returns the nodes in
    visit order. (The list membership test is O(n) per check — kept as in
    the original for didactic comparison with the boolean-array version.)"""
    visited = []
    stack = simpleStackQueue.Stack()
    stack.push(u)
    visited.append(u)
    while stack.size() >= 1:
        current = stack.top()
        descended = False
        for adj in G[current]:
            if adj not in visited:
                visited.append(adj)
                stack.push(adj)
                descended = True
                break
        if not descended:
            stack.pop()  # all adjacents handled: backtrack
    return visited
def dfsRecursive(Gr, u):
    """Recursive DFS; returns the visit order as a list of nodes.
    (The list membership test is O(n) per check — kept as in the original.)"""
    visited = []

    def walk(node):
        visited.append(node)
        for adj in Gr[node]:
            if adj not in visited:
                walk(adj)

    walk(u)
    return visited
def dfsBool(G, u):
    """Iterative DFS from u with an O(1) visited array (0 = visited,
    -1 = untouched); returns the array."""
    marks = [-1 for _ in G]
    stack = simpleStackQueue.Stack()
    stack.push(u)
    marks[u] = 0
    while stack.size() >= 1:
        current = stack.top()
        descended = False
        for adj in G[current]:
            if marks[adj] == -1:
                marks[adj] = 0
                stack.push(adj)
                descended = True
                break
        if not descended:
            stack.pop()  # all adjacents handled: backtrack
    return marks
def dfsRecursiveBool(Gr, u):
    """Recursive DFS with an O(1) visited array: entry is 0 once visited,
    -1 otherwise. Returns the array."""
    marks = [-1 for _ in Gr]

    def walk(node):
        marks[node] = 0
        for adj in Gr[node]:
            if marks[adj] == -1:
                walk(adj)

    walk(u)
    return marks
def dfsOpt(G, u):
    """Iterative DFS that stores (node, adjacency-index) pairs on the
    stack so each adjacency list is scanned at most once overall: after
    returning from a child, the scan resumes from the saved index instead
    of restarting. Returns the visited array (0 = visited, -1 = not)."""
    VIS = [-1 for _ in G]
    S = simpleStackQueue.Stack()
    S.push((u, 0))
    VIS[u] = 0
    while S.size() >= 1:
        v = S.top()  # (node, index of next adjacent to examine)
        check = 0
        for index in range( v[1], len(G[v[0]]) ):
            if VIS[ G[v[0]][index] ] == -1:
                # Remember where to resume for v, then descend into the child.
                S.pop()
                S.push((v[0], index))
                VIS[G[v[0]][index]] = 0
                S.push((G[v[0]][index], 0))
                check = 1
                break
        if check == 0:
            S.pop()  # all adjacents handled: backtrack
    return VIS
def dfsTempInFin(Gr, u):
    """DFS from u recording, for every node, its discovery and finish
    times as TT[node] == [start, end] ([0, 0] means never visited).
    The counter advances only on discoveries, so a node's finish time is
    the discovery time of the last node found in its subtree."""
    TT = [[0, 0] for _ in Gr]
    clock = [0]

    def visit(node):
        clock[0] += 1
        TT[node][0] = clock[0]
        for adj in Gr[node]:
            if TT[adj][0] == 0:
                visit(adj)
        TT[node][1] = clock[0]

    visit(u)
    return TT
# Undirected sample graphs for the DFS variants above.
graph = {
    0: [4],
    1: [2, 4],
    2: [1, 3],
    3: [2, 4, 5],
    4: [0, 1, 3],
    5: [3]}
graph2 = {
    0: [1, 5, 6],
    1: [0, 2, 3, 4, 5],
    2: [1, 3],
    3: [1, 2],
    4: [1, 5],
    5: [0, 1, 4],
    6: [0, 7, 8, 9],
    7: [6],
    8: [6, 9],
    9: [6, 8]}
print(dfs(graph, 0)) # the "if w not in VIS:" list membership test is inefficient!
print(dfsRecursive(graph, 0)) # the "if w not in VIS:" list membership test is inefficient!
print(" - ")
print(dfsBool(graph, 0))
print(dfsRecursiveBool(graph, 0))
print(" - ")
print(dfsOpt(graph, 0))
print(" --- ")
print(dfsTempInFin(graph, 0))
print(" - ")
print(dfsTempInFin(graph2, 0))
'''
CORRETTEZZA DFS
- Dimostrare che la DFS partendo da un nodo u visita tutti i nodi raggiungibili da u.
Supponiamo per assurdo che esista un nodo z raggiungibile da u ma che la DFS non visita.
Siccome z è raggiungibile da u, esiste un cammino u(0), u(1), …, u(k)
(se il grafo è diretto, il cammino è orientato)
!!! con u(0) = u e u(k) = z.
Sia u(i) il primo nodo del cammino che non è stato visitato, chiaramente 0 < i ≤ k.
Allora, u(i-1) è stato visitato e durante la visita, prima che il nodo u(i-1) sia estratto dallo stack,
tutti gli adiacenti di u(i-1) devono essere stati visitati.
!!! Siccome u(i) è adiacente a u(i-1), il nodo u(i) deve essere stato visitato
in contraddizione con la nostra ipotesi per assurdo.
EFFICIENZA DFS
Per mantenere l'insieme dei nodi visitati possiamo usare un semplice array VIS "booleano"
con un elemento per ogni nodo,inizializzato a -1 e ogni volta che un nuovo w viene visitato
si pone VIS[w] = 0 . --> Così l'aggiornamento e il test relativo alla visita di un nodo costa tempo costante.
Lo stack può essere facilmente implementato in modo che tutte le operazioni push , top e pop abbiano costo costante.
Se il grafo è rappresentato tramite liste di adiacenza, la scansione degli adiacenti prende tempo costante
per ogni adiacente considerato.
Ad ogni iterazione del WHILE o viene visitato un nuovo nodo o è estratto un nodo dallo stack.
Poiché ogni nodo è inserito nello stack una sola volta (quando viene visitato),
il numero di iterazioni del WHILE è al più 2n.
!!!Il numero di operazioni non costanti in una iterazione del WHILE sono solamente le scansioni degli adiacenti
o in altri termini gli attraversamenti degli archi. Ogni arco è attraversato al più due volte!!! (per grafi diretti
una sola volta).
Quindi gli attraversamenti degli archi costano complessivamente O(m).
!
In totale, la complessità della DFS su un grafo connesso è O(n + m).
In generale, la complessità è O(h + k) dove h è il numero di nodi e k il numero di archi della
componente connessa del nodo di partenza.
!
La DFS ha complessità ottimale perchè qualsiasi procedura di visita
deve necessariamente visitare tutti i nodi e gli archi che sono raggiungibili.
[Siccome lo stack delle chiamate ricorsive ha tipicamente una dimensione limitata,
l'implementazione ricorsiva non è adatta per grafi di grandi dimensioni.]
Albero di visita --> sottografo formato da tutti i nodi visitati assieme agli archi che hanno permesso di visitarli.
Albero --> grafo connesso e aciclico.
!!! Un grafo non diretto di n nodi è un albero se e solo se è connesso e ha esattamente n - 1 archi. !!!
Un albero è un grafo minimamente connesso, nel senso che ha il numero minimo di archi per renderlo connesso o,
equivalentemente, che nessun arco può essere eliminato senza sconnettere il grafo.
Gli alberi di visita dipendono dall'ordine con cui i nodi e gli archi vengono visitati.
!!! L'albero della DFS è anche determinato dall'ordine con cui sono scanditi gli adiacenti dei nodi visitati. !!!
Nel caso di grafi diretti l'albero di visita è più propriamente chiamato arborescenza
per indicare che è un grafo diretto.
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,041
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/eserciziBFS.py
|
import BFS
import collections
import math
'''
Esercizio [dall'albero alle distanze]
Dato un vettore dei padri P che rappresenta l'albero di una BFS a partire da un nodo u, dare un algoritmo che calcola il
corrispondente array Dist delle distanze da u in tempo O(n).
'''
def calcolaDistanzeArrayPadriBFS(P):
    """Given the parent array P of a BFS tree (the root r satisfies
    P[r] == r), return every node's distance from the root.
    Runs in O(n): each node's distance is computed once and memoised."""
    DIST = [-1 for _ in P]

    def distance_of(node):
        # Memoised recursion: climb parents until the root or a node
        # whose distance is already known.
        if DIST[node] == -1:
            if P[node] == node:
                DIST[node] = 0
            else:
                DIST[node] = distance_of(P[node]) + 1
        return DIST[node]

    for node in range(len(P)):
        distance_of(node)
    return DIST
'''
Esercizio [stessa distanza]
Descrivere un algoritmo efficiente che, dato un grafo non diretto e connesso e due suoi nodi u e v, trova i nodi che hanno la stessa
distanza da u e v.
'''
def sameDist(G, u, v):
    """Return the nodes of the connected undirected graph G that are
    equidistant from u and from v."""
    dist_from_u = BFS.BFS(G, u)[1]
    dist_from_v = BFS.BFS(G, v)[1]
    return [node for node in G if dist_from_u[node] == dist_from_v[node]]
'''
Esercizio [distanza tra insiemi]
Dato un grafo non diretto G e due sottoinsiemi A e B dei suoi nodi si definisce distanza tra A e B la distanza minima per andare da
un nodo in A ad un nodo in B. Se A e B non sono disgiunti, la loro distanza è 0. Descrivere un algoritmo che, dato G e due
sottoinsiemi dei nodi A e B calcola la loro distanza. L’algoritmo deve avere complessità O(n + m).
'''
def BFS_SET(G, A):
    """Multi-source BFS: return the array of distances of every node from
    the node set A (all sources start at distance 0; -1 = unreachable)."""
    DIST_A = [-1 for _ in G]
    frontier = collections.deque()
    for source in A:
        DIST_A[source] = 0
        frontier.append(source)
    while frontier:
        node = frontier.popleft()
        for adj in G[node]:
            if DIST_A[adj] == -1:
                DIST_A[adj] = DIST_A[node] + 1
                frontier.append(adj)
    return DIST_A
def distInsiemi(G, A, B):
    """Distance between node sets A and B: the minimum distance from any
    node of A to any node of B (0 when they intersect; math.inf when B
    is empty)."""
    dist_from_A = BFS_SET(G, A)
    return min((dist_from_A[node] for node in B), default=math.inf)
def calcolaGT(G):
    """Return the transpose of the directed graph G (every edge reversed)."""
    transposed = {node: [] for node in G}
    for node, adjacents in G.items():
        for adj in adjacents:
            transposed[adj].append(node)
    return transposed
'''
Esercizio [Roma]
Descrivere un algoritmo che, dato un grafo diretto e fortemente connesso e un suo nodo r, trova tutti i cammini minimi tra tutte le
coppie di nodi con il vincolo che questi cammini devono passare per r.
CENTER(G: grafo diretto, r: nodo)
P <- BFS(G, r) /* Vettore dei padri dell'albero dei cammini minimi di G da r */
GT <- TRASP(G) /* Ritorna il grafo trasposto */
PT <- BFS(GT, r) /* Vettore dei padri dell'albero dei cammini minimi di GT da r */
RETURN P, PT
C_u <- lista vuota
DO
C_u.append(u)
u <- PT[u]
WHILE u <> r
C_v <- lista vuota
WHILE v <> r DO
C_v.head(v)
v <- P[v]
C <- concatenazione di C_u e C_v
OUTPUT C
'''
def centerMinPath(G, u, v, r):  # G graph, u start node, v end node, r required waypoint
    """Return a shortest u->v path constrained to pass through r, as a
    list of nodes: the shortest u->r path (read off the transposed
    graph's BFS tree) joined with the shortest r->v path."""
    P = BFS.BFS(G, r)[0]    # BFS parent tree of G rooted at r (r -> x paths)
    GT = calcolaGT(G)
    PT = BFS.BFS(GT, r)[0]  # BFS parent tree of the transpose (x -> r paths)
    # Walk u up to r in the transposed tree: yields u, ..., r in order.
    C_u = [u]
    u = PT[u]
    while u != r:
        C_u.append(u)
        u = PT[u]
    C_u.append(r)
    # Walk v up towards r in the direct tree, then reverse so the segment
    # runs from r's successor to v.
    C_v = []
    while v != r:
        C_v.append(v)
        v = P[v]
    C_v.reverse()
    return C_u+C_v
#PADRI = [0, 0, 1, 0, 5, 0]
#print(calcolaDistanzeArrayPadriBFS(PADRI))
# Undirected sample graph for sameDist / distInsiemi.
Ggg = {
    0: [1, 5],
    1: [0, 2, 3],
    2: [1, 4],
    3: [1, 4],
    4: [2, 3, 6],
    5: [0, 6],
    6: [4, 5]
}
# Strongly connected directed graph for the constrained-path demo.
grafoFortConn = {
    0: [1],
    1: [2],
    2: [0, 3],
    3: [2]
}
print(sameDist(Ggg, 1, 4))
print(distInsiemi(Ggg, {0, 1, 2}, {6}))
print(centerMinPath(grafoFortConn, 0, 2, 3))
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,042
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/esePD.py
|
import collections
def EQ_BIP(S, x):
    """Dynamic-programming test of whether the sequence S (whose total is
    x) can be split into two parts of equal sum.

    Returns (possible, T) where T[i][c] says whether some subset of
    S[0..i] sums to c; returns (False, None) when x is odd.
    """
    if x % 2 != 0:
        print("Il valore totale della sequenza è dispari, la sequenza non è bipartibile!")
        return False, None
    half = int(x/2)
    T = [[False] * (half + 1) for _ in range(len(S))]
    for i in range(len(S)):
        T[i][0] = True  # the empty subset always sums to 0
    for c in range(1, half + 1):
        T[0][c] = (S[0] == c)  # with one element, only its own value is reachable
    for i in range(1, len(S)):
        for c in range(1, half + 1):
            T[i][c] = T[i-1][c]  # reach sum c without using S[i]
            if c - S[i] >= 0:
                T[i][c] = T[i-1][c] or T[i-1][c-S[i]]  # ... or using S[i]
    return T[len(S)-1][half], T
def printSeqFromT(T, S, x):
    """Reconstruct from the DP table T one subsequence of S summing to x,
    returned as a deque where -1 marks skipped elements.

    NOTE(review): when i reaches 0 while c is still positive, T[i-1]
    indexes T[-1] (Python wraps to the last row) — confirm the intended
    termination for that case.
    """
    i = len(S)-1
    c = x
    L = collections.deque()
    while c > 0 or i > 0:
        # Take S[i] whenever the remaining sum was reachable without it.
        if (c-S[i]) >= 0 and T[i-1][c-S[i]]:
            L.appendleft(S[i])
            print("ho inserito " + str(S[i]))  # Italian: "inserted <value>"
            c = c-S[i]
            print("è rimasto "+str(c))  # Italian: "<value> remaining"
            i = i - 1
        else:
            L.appendleft(-1)
            i = i - 1
    return L
# Demo: check whether S can be split into two equal-sum halves and, if
# so, print one witness subsequence reaching the half sum.
S = [1, 1, 1, 4, 2, 3, 11, 3]
tot = 0
for el in S:
    tot += el
Tup = EQ_BIP(S, tot)
T = Tup[1]
if Tup[0]:
    L = printSeqFromT(T, S, int(tot/2))
    print(L)
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,043
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/bicolorazione.py
|
import simpleStackQueue
'''
COLORAZIONE DI UN GRAFO
-Dato un grafo si vogliono colorare i nodi usando il numero minimo di colori che garantiscono che due
qualsiasi nodi adiacenti hanno colori diversi.
Problema: sapere se esiste e in tal caso trovare una cosidetta 2-colorazione del nostro grafo.
Un altro modo di vedere il problema è di vedere l'assegnamento dei due colori ai nodi come la partizione del grafo
in due insiemi (disgiunti) tali che non ci sono archi tra nodi appartenenti alla stessa parte.
Un grafo che può esserecosì partizionato si dice bipartito.
'''
def dfsBiCol(G, node, bc):
    """Iteratively 2-colour the component of `node` (which gets colour 0).

    bc[v] is -1 while uncoloured, otherwise 0 or 1. Returns 0 on
    success and -1 as soon as two adjacent nodes share a colour.
    Note: within one pass over G[v] every uncoloured adjacent is pushed
    (there is no break), so several nodes can be stacked per iteration.
    """
    bc[node] = 0
    S = simpleStackQueue.Stack()
    S.push(node)
    while S.size() >= 1:
        v = S.top()
        control = 0
        for adjacent in G[v]:
            if bc[adjacent] == -1:
                bc[adjacent] = 1 if bc[v] == 0 else 0  # opposite colour of v
                control = 1
                S.push(adjacent)
            else:
                if bc[adjacent] == bc[v]:
                    return -1  # colour conflict: graph is not bipartite
        if control == 0:
            S.pop()  # nothing new reachable from v: backtrack
    return 0
def dfsBiCol2(G, u, bc):
    """Recursively extend a 2-colouring from the already-coloured node u.

    bc[v] is -1 (uncoloured) or a colour in {0, 1}. Returns 0 when the
    component is consistently coloured, -1 (after printing a message)
    when two adjacent nodes end up with the same colour.
    """
    for neighbour in G[u]:
        if bc[neighbour] == -1:
            bc[neighbour] = 1 if bc[u] == 0 else 0  # opposite colour of u
            if dfsBiCol2(G, neighbour, bc) == -1:
                return -1
        elif bc[neighbour] == bc[u]:
            print("Grafo non bicolorabile!")
            return -1
    return 0
def biCol(G):
    """2-colour G with the iterative DFS, one component at a time;
    returns the colour array, or None when G is not bipartite."""
    colours = [-1 for _ in G]
    for node in G:
        if colours[node] == -1:
            if dfsBiCol(G, node, colours) == -1:
                return None
    return colours
def biCol2(G):
    """2-colour G with the recursive DFS, one component at a time;
    returns the colour array, or None when G is not bipartite."""
    colours = [-1 for _ in G]
    for node in G:
        if colours[node] == -1:
            colours[node] = 0  # the root of a new component gets colour 0
            if dfsBiCol2(G, node, colours) == -1:
                return None
    return colours
# Bipartite sample: three separate components (0-5, 6-9, 10-13).
graph = {
    0: [4],
    1: [2, 4],
    2: [1, 3],
    3: [2, 4, 5],
    4: [0, 1, 3],
    5: [3],
    6: [7, 8],
    7: [6],
    8: [6, 9],
    9: [8],
    10: [11, 12, 13],
    11: [10],
    12: [10],
    13: [10]
}
# Odd cycle (0-1-2-3-4-0, length 5): not 2-colourable.
G_nonbic2 = {
    0: [1, 4],
    1: [0, 2],
    2: [1, 3],
    3: [2, 4],
    4: [3, 0]}
print(biCol2(graph))
print(biCol2(G_nonbic2))
'''
Usando la DFS si può risolvere il problema della 2-colorazione (o bipartizione) in modo molto efficiente, cioè, in
tempo O(n + m).
In generale, il problema della colorazione è molto più difficile. Determinare se un grafo è 3-colorabile è già un
problema per cui non si conoscono algoritmi efficienti. Gli algoritmi migliori hanno complessità esponenziale nella
dimensione del grafo.
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,044
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/eserciziDfs2.py
|
import copy
'''
Esercizio [pozzo]
In un grafo diretto, un nodo si dice pozzo universale se ha grado entrante n − 1 e grado uscente 0.
Dimostrare che un grafo diretto non può avere più di un pozzo universale.
Descrivere un algoritmo che preso un grafo diretto G, rappresentato tramite matrice di adiacenza, determina
se G contiene o meno un pozzo universale. L’algoritmo deve avere complessità O(n).
Dimostrare che il problema non è risolvibile in tempo O(n) se il grafo è rappresentato con liste di adiacenza.
- Se un grafo ha un pozzo universale u allora per un qualsiasi altro nodo v c'è l'arco (v, u) che essendo un arco
uscente da v impedisce che v possa essere un pozzo universale.
-Se consideriamo due nodi qualsiasi u e v e ci chiediamo se c'è un arco da u a v in base alla risposta
possiamo escludere con certezza che uno dei due nodi sia il pozzo (se l'arco c'è escludiamo u altrimenti
escludiamo v). A questo punto l'idea di un algoritmo per trovare il pozzo universale, se esiste, è molto
semplice. Scegliamo due nodi, diciamo u e v, e vediamo se c'è l'arco da u a v. Per quanto detto almeno uno
dei due nodi sarà scartato e quindi scegliamo un altro nodo w. Applichiamo la stessa procedura ai due nodi
in esame scartandone almeno uno. Continuando così fino a considerare tutti i nodi, alla fine o rimarremo con
un solo nodo oppure li avremo scartati tutti e il pozzo universale non c'è. Se rimaniamo con un nodo
dobbiamo solamente verificare che sia il pozzo universale.
N.B.
Se il grafo è rappresentato tramite liste di adiacenza non è possibile risolvere il problema in tempo O(n)
perché per verificare che un nodo sia un pozzo universale bisogna controllare che abbia grado entrante n - 1
e questo richiede la scansione delle liste di adiacenza di tutti gli altri nodi.
'''
def pozzo(matG):
    """Find the universal sink of a directed graph given as an adjacency
    matrix, in O(n) time.

    A universal sink has in-degree n-1 and out-degree 0.  First a single
    candidate is kept (any outgoing edge disqualifies the current one),
    then the candidate is verified.  Returns the sink's index, or -1.
    """
    n = len(matG)
    candidato = 0
    # Elimination pass: an outgoing edge rules the current candidate out.
    for v in range(n):
        if v != candidato and matG[candidato][v] == 1:
            candidato = v
    # Verification pass: every other node must point to the candidate,
    # and the candidate must point to nobody.
    for v in range(n):
        if v != candidato and (matG[v][candidato] == 0 or matG[candidato][v] == 1):
            print("Non ci sono pozzi universali nel grafo")
            return -1
    print("Il pozzo universale del grafo è "+str(candidato))
    return candidato
# Adjacency matrix WITH a universal sink: node 2 has in-degree 3 and
# out-degree 0.
matGpoz = [
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 0, 0, 0],
[0, 0, 1, 0]
]
# Adjacency matrix WITHOUT a universal sink (no node is pointed to by
# all the others while having no outgoing edge).
matGnopoz = [
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]
]
pozzo(matGpoz)    # expected: sink found (node 2)
print(" ")
pozzo(matGnopoz)  # expected: no universal sink
'''
Esercizio per casa [strade critiche]
La rete viaria di una cittadina non è stata progettata molto bene. Tutte le strade sono a doppio senso di marcia e da
un qualsiasi incrocio è possibile arrivare ad un qualsiasi altro incrocio. Ma ci sono delle strade critiche che se
interrotte (ad esempio per lavori) dividono la cittadina in due parti e non si può più andare da una parte all'altra.
Vogliamo un algoritmo efficiente che analizzando la rete viaria trovi tutte le strade critiche.
- Algoritmo che trova tutti i ponti di un Grafo non diretto connesso
-nodi: incroci
-archi: strade a doppio senso
'''
def dfs(Gp, u, P):
    """Recursive DFS from u that fills the parent vector P.

    P[w] == -1 marks w as unvisited; after the call, P[w] holds the DFS
    parent of every node reachable from u.
    """
    for vicino in Gp[u]:
        if P[vicino] == -1:
            P[vicino] = u
            dfs(Gp, vicino, P)
# Per grafi non diretti!
def trovaPonteConVetPadri(G, u, v, P):
    """Test whether the edge {u, v} of an undirected graph G is a bridge,
    filling P with the parent vector of a DFS of G - (u->v) rooted at u.

    Returns 0 if {u, v} is a bridge, 1 if it lies on a cycle (the path
    u..v in P plus the edge itself), -1 if the edge is not in G.
    """
    Gp = copy.deepcopy(G)
    try:
        Gp[u].remove(v)
    except ValueError:
        # The direction u -> v does not exist in G at all.
        print("arco {"+str(u)+","+str(v)+"} non presente in G")
        return -1  # errore non esiste l'arco {u,v} in G
    P[u] = u
    dfs(Gp, u, P)
    if P[v] == -1:
        # v unreachable once the edge is removed: the edge was a bridge.
        print("l'arco {" + str(u) + "," + str(v) + "} è un ponte")
        return 0
    print("l'arco {"+str(u)+","+str(v)+"} NON è un ponte")
    return 1
def dfsPonti(G, node, VIST, L, M):
    """DFS step of the naive bridge finder.

    For every unexamined edge (M[a][b] == -1) met during the visit, it
    runs a full bridge test; bridges are collected in L, while edges that
    close a cycle are marked in M (together with the whole cycle found in
    the parent vector P) so they are not tested again.
    """
    VIST[node] = 0  # mark the node as visited
    for adjacent in G[node]:
        if VIST[adjacent] == -1 and M[node][adjacent] == -1:
            P = [-1 for _ in G]
            c = trovaPonteConVetPadri(G, node, adjacent, P)
            if c == 1:
                # Not a bridge: P holds a path adjacent..node; mark every
                # edge of the cycle (in both directions) as already examined.
                P[node] = adjacent
                print(str(adjacent), end='')
                w = adjacent
                M[w][P[w]] = 0
                M[P[w]][w] = 0
                while w != node:
                    print(" "+str(P[w]), end='')
                    w = P[w]
                    M[w][P[w]] = 0
                    M[P[w]][w] = 0
                print("")
                print(str(M))
            elif c == 0:
                # Bridge found: record the edge.
                L.append((node, adjacent))
            dfsPonti(G, adjacent, VIST, L, M)
def trovaPontiNonEff(G):
    """Return the list of all bridges of the undirected graph G.

    Non-efficient approach: each edge discovered during the DFS may
    trigger a full reachability test (trovaPonteConVetPadri); the matrix
    M remembers edges already known to lie on a cycle.
    """
    ponti = []
    M = [[-1 for _ in G] for _ in G]
    visitati = [-1 for _ in G]
    for nodo in G:
        if visitati[nodo] == -1:
            dfsPonti(G, nodo, visitati, ponti, M)
    return ponti
# Undirected graph (adjacency lists, both directions listed) containing
# cycles; used to exercise the bridge finder.
grfNonDirCicl = {
0: [1, 7],
1: [0, 2],
2: [1, 3],
3: [2, 4, 7],
4: [3, 5, 6],
5: [4, 6],
6: [4, 5],
7: [0, 3, 8],
8: [7],
}
print(trovaPontiNonEff(grfNonDirCicl))
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,045
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/prinSottSeqCresc.py
|
# N[i] = numero delle sottosequenze crescenti di S[0..i] che terminano in i
def numSottSeqCresc(S, n):
N = [0 for _ in range(0, n)]
N[0] = 1
nsc = 1
for i in range(1, n):
N[i] = 1
for j in range(0, i):
if S[j] < S[i]:
N[i] = N[i] + N[j]
nsc = nsc + N[i]
return nsc
def printModSeq(S, C):
    """Print, between parentheses, the elements S[i] whose mask bit C[i] is 1."""
    testo = "( "
    for i in range(len(S)):
        if C[i] == 1:
            testo = testo + str(S[i]) + " "
    print(testo + ")")
def arrModSeq(S, C):
    """Return the sub-array of S selected by the 0/1 mask C."""
    return [S[i] for i in range(len(S)) if C[i] == 1]
def printFullT(T):
    """Print every row of the table T as 'T[i] = seq1 seq2 ...'."""
    for i in range(len(T)):
        print("T["+str(i)+"] = ", end='')
        for sequenza in T[i]:
            print(str(sequenza)+" ", end='')
        print("\n")
def printTH(Tt, h):
    """Print one table row Tt labelled with its index h."""
    print("T[" + str(h) + "] = ", end='')
    for sequenza in Tt:
        print(str(sequenza) + " ", end='')
    print("\n")
def printSottSeqCresc(S, n, T):
    """Fill T so that T[x] lists every increasing subsequence of S that
    starts at index x, printing the table as it is built and finally all
    the subsequences.

    Works backwards: the subsequences starting at x are the singleton
    [S[x]] plus S[x] prepended to every subsequence starting at a later,
    larger element.
    """
    for x in range(n - 1, -1, -1):
        T[x].append([S[x]])
        for y in range(x + 1, n):
            if S[y] > S[x]:
                for seq in T[y]:
                    T[x].append([S[x]] + seq)
        print("T[" + str(x) + "] è: " + str(T[x]))
    print("\n")
    for riga in T:
        for sottosequenza in riga:
            print(sottosequenza, end=' ')
# Demo runs of the counter (expected: 7, 14, 9) and of the enumerator.
S1 = [5, 7, 3, 6]
print(numSottSeqCresc(S1, len(S1)))
S2 = [5, 3, 7, 8, 6]
print(numSottSeqCresc(S2, len(S2)))
S3 = [8, 1, 2, 9]
print(numSottSeqCresc(S3, len(S3)))
T = [[] for _ in S3]
printSottSeqCresc(S3, len(S3), T)
T = [[] for _ in S2]
printSottSeqCresc(S2, len(S2), T)
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,046
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/eserciziDfs.py
|
import copy
'''
Esercizio [archi]
Vogliamo scrivere un algoritmo che esegue una DFS su un grafo diretto e ritorna il numero di archi dell'albero della
DFS, il numero di archi all'indietro, il numero di archi in avanti e il numero di archi di attraversamento.
'''
def dfsRecArchi(G, u, VIS, contatori, c):
    """Recursive DFS from u that classifies every edge of the directed
    graph G, updating `contatori` in place:
      contatori[0]: tree edges, contatori[1]: back edges,
      contatori[2]: forward edges, contatori[3]: cross edges.

    VIS[v] is -1 while v is unvisited, a negative discovery mark while
    the visit of v is in progress, and the positive mark once done.
    `c` is the last discovery counter used; the updated value is returned.

    Bug fix: marks are stored as -(c + 1) rather than -c.  With -c the
    first visited node (c == 1) would be marked -1, colliding with the
    "unvisited" sentinel: any edge pointing back to the start node would
    be miscounted as a tree edge and the start node re-visited.
    """
    c += 1
    VIS[u] = -(c + 1)  # "in progress": strictly below -1, no sentinel clash
    for adjacent in G[u]:
        if VIS[adjacent] == -1:
            contatori[0] += 1  # tree edge
            c = dfsRecArchi(G, adjacent, VIS, contatori, c)
        elif VIS[adjacent] < 0:
            contatori[1] += 1  # back edge: target visit still in progress
        elif VIS[adjacent] > -VIS[u]:
            contatori[2] += 1  # forward edge: target discovered after u, already done
        else:
            contatori[3] += 1  # cross edge: target finished before u started
    VIS[u] = -VIS[u]  # visit of u completed
    return c
def calcolaArchi(G, u):
    """Run the edge-classifying DFS from u and print the four edge counts."""
    VIS = [-1 for _ in G]
    conteggi = [0, 0, 0, 0]
    dfsRecArchi(G, u, VIS, conteggi, 0)
    print("n.archi albero: "+str(conteggi[0]))
    print("n.archi attraversamento: " + str(conteggi[3]))
    print("n. archi all'indietro: " + str(conteggi[1]))
    print("n. archi in avanti: " + str(conteggi[2]))
'''
Esercizio [trasposto]
Il grafo trasposto di un grafo diretto G = (V, E), è un grafo diretto GT = (V, ET) che ha lo stesso insieme dei nodi ma
tutti gli archi con direzione opposta, vale a dire ET = { (v, u) | (u, v) in E }. Descrivere un algoritmo che dato un grafo
diretto G, rappresentato tramite liste di adiacenza degli adiacenti uscenti, ritorna il grafo trasposto GT
rappresentato nello stesso modo. L'algoritmo deve avere complessita O(n + m).
'''
def dfsTrasp(G, GT, VIS, node):
    """DFS over G that adds, for every edge (node, w) it scans, the
    reversed edge (w, node) to the transposed graph GT."""
    VIS[node] = 0
    for w in G[node]:
        GT[w].append(node)
        if VIS[w] == -1:
            dfsTrasp(G, GT, VIS, w)
def creaGTrasp(G):
    """Build the transposed graph of G (adjacency lists) via DFS, O(n + m)."""
    visitati = [-1 for _ in G]
    GT = {nodo: [] for nodo in G}
    for nodo in G:
        if visitati[nodo] == -1:
            dfsTrasp(G, GT, visitati, nodo)
    return GT
def creaGTrasp2(G):
    """Build the transposed graph of G with a plain scan of all edges, O(n + m)."""
    GT = {nodo: [] for nodo in G}
    for sorgente in G:
        for destinazione in G[sorgente]:
            GT[destinazione].append(sorgente)
    return GT
'''
Esercizio [ponte]
Descrivere un algoritmo che, dato un grafo non diretto G e un arco {u, v} del grafo, determina se G ha un ciclo che
contiene {u, v}. Analizzare il tempo di esecuzione dell’algoritmo. E se, nel caso un ciclo esista, vogliamo anche
trovare un ciclo che contiene l'arco?
Se un arco {u, v} di un grafo non diretto G non è contenuto in nessun ciclo, allora nel grafo G' ottenuto rimuovendo
l'arco da G, i nodi u e v non sono connessi. Infatti, se lo fossero vuol dire che ci sarebbe in G' un cammino che li
collega e siccome tale cammino non contiene l'arco {u, v}, il cammino insieme a tale arco è un ciclo in G che
contiene l'arco, contraddizione. Viceversa, se la rimozione dell'arco {u, v} sconnette i nodi u e v vuol dire che non ci
poteva essere un ciclo che conteneva l'arco. Quindi abbiamo dimostrato:
In un grafo non diretto e connesso G, un arco non è contenuto in nessun ciclo se e solo se la rimozione
dell'arco sconnette il grafo.
Un arco la cui rimozione sconnette un grafo connesso è chiamato ponte (bridge). Un algoritmo molto semplice per
determinare se un arco è un ponte di un grafo G non diretto e connesso consiste nel fare una visita del grafo G'
ottenuto rimuovendo l'arco. Se G' è sconnesso, l'arco è un ponte, altrimenti non lo è. Se in generale il grafo non è
connesso lo stesso ragionamento vale per la componente connessa che contiene l'arco da esaminare (ovvero la
visita parte da uno dei due estremi dell'arco). Chiaramente, tale algoritmo ha complessità O(n + m).
Nel caso vogliamo anche trovare un ciclo che contiene l'arco {u, v} (se esiste) basterà fare una DFS a partire da u
facendo in modo che il primo adiacente scelto sia proprio v. In questo modo la DFS troverà un arco all'indietro che
arriva al nodo u e da qui possiamo ricostruire il ciclo come già sappiamo.
'''
def dfs(Gp, u, VIS):
    """Plain recursive DFS from u; VIS[w] becomes 0 for every reachable w."""
    VIS[u] = 0
    for w in Gp[u]:
        if VIS[w] == -1:
            dfs(Gp, w, VIS)
def dfsRetInd(G, arcA, arcB, v, C, VIStime, P):
    """DFS step used by checkCycleEdge to find a back edge closing a
    cycle through the edge {arcA, arcB} of an undirected graph.

    C[0] is the running discovery counter; VIStime[v] is 0 when
    unvisited, negative while the visit of v is in progress, positive
    once done.  P is the parent vector.  Returns the node whose back
    edge reaches arcA, or -1 if none is found in this subtree.
    """
    C[0] += 1
    VIStime[v] = -C[0]  # negative = visit in progress
    for adjacent in G[v]:
        if VIStime[adjacent] == 0:
            P[adjacent] = v
            check = dfsRetInd(G, arcA, arcB, adjacent, C, VIStime, P)
            if check != -1:
                return check  # a cycle-closing node was already found deeper
        elif v != arcB and adjacent == arcA and VIStime[adjacent] < 0:
            # Back edge from v to arcA (excluding the trivial return along
            # the tested edge itself): a cycle through {arcA, arcB} exists.
            print("trovato v: "+ str(v))
            return v
    VIStime[v] = -VIStime[v]  # visit of v completed
    return -1
# Works for undirected graphs only!
def trovaPonte(G, u, v):
    """Decide whether the edge {u, v} of the undirected graph G is a bridge.

    Removes the direction u -> v from a deep copy of G and checks whether
    v is still reachable from u.  Returns 0 (bridge), 1 (not a bridge,
    i.e. the edge lies on a cycle) or -1 (edge not present in G).
    """
    Gp = copy.deepcopy(G)
    try:
        Gp[u].remove(v)
    except ValueError:
        print("arco {"+str(u)+","+str(v)+"} non presente in G")
        return -1  # errore non esiste l'arco {u,v} in G
    raggiunti = [-1 for _ in G]
    dfs(Gp, u, raggiunti)
    if raggiunti[v] == -1:
        print("l'arco {" + str(u) + "," + str(v) + "} è un ponte")
        return 0
    print("l'arco {"+str(u)+","+str(v)+"} NON è un ponte")
    return 1
def checkCycleEdge(G, u, v):
    """Check whether the edge {u, v} of an undirected graph G belongs to a
    cycle and, if so, print that cycle.

    First uses trovaPonte: the edge is on a cycle iff it is not a bridge.
    Then a DFS from u forced through v (dfsRetInd) finds the back edge
    closing the cycle, which is reconstructed via the parent vector P.
    Returns the node where the back edge was found, or -1.
    """
    if trovaPonte(G, u, v) == 1:
        P = [-1 for x in G]
        VIStime = [0 for x in G]
        P[u] = u
        C = [1]
        VIStime[u] = -C[0]  # u is "in progress" for the whole search
        P[v] = u            # force v to be the first child of u
        ret = dfsRetInd(G, u, v, v, C, VIStime, P)
        VIStime[u] = -VIStime[u]
        if ret != -1:
            # Walk the parent chain from the back-edge node up to u,
            # printing the cycle.
            w = ret
            print(str(w), end='')
            while w != u:
                print(" " + str(P[w]), end='')
                w = P[w]
            print("")
            return ret # if ret != -1, (ret, u) is the back edge closing the cycle
        else:
            print("l'arco {"+str(u)+","+str(v)+"} non fa parte di un ciclo")
            return -1 # the edge {u,v} is not part of any cycle
# Directed acyclic graph used for the edge-classification and transpose demos.
grfDag = {
0: [4, 6],
1: [2, 5],
2: [3],
3: [5],
4: [1],
5: [],
6: [7, 11],
7: [8, 9],
8: [],
9: [1, 10],
10: [],
11: []
}
calcolaArchi(grfDag, 0)
print(creaGTrasp(grfDag))
print(creaGTrasp2(grfDag))
# Undirected cyclic graph used for the bridge / cycle-edge demos.
grfNonDirCicl = {
0: [1, 7],
1: [0, 2],
2: [1, 3],
3: [2, 4, 7],
4: [3, 5, 6],
5: [4, 6],
6: [4, 5],
7: [0, 3, 8],
8: [7],
}
# Directed cyclic graph (NOT usable with the undirected-only helpers below).
grfDirCicl = {
0: [1],
1: [2],
2: [3, 9],
3: [4, 5],
4: [6],
5: [6],
6: [7],
7: [8],
8: [6],
9: []
}
print("________________________")
trovaPonte(grfNonDirCicl, 2, 3)
trovaPonte(grfNonDirCicl, 7, 8)
trovaPonte(grfNonDirCicl, 3, 4)
trovaPonte(grfNonDirCicl, 3, 7)
trovaPonte(grfNonDirCicl, 4, 0)
checkCycleEdge(grfNonDirCicl, 3, 7)
checkCycleEdge(grfNonDirCicl, 4, 6)
#checkCycleEdge(grfDirCicl, 6, 7) # Error: the algorithm only works for UNdirected graphs!
'''
Esercizio [grado due]
Dimostrare che se tutti i nodi di un grafo non diretto G hanno grado almeno due allora c’è almeno un ciclo. Se il
grado di ogni nodo è esattamente due, si puo affermare che G è un ciclo?
Un modo molto semplice di dimostrare che se ogni nodo di un grafo non diretto G ha grado almeno due allora il
grafo contiene un ciclo e di considerare una DFS a partire da un nodo di G. La visita dovrà necessariamente
incontrare un nodo w che è una foglia dell'albero della DFS e siccome w ha grado almeno due, w deve avere
almeno un altro arco oltre a quello che appartiene all'albero. Sappiamo che tale arco, non appartenendo all'albero,
non potrà che essere un arco all'indietro e questo dimostra l'esistenza di un ciclo.
Se ogni nodo di G ha grado esattamente due, non è detto che G sia un ciclo. Potrebbe infatti essere formato da
due o più cicli disgiunti. Se invece è anche connesso, allora è necessariamente un ciclo. Perché?
Perché la DFS a partire da uno qualunque dei nodi di G riuscirà a visitare tutti i nodi di G trovando infine
nel nodo foglia dell'albero DFS un arco all'indietro verso il nodo da cui è partita la DFS
(poiché anche questo è di grado 2).
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,047
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/ordinareGrafo.py
|
#import simpleStackQueue
import collections
'''
!!!Se un grafo è aciclico, esiste almeno un nodo che non ha archi entranti.!!!
dim: Se così non fosse potremmo partire da un nodo v1 e prendere un suo arco entrante che esce da un nodo v2, poi
prendere un arco entrante in v2 e uscente da un nodo v3 diverso dai primi due perché il grafo è aciclico, anche v3
ha un arco entrante che deve uscire, per le stesse ragioni, da un quarto nodo distinto v4 e procedendo in questo
modo si arriverebbe all'n-esimo nodo ma quest'ultimo ha un arco entrante che necessariamente dovrebbe uscire
da uno dei nodi già considerati chiudendo un ciclo che non può esistere, contraddizione.
Grazie a questa proprietà possiamo costruire un ordine come segue. Come primo nodo scegliamo un nodo v1
senza archi entranti (cioè, una lavorazione senza vincoli). Eliminando v1 dal grafo rimaniamo con un grafo aciclico e
da questo scegliamo un nodo v2 senza archi entranti (cioè, una lavorazione o senza vincoli o che aveva come unico
vincolo v1). Eliminiamo v2 e otteniamo un grafo ancora aciclico e da questo scegliamo v3 un nodo senza archi
entranti (cioè, una lavorazione o senza vincoli o che aveva come vincoli solo v1 o v2). Possiamo procedere in questo
modo fino all'ultimo nodo.--->Implementazione 1: Grafo dato con liste di adiacenza
Ordine topologico
Effettuiamo una DFS su un DAG G. Se la DFS da v termina dopo la DFS da w, siamo certi che non ci può essere un
arco da w a v. Infatti, se ci fosse sarebbe un arco all'indietro ma in un DAG non essendoci cicli non ci possono
essere archi all'indietro. Allora possiamo ottenere un ordinamento topologico di un DAG semplicemente ordinando i
nodi per tempi di fine visita decrescenti. Quindi, ogniqualvolta la DFS da un nodo v termina, inseriremo v in testa
alla lista che mantiene l'ordine.
'''
def dfsOrd(dag, node, VIS, L):
    """DFS step for topological sorting: once the visit of `node` ends,
    push it at the FRONT of the deque L, so L ends up ordered by
    decreasing finish time."""
    VIS[node] = 0
    for successore in dag[node]:
        if VIS[successore] == -1:
            dfsOrd(dag, successore, VIS, L)
    L.appendleft(node)
# Topological ordering of the nodes of a directed acyclic graph
def ordTop(dag):
    """Return a deque with the nodes of `dag` in topological order
    (decreasing DFS finish times), in O(n + m)."""
    ordine = collections.deque()
    visitati = [-1 for _ in dag]
    for nodo in dag:
        if visitati[nodo] == -1:
            dfsOrd(dag, nodo, visitati, ordine)
    return ordine
def calcolaGradoEntranti(dag):
    """Return a list with the in-degree of every node of `dag` (O(n + m) scan)."""
    gradi = [0 for _ in dag]
    for sorgente in dag:
        for destinazione in dag[sorgente]:
            gradi[destinazione] += 1
    return gradi
def ordTop2(dag):
    """Topological sort by repeated removal of in-degree-zero nodes
    (Kahn's algorithm), total cost O(n + m)."""
    ordine = []
    gradoEntranti = calcolaGradoEntranti(dag)  # O(n+m)
    # Stack of the nodes currently having no incoming edges.  O(n)
    senzaEntranti = [nodo for nodo in dag if gradoEntranti[nodo] == 0]
    # Each node enters the stack exactly once and each edge is examined
    # exactly once -> O(n+m) overall.
    while senzaEntranti:
        v = senzaEntranti.pop()
        ordine.append(v)
        for successore in dag[v]:
            gradoEntranti[successore] -= 1
            if gradoEntranti[successore] == 0:
                senzaEntranti.append(successore)
    return ordine
# Sample DAG: both topological-sort implementations should print a valid
# topological order of its nodes.
grfDag = {
0: [4, 6],
1: [2, 5],
2: [3],
3: [5],
4: [1],
5: [],
6: [7, 11],
7: [8, 9],
8: [],
9: [1, 10],
10: [],
11: []
}
print(ordTop(grfDag))
print(ordTop2(grfDag))
'''
ordTop2
La costruzione dell'array dei gradi entranti ha costo O(n + m) perché fa semplicemente una scansione
dell'intero grafo. L'inizializzazione dello stack dei nodi con grado entrante zero costa O(n). Il WHILE esegue n
iterazioni e complessivamente il numero di iterazioni del FOR interno è pari al numero di tutti gli archi, cioè m.
Quindi la complessità totale è O(n + m).
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,048
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/componentiConnesse.py
|
import simpleStackQueue
'''
Per tenere traccia delle componenti connesse si può usare un array che ad
ogni nodo assegna l'indice della sua componente connessa (gli indici sono determinati dall'ordine con cui sono
trovate le componenti)
'''
def dfsCC(G, node, arrayCc, countComp):
    """Iterative DFS that labels every node reachable from `node` with the
    component index `countComp` in arrayCc (-1 = not yet labelled)."""
    pila = simpleStackQueue.Stack()
    pila.push(node)
    arrayCc[node] = countComp
    while pila.size() >= 1:
        corrente = pila.top()
        trovato = False
        # Label and push every still-unlabelled neighbour of the top node;
        # if there is none, the top node is exhausted and gets popped.
        for vicino in G[corrente]:
            if arrayCc[vicino] == -1:
                arrayCc[vicino] = countComp
                pila.push(vicino)
                trovato = True
        if not trovato:
            pila.pop()
def CC(G):
    """Return an array mapping every node of the undirected graph G to the
    index of its connected component (components are numbered in the
    order they are discovered).  Cost O(n + m)."""
    componenti = [-1 for _ in G]
    indice = 0
    for nodo in G:
        if componenti[nodo] == -1:
            dfsCC(G, nodo, componenti, indice)
            indice += 1
    return componenti
# Undirected graph with three connected components:
# {0..5}, {6..9} and {10..13}.
graph = {
0: [4],
1: [2, 4],
2: [1, 3],
3: [2, 4, 5],
4: [0, 1, 3],
5: [3],
6: [7, 8],
7: [6],
8: [6, 9],
9: [8],
10: [11, 12, 13],
11: [10],
12: [10],
13: [10]
}
print(CC(graph))
'''
Quindi CC(G) ritorna un array che per ogni nodo di G dà l'indice della sua componente connessa.
Per grafi non diretti ciò è corretto ed è anche efficiente in quanto la complessità è ancora una volta O(n + m).
!!!Per grafi diretti l'algoritmo non determina in generale le componenti fortemente connesse.!!!
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,049
|
FlameDavid92/Python-ProgettazioneDiAlgoritmi
|
refs/heads/main
|
/classificazioneArchiDfsECicli.py
|
'''
Consideriamo un qualsiasi arco diretto (x, y) non appartenente all'albero della DFS. Per gli intervalli di visita
di x e y sono possibili solamente i seguenti casi:
- Gli intervalli di x e y sono disgiunti: non può essere t(x) < t(y) perchè l'arco (x, y) avrebbe forzato la visita
di y durante la visita da x e i due intervalli non sarebbe stati disgiunti.
Però può tranquillamente essere t(y) < t(x), cioè l'arco (x, y) è tra due nodi che non hanno rapporti di
discedenza e va da quello più giovane a quello più vecchio. Questo tipo di arco
(che non può esistere in grafi non diretti) è detto "arco di attraversamento" (in inglese cross edge).
- L'intervallo di x è contenuto nell'intervallo di y: l'arco va da un nodo x a un suo antenato y ed è detto arco
all'indietro (in inglese back edge). Questo tipo di arco esiste anche in grafi non diretti.
- L'intervallo di x contiene l'intervallo di y: l'arco va da un nodo x a un suo discendente y. Questo vuol dire che
il nodo y è stato visitato durante la DFS da x ma seguendo un cammino diverso dal semplice arco (x, y).
Questo tipo di arco è detto arco in avanti (in inglese forward edge). Per i grafi non diretti coincide con l'arco
all'indietro.
Se il grafo non è diretto (e connesso):
la presenza di un qualsiasi arco all'indietro indica l'esistenza di un ciclo.
E se non ci sono archi all'indietro il grafo è aciclico perchè coincide con l'albero della DFS.
Lo stesso vale per grafi diretti, cioè il grafo ha un ciclo se e solo se c'è almeno un arco all'indietro.
'''
def DFS_CYCLE(G, v, u, P, dir):
    """DFS from v (with DFS parent u) that stops at the first cycle found.

    P doubles as visit state and parent vector: P[w] == 0 means
    unvisited, P[w] == -parent while the visit of w is in progress,
    P[w] == parent once done.  `dir` is 1 for directed graphs (for
    undirected ones the edge back to the direct parent is ignored).
    Returns the last node of the cycle, or 0 if none was found.

    NOTE(review): the in-progress mark -u equals 0 when u == 0, colliding
    with the "unvisited" sentinel — confirm behaviour when some edge
    points back to node 0.
    """
    P[v] = -u # The negative value means the visit started but has not finished
    for w in G[v]:
        if P[w] == 0:
            z = DFS_CYCLE(G, w, v, P, dir)
            if z != 0: # A cycle has already been found
                P[v] = -P[v]
                return z
        elif P[w] < 0 and (w != u or dir == 1): # Cycle found
            P[w] = 0 # Mark the first node of the cycle
            P[v] = u
            return v
    P[v] = u # The visit from u is finished
    return 0 # without finding a cycle
def dfs_cycle_nodeList(G, u):
    """Return the list of nodes of a cycle of G reachable from u
    (empty list when no cycle is found by the DFS)."""
    P = [0 for _ in G]
    corrente = DFS_CYCLE(G, u, u, P, 1)  # last argument 1 => directed graph
    ciclo = []
    # Walk the parent chain until the marked first node (P value 0).
    while corrente > 0:
        ciclo.append(corrente)
        corrente = P[corrente]
    return ciclo
# Directed graph containing cycles (e.g. 2-4-5-6-2 and 7-8-7), used to
# exercise the cycle finder.
grf = {
0: [1,10],
1: [2],
2: [3, 4, 7],
3: [],
4: [5],
5: [6],
6: [2],
7: [8],
8: [7, 9],
9: [],
10: [9]
}
print(dfs_cycle_nodeList(grf, 0))
'''
Si osservi che l'algoritmo DFS_CYCLE non costa più della DFS. Inoltre, nel caso di un grafo non diretto, può essere
molto più efficiente perché termina sempre in O(n). Infatti, se il grafo è aciclico la DFS stessa impiega O(n)
perchè il grafo è un albero che ha solamente n - 1 archi. Se invece il grafo ha almeno un ciclo, l'algoritmo
termina non appena trova un arco all'indietro. Al più saranno visitati tutti gli n - 2 archi dell'albero
della DFS prima di incontrare un tale arco (dato che un qualsiasi arco o appartiene all'albero
o è un arco all'indietro). Quindi il costo dell'algoritmo è O(n).
'''
|
{"/eserciziBFS.py": ["/BFS.py"]}
|
17,050
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/hof.py
|
from collections.abc import Iterable
from functoolsplus.abc import Filterable, Foldable, Functor, Monad
from functoolsplus.utils.implementations import get_impl, provide_impl_for
from functoolsplus.utils.singletons import Missing
def map(func, obj):
    """Generic map over any Functor (intentionally shadows builtins.map)."""
    return _generic_higher_order_func(Functor, 'map', '__map__', func, obj)
def filter(func, obj):
    """Generic filter over any Filterable (intentionally shadows builtins.filter)."""
    return _generic_higher_order_func(
        Filterable, 'filter', '__filter__', func, obj)
def fold(func, obj, *, initial_value=Missing):
    """Generic fold over any Foldable; `Missing` delegates the choice of
    the starting value to the implementation."""
    return _generic_higher_order_func(
        Foldable, 'fold', '__fold__', func, obj, initial_value=initial_value)
def flatmap(func, obj):
    """Generic monadic bind: dispatches to the object's __flatmap__."""
    return _generic_higher_order_func(
        Monad, 'flatmap', '__flatmap__', func, obj)
def _generic_higher_order_func(
        abc_cls, name, method_name, func, obj, **kwargs):
    """Dispatch a higher-order operation for `obj`.

    Tries, in order: the object's own dunder (when it is an `abc_cls`
    instance), then a registered external implementation class for the
    object's type.  A NotImplemented result falls through to the next
    option; raises TypeError when nothing handles the operation.
    """
    if isinstance(obj, abc_cls):
        obj_handler = getattr(obj, method_name)
        result = obj_handler(func, **kwargs)
        if result is not NotImplemented:
            return result
    obj_type = type(obj)
    try:
        impl_cls = get_impl(abc_cls, obj_type)
    except TypeError:
        pass  # no registered implementation for this type
    else:
        cls_handler = getattr(impl_cls, method_name)
        result = cls_handler(obj, func, **kwargs)
        if result is not NotImplemented:
            return result
    raise TypeError(f'{obj_type.__name__!r} does not support {name} interface')
def unit(cls, value):
    """Wrap `value` into the monadic type `cls` (the monad's 'return').

    Mirrors the dispatch of _generic_higher_order_func but works on the
    class rather than on an instance.
    """
    if issubclass(cls, Monad):
        result = cls.__unit__(cls, value)
        if result is not NotImplemented:
            return result
    try:
        impl_cls = get_impl(Monad, cls)
    except TypeError:
        pass  # no registered implementation for this class
    else:
        result = impl_cls.__unit__(cls, value)
        if result is not NotImplemented:
            return result
    raise TypeError(f'{cls.__name__!r} does not support unit interface')
@provide_impl_for(Functor, Iterable)
@provide_impl_for(Filterable, Iterable)
@provide_impl_for(Foldable, Iterable)
@provide_impl_for(Monad, Iterable)
class _IterableImpl(
        Functor,
        Filterable,
        Foldable,
        Monad,
        Iterable):
    """Fallback implementation of the higher-order interfaces for any
    Iterable; each dunder rebuilds a container of the same type."""
    def __map__(self, func):
        """Apply func to every element, keeping the container type."""
        cls = type(self)
        return cls(func(item) for item in self)
    def __filter__(self, func):
        """Keep the elements for which func is truthy."""
        cls = type(self)
        return cls(item for item in self if func(item))
    def __fold__(self, func, initial_value=Missing):
        """Left-fold func over the elements; without an initial value the
        first element is used (ValueError on an empty iterable)."""
        obj_iter = iter(self)
        value = initial_value
        if value is Missing:
            try:
                value = next(obj_iter)
            except StopIteration:
                raise ValueError(
                    f'Empty {type(self).__name__!r} object'
                    f' but no initial value provided')
        for item in obj_iter:
            value = func(value, item)
        return value
    def __flatmap__(self, func):
        """Map func (element -> iterable) and flatten one level."""
        cls = type(self)
        return cls(_flatmap_iter(self, func))
    @staticmethod
    def __unit__(cls, value):
        """Build a one-element container of type cls."""
        return cls([value])
def _flatmap_iter(obj, func):
for item in obj:
for result_item in func(item):
yield result_item
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,051
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/containers/test_lazy.py
|
import pytest
from functoolsplus.containers.lazy import LazyValue, Unevaluated
def test_with_value_evaluator():
    """A LazyValue built from a callable evaluates once, on first access."""
    lv = LazyValue(lambda: 42)
    assert not lv.is_evaluated()
    assert lv.raw_value is Unevaluated
    assert str(lv) == 'LazyValue(<unevaluated>)'
    value = lv.value
    assert value == 42
    assert lv.is_evaluated()
    assert lv.raw_value == value
    assert str(lv) == 'LazyValue(42)'
    # A second access returns the cached value.
    assert lv.value == value
    assert lv.is_evaluated()
    assert lv.raw_value == value
def test_with_value():
    """A LazyValue built from a plain value is evaluated from the start."""
    lv = LazyValue(value=42)
    assert lv.is_evaluated()
    assert lv.raw_value == 42
    value = lv.value
    assert value == 42
    assert lv.is_evaluated()
    assert lv.raw_value == value
    assert lv.value == value
    assert lv.is_evaluated()
    assert lv.raw_value == value
def test_with_value_evaluator_and_value():
    """Passing both an evaluator and a value is rejected."""
    with pytest.raises(ValueError):
        LazyValue(lambda: 42, value=42)
def test_with_no_params():
    """Passing neither an evaluator nor a value is rejected."""
    with pytest.raises(ValueError):
        LazyValue()
def test_unevaluated_str():
    """The Unevaluated sentinel has a readable string form."""
    assert str(Unevaluated) == '<unevaluated>'
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,052
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/strategies.py
|
from functools import partial
from hypothesis import strategies as st
from tests.functions import always_false, always_true, identity
def build_add_function(n):
    """Return a named unary function computing x + n."""
    return set_function_name(f'add_{_integer_to_id(n)}')(lambda x: x + n)
def build_mul_function(n):
    """Return a named unary function computing x * n."""
    return set_function_name(f'mul_{_integer_to_id(n)}')(lambda x: x * n)
def build_mod_predicate(n):
    """Return a named predicate testing divisibility by n."""
    return set_function_name(f'mod_{_integer_to_id(n)}')(lambda x: x % n == 0)
def build_lt_predicate(n):
    """Return a named predicate testing x < n."""
    return set_function_name(f'lt_{_integer_to_id(n)}')(lambda x: x < n)
def build_gt_predicate(n):
    """Return a named predicate testing x > n."""
    return set_function_name(f'gt_{_integer_to_id(n)}')(lambda x: x > n)
def build_expand_function(container_cls, function_list):
    """Return f(x) -> container_cls containing g(x) for every g in function_list."""
    def expand(x):
        return container_cls(g(x) for g in function_list)
    return expand
# NOTE(review): st.cacheable / st.defines_strategy_with_reusable_values are
# hypothesis-internal decorators — confirm they still exist in the pinned
# hypothesis version.
@st.cacheable
@st.defines_strategy_with_reusable_values
def integer_functions():
    """Strategy generating unary integer functions (identity, add-n, mul-n)."""
    return (
        st.just(identity) |
        st.integers().map(build_add_function) |
        st.integers().map(build_mul_function)
    )
@st.cacheable
@st.defines_strategy_with_reusable_values
def integer_predicates():
    """Strategy generating integer predicates (constants, mod-n, lt-n, gt-n)."""
    return (
        st.just(always_false) |
        st.just(always_true) |
        st.integers(min_value=2).map(build_mod_predicate) |
        st.integers().map(build_lt_predicate) |
        st.integers().map(build_gt_predicate)
    )
@st.defines_strategy_with_reusable_values
def integer_expand_functions(container_cls):
    """Strategy generating x -> container_cls of f(x) expansion functions."""
    return _expand_functions(integer_functions(), container_cls)
def _expand_functions(functions_strategy, container_cls):
    """Lift a strategy of functions into a strategy of expand functions."""
    return (
        st.lists(functions_strategy)
        .map(partial(build_expand_function, container_cls))
    )
def set_function_name(name):
    """Decorator factory: set both __name__ and __qualname__ of the
    decorated function to `name` and return the function itself."""
    def decorator(func):
        func.__name__ = name
        func.__qualname__ = name
        return func
    return decorator
def _integer_to_id(n):
if n < 0:
return f'minus_{abs(n)}'
else:
return f'{n}'
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,053
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/utils/singletons.py
|
def new_singleton(cls):
    """Return the single shared instance of `cls`, creating and caching it
    on the class (as __instance__) on first use.

    Lookup deliberately follows the normal attribute chain (like the
    original hasattr check), so a subclass without its own __instance__
    reuses the one inherited from its base.
    """
    instance = getattr(cls, '__instance__', None)
    if instance is None:
        instance = object.__new__(cls)
        cls.__instance__ = instance
    return instance
class MissingType(object):
    """Sentinel type: every MissingType() call returns the same instance."""
    def __new__(cls):
        return new_singleton(cls)
# The shared sentinel meaning "no value was provided".
Missing = MissingType()
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,054
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/collections/test_stream.py
|
import pytest
from hypothesis import given
from hypothesis import strategies as st
from functoolsplus import filter as generic_filter
from functoolsplus import flatmap
from functoolsplus import map as generic_map
from functoolsplus import unit
from functoolsplus.collections import Stream
from tests import strategies as tests_st
def test_empty_stream_is_a_singleton():
    """Empty Stream constructions all yield the shared Stream.empty."""
    assert Stream() is Stream.empty
    assert Stream([]) is Stream.empty
@given(st.integers(), st.lists(st.integers()))
def test_cons(input_value, input_list):
    """cons puts the given value at the head of the stream."""
    stream = Stream.cons(input_value, lambda: Stream([input_list]))
    assert stream[0] == input_value
def test_cons_invalid():
    """cons rejects a non-callable tail."""
    with pytest.raises(TypeError):
        Stream.cons(1, 2)
def test_cons_invalid_lazy_evaluator():
    """A tail evaluator returning a non-stream fails on tail access."""
    stream = Stream.cons(1, lambda: 2)
    with pytest.raises(AttributeError):
        stream.tail
@given(st.lists(st.integers()), st.lists(st.integers()))
def test_concat(input_list1, input_list2):
    """Stream + Stream concatenates like list + list."""
    s1 = Stream(input_list1)
    s2 = Stream(input_list2)
    assert list(s1 + s2) == input_list1 + input_list2
@given(st.lists(st.integers()), st.text())
def test_concat_with_str(input_list, input_str):
    """Concatenating a Stream with a str is a TypeError."""
    with pytest.raises(TypeError):
        Stream(input_list) + input_str
@given(st.lists(st.integers()))
def test_isinstance(input_list):
    """Streams built from lists are Stream instances."""
    lst = Stream(input_list)
    assert isinstance(lst, Stream)
@given(st.lists(st.integers()), tests_st.integer_functions())
def test_map(input_list, func):
    """Generic map over a Stream matches a list comprehension."""
    stream = Stream(input_list)
    assert isinstance(generic_map(func, stream), Stream)
    assert list(generic_map(func, stream)) == [func(x) for x in input_list]
@given(st.lists(st.integers()), tests_st.integer_predicates())
def test_filter(input_list, pred):
    """Generic filter over a Stream matches a filtered comprehension."""
    stream = Stream(input_list)
    expected_list = [x for x in input_list if pred(x)]
    assert isinstance(generic_filter(pred, stream), Stream)
    assert list(generic_filter(pred, stream)) == expected_list
@given(st.lists(st.integers()), tests_st.integer_expand_functions(Stream))
def test_flatmap(input_list, expand_func):
    """flatmap expands each element and flattens one level."""
    stream = Stream(input_list)
    expected_list = [y for x in input_list for y in expand_func(x)]
    assert isinstance(flatmap(expand_func, stream), Stream)
    assert list(flatmap(expand_func, stream)) == expected_list
@given(st.integers())
def test_unit(n):
    """unit builds a one-element Stream."""
    stream = unit(Stream, n)
    assert isinstance(stream, Stream)
    assert stream.head == n
    assert stream.tail == Stream.empty
@given(st.lists(st.integers()), st.integers())
def test_getitem_int_key(input_list, i):
n = len(input_list)
stream = Stream(input_list)
if 0 <= i < n:
assert stream[i] == input_list[i]
else:
with pytest.raises(IndexError):
stream[i]
@given(st.lists(st.integers()), st.text())
def test_getitem_str_key(input_list, key):
stream = Stream(input_list)
with pytest.raises(TypeError):
stream[key]
@given(st.lists(st.integers()))
def test_str(input_list):
stream = Stream(input_list)
assert 'Stream' in str(stream)
for elem in input_list:
assert repr(elem) in str(stream)
def test_unevaluated_str():
stream = Stream.cons(1, lambda: Stream([2]))
assert str(stream) == 'Stream([1, <unevaluated>])'
assert stream[1] == 2
assert str(stream) == 'Stream([1, 2])'
def test_empty_head():
with pytest.raises(AttributeError):
Stream.empty.head
def test_empty_tail():
with pytest.raises(AttributeError):
Stream.empty.tail
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,055
|
apragacz/functoolsplus
|
refs/heads/master
|
/setup.py
|
#!/usr/bin/env python
import os.path
import re
from setuptools import find_packages, setup
ROOT_DIR = os.path.dirname(__file__)
def read_contents(local_filepath):
with open(os.path.join(ROOT_DIR, local_filepath), 'rt') as f:
return f.read()
def get_requirements(requirements_filepath):
'''
Return list of this package requirements via local filepath.
'''
requirements = []
with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f:
for line in f:
if line.startswith('#'):
continue
line = line.rstrip()
if not line:
continue
requirements.append(line)
return requirements
def get_version(package):
'''
Return package version as listed in `__version__` in `init.py`.
'''
init_py = read_contents(os.path.join(package, '__init__.py'))
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_long_description(markdown_filepath):
'''
Return the long description in RST format, when possible.
'''
try:
import pypandoc
return pypandoc.convert(markdown_filepath, 'rst')
except ImportError:
return read_contents(markdown_filepath)
setup(
name='functoolsplus',
version=get_version('functoolsplus'),
packages=find_packages(exclude=['tests.*', 'tests']),
include_package_data=True,
author='Andrzej Pragacz',
author_email='apragacz@o2.pl',
description='Functional programming goodies',
license='MIT',
keywords=' '.join((
'functional',
'monads',
'functors',
'streams',
'immutable',
)),
long_description=get_long_description('README.md'),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: IPython',
'Framework :: Jupyter',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities',
],
install_requires=get_requirements(
'requirements/requirements-base.txt'),
python_requires='>=3.6',
url='https://github.com/apragacz/functoolsplus',
)
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,056
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/collections/lists.py
|
from collections.abc import Sequence
from functoolsplus.collections.base import SingleLinkedStruct
from functoolsplus.utils.singletons import new_singleton
class SingleLinkedList(SingleLinkedStruct, Sequence):
def __new__(cls, iterable=None):
if iterable is None:
iterable = []
return cls.from_iterable(iterable)
@classmethod
def get_empty(cls):
return _SingleLinkedListEmptyType()
@classmethod
def cons(cls, head, tail):
return _SingleLinkedListConsType(head, tail)
@classmethod
def cons_simple(cls, head, tail):
return cls.cons(head, tail)
def __len__(self):
counter = 0
lst = self
while lst:
lst = lst.tail
counter += 1
return counter
def __eq__(self, other):
if not isinstance(other, SingleLinkedList):
return NotImplemented
lst1 = self
lst2 = other
while lst1 and lst2:
if lst1.head != lst2.head:
return False
lst1 = lst1.tail
lst2 = lst2.tail
return (not bool(lst1)) and (not bool(lst2))
def __add__(self, other):
if not isinstance(other, SingleLinkedList):
return NotImplemented
result = other
for item in reversed(self):
result = self.cons(item, result)
return result
def __map__(self, func):
result = self.get_empty()
for item in reversed(self):
result = self.cons(func(item), result)
return result
def __filter__(self, func):
result = self.get_empty()
for item in reversed(self):
if func(item):
result = self.cons(item, result)
return result
def __flatmap__(self, func):
reversed_result = self.get_empty()
for item in self:
for result_item in func(item):
reversed_result = self.cons(result_item, reversed_result)
return reversed_result.reversed()
def __reversed__(self):
return iter(self.reversed())
def reversed(self):
reversed_list = self.get_empty()
lst = self
while lst:
reversed_list = self.cons(lst.head, reversed_list)
lst = lst.tail
return reversed_list
def appended_left(self, item):
return self.cons(item, self)
def popped_left(self):
if not self:
raise ValueError(f"{type(self).__name__!r} object is empty")
return self.tail
def _get_repr_items(self):
items = []
lst = self
while lst:
items.append(lst.head)
lst = lst.tail
return items
class _SingleLinkedListEmptyType(SingleLinkedList):
def __new__(cls):
return new_singleton(cls)
def __bool__(self):
return False
class _SingleLinkedListConsType(SingleLinkedList):
def __new__(cls, head, tail):
obj = object.__new__(cls)
obj._head = head
obj._tail = tail
return obj
@property
def head(self):
return self._head
@property
def tail(self):
return self._tail
def __bool__(self):
return True
SingleLinkedList.empty = SingleLinkedList.get_empty()
for cls in (
_SingleLinkedListEmptyType,
_SingleLinkedListConsType):
cls.__internal_name__ = cls.__name__
cls.__name__ = SingleLinkedList.__name__
cls.__qualname__ = SingleLinkedList.__qualname__
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,057
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/functions.py
|
def identity(x):
return x
def always_false(x):
return False
def always_true(x):
return True
def add(x, y):
return x + y
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,058
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/containers/test_pipes.py
|
from functools import partial
import pytest
from hypothesis import given
from hypothesis import strategies as st
from functoolsplus import filter, map
from functoolsplus.containers.pipes import P
def test_chain_methods_cons():
pipe = (
P
.filter(lambda x: x % 2 == 1)
.map(lambda x: x + 1)
.sum()
.to_type(float))
result = pipe([1, 2, 3, 4, 5])
assert result == 12.0
assert isinstance(result, float)
def test_operator_cons():
pipe = (
P |
partial(filter, lambda x: x % 2 == 1) |
partial(map, lambda x: x + 1) |
sum |
float)
result = pipe([1, 2, 3, 4, 5])
assert result == 12.0
assert isinstance(result, float)
@given(st.lists(st.integers()))
def test_concat_with_another_pipe(input_list):
pipe1 = P | partial(map, lambda x: x + 1)
pipe2 = P | partial(map, lambda x: x * 2)
pipe12 = pipe1 | pipe2
pipe21 = pipe2 | pipe1
pipe22 = pipe2 | pipe2
assert pipe12(input_list) == [2 * (x + 1) for x in input_list]
assert pipe21(input_list) == [2 * x + 1 for x in input_list]
assert pipe22(input_list) == [4 * x for x in input_list]
@given(st.text())
def test_concat_with_str(input_str):
with pytest.raises(TypeError):
P | input_str
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,059
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/utils/implementations.py
|
from collections import OrderedDict, defaultdict, namedtuple
ImplEntry = namedtuple('ImplEntry', [
'base_cls',
'impl_cls',
])
_impementations_registry = defaultdict(OrderedDict)
def provide_impl_for(abc_class, base_class):
def decorator(impl_cls):
impl_reg = _impementations_registry[abc_class.__qualname__]
impl_reg[base_class.__qualname__] = ImplEntry(
base_cls=base_class,
impl_cls=impl_cls)
return impl_cls
return decorator
def get_impl(abc_class, cls):
impl_reg = _impementations_registry[abc_class.__qualname__]
for test_cls in cls.mro():
if test_cls.__qualname__ in impl_reg:
return impl_reg[test_cls.__qualname__].impl_cls
# For "virtual" base classes, which may not be in the MRO.
for impl_entry in impl_reg.values():
if issubclass(cls, impl_entry.base_cls):
return impl_entry.impl_cls
raise TypeError(
f'No implementation of {abc_class.__name__}'
f' for {cls.__name__} provided')
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,060
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/collections/test_single_linked_list.py
|
import pytest
from hypothesis import given
from hypothesis import strategies as st
from functoolsplus import filter as generic_filter
from functoolsplus import flatmap
from functoolsplus import map as generic_map
from functoolsplus import unit
from functoolsplus.collections import SingleLinkedList
from tests import strategies as tests_st
def test_empty_list_is_a_singleton():
assert SingleLinkedList() is SingleLinkedList.empty
assert SingleLinkedList([]) is SingleLinkedList.empty
@given(st.lists(st.integers()))
def test_len(input_list):
lst = SingleLinkedList(input_list)
assert len(lst) == len(input_list)
@given(st.lists(st.integers()))
def test_eq(input_list):
lst1 = SingleLinkedList(input_list)
lst2 = SingleLinkedList(input_list)
assert lst1 == lst2
@given(st.lists(st.integers()), st.lists(st.integers()))
def test_neq(input_list1, input_list2):
if input_list1 == input_list2:
return
lst1 = SingleLinkedList(input_list1)
lst2 = SingleLinkedList(input_list2)
assert lst1 != lst2
@given(st.lists(st.integers()), st.lists(st.integers()))
def test_concat(input_list1, input_list2):
lst1 = SingleLinkedList(input_list1)
lst2 = SingleLinkedList(input_list2)
assert lst1 + lst2 == SingleLinkedList(input_list1 + input_list2)
@given(st.lists(st.integers()), st.lists(st.integers()))
def test_concat_list(input_list1, input_list2):
lst1 = SingleLinkedList(input_list1)
with pytest.raises(TypeError):
lst1 + input_list2
@given(st.lists(st.integers()))
def test_isinstance(input_list):
lst = SingleLinkedList(input_list)
assert isinstance(lst, SingleLinkedList)
@given(st.lists(st.integers()))
def test_reversed(input_list):
lst = SingleLinkedList(input_list)
reversed_lst = lst.reversed()
assert len(lst) == len(reversed_lst)
assert len(lst) > 1 or lst == reversed_lst
assert reversed_lst.reversed() == lst
@given(st.lists(st.integers()), st.integers())
def test_appended_left(input_list, value):
lst = SingleLinkedList(input_list)
assert lst.appended_left(value) == SingleLinkedList([value] + input_list)
assert lst == SingleLinkedList(input_list)
@given(st.lists(st.integers(), min_size=1))
def test_popped_left_nonempty(input_list):
lst = SingleLinkedList(input_list)
assert lst.popped_left() == SingleLinkedList(input_list[1:])
assert lst == SingleLinkedList(input_list)
def test_popped_left_empty():
lst = SingleLinkedList()
with pytest.raises(ValueError):
lst.popped_left()
@given(st.lists(st.integers()), tests_st.integer_functions())
def test_map(input_list, func):
lst = SingleLinkedList(input_list)
expected_lst = SingleLinkedList(func(x) for x in input_list)
assert generic_map(func, lst) == expected_lst
@given(st.lists(st.integers()), tests_st.integer_predicates())
def test_filter(input_list, pred):
lst = SingleLinkedList(input_list)
expected_lst = SingleLinkedList(x for x in input_list if pred(x))
assert generic_filter(pred, lst) == expected_lst
@given(
st.lists(st.integers()),
tests_st.integer_expand_functions(SingleLinkedList))
def test_flatmap(input_list, expand_func):
lst = SingleLinkedList(input_list)
expected_list = [y for x in input_list for y in expand_func(x)]
assert isinstance(flatmap(expand_func, lst), SingleLinkedList)
assert list(flatmap(expand_func, lst)) == expected_list
@given(st.integers())
def test_unit(n):
lst = unit(SingleLinkedList, n)
assert lst == SingleLinkedList([n])
@given(st.lists(st.integers()), st.integers())
def test_getitem_int_key(input_list, i):
n = len(input_list)
lst = SingleLinkedList(input_list)
if 0 <= i < n:
assert lst[i] == input_list[i]
else:
with pytest.raises(IndexError):
lst[i]
@given(st.lists(st.integers()), st.text())
def test_getitem_str_key(input_list, key):
lst = SingleLinkedList(input_list)
with pytest.raises(TypeError):
lst[key]
@given(st.lists(st.integers()))
def test_str(input_list):
stream = SingleLinkedList(input_list)
assert 'SingleLinkedList' in str(stream)
for elem in input_list:
assert repr(elem) in str(stream)
def test_empty_head():
with pytest.raises(AttributeError):
SingleLinkedList.empty.head
def test_empty_tail():
with pytest.raises(AttributeError):
SingleLinkedList.empty.tail
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,061
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/collections/__init__.py
|
from .lists import SingleLinkedList # noqa: F401
from .streams import Stream # noqa: F401
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,062
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/collections/base.py
|
import abc
from collections.abc import Iterable, Reversible
from itertools import islice
from functoolsplus.abc import Filterable, Functor, Monad
class SingleLinkedStruct(Iterable, Filterable, Functor, Monad):
@classmethod
@abc.abstractclassmethod
def get_empty(cls):
raise NotImplementedError()
@classmethod
@abc.abstractclassmethod
def cons(cls, head, tail):
raise NotImplementedError()
@classmethod
@abc.abstractclassmethod
def cons_simple(cls, head, tail):
raise NotImplementedError()
@classmethod
def from_iterable(cls, iterable):
if not isinstance(iterable, Reversible):
iterable = list(iterable)
lst = cls.get_empty()
for item in reversed(iterable):
lst = cls.cons_simple(item, lst)
return lst
@staticmethod
def __unit__(cls, value):
return cls.cons_simple(value, cls.get_empty())
@property
def head(self):
raise AttributeError(
f"{type(self).__name__!r} object has no attribute 'head'")
@property
def tail(self):
raise AttributeError(
f"{type(self).__name__!r} object has no attribute 'tail'")
def __iter__(self):
current = self
while current:
yield current.head
current = current.tail
def __repr__(self):
items = self._get_repr_items()
if items:
return f'{type(self).__name__}({items!r})'
else:
return f'{type(self).__name__}()'
def __str__(self):
return repr(self)
def __getitem__(self, index):
if isinstance(index, slice):
return self.from_iterable(
islice(self, index.start, index.stop, index.step))
if isinstance(index, int):
try:
return next(islice(self, index, index + 1))
except (StopIteration, ValueError):
raise IndexError('list index out of range')
raise TypeError(
f"{type(self).__name__!r} indices must be integers or slices,"
f" not {type(index).__name__!r}")
@abc.abstractmethod
def _get_repr_items(self):
raise NotImplementedError()
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,063
|
apragacz/functoolsplus
|
refs/heads/master
|
/tests/test_hof.py
|
import pytest
from hypothesis import given
from hypothesis import strategies as st
from functoolsplus import filter as generic_filter
from functoolsplus import flatmap, fold
from functoolsplus import map as generic_map
from functoolsplus import unit
from functoolsplus.abc import Filterable, Foldable, Functor, Monad
from functoolsplus.utils.singletons import Missing
from tests import strategies as tests_st
from tests.functions import add, always_false, identity
class FallbackImpl(Filterable, Foldable, Functor, Monad, object):
def __init__(self, value):
self._value = value
def __fold__(self, func, *, initial_value=Missing):
return super().__fold__(func, initial_value=initial_value)
def __filter__(self, func):
return super().__filter__(func)
def __map__(self, func):
return super().__map__(func)
def __flatmap__(self, func):
return super().__flatmap__(func)
@staticmethod
def __unit__(cls, value):
return super().__unit__(cls, value)
class NoImpl(Filterable, Foldable, Functor, Monad, object):
def __init__(self, value):
self._value = value
__fold__ = None
__filter__ = None
__map__ = None
__flatmap__ = None
__unit__ = None
@given(st.lists(st.integers()), tests_st.integer_functions())
def test_list_map(input_list, func):
assert generic_map(func, input_list) == [func(x) for x in input_list]
@given(st.lists(st.integers()), tests_st.integer_predicates())
def test_list_filter(input_list, pred):
assert generic_filter(pred, input_list) == [
x for x in input_list if pred(x)]
@given(st.lists(st.integers(), min_size=1))
def test_list_fold_sum(input_list):
assert fold(lambda x, y: x + y, input_list) == sum(input_list)
def test_list_fold_sum_on_empty():
with pytest.raises(ValueError):
fold(lambda x, y: x + y, [])
@given(st.lists(st.integers()))
def test_list_fold_sum_with_initial_value(input_list):
assert fold(
lambda x, y: x + y, input_list, initial_value=0) == sum(input_list)
@given(st.lists(st.integers()), tests_st.integer_expand_functions(list))
def test_list_flatmap(input_list, expand_func):
assert flatmap(expand_func, input_list) == [
y for x in input_list for y in expand_func(x)]
@given(st.integers())
def test_list_unit(n):
assert unit(list, n) == [n]
@given(st.integers(), tests_st.integer_functions())
def test_int_map(n, func):
with pytest.raises(TypeError):
generic_map(func, n)
@given(st.integers())
def test_int_filter(n):
with pytest.raises(TypeError):
generic_filter(always_false, n)
@given(st.integers())
def test_int_fold(n):
with pytest.raises(TypeError):
fold(identity, n)
@given(st.integers())
def test_int_flatmap(n):
with pytest.raises(TypeError):
flatmap(identity, n)
@given(st.integers())
def test_int_unit(n):
with pytest.raises(TypeError):
unit(int, n)
def test_fallback_impl_map():
with pytest.raises(TypeError):
generic_map(identity, FallbackImpl(42))
def test_fallback_impl_filter():
with pytest.raises(TypeError):
generic_filter(always_false, FallbackImpl(42))
def test_fallback_impl_fold():
with pytest.raises(TypeError):
fold(add, FallbackImpl(42))
def test_fallback_impl_flatmap():
with pytest.raises(TypeError):
flatmap(lambda x: FallbackImpl(x), FallbackImpl(42))
def test_fallback_impl_unit():
with pytest.raises(TypeError):
unit(FallbackImpl, 42)
def test_no_impl_map():
with pytest.raises(TypeError):
generic_map(identity, NoImpl(42))
def test_no_impl_filter():
with pytest.raises(TypeError):
generic_filter(always_false, NoImpl(42))
def test_no_impl_fold():
with pytest.raises(TypeError):
fold(add, NoImpl(42))
def test_no_impl_flatmap():
with pytest.raises(TypeError):
flatmap(lambda x: NoImpl(x), NoImpl(42))
def test_no_impl_unit():
with pytest.raises(TypeError):
unit(NoImpl, 42)
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,064
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/abc.py
|
import abc
from functoolsplus.utils.singletons import Missing
class Functor(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __map__(self, func):
return NotImplemented
@classmethod
def __subclasshook__(cls, C):
if cls is Functor:
return _check_methods(C, '__map__')
return NotImplemented
class Monad(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __flatmap__(self, func):
return NotImplemented
@classmethod
def __subclasshook__(cls, C):
if cls is Monad:
return _check_methods(C, '__flatmap__', '__unit__')
return NotImplemented
@staticmethod
@abc.abstractmethod
def __unit__(cls, value):
return NotImplemented
class Filterable(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __filter__(self, func):
return NotImplemented
@classmethod
def __subclasshook__(cls, C):
if cls is Filterable:
return _check_methods(C, '__filter__')
return NotImplemented
class Foldable(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __fold__(self, func, *, initial_value=Missing):
return NotImplemented
@classmethod
def __subclasshook__(cls, C):
if cls is Foldable:
return _check_methods(C, '__fold__')
return NotImplemented
def _check_methods(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,065
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/collections/streams.py
|
from functools import partial
from functoolsplus.collections.base import SingleLinkedStruct
from functoolsplus.containers.lazy import LazyValue
from functoolsplus.hof import filter as generic_filter
from functoolsplus.hof import flatmap
from functoolsplus.hof import map as generic_map
from functoolsplus.utils.singletons import new_singleton
class Stream(SingleLinkedStruct):
def __new__(cls, iterable=None):
if iterable is None:
iterable = []
return cls.from_iterable(iterable)
@classmethod
def get_empty(cls):
return _StreamEmptyType()
@classmethod
def cons(cls, head, tail):
if not isinstance(tail, LazyValue) and callable(tail):
tail = LazyValue(tail)
return _StreamConsType(head, tail)
@classmethod
def cons_simple(cls, head, tail):
return cls.cons(head, LazyValue(value=tail))
def __add__(self, other):
if not isinstance(other, Stream):
return NotImplemented
return self._appended_with_lazy(LazyValue(value=other))
def __map__(self, func):
if not self:
return self.get_empty()
return self.cons(
func(self.head),
self._map_tail_lazy(partial(generic_map, func)))
def __filter__(self, func):
stream = self
while stream and not func(stream.head):
stream = stream.tail
if not stream:
return self.get_empty()
return self.cons(
stream.head,
stream._map_tail_lazy(partial(generic_filter, func)))
def __flatmap__(self, func):
if not self:
return self.get_empty()
result_items = func(self.head)
return result_items._appended_with_lazy(
self._map_tail_lazy(partial(flatmap, func)))
def _appended_with_lazy(self, other_stream_lazy):
if not self:
return other_stream_lazy.value
return self.cons(
self.head,
self._map_tail_lazy(
lambda s: s._appended_with_lazy(other_stream_lazy)))
def _map_tail_lazy(self, func):
raise NotImplementedError()
def _get_repr_items(self):
items = []
stream = self
while stream:
items.append(stream.head)
lazy_tail = stream._tail_lazy # pylint: disable=E1101
if not lazy_tail.is_evaluated():
items.append(lazy_tail.raw_value)
break
stream = stream.tail
return items
class _StreamEmptyType(Stream):
def __new__(cls):
return new_singleton(cls)
def __bool__(self):
return False
class _StreamConsType(Stream):
def __new__(cls, head, tail):
if not isinstance(tail, LazyValue):
raise TypeError("'tail' should be lazy value")
obj = object.__new__(cls)
obj._head = head
obj._tail_lazy = tail
return obj
@property
def head(self):
return self._head
@property
def tail(self):
value = self._tail_lazy.value
if not isinstance(value, Stream):
raise AttributeError('The tail evaluator returned invalid type')
return value
def __bool__(self):
return True
def _map_tail_lazy(self, func):
return generic_map(func, self._tail_lazy)
Stream.empty = Stream.get_empty()
for cls in (_StreamEmptyType, _StreamConsType):
cls.__internal_name__ = cls.__name__
cls.__name__ = Stream.__name__
cls.__qualname__ = Stream.__qualname__
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,066
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/__init__.py
|
from functoolsplus.containers.pipes import P, Pipe, PipeRegistry # noqa: F401
from functoolsplus.containers.pipes import default_registry as default_pipe_registry # noqa: F401, E501
from functoolsplus.hof import filter, flatmap, fold, map, unit # noqa: F401
__version__ = '0.0.1'
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,067
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/containers/pipes.py
|
from collections.abc import Callable, Mapping
from functools import partial
from functoolsplus.hof import filter as generic_filter
from functoolsplus.hof import flatmap, fold
from functoolsplus.hof import map as generic_map
from functoolsplus.utils.singletons import Missing
class PipeRegistry(Mapping):
def __init__(self):
self._registry = {}
def __getitem__(self, key):
return self._registry[key]
def __iter__(self):
return iter(self._registry)
def __len__(self):
return len(self._registry)
def register(self, name, func):
obj = self._clone()
obj._registry[name] = func
return obj
def _clone(self):
cls = type(self)
obj = cls()
obj._registry = self._registry.copy()
return obj
default_registry = (
PipeRegistry()
.register('map', generic_map)
.register('filter', generic_filter)
.register('flatmap', flatmap)
.register('fold', fold)
.register('len', len)
.register('sum', sum)
.register('min', min)
.register('max', max)
.register('imap', map)
.register('ifilter', filter)
)
class Pipe(Callable):
def __init__(self, input_value=Missing, registry=default_registry):
self._steps = []
self._registry = registry
self._input_value = input_value
def __or__(self, other):
if isinstance(other, Pipe):
obj = self._clone()
obj._steps.extend(other._steps)
assert other._input_value is Missing
return obj
elif callable(other):
return self.step(other)
return NotImplemented
def __getattr__(self, name):
def func(*args, **kwargs):
obj = args[-1]
if not hasattr(obj, name) and name in self._registry:
f = self._registry[name]
else:
f = getattr(obj, name)
return f(*args, **kwargs)
return PipeCall(self, func)
def step(self, func):
obj = self._clone()
obj._steps.append(func)
return obj
def to_type(self, type_):
return self.step(type_)
def _clone(self):
cls = type(self)
obj = cls()
obj._steps = self._steps[:]
obj._registry = self._registry
obj._input_value = self._input_value
return obj
def __call__(self, input_value):
value = input_value
for f in self._steps:
value = f(value)
return value
class PipeCall(Callable):
def __init__(self, pipe, func):
self._pipe = pipe
self._func = func
def __call__(self, *args, **kwargs):
step_func = partial(self._func, *args, **kwargs)
return self._pipe.step(step_func)
P = Pipe()
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,068
|
apragacz/functoolsplus
|
refs/heads/master
|
/functoolsplus/containers/lazy.py
|
from functoolsplus.abc import Functor
from functoolsplus.utils.singletons import new_singleton
class UnevaluatedType(object):
def __new__(cls):
return new_singleton(cls)
def __repr__(self):
return '<unevaluated>'
def __str__(self):
return repr(self)
Unevaluated = UnevaluatedType()
class LazyValue(Functor):
def __init__(self, value_evaluator=None, *, value=Unevaluated):
if not (bool(value is not Unevaluated) ^
bool(value_evaluator is not None)):
raise ValueError(
"You need to provide either value_evaluator or value"
" exclusively")
self._value_eval = value_evaluator
self._value = value
@property
def value(self):
if not self.is_evaluated():
self._value = self._value_eval()
return self._value
@property
def raw_value(self):
return self._value
def __map__(self, func):
cls = type(self)
return cls(lambda: func(self.value))
def __repr__(self):
return f'{type(self).__name__}({self._value!r})'
def __str__(self):
return repr(self)
def is_evaluated(self):
return self._value is not Unevaluated
|
{"/functoolsplus/hof.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/implementations.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_lazy.py": ["/functoolsplus/containers/lazy.py"], "/tests/strategies.py": ["/tests/functions.py"], "/tests/collections/test_stream.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/lists.py": ["/functoolsplus/collections/base.py", "/functoolsplus/utils/singletons.py"], "/tests/containers/test_pipes.py": ["/functoolsplus/__init__.py", "/functoolsplus/containers/pipes.py"], "/tests/collections/test_single_linked_list.py": ["/functoolsplus/__init__.py", "/functoolsplus/collections/__init__.py"], "/functoolsplus/collections/__init__.py": ["/functoolsplus/collections/lists.py", "/functoolsplus/collections/streams.py"], "/functoolsplus/collections/base.py": ["/functoolsplus/abc.py"], "/tests/test_hof.py": ["/functoolsplus/__init__.py", "/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py", "/tests/functions.py"], "/functoolsplus/abc.py": ["/functoolsplus/utils/singletons.py"], "/functoolsplus/collections/streams.py": ["/functoolsplus/collections/base.py", "/functoolsplus/containers/lazy.py", "/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/__init__.py": ["/functoolsplus/containers/pipes.py", "/functoolsplus/hof.py"], "/functoolsplus/containers/pipes.py": ["/functoolsplus/hof.py", "/functoolsplus/utils/singletons.py"], "/functoolsplus/containers/lazy.py": ["/functoolsplus/abc.py", "/functoolsplus/utils/singletons.py"]}
|
17,082
|
fagan2888/Python-Weighted-Means
|
refs/heads/master
|
/test_solution.py
|
#To test in terminal call: pytest test_solution.py -v --durations=1
import solution
import pytest
import numpy as np
def test_three_groups():
vals = [1, 2, 3, 8, 5]
grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']
grps_2 = ['MA', 'MA', 'MA', 'RI', 'RI']
grps_3 = ['WEYMOUTH', 'BOSTON', 'BOSTON', 'PROVIDENCE', 'PROVIDENCE']
weights = [.15, .35, .5]
adj_vals = solution.group_adjust(vals, [grps_1, grps_2, grps_3], weights)
answer = [-0.770, -0.520, 0.480, 1.905, -1.095]
for ans, res in zip(answer, adj_vals):
assert abs(ans - res) < 1e-5
def test_two_groups():
vals = [1, 2, 3, 8, 5]
grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA']
grps_2 = ['MA', 'RI', 'CT', 'CT', 'CT']
weights = [.65, .35]
adj_vals = solution.group_adjust(vals, [grps_1, grps_2], weights)
answer = [-1.82, -1.17, -1.33666, 3.66333, 0.66333]
for ans, res in zip(answer, adj_vals):
assert abs(ans - res) < 1e-5
def test_missing_vals():
vals = [1, np.NaN, 3, 5, 8, 7]
grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']
grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']
weights = [.65, .35]
adj_vals = solution.group_adjust(vals, [grps_1, grps_2], weights)
answer = [-2.47, np.NaN, -1.170, -0.4533333, 2.54666666, 1.54666666]
for ans, res in zip(answer, adj_vals):
if ans is None:
assert res is None
elif np.isnan(ans):
assert np.isnan(res)
else:
assert abs(ans - res) < 1e-5
def test_weights_len_equals_group_len():
# Need to have 1 weight for each group
vals = [1, np.NaN, 3, 5, 8, 7]
grps_1 = ['USA', 'USA', 'USA', 'USA', 'USA', 'USA']
grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']
weights = [.65]
with pytest.raises(ValueError):
solution.group_adjust(vals, [grps_1, grps_2], weights)
pass
def test_group_len_equals_vals_len():
# The groups need to be same shape as vals
vals = [1, None, 3, 5, 8, 7]
grps_1 = ['USA']
grps_2 = ['MA', 'RI', 'RI', 'CT', 'CT', 'CT']
weights = [.65]
with pytest.raises(ValueError):
solution.group_adjust(vals, [grps_1, grps_2], weights)
pass
def test_performance():
vals = 1000000 * [1, np.NaN, 3, 5, 8, 7]
grps_1 = 1000000 * [1, 1, 1, 1, 1, 1]
grps_2 = 1000000 * [1, 1, 1, 1, 2, 2]
grps_3 = 1000000 * [1, 2, 2, 3, 4, 5]
weights = [.20, .30, .50]
#Timed using --durations when calling pytest
solution.group_adjust(vals, [grps_1, grps_2, grps_3], weights)
|
{"/test_solution.py": ["/solution.py"]}
|
17,083
|
fagan2888/Python-Weighted-Means
|
refs/heads/master
|
/solution.py
|
import numpy as np
def group_adjust(vals,groups,weights):
#Check if inputs are appropriate
for j in groups:
if len(j) != len(vals):
raise ValueError('Size of each group must be same as size of val list')
if len(groups) != len(weights):
raise ValueError('Number of weights must equal number of groups')
#Make lists into arrays for computational efficiency
vals = np.array(vals)
groups = np.array(groups)
#Find indices in list where value is np.NaN
value_list = np.where(np.isfinite(vals))[0]
group_means = []
for i in range(len(groups)):
#Find indices of unique elements
_, indices = np.unique(groups[i], return_inverse=True)
#Make array of np.NaN of size of vals
group_avg = np.empty(len(vals))
group_avg.fill(np.NaN)
indices = np.array(indices)
#For each unique group element, get mean of vals for indices where that element is present and vals is finite
for j in np.unique(indices):
val_indices = np.where(indices == j)[0]
group_avg[np.intersect1d(val_indices,value_list)] = np.mean(vals[np.intersect1d(val_indices,value_list)])
group_means.append(group_avg)
#Array of means for each element at all its locations in its group (where vals is finite)
group_means = np.array(group_means)
#Demeaned values
weights = np.array(weights)
final_vals = np.array(vals) - weights.dot(group_means)
return final_vals
|
{"/test_solution.py": ["/solution.py"]}
|
17,098
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Plugins/LastSeen/lastseen.py
|
#!/usr/bin/env python
import sqlite3
import time
class Seen(object):
def __init__(self, database=":memory:"):
self.database = database
self.Conn = sqlite3.connect(self.database, check_same_thread = False)
self.Cursor = self.Conn.cursor()
self.Cursor.execute("CREATE TABLE IF NOT EXISTS lastseen (nick TEXT PRIMARY KEY, channel TEXT, last_msg TEXT, timestamp DATE)")
def getLastSeen(self, nick):
results = self.Cursor.execute("SELECT channel, last_msg, timestamp FROM lastseen WHERE nick=?", [nick])
try:
channel, last_msg, timestamp = results.fetchone()
return (nick, channel, last_msg, timestamp)
except TypeError:
return None
def addLastSeen(self, nick, channel, last_msg):
timestamp = time.time()
x = self.Cursor.execute("SELECT COUNT(0) FROM lastseen WHERE nick=?", [nick])
if x.fetchone()[0] == 0:
self.Cursor.execute("INSERT INTO lastseen VALUES (?, ?, ?, ?)", [nick, channel, last_msg, timestamp])
else:
self.Cursor.execute("UPDATE lastseen SET channel=?, last_msg=?, timestamp=? WHERE nick=?", [channel, last_msg, timestamp, nick])
self.Conn.commit()
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
17,099
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Plugins/LastSeen/Settings.py
|
Settings = {"database": "LastSeen.sql3"
,"timestamp": "%h %d, %Y around %I:%M:%S %p" # 12 hr w/ am/pm
# ,"timestamp": "%h %d, %Y around %H:%M:%S" # 24 hr format
,"blacklist": ["#opers", "#services"]
}
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
17,100
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Plugins/Gentbot/Settings.py
|
Settings = {
'allowed':['Ferus!anonymous@the.interwebs']
,'gentbot': {
'replyrate': 15
,"data_dir": "Plugins/Gentbot/data/" #data dir
,"num_contexts": 0 # Total word contextx
,"num_words": 0 # Total unique words known
,"max_words": 12000 # Max limit in the number of words known
,"learning": True # Allow the bot to learn?
,"ignore_list": ['!.', '?.', "'", ',', ';', 'asl', 'h'] # Words to ignore
,"no_save": False # If true, dont save to disk
}
,'twitter': {
'use': False,
'oauth-keys': {
"consumer_key": 'h',
"consumer_secret": 'h',
"access_token_key": 'h',
"access_token_secret": 'h'
}
}
}
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
17,101
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Plugins/Gentbot/Main.py
|
#! /usr/bin/env python
from time import sleep
from threading import Thread
try:
import queue
except ImportError:
import Queue as queue
from . import pyborg
from .Settings import Settings
class TwitterOutput(object):
def __init__(self, oauth_keys):
twitter = __import__('twitter')
self.apiHandle = twitter.Api(**oauth_keys)
def tweet(self, message):
if (len(message) < 8):
return
else:
if len(message.strip()) > 140:
tweet = message.strip()[0:136]+'...'
else:
tweet = message.strip()
try:
self.apiHandle.PostUpdate(tweet)
except Exception:
pass # until there is proper logging
class Main(object):
def __init__(self, Name, Parser):
self.__name__ = Name
self.Parser = Parser
self.IRC = self.Parser.IRC
self.Queue = queue.Queue()
if Settings.get('twitter').get('use'):
try:
self.twitter = TwitterOutput(Settings.get('twitter').get('oauth-keys'))
self.isTweeting = True
except (ImportError, KeyError):
self.isTweeting = False
else:
self.isTweeting = False
self.Pyborg = pyborg.pyborg(settings=Settings.get('gentbot'))
self.Learning = Settings.get('gentbot').get('learning')
self.Replyrate = Settings.get('gentbot').get('replyrate')
self.Main = Thread(target=self.processForever)
self.Main.daemon = True
self.Main.start()
def Load(self):
self.Parser.hookCommand("PRIVMSG", self.__name__, {".*": self.process})
def Unload(self):
self.save()
del self.Parser.loadedPlugins[self.__name__]
def Reload(self):
pass
def save(self):
self.Pyborg.save_all()
def output(self, message, data):
if self.isTweeting and "\x19\x08\x15\x21\x10\x15\x20\x01\x03\x08\x09" not in data:
# This is a horrible hack, but it would be more accurate than checking for
# admin output with regexes
self.twitter.tweet(message)
self.IRC.say(data[2], message)
def process(self, data):
body = " ".join(data[3:])[1:]
owner = 1 if data[0] in Settings.get("allowed") else 0
replyrate = 100 if self.IRC.getnick() in body else self.Replyrate
if body.startswith("@"):
pass
else:
args = (self, body, replyrate, self.Learning, data, owner)
self.addToQueue(args)
def addToQueue(self, args):
self.Queue.put_nowait(args)
def getFromQueue(self):
return self.Queue.get()
def processForever(self):
while True:
t = Thread(target=self.Pyborg.process_msg, args=self.getFromQueue())
t.daemon = True
t.start()
sleep(.2)
# find out what I can do to minimize threads
# change command handling to @gentbot <command> <args>
# use pyborg.py to convert databases
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
17,102
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Plugins/UrbanDictionary/Main.py
|
#!/usr/bin/env python
import requests
import re
import json
from html import entities as htmlentitydefs
import logging
logger = logging.getLogger("UrbanDictionary")
from Parser import Locker
Locker = Locker(5)
from .Settings import Settings
def convert(text):
"""Decode HTML entities in the given text."""
try:
if type(text) is str:
uchr = chr
else:
uchr = lambda value: value > 255 and chr(value) or chr(value)
def entitydecode(match, uchr=uchr):
entity = match.group(1)
if entity.startswith('#x'):
return uchr(int(entity[2:], 16))
elif entity.startswith('#'):
return chr(int(entity[1:]))
elif entity in htmlentitydefs.name2codepoint:
return chr(htmlentitydefs.name2codepoint[entity])
else:
return match.group(0)
charrefpat = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?')
text = charrefpat.sub(entitydecode, text)
return text
except Exception as e:
logger.exception("Error'd on convert()")
return text
# http://www.urbandictionary.com/tooltip.php?term= <-- Thank god for this url.
# http://api.urbandictionary.com/v0/define?term= <-- Even better,
class Main(object):
def __init__(self, Name, Parser):
self.__name__ = Name
self.Parser = Parser
self.IRC = self.Parser.IRC
self.CacheFile = Settings.get("CacheFile", "Plugins/UrbanDictionary/Cache.txt")
# create it if it doesnt exist.
try:
c = open(self.CacheFile, 'r')
except IOError:
c = open(self.CacheFile, 'w')
c.close()
del c
def checkCacheForDef(self, word):
with open(self.CacheFile, 'r') as c:
cache = c.read().split('\n')
for line in cache:
if line.startswith(word.lower()):
return line.split(" : ")[1]
else:
return False
def addWordToCache(self, word, definition=''):
with open(self.CacheFile, 'a') as c:
logger.info('Adding word {0}'.format(word))
c.write("{0} : {1}\n".format(word, definition))
def Main(self, data):
if Locker.Locked:
self.IRC.notice(data[0].split("!")[0], "Please wait a little bit longer before using this command.")
return None
word = ' '.join(data[4:])
checkCache = self.checkCacheForDef(word)
if checkCache:
logger.info("Sending cached word.")
self.IRC.say(data[2], "\x02[UrbanDict]\x02 {0}: {1}".format(word, checkCache))
Locker.Lock()
return None
logger.info("Polling UrbanDictionary.")
url = "http://www.urbandictionary.com/tooltip.php?term={0}".format(word.replace(" ","%20"))
try:
html = requests.get(url).text
except requests.HTTPError:
logger.exception("Failed to connect.")
self.IRC.say(data[2], "Failed to connect to Urban Dictionary.")
return None
html = html.replace("\\u003C", "<").replace("\\u003E",">")
html = json.loads(html)['string']
try:
result = re.sub(r'[\r\n\t]', "", html)
result, other = re.search("<div>\s*<b>.*?</b></div><div>\s*(?:.*?<br/><br/>)?(.*?)</div>(?:<div class='others'>\s*(.*?)</div>)?", result).groups()
except Exception as e:
logger.exception("Error parsing html")
result = None
if not result or result is None or result == '':
self.IRC.say(data[2], "\x02[UrbanDict]\x02 {0} has not yet been defined.".format(word))
return None
results = []
for x in re.split("<br/>", result):
if x == " " or x == "":
continue
x = x.replace('"', '"').replace('<b>', '\x02').replace('</b>', '\x02').replace('<br/>', '')
results.append(x)
self.IRC.say(data[2], "\x02[UrbanDict]\x02 {0}: {1}".format(word, x))
Locker.Lock()
self.addWordToCache(word.lower(), " ".join(results))
def Load(self):
self.Parser.hookCommand("PRIVMSG", self.__name__, {"^@ud .*?$": self.Main})
def Unload(self):
pass
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
17,103
|
ctrlcctrlv/WhergBot
|
refs/heads/master
|
/Config.py
|
#!/usr/bin/env python
Global = {
"unwantedchars": "\x03(?:[0-9]{1,2})?(?:,[0-9]{1,2})?|\x02|\x07|\x1F"
}
Servers = {
"DatNode":
{"host": "mempsimoiria.datnode.net"
,"port": 6697
,"nick": "Wherg"
,"realname": "WhergBot 2.0 [Ferus]"
,"ident": "Wherg"
,"channels": "#boats"
,"ssl": True
,"enabled": True
,"quitmessage": "h"
,"plugins": [
'Services'
,'CleverBot'
,'Omegle'
,'Oper'
,'Plinko'
,'Misc'
,'Quit'
,'Matix'
,'Fapget'
,'GuessingGame'
,'PyFileServ'
,'Quotes'
,'EightBall'
,'Wikipedia'
,'Told'
,'Uptime'
,'SloganMaker'
,'Exec'
,'Asl'
,'Meme'
,'Etymology'
,'Slap'
,'YouTube'
,'Tinyboard'
,'Ermahgerd'
,'Wordnik'
,'UrbanDictionary'
,'InsultGenerator'
,'FuckMyLife'
,'Imgur'
,'General'
,'Weather'
,'Roulette'
# ,'PyMpd'
,'WhatStatus'
,'WheelofFortune'
,'UrlAnnounce'
]
,"owner": {"nicks": ["Ferus", "Ferrous", "^"]
,"hosts": ["the.interwebs", "ur.messages"]
,"idents": ["anonymous", "carroting"]
}
}
,"GentBot":
{"host": "mempsimoiria.datnode.net"
,"port": 6697
,"nick": "gentbot|h"
,"realname": "WhergBot 2.0 [Ferus]"
,"ident": "gentbot"
,"channels": "#boats"
,"ssl": True
,"enabled": False
,"quitmessage": 'h'
,"plugins": [
'General'
,'GentBot'
,'Services'
]
,"owner": {"nicks": ["Ferus"]
,"hosts": ["the.interwebs"]
,"idents": ["anonymous"]
}
}
}
|
{"/Plugins/Gentbot/Main.py": ["/Plugins/Gentbot/Settings.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.