index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
44,825 | elektrodx/omcor-beta-v01 | refs/heads/master | /omcor/order/migrations/0002_orderdetail_item.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-05 23:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
# Adds the mandatory `item` foreign key to OrderDetail, linking each order
# line to an Item from the `items` app.
class Migration(migrations.Migration):
dependencies = [
('items', '0001_initial'),
('order', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='orderdetail',
name='item',
# default=0 only backfills pre-existing rows; preserve_default=False
# removes the default from the model state afterwards.
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='items.Item'),
preserve_default=False,
),
]
| {"/omcor/order/admin.py": ["/omcor/order/models.py"], "/omcor/items/admin.py": ["/omcor/items/models.py"], "/omcor/clients/admin.py": ["/omcor/clients/models.py"]} |
44,826 | elektrodx/omcor-beta-v01 | refs/heads/master | /omcor/clients/models.py | from __future__ import unicode_literals
from django.db import models
class Client(models.Model):
"""Customer contact record."""
name = models.CharField(max_length=100)
# fono: presumably a phone number; ci: presumably a national ID — TODO confirm.
fono = models.CharField(max_length=15)
email = models.EmailField(max_length=254, blank=True)
ci = models.CharField(max_length=15)
address = models.CharField(max_length=256)
# Python 2 string representation (codebase targets Django 1.9 with
# unicode_literals); shown in the admin list and FK dropdowns.
def __unicode__(self):
return self.name
| {"/omcor/order/admin.py": ["/omcor/order/models.py"], "/omcor/items/admin.py": ["/omcor/items/models.py"], "/omcor/clients/admin.py": ["/omcor/clients/models.py"]} |
44,827 | elektrodx/omcor-beta-v01 | refs/heads/master | /omcor/clients/admin.py | from django.contrib import admin
from .models import Client
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
"""Admin change-list configuration for Client: show all contact fields."""
list_display = ('name', 'fono', 'email', 'ci', 'address')
| {"/omcor/order/admin.py": ["/omcor/order/models.py"], "/omcor/items/admin.py": ["/omcor/items/models.py"], "/omcor/clients/admin.py": ["/omcor/clients/models.py"]} |
44,828 | elektrodx/omcor-beta-v01 | refs/heads/master | /omcor/items/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-05 23:01
from __future__ import unicode_literals
from django.db import migrations, models
# Initial schema for the `items` app: creates the Item catalogue table.
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('brand', models.CharField(max_length=255)),
('code', models.CharField(max_length=30)),
# upload_to=b'' stores images at the MEDIA_ROOT top level.
('picture', models.ImageField(upload_to=b'')),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('weight', models.DecimalField(decimal_places=2, max_digits=6)),
],
),
]
| {"/omcor/order/admin.py": ["/omcor/order/models.py"], "/omcor/items/admin.py": ["/omcor/items/models.py"], "/omcor/clients/admin.py": ["/omcor/clients/models.py"]} |
44,829 | counter-king/ongorWebsite | refs/heads/main | /website/migrations/0010_remove_ordermodel_quantity.py | # Generated by Django 3.1.6 on 2021-02-09 10:40
from django.db import migrations
# Drops the unused `quantity` column from OrderModel (the companion to
# 0009, which removed the same field from MenuItem).
class Migration(migrations.Migration):
dependencies = [
('website', '0009_remove_menuitem_quantity'),
]
operations = [
migrations.RemoveField(
model_name='ordermodel',
name='quantity',
),
]
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,830 | counter-king/ongorWebsite | refs/heads/main | /website/models.py | from django.db import models
# Create your models here.
class MenuItem(models.Model):
"""A dish or drink on the menu, grouped into one or more categories."""
name = models.CharField(max_length=100)
image = models.ImageField(upload_to='menu_images/')
# decimal_places=0: prices are stored as whole currency units.
price = models.DecimalField(max_digits=10, decimal_places=0)
category = models.ManyToManyField('Category', related_name='item')
def __str__(self):
return self.name
class Category(models.Model):
"""A named grouping of menu items (e.g. meals vs. drinks)."""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class OrderModel(models.Model):
    """A customer order: selected menu items, total price and contact info."""
    created_on = models.DateTimeField(auto_now_add=True)
    # decimal_places=0: totals are whole currency units.
    price = models.DecimalField(max_digits=25, decimal_places=0)
    items = models.ManyToManyField('MenuItem', related_name='order', blank=True)
    name = models.CharField(max_length=100, blank=True)
    phone = models.CharField(max_length=100, blank=True)
    address = models.CharField(max_length=200, blank=True)

    def __str__(self):
        # Fixed garbled strftime format ("%b, %d, %I:, %M, %p" printed a stray
        # ":," between hour and minutes); now renders e.g. "Feb 09, 10:40 PM".
        return f'Order: {self.created_on.strftime("%b %d, %I:%M %p")}'
class GalleryModel(models.Model):
    """A single photo in the site gallery, stamped with its upload time."""
    created_on = models.DateTimeField(auto_now_add=True)
    image = models.ImageField(upload_to='gallery_images/')

    def __str__(self):
        # Fixed garbled strftime format ("%b, %d, %I:, %M, %p" printed a stray
        # ":," between hour and minutes); now renders e.g. "Feb 09, 10:40 PM".
        return f'Image: {self.created_on.strftime("%b %d, %I:%M %p")}'
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,831 | counter-king/ongorWebsite | refs/heads/main | /website/migrations/0006_auto_20210205_1904.py | # Generated by Django 3.1.6 on 2021-02-05 14:04
from django.db import migrations, models
# Makes MenuItem.quantity nullable with default 0 (the field was later
# removed entirely in migrations 0009/0010).
class Migration(migrations.Migration):
dependencies = [
('website', '0005_auto_20210204_1801'),
]
operations = [
migrations.AlterField(
model_name='menuitem',
name='quantity',
field=models.IntegerField(default=0, null=True),
),
]
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,832 | counter-king/ongorWebsite | refs/heads/main | /website/migrations/0005_auto_20210204_1801.py | # Generated by Django 3.1.6 on 2021-02-04 13:01
from django.db import migrations, models
# Widens OrderModel.price from max_digits=15 to max_digits=25.
class Migration(migrations.Migration):
dependencies = [
('website', '0004_menuitem_quantity'),
]
operations = [
migrations.AlterField(
model_name='ordermodel',
name='price',
field=models.DecimalField(decimal_places=0, max_digits=25),
),
]
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,833 | counter-king/ongorWebsite | refs/heads/main | /website/migrations/0003_gallerymodel.py | # Generated by Django 3.1.6 on 2021-02-03 13:28
from django.db import migrations, models
# Creates the GalleryModel table for site gallery photos.
class Migration(migrations.Migration):
dependencies = [
('website', '0002_auto_20210203_1516'),
]
operations = [
migrations.CreateModel(
name='GalleryModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('image', models.ImageField(upload_to='gallery_images/')),
],
),
]
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,834 | counter-king/ongorWebsite | refs/heads/main | /website/views.py | from django.shortcuts import render
from django.views import View
from django.views.generic import ListView
from django.core.mail import send_mail
from .models import OrderModel, Category, MenuItem, GalleryModel
# Create your views here.
def home(request):
    """Render the landing page."""
    context = {}
    return render(request, 'home.html', context)
def about(request):
    """Render the static about page."""
    context = {}
    return render(request, 'about.html', context)
def contact(request):
    """Render the contact page; on POST, email the submitted message.

    Expects the form fields ``message-name``, ``message-email``,
    ``message-subject`` and ``message``.
    """
    if request.method == "POST":
        # .get() instead of [] so a malformed POST yields empty strings
        # instead of a 500 from MultiValueDictKeyError.
        message_name = request.POST.get('message-name', '')
        message_email = request.POST.get('message-email', '')
        message_subject = request.POST.get('message-subject', '')
        message = request.POST.get('message', '')
        send_mail(
            message_subject + ' raqamidan xabar!',
            message,
            message_email,
            ['ungorchoyxona@gmail.com'],
            fail_silently=False,
        )
        # Echo the sender's name back so the template can confirm receipt.
        return render(request, 'contact.html', {'message_name': message_name})
    return render(request, 'contact.html', {})
# def order(request):
# return render(request, 'order.html', {})
class Order(View):
    """Order page: GET shows the menu, POST records an order and confirms."""

    def get(self, request, *args, **kwargs):
        # Fetch the items of each category shown on the order form.
        ordered_meals = MenuItem.objects.filter(category__name__contains='Buyurtma_Taom')
        drinks = MenuItem.objects.filter(category__name__contains='Ichimlik')
        context = {
            'ordered_meals': ordered_meals,
            'drinks': drinks
        }
        return render(request, 'order.html', context)

    def post(self, request, *args, **kwargs):
        name = request.POST.get('name')
        phone = request.POST.get('phone')
        address = request.POST.get('address')
        order_items = {
            'items': [],
        }
        items = request.POST.getlist('items[]')
        for item in items:
            # FIX: exact primary-key lookup. The previous `pk__contains`
            # lookup substring-matched the pk (e.g. "1" also matches 11, 21…)
            # and could raise MultipleObjectsReturned.
            menu_item = MenuItem.objects.get(pk=int(item))
            order_items['items'].append({
                'id': menu_item.pk,
                'name': menu_item.name,
                'price': menu_item.price,
            })
        price = 0
        # One quantity applies to the whole order, so read it once instead of
        # on every loop iteration; `or 0` avoids int(None) when the field is
        # missing from the POST.
        quantity = request.POST.get('quantity') or 0
        item_ids = []
        for item in order_items['items']:
            price += item['price'] * int(quantity)
            item_ids.append(item['id'])
        order = OrderModel.objects.create(
            price=price,
            name=name,
            phone=phone,
            address=address
        )
        order.items.add(*item_ids)
        context = {
            'items': order_items['items'],
            'price': price,
            'quantity': quantity
        }
        return render(request, 'order_confirmation.html', context)
class Gallery(ListView):
"""List view of all gallery photos, rendered by gallery.html."""
model = GalleryModel
template_name = 'gallery.html'
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,835 | counter-king/ongorWebsite | refs/heads/main | /website/urls.py | from django.urls import path
from .import views
from .views import Order, Gallery
# URL routes for the website app: static pages plus the order flow and gallery.
urlpatterns = [
path('', views.home, name="home"),
path('about/', views.about, name="about"),
path('contact/', views.contact, name="contact"),
# path('order/', views.order, name="order"),
path('order/', Order.as_view(), name='order'),
path('gallery/', Gallery.as_view(), name='gallery'),
] | {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,836 | counter-king/ongorWebsite | refs/heads/main | /website/migrations/0001_initial.py | # Generated by Django 3.1.6 on 2021-02-03 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='MenuItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='menu_images/')),
('price', models.DecimalField(decimal_places=0, max_digits=10)),
('category', models.ManyToManyField(related_name='item', to='website.Category')),
],
),
migrations.CreateModel(
name='OrderModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('price', models.DecimalField(decimal_places=0, max_digits=15)),
('items', models.ManyToManyField(blank=True, related_name='order', to='website.MenuItem')),
],
),
]
| {"/website/views.py": ["/website/models.py"], "/website/urls.py": ["/website/views.py"]} |
44,883 | jester155/link-checker | refs/heads/master | /broken_link_crawler/spiders/broken-links-spider.py | __author__ = 'Mark'
from scrapy.contrib.spiders import CrawlSpider , Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from broken_link_crawler.items import BrokenItem
import broken_link_crawler.crawler_config as config
class BrokenSpider(CrawlSpider) :
"""Crawl the configured site and yield a BrokenItem for every response
whose HTTP status indicates a broken link."""
name = config.name
allowed_domains = config.allowed_domains
start_urls = config.start_urls
# Let these error statuses reach parse_items instead of being dropped by
# Scrapy's default middleware.
handle_httpstatus_list = [400 , 404 , 500 , 503]
rules = (Rule(SgmlLinkExtractor() , callback='parse_items' , follow=True) ,)
def parse_items(self , response) :
# Record anything that is neither OK nor a permanent redirect.
if response.status != 200 and response.status != 301 :
item = BrokenItem()
item['url'] = response.url
# Referer header identifies the page that linked to the broken URL.
item['referer'] = response.request.headers.get('Referer')
item['status'] = response.status
yield item | {"/broken_link_crawler/spiders/broken-links-spider.py": ["/broken_link_crawler/crawler_config.py"]} |
44,884 | jester155/link-checker | refs/heads/master | /broken_link_crawler/crawler_config.py |
# Spider identifier used by `scrapy crawl`.
name = 'broken_links'
# Restrict the crawl to this domain.
allowed_domains = ['floridapolytechnic.org']
# Seed URL(s) the crawl begins from.
start_urls = ['https://floridapolytechnic.org']
| {"/broken_link_crawler/spiders/broken-links-spider.py": ["/broken_link_crawler/crawler_config.py"]} |
44,925 | mxrch/ecsc_discord_bot | refs/heads/main | /bot.py | from datetime import datetime
from discord.ext import tasks
from discord import Client, Embed
from lib.scoreboard import ECSC_Stats
# Placeholders to fill in before running the bot.
CHANNEL_ID = "<The channel ID where you want the bot to send messages>"
BOT_TOKEN = "<Bot token>"
# Scoreboard refresh interval for the tasks.loop below.
EACH_X_MINUTES = 15
# Country name (as it appears on the ECSC scoreboard) -> Discord flag emoji.
FLAGS = {
"France": ":flag_fr:",
"Poland": ":flag_pl:",
"Italy": ":flag_it:",
"Romania": ":flag_ro:",
"Denmark": ":flag_dk:",
"Switzerland": ":flag_ch:",
"Cyprus": ":flag_cy:",
"Netherlands": ":flag_nl:",
"Austria": ":flag_at:",
"Belgium": ":flag_be:",
"Portugal": ":flag_pt:",
"Spain": ":flag_es:",
"Greece": ":flag_gr:",
"Slovenia": ":flag_si:",
"Ireland": ":flag_ie:",
"Slovakia": ":flag_sk:",
"Malta": ":flag_mt:",
"Germany": ":flag_de:"
}
class MyClient(Client):
"""Discord client that periodically posts the ECSC scoreboard as an embed."""
async def on_ready(self):
"""Resolve the target channel, then start the refresh loop."""
self.channel = await self.fetch_channel(CHANNEL_ID)
self.ecsc_stats = ECSC_Stats()
# Start loops
self.refresh_and_show_deltas.start()
await self.channel.send("Bonjour :french_bread:")
print(f'Logged on as {self.user}!')
@tasks.loop(minutes=EACH_X_MINUTES)
async def refresh_and_show_deltas(self):
"""Refresh the scoreboard and post it, but only when it changed."""
await self.ecsc_stats.refresh()
if self.ecsc_stats.old_scoreboard == self.ecsc_stats.scoreboard:
print(f"{datetime.now().strftime('[%d/%m/%Y %H:%M:%S]')} Stats are the same, not showing leaderboard..")
return
deltas = self.ecsc_stats.get_deltas()
text = ""
for country in deltas:
# Emoji by movement since the last refresh: square = steady,
# up-triangle = moved up, down-triangle = moved down.
emoji = ":black_small_square:"
if country.delta == 1:
emoji = ":small_red_triangle:"
elif country.delta == 2:
emoji = ":small_red_triangle_down:"
text += f"{emoji} {country.index}. {FLAGS[country.name]} {country.name} {':heart:' if country.name == 'France' else ''}, {country.points} points\n"
embed = Embed(title=":crossed_swords: Leaderboard :flag_eu:", description=text)
embed.set_footer(text="Allez la France")
await self.channel.send(embed=embed)
# NOTE(review): discord.py >= 2.0 requires Client(intents=...); this
# no-argument form only works on 1.x — confirm the pinned version.
client = MyClient()
client.run(BOT_TOKEN)
| {"/bot.py": ["/lib/scoreboard.py"]} |
44,926 | mxrch/ecsc_discord_bot | refs/heads/main | /lib/scoreboard.py | from copy import deepcopy
import httpx
from bs4 import BeautifulSoup as bs
class Country():
    """One scoreboard row: ranking position, country name and point total."""

    def __init__(self, index, name, points):
        self.index = index
        self.name = name
        self.points = points
        # Movement since the previous refresh: 0 = steady, 1 = up, 2 = down.
        self.delta = 0

    def __eq__(self, other):
        # Value equality over all attributes; defer to the other operand
        # for non-Country comparisons.
        if isinstance(other, Country):
            return vars(self) == vars(other)
        return NotImplemented
class ECSC_Stats():
"""Scrapes the ECSC leaderboard and tracks ranking changes between refreshes."""
def __init__(self):
self.as_client = httpx.AsyncClient()
# Previous and current snapshots, as lists of Country ordered by rank.
self.old_scoreboard = []
self.scoreboard = []
async def fetch_scoreboard(self):
"""Download and parse the leaderboard page into a ranked Country list."""
req = await self.as_client.get("https://ecsc.eu/leaderboard")
body = bs(req.text, 'html.parser')
countries = body.find("tbody").find_all("tr")
scoreboard = []
for nb,country in enumerate(countries):
# Rows are already rank-ordered; rank is 1-based.
index = nb + 1
name = country.find("span", {"class": "liveboard-country"}).text
points = int(country.find("span", {"class": "progress-number"}).text)
country = Country(index, name, points)
scoreboard.append(country)
return scoreboard
async def refresh(self):
"""Snapshot the current scoreboard as `old`, then fetch a new one."""
self.old_scoreboard = deepcopy(self.scoreboard)
self.scoreboard = await self.fetch_scoreboard()
def get_deltas(self):
"""Return a copy of the current scoreboard with each Country's `delta`
set from its rank movement versus the previous snapshot."""
deltas = deepcopy(self.scoreboard)
# First refresh: nothing to compare against, all deltas stay 0.
if not self.old_scoreboard:
return deltas
countries_indexes_cache = {}
for country in self.old_scoreboard:
countries_indexes_cache[country.name] = country.index
for nb,country in enumerate(self.scoreboard):
cached_index = countries_indexes_cache[country.name]
# Smaller index means a better rank, so index decreased => moved up.
if cached_index == country.index:
deltas[nb].delta = 0
elif cached_index > country.index:
deltas[nb].delta = 1
elif cached_index < country.index:
deltas[nb].delta = 2
return deltas | {"/bot.py": ["/lib/scoreboard.py"]} |
44,931 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /config/__init__.py | from .certificate import *
from .config_helper import *
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,932 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /crypto_utils/__init__.py | from .crypto_helper import *
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,933 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /actors/__init__.py | from .client import Client
from .merchant import Merchant
from .payment_gateway import PaymentGateway
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,934 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /config/certificate.py | from Crypto.PublicKey import RSA
from config.config_helper import *
def generate_certificates():
    """Generate a 2048-bit RSA keypair for the merchant and for the payment
    gateway, writing each private/public key as PEM to the paths named in
    the config.

    The merchant pair is written first, then the gateway pair — the same
    order (and log output) as the original duplicated code.
    """
    config = get_config_json()
    logger = get_logger()
    # One loop instead of two copy-pasted blocks per actor.
    for private_key_path, public_key_path in (
        (config['merchant_private_key'], config['merchant_public_key']),
        (config['payment_gateway_private_key'], config['payment_gateway_public_key']),
    ):
        key = RSA.generate(2048)
        with open(private_key_path, 'w') as f_private:
            f_private.write(key.exportKey('PEM').decode('UTF-8'))
        logger.debug("Created {}".format(private_key_path))
        with open(public_key_path, 'w') as f_public:
            f_public.write(key.publickey().exportKey('PEM').decode('UTF-8'))
        logger.debug("Created {}".format(public_key_path))
if __name__ == "__main__":
generate_certificates()
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,935 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /actors/merchant.py | import pickle
import random
import socket
from config.config_helper import get_config_json, get_logger
from crypto_utils.crypto_helper import encrypt_rsa_aes, decrypt_rsa_aes, verify_signature, sign
class Merchant:
    """Merchant actor: accepts client connections, issues a signed session id
    and relays payment orders to the payment gateway."""

    def __init__(self):
        self.config = get_config_json()
        self.logger = get_logger()
        self.address = self.config['merchant_address']
        self.port = self.config['merchant_port']
        self.password = self.config['merchant_aes_password']
        self.private_key = self.config['merchant_private_key']

    def keep_selling(self):
        """Accept clients forever, running identity confirmation then the
        sale protocol once per connection."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.address, self.port))
        s.listen(1)
        while True:
            connection, _ = s.accept()
            sid, client_pubkey = self.confirm_identity(connection)
            self.sell(connection, client_pubkey, sid)
            connection.close()

    def confirm_identity(self, connection):
        """Steps 1-2: receive the client's public key, reply with a freshly
        generated, signed session id. Returns (sid, client_public_key)."""
        # SECURITY NOTE: pickle.loads on network data is unsafe outside this
        # homework setting.
        message = connection.recv(4096)
        self.logger.debug("STEP 1: Received data from client!")
        key, password = pickle.loads(message)
        decrypted_key = decrypt_rsa_aes(key, password, self.config['merchant_private_key'])
        sid = self.get_sid()
        sid_signature = sign(str(sid).encode('UTF-8'), self.config['merchant_private_key'])
        message = '{},{}'.format(
            sid,
            sid_signature.hex()
        )
        sid_message = encrypt_rsa_aes(message, self.config['merchant_aes_password'], decrypted_key)
        serialized_sid_message = pickle.dumps(sid_message)
        connection.send(serialized_sid_message)
        self.logger.debug("STEP 2: Sending data to client!")
        return sid, decrypted_key

    def sell(self, client_connection, client_pubkey, sid):
        """Steps 3-6: validate the client's payment order, forward it to the
        payment gateway, and relay the gateway's signed response back."""
        message = client_connection.recv(4096)
        self.logger.debug("STEP 3: Received data from client!")
        payment, password = pickle.loads(message)
        decrypted_packet = decrypt_rsa_aes(payment, password, self.config['merchant_private_key'])
        payment_message, decrypted_password, order_desc, from_sid, amount, signature = decrypted_packet.split(",")
        validation = verify_signature(
            '{},{},{}'.format(order_desc, sid, amount).encode('UTF-8'),
            bytes.fromhex(signature),
            client_pubkey)
        self.logger.debug('STEP 3: Payment order is valid: {}'.format(validation))
        self.logger.debug('STEP 3: Sid is valid: {}'.format(sid == int(from_sid)))
        # Configurable fault injection for testing the resolution sub-protocol.
        if self.config['resolutions']['error_step_4']:
            self.logger.debug('STEP 4: {} Error occurred!'.format(sid))
            return
        connection_gp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection_gp.connect((self.config['payment_gateway_address'], self.config['payment_gateway_port']))
        signature = sign(
            '{},{},{}'.format(from_sid, client_pubkey, amount).encode("UTF-8"),
            self.config['merchant_private_key'])
        message_gp = '{},{},{}'.format(payment_message, decrypted_password, signature.hex())
        encrypted_message_gp = encrypt_rsa_aes(message_gp, self.password, self.config['payment_gateway_public_key'])
        serialized_encrypted_message_gp = pickle.dumps(encrypted_message_gp)
        connection_gp.send(serialized_encrypted_message_gp)
        self.logger.debug('STEP 4: {} sent data to PG!'.format(sid))
        response = connection_gp.recv(4096)
        self.logger.debug('STEP 5: {} received data to PG!'.format(sid))
        response_message, password = pickle.loads(response)
        decrypted_response_message = decrypt_rsa_aes(response_message, password, self.private_key)
        pg_code, pg_sid, pg_amount, pg_nonce, pg_signature = tuple(decrypted_response_message.split(','))
        validation = verify_signature(
            # BUG FIX: the format string had three placeholders for four
            # arguments, silently dropping the nonce, while the gateway signs
            # "code,sid,amount,nonce" — so verification could never succeed.
            '{},{},{},{}'.format(pg_code, pg_sid, pg_amount, pg_nonce).encode('UTF-8'),
            bytes.fromhex(pg_signature),
            self.config['payment_gateway_public_key'])
        self.logger.debug('STEP 5: {} Response code, sid, amount and nonce are valid: {}'.format(sid, validation))
        self.logger.debug('STEP 8: {} {}'.format(sid, self.config['payment_gateway_response_code'][str(pg_code)]))
        if self.config['resolutions']['error_step_6']:
            self.logger.debug('STEP 6: {} Error occurred!'.format(sid))
            return
        encrypted_client_response = encrypt_rsa_aes(decrypted_response_message, self.password, client_pubkey)
        serialized_client_response = pickle.dumps(encrypted_client_response)
        client_connection.send(serialized_client_response)
        self.logger.debug('STEP 6: Sid {} sent data to client!'.format(sid))

    @staticmethod
    def get_sid():
        """Return a random session id in [0, 10000]."""
        return random.randint(0, 10000)
if __name__ == "__main__":
merchant = Merchant()
merchant.keep_selling()
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,936 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /main.py | import os
import subprocess
import win32api
import config
if __name__ == "__main__":
    # Generate fresh RSA keypairs, then launch the three protocol actors
    # with the venv interpreter; wait for the client to finish and tear
    # the servers down.
    config.generate_certificates()
    module_directory = os.path.dirname(os.path.realpath(__file__))
    python_venv_path = os.path.join(module_directory, r'venv\Scripts\python.exe')
    pg_process = subprocess.Popen([python_venv_path, r'actors\payment_gateway.py'], shell=False)
    mt_process = subprocess.Popen([python_venv_path, r'actors\merchant.py'], shell=False)
    ct_process = subprocess.Popen([python_venv_path, r'actors\client.py'], shell=False)
    ct_process.wait()
    # Popen.terminate() calls TerminateProcess on Windows itself — no need
    # to poke the private _handle attribute or depend on pywin32 here.
    mt_process.terminate()
    pg_process.terminate()
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,937 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /crypto_utils/crypto_helper.py | import hashlib
import os
from Crypto.Cipher import AES
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA1
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_PSS
from Crypto.Util.Padding import pad, unpad
def sign(message, rsa_key):
"""Sign `message` (bytes) with RSASSA-PSS over SHA-1 and return the signature bytes.
`rsa_key` is either a path to a PEM file or the PEM text itself."""
if os.path.exists(rsa_key):
with open(rsa_key, 'r') as f:
content = f.read()
else:
# Not a file path: treat the argument as the key material itself.
content = rsa_key
key = RSA.importKey(content)
protocol = PKCS1_PSS.new(key)
message_hash = SHA1.new(message)
signature = protocol.sign(message_hash)
return signature
def verify_signature(message, signature, rsa_key):
"""Return True iff `signature` is a valid PSS/SHA-1 signature of `message`.
`rsa_key` is either a path to a PEM file or the PEM text itself."""
if os.path.exists(rsa_key):
with open(rsa_key, 'r') as f:
content = f.read()
else:
content = rsa_key
key = RSA.importKey(content)
protocol = PKCS1_PSS.new(key)
message_hash = SHA1.new(message)
try:
protocol.verify(message_hash, signature)
except (ValueError, TypeError) as e:
# pycryptodome's verify raises on mismatch; report failure as False.
print(e)
return False
return True
def encrypt_rsa(plaintext, rsa_key):
"""RSA-OAEP encrypt `plaintext` (bytes) with the given PEM key (path or text)."""
if os.path.exists(rsa_key):
with open(rsa_key, 'r') as f:
content = f.read()
else:
content = rsa_key
key = RSA.importKey(content)
protocol = PKCS1_OAEP.new(key)
ciphertext = protocol.encrypt(plaintext)
return ciphertext
def decrypt_rsa(ciphertext, rsa_key):
"""RSA-OAEP decrypt `ciphertext` with the given private PEM key (path or text)."""
if os.path.exists(rsa_key):
with open(rsa_key, 'r') as f:
content = f.read()
else:
content = rsa_key
key = RSA.importKey(content)
protocol = PKCS1_OAEP.new(key)
plaintext = protocol.decrypt(ciphertext)
return plaintext
def encrypt_aes_ecb(key, plaintext, mode, block_size):
"""Pad `plaintext` (str) to `block_size` and AES-encrypt it under `key`.
SECURITY NOTE: callers use AES.MODE_ECB, which leaks repeated plaintext
blocks — acceptable for this exercise, not for production."""
cipher = AES.new(key, mode)
padded_text = pad(plaintext.encode(), block_size)
encrypted_text = cipher.encrypt(padded_text)
return encrypted_text
def decrypt_aes_ecb(key, ciphertext, mode, block_size):
"""AES-decrypt `ciphertext`, strip the padding and return the UTF-8 string."""
cipher = AES.new(key, mode)
unpadded_plaintext = unpad(cipher.decrypt(ciphertext), block_size)
decoded_plaintext = unpadded_plaintext.decode('UTF-8')
return decoded_plaintext
def encrypt_rsa_aes(message, aes_password, rsa_key):
"""Hybrid encryption: AES-encrypt `message` under a key derived from
`aes_password` (SHA-256), RSA-encrypt that key, and return
(encrypted_message, encrypted_key)."""
encoded_aes_password = aes_password.encode('UTF-8')
key = hashlib.sha256(encoded_aes_password).digest()
encrypted_message = encrypt_aes_ecb(key, message, AES.MODE_ECB, 32)
encrypted_key = encrypt_rsa(key, rsa_key)
return encrypted_message, encrypted_key
def decrypt_rsa_aes(encrypted_message, encrypted_password, rsa_key):
"""Inverse of encrypt_rsa_aes: recover the AES key via RSA, then decrypt
the message and return it as a string."""
aes_key = decrypt_rsa(encrypted_password, rsa_key)
decrypted_message = decrypt_aes_ecb(aes_key, encrypted_message, AES.MODE_ECB, 32)
return decrypted_message
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,938 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /actors/payment_gateway.py | import pickle
import socket
from config.config_helper import get_config_json, get_logger
from crypto_utils.crypto_helper import encrypt_rsa_aes, decrypt_rsa_aes, verify_signature, sign
class PaymentGateway:
def __init__(self):
self.config = get_config_json()
self.logger = get_logger()
self.address = self.config['payment_gateway_address']
self.port = self.config['payment_gateway_port']
self.private_key = self.config['payment_gateway_private_key']
self.pg_data = self.config['payment_gateways'][0]
self.password = self.config['payment_gateway_aes_password']
def keep_making_transactions(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.address, self.port))
s.listen(1)
code = ''
sid = ''
amount = ''
nonce = ''
while True:
connection, _ = s.accept()
data = connection.recv(4096)
message, password = pickle.loads(data)
decrypted_message = decrypt_rsa_aes(message, password, self.private_key)
if len(decrypted_message.split(',')) == 3: # merchant
code, sid, amount, nonce = self.exchange_sub_protocol(connection, decrypted_message)
else: # client
self.resolution_sub_protocol(connection, decrypted_message, code, sid, amount, nonce)
connection.close()
def exchange_sub_protocol(self, connection, decrypted_message):
payment_message, password, amount_signature = tuple(decrypted_message.split(','))
decrypted_payment_message = decrypt_rsa_aes(
bytes.fromhex(payment_message),
bytes.fromhex(password),
self.private_key)
card_number, expiration_date, code, sid, amount, client_pubkey, nonce, client_signature = \
tuple(decrypted_payment_message.split(","))
self.logger.debug('STEP 4: {} received data from merchant!'.format(sid))
amount_validation = verify_signature(
'{},{},{}'.format(sid, client_pubkey, amount).encode('UTF-8'),
bytes.fromhex(amount_signature),
self.config['merchant_public_key'])
self.logger.debug('STEP 4: {} Sid and clientPubK and amount are valid {}'.format(sid, amount_validation))
client_data = '{},{},{},{},{},{},{}'.format(
card_number,
expiration_date,
code,
sid,
amount,
client_pubkey,
nonce
)
client_data_validation = verify_signature(
client_data.encode('UTF-8'),
bytes.fromhex(client_signature),
client_pubkey)
self.logger.debug('STEP 4: {} client personal data is valid {}'.format(sid, client_data_validation))
if (card_number, expiration_date, code) != (
self.pg_data['card_number'],
self.pg_data['card_expiration_date'],
self.pg_data['code']) or int(amount) < 0:
response_code = 1
elif int(amount) > int(self.pg_data['amount']):
response_code = 2
else:
response_code = 3
self.logger.debug('STEP 4: {} {}'.format(sid, self.config['payment_gateway_response_code'][str(response_code)]))
response_signature = sign(
'{},{},{},{}'.format(response_code, sid, amount, nonce).encode('UTF-8'), self.private_key)
encrypted_message = encrypt_rsa_aes(
'{},{},{},{},{}'.format(response_code, sid, amount, nonce, response_signature.hex()),
self.password,
self.config['merchant_public_key'])
serialized_encrypted_message = pickle.dumps(encrypted_message)
connection.send(serialized_encrypted_message)
self.logger.debug('STEP 5: {} sent data to merchant!'.format(sid))
return response_code, sid, amount, nonce
def resolution_sub_protocol(self, connection, decrypted_message, exchange_return_code, sid, amount, nonce):
    """Run the Resolution sub-protocol (steps 7-8).

    Invoked when the client contacts the gateway directly after a merchant
    timeout; confirms whether the (sid, amount, nonce) transaction reached
    the gateway and returns the signed outcome to the client.

    :param connection: open socket to the client.
    :param decrypted_message: ``sid,amount,nonce,client_pubkey_hex,signature_hex``.
    :param exchange_return_code: verdict from exchange_sub_protocol.
    :param sid: session id recorded during the exchange phase.
    :param amount: amount recorded during the exchange phase.
    :param nonce: nonce recorded during the exchange phase.
    """
    self.logger.debug('STEP 7: Resolution sub-protocol initiated!')
    self.logger.debug('STEP 7: Received message from client')
    client_sid, client_amount, client_nonce, client_pubkey, signature = tuple(decrypted_message.split(','))
    # BUGFIX: the signature travels hex-encoded (cf. every other
    # verify_signature call site) and must be decoded before verification;
    # the original passed the raw hex string.
    validation = verify_signature(
        '{},{},{},{}'.format(client_sid, client_amount, client_nonce, client_pubkey).encode('UTF-8'),
        bytes.fromhex(signature),
        bytes.fromhex(client_pubkey))
    self.logger.debug('STEP 7: {} Sid, amount, nonce and client public key are valid: {}'.format(sid, validation))
    # 0 means "transaction never reached the gateway".
    resolution_response = 0
    if (client_sid, client_amount, client_nonce) == (sid, amount, nonce):
        self.logger.debug('STEP 7: This transaction exists!')
        resolution_response = exchange_return_code
    else:
        self.logger.debug('STEP 7: This transaction has not reached PG!')
    response_message = '{}, {}'.format(resolution_response, sid)
    response_message_signature = sign(response_message.encode("UTF-8"), self.private_key)
    encrypted_response_message = encrypt_rsa_aes(
        '{},{}'.format(response_message, response_message_signature.hex()),
        self.password,
        bytes.fromhex(client_pubkey))
    serialized_encrypted_response_message = pickle.dumps(encrypted_response_message)
    connection.send(serialized_encrypted_response_message)
    self.logger.debug('STEP 8: Sent message to client!')
if __name__ == "__main__":
    # Run the payment gateway as a standalone server process.
    PaymentGateway().keep_making_transactions()
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,939 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /config/config_helper.py | import json
import logging
import os
import sys
def get_config_json():
    """Load the JSON config file next to this module.

    All file-path entries are made absolute relative to this module's
    directory so callers can run from any working directory.
    """
    base_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(base_dir, 'config'), 'r') as handle:
        config = json.load(handle)
    # Keys whose values are paths relative to this module's directory.
    for key in ('merchant_public_key',
                'merchant_private_key',
                'payment_gateway_public_key',
                'payment_gateway_private_key',
                'log_file'):
        config[key] = os.path.join(base_dir, config[key])
    return config
def get_logger():
    """Return the shared root logger, configuring it on first use.

    Subsequent calls are no-ops (the handler check below guards against
    duplicate configuration). Exits the process if the config file is
    missing.
    """
    logger = logging.getLogger()
    if logger.handlers:  # already initialized by a previous call
        return logger
    try:
        config = get_config_json()
    except FileNotFoundError as err:
        print(err)
        sys.exit()
    # Ensure the directory hierarchy for the log file exists.
    log_dir = os.path.dirname(config['log_file'])
    if log_dir != '' and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logging.basicConfig(
        filename=config['log_file'],
        format='%(asctime)s.%(msecs)03d %(process)6d %(funcName)20s %(lineno)4i: %(levelname)8s %(message)s')
    logger.setLevel(logging.DEBUG)
    # Mirror everything to stderr as well as the log file.
    logging.getLogger().addHandler(logging.StreamHandler())
    return logger
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,940 | gheorghitamutu/SmartCardsApplicationsHomework | refs/heads/master | /actors/client.py | # SmartCardsApplicationsHomework\venv\Lib\site-packages\pycrypto-2.6.1-py3.7-win-amd64.egg\Crypto\Random\OSRNG\nt.py
# change import winrandom to from . import winrandom
# https://stackoverflow.com/questions/24804829/no-module-named-winrandom-when-using-pycrypto
import pickle
import socket
from Crypto.PublicKey import RSA
from config.config_helper import get_config_json, get_logger
from crypto_utils.crypto_helper import encrypt_rsa_aes, decrypt_rsa_aes, verify_signature, sign
class Client:
    """Customer side of the three-party (client/merchant/gateway) protocol.

    Generates a fresh RSA session key pair, registers it with the merchant
    to obtain a session id (steps 1-2), sends the payment order (step 3),
    and awaits the gateway's verdict via the merchant (step 6). On timeout
    it falls back to the resolution sub-protocol directly against the
    payment gateway (steps 7-8).
    """

    def __init__(self):
        self.config = get_config_json()
        self.logger = get_logger()
        # Fresh 2048-bit RSA session key pair identifying this client.
        self.rsa = RSA.generate(2048)
        self.rsa_private_encoded = self.rsa.export_key()
        self.rsa_pub = self.rsa.publickey()
        self.rsa_pub_encoded = self.rsa_pub.export_key()
        self.nonce = 0  # https://security.stackexchange.com/questions/3001/what-is-the-use-of-a-client-nonce
        self.transaction_data = self.config['clients'][0]
        self.password = self.config['client_aes_password']
        self.sid = None  # acquired from merchant

    def buy(self):
        """Connect to the merchant and run one complete purchase."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.config['client_address'], self.config['merchant_port']))
        self.confirm_identity(s)
        self.make_transaction(s)

    def confirm_identity(self, connection):
        """Steps 1-2: send our public key, receive the merchant-signed sid."""
        data = encrypt_rsa_aes(self.rsa_pub_encoded.decode('UTF-8'), self.password, self.config['merchant_public_key'])
        serialized_data = pickle.dumps(data)
        connection.send(serialized_data)
        self.logger.debug('STEP 1: Sent data to merchant!')
        serialized_sid_packet = connection.recv(4096)
        self.logger.debug('STEP 2: Received data from merchant!')
        message, key = pickle.loads(serialized_sid_packet)
        self.sid, sid_signature = decrypt_rsa_aes(message, key, self.rsa_private_encoded).split(",", 1)
        verification = verify_signature(
            self.sid.encode('UTF-8'), bytes.fromhex(sid_signature), self.config['merchant_public_key'])
        self.logger.debug('STEP 2: {} is valid: {}'.format(self.sid, verification))

    def make_transaction(self, connection):
        """Steps 3 and 6: send the payment order, await the gateway verdict.

        The card details go in a sub-message encrypted for the payment
        gateway only; the merchant sees just the order information. On a
        5s timeout waiting for the verdict, falls back to
        :meth:`reach_resolution`.
        """
        payment_info = '{},{},{},{},{},{},{}'.format(
            self.transaction_data['card_number'],
            self.transaction_data['card_expiration_date'],
            self.transaction_data['code'],
            self.sid,
            self.transaction_data['amount'],
            self.rsa_pub_encoded.decode("UTF-8"),
            self.nonce
        )
        payment_info_signature = sign(payment_info.encode("UTF-8"), self.rsa_private_encoded)
        # Only the gateway can open this part (encrypted with its key).
        payment_message, key = \
            encrypt_rsa_aes(
                '{},{}'.format(payment_info, payment_info_signature.hex()),
                self.password,
                self.config['payment_gateway_public_key'])
        payment_order_info = '{},{},{}'.format(
            self.transaction_data['order'],
            self.sid,
            self.transaction_data['amount']
        )
        payment_order_signature = sign(payment_order_info.encode("UTF-8"), self.rsa_private_encoded)
        packet = '{},{},{}'.format(
            payment_message.hex(),
            key.hex(),
            '{},{}'.format(payment_order_info, payment_order_signature.hex())
        )
        encrypted_packet = encrypt_rsa_aes(packet, self.password, self.config['merchant_public_key'])
        serialized_encrypted_packet = pickle.dumps(encrypted_packet)
        connection.send(serialized_encrypted_packet)
        self.logger.debug('STEP 3: {} Sent data to merchant!'.format(self.sid))
        try:
            connection.settimeout(5)
            response = connection.recv(4096)
            self.logger.debug('STEP 6: {} Received data from merchant!'.format(self.sid))
            connection.settimeout(None)
        except Exception as e:
            self.logger.exception(e)
            self.reach_resolution(self.transaction_data['amount'])
            return
        deserialized_response, aes_key = pickle.loads(response)
        # Use the cached private key (the original re-exported it via the
        # legacy exportKey() accessor — same key, inconsistent style).
        decrypted_response = decrypt_rsa_aes(deserialized_response, aes_key, self.rsa_private_encoded)
        code, sid, amount, nonce, signature = tuple(decrypted_response.split(','))
        message = '{},{},{},{}'.format(code, sid, amount, nonce)
        are_valid = verify_signature(
            message.encode('UTF-8'),
            bytes.fromhex(signature),
            self.config['payment_gateway_public_key'])
        self.logger.debug('STEP 6: {} Response code, sid, amount and nonce are valid {}'.format(sid, are_valid))
        self.logger.debug('STEP 6: {} Sid and nonce are correct {}'.format(
            str(self.sid), str(self.sid) == sid and str(self.nonce) == nonce))
        self.logger.debug('STEP 6: {} {}'.format(sid, self.config['payment_gateway_response_code'][str(code)]))

    def reach_resolution(self, amount):
        """Steps 7-8: query the payment gateway directly after a timeout.

        :param amount: amount of the transaction being resolved.
        """
        self.logger.debug('STEP 7: {} Timeout occurred!'.format(self.sid))
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.config['payment_gateway_address'], self.config['payment_gateway_port']))
        message = '{},{},{},{}'.format(
            self.sid,
            amount,
            self.nonce,
            self.rsa_pub_encoded.hex()
        )
        # BUGFIX: signing requires the PRIVATE key; the original signed
        # with self.rsa_pub_encoded, which cannot produce a signature.
        message_signature = sign(message.encode('UTF-8'), self.rsa_private_encoded)
        encrypted_message = \
            encrypt_rsa_aes(
                '{},{}'.format(message, message_signature.hex()),
                self.password,
                self.config['payment_gateway_public_key'])
        serialized_encrypted_message = pickle.dumps(encrypted_message)
        s.send(serialized_encrypted_message)
        self.logger.debug('STEP 7: {} Sent message to PG!'.format(self.sid))
        response = s.recv(4096)
        self.logger.debug('STEP 8: {} Received message from PG!'.format(self.sid))
        message, key = pickle.loads(response)
        # BUGFIX: decryption likewise requires the private key; the
        # original passed the public key here.
        decrypted_message = decrypt_rsa_aes(message, key, self.rsa_private_encoded)
        code, sid, response_signature = tuple(decrypted_message.split(','))
        validation = \
            verify_signature(
                '{},{}'.format(code, sid).encode('UTF-8'),
                bytes.fromhex(response_signature),
                self.config['payment_gateway_public_key'])
        self.logger.debug('STEP 8: {} Response code and sid are valid {}'.format(sid, validation))
        self.logger.debug('STEP 8: {} {}'.format(sid, self.config['payment_gateway_response_code'][str(code)]))
if __name__ == "__main__":
    # Run a single purchase as a standalone client process.
    Client().buy()
| {"/config/__init__.py": ["/config/certificate.py", "/config/config_helper.py"], "/crypto_utils/__init__.py": ["/crypto_utils/crypto_helper.py"], "/actors/__init__.py": ["/actors/client.py", "/actors/merchant.py", "/actors/payment_gateway.py"], "/config/certificate.py": ["/config/config_helper.py"], "/actors/merchant.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/main.py": ["/config/__init__.py"], "/actors/payment_gateway.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"], "/actors/client.py": ["/config/config_helper.py", "/crypto_utils/crypto_helper.py"]} |
44,946 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/posts/views.py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.http import Http404
from django.views import generic
from braces.views import SelectRelatedMixin
from django.contrib.messages.views import SuccessMessageMixin
from . import models
from . import forms
from django.contrib import messages
from django.contrib.auth import get_user_model
from groups.models import GroupMember,Group
from .forms import PostForm
User = get_user_model()
class PostList(SelectRelatedMixin,generic.ListView):
    """List view over all posts; joins the author row via select_related."""
    # NOTE(review): template_name lacks the usual app prefix and '.html'
    # suffix (cf. UserPosts below) — presumably Django's default
    # 'posts/post_list.html' is intended; confirm against the templates dir.
    model = models.Post
    select_related = ('user',)
    template_name = 'post_list'
class UserPosts(SelectRelatedMixin,generic.ListView):
    """Posts belonging to the currently logged-in user."""
    model = models.Post
    select_related = ('user',)
    template_name = 'posts/user_post_list.html'

    def get_queryset(self):
        # Resolve the logged-in user's account (request.user is coerced to
        # its username string by __iexact); 404 when it cannot be found.
        try:
            self.post_user = User.objects.get(username__iexact=self.request.user)
        except User.DoesNotExist:
            raise Http404
        return self.post_user.posts.all()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['post_user'] = self.post_user
        return context
class PostDetail(SelectRelatedMixin,generic.DetailView):
    """Single post, looked up among the posts of the user named in the URL."""
    model = models.Post
    select_related = ('user','group')
    def get_queryset(self):
        # Restrict the lookup to the author given by the 'username' URL
        # kwarg (case-insensitive), so a pk belonging to another user 404s.
        queryset = super().get_queryset()
        return queryset.filter(user__username__iexact=self.kwargs.get('username'))
class CreatePost(LoginRequiredMixin,SelectRelatedMixin,generic.CreateView):
    """Create a post; PostForm limits the group choices to the user's groups."""
    model = models.Post
    form_class = PostForm

    def get_form_kwargs(self):
        """Pass the requesting user to PostForm (its first positional arg)."""
        # Zero-arg super(), consistent with the other views in this module.
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def form_valid(self, form):
        # Attach the author before the single save performed by
        # CreateView.form_valid(); the original saved the object twice
        # (once here, once in super().form_valid()).
        form.instance.user = self.request.user
        return super().form_valid(form)
class DeletePost(LoginRequiredMixin,SelectRelatedMixin,generic.DeleteView):
    """Delete one of the logged-in user's own posts, then go to the post list."""
    model = models.Post
    select_related = ('user','group')
    success_url = reverse_lazy('posts:all')

    def get_queryset(self):
        # Only the author may delete a post.
        return super().get_queryset().filter(user_id=self.request.user.id)

    def delete(self, *args, **kwargs):
        messages.success(self.request, 'deleted')
        return super().delete(*args, **kwargs)
| {"/simplesocial/posts/views.py": ["/simplesocial/posts/forms.py"]} |
44,947 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/posts/templatetags/template_tags.py | from django import template
register = template.Library()
@register.simple_tag
def flag(value="True"):
    """Template tag that returns ``value`` unchanged (default: "True")."""
    return value
44,948 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/groups/templatetags/template_tags.py | '''from django import template
from ..models import Group,GroupMember
register = template.Library()
@register.inclusion_tag('grouped_list.html')
def Groups(user):
print(user)
get_user_groups = Group.objects.filter(members__user__username__contains=user)
print(get_user_groups)
get_other_groups = Group.objects.all()
return {'get_user_groups':get_user_groups,'get_other_groups':get_other_groups}''' | {"/simplesocial/posts/views.py": ["/simplesocial/posts/forms.py"]} |
44,949 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/groups/admin.py | from django.contrib import admin
from . import models
# Register your models here.
class GroupMemberInline(admin.StackedInline):
    # Lets memberships be edited inline on the Group admin page.
    model = models.GroupMember
class GroupAdmin(admin.ModelAdmin):
    # Group admin with its memberships editable inline.
    inlines = [GroupMemberInline,]
# Expose Group (with inline members) and GroupMember in the Django admin.
admin.site.register(models.Group,GroupAdmin)
admin.site.register(models.GroupMember)
44,950 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/posts/forms.py | from django import forms
from .models import Post
from groups.models import Group,GroupMember
class PostForm(forms.ModelForm):
    """Post creation form that restricts 'group' to the user's own groups."""

    class Meta:
        model = Post
        fields = ['message','group']

    def __init__(self, user, *args, **kwargs):
        """``user`` is the requesting User (see CreatePost.get_form_kwargs)."""
        super().__init__(*args, **kwargs)
        # BUGFIX: the original matched memberships with
        # user__username__contains, so e.g. user 'bob' also picked up the
        # groups of 'bobby'; filter on the user object itself instead.
        # Filtering groups by pk (rather than by name) also avoids
        # accidental name collisions.
        memberships = GroupMember.objects.filter(user=user).select_related('group')
        self.fields['group'].queryset = Group.objects.filter(
            pk__in=memberships.values_list('group', flat=True))
44,951 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/posts/migrations/0003_auto_20201110_1953.py | # Generated by Django 3.1.1 on 2020-11-10 14:23
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the unique_together constraint
    # from the Post model. Do not edit by hand.
    dependencies = [
        ('posts', '0002_remove_post_message_html'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='post',
            unique_together=set(),
        ),
    ]
| {"/simplesocial/posts/views.py": ["/simplesocial/posts/forms.py"]} |
44,952 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/simplesocial/views.py | from django.views.generic import TemplateView
from django.views.generic import View
from groups.models import GroupMember,Group
class TestPage(TemplateView):
    """Static page rendering test.html."""
    template_name = 'test.html'
class ThanksPage(TemplateView):
    """Static page rendering thanks.html."""
    template_name = 'thanks.html'
class HomePage(TemplateView):
    """Site landing page rendering index.html."""
    template_name = 'index.html'
| {"/simplesocial/posts/views.py": ["/simplesocial/posts/forms.py"]} |
44,953 | mohmedhsuhail/SocialMediaApp | refs/heads/master | /simplesocial/groups/views.py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin,PermissionRequiredMixin
from django.urls import reverse
from django.views import generic
from django.shortcuts import get_object_or_404
from django.contrib import messages
from groups.models import Group,GroupMember
from django.db import IntegrityError
from . import models
from braces.views import PrefetchRelatedMixin
from django.contrib.auth import get_user_model
from django.http import HttpResponseRedirect
from django.urls import reverse
User = get_user_model()
# Create your views here.
class CreateGroup(LoginRequiredMixin,generic.CreateView):
    """Create a group and make the requesting user its admin."""
    fields = ('name','description')
    model = Group

    def form_valid(self, form):
        # request.user already IS the User instance; the original
        # re-fetched it by username and also saved the form twice
        # (form.save() here plus the save inside CreateView.form_valid()).
        form.instance.admin = self.request.user
        return super().form_valid(form)
class SingleGroup(PrefetchRelatedMixin,generic.DetailView):
    """Group detail page; bulk-prefetches posts and memberships."""
    model = Group
    prefetch_related = ['posts','members']
class ListGroups(PrefetchRelatedMixin,generic.ListView):
    """All groups; bulk-prefetches posts and memberships."""
    model = Group
    prefetch_related = ['posts','members']
class JoinGroup(LoginRequiredMixin,generic.RedirectView):
    """Add the logged-in user to a group, then bounce back to its page."""

    def get_redirect_url(self, *args, **kwargs):
        return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})

    def get(self, request, *args, **kwargs):
        group = get_object_or_404(Group, slug=self.kwargs.get('slug'))
        # get_or_create reports whether a membership already existed,
        # replacing the create-and-catch-IntegrityError form.
        _membership, created = GroupMember.objects.get_or_create(
            user=self.request.user, group=group)
        if created:
            messages.success(self.request, 'You are now a member')
        else:
            messages.warning(self.request, "Warning. Already a member")
        return super().get(request, *args, **kwargs)
class LeaveGroup(LoginRequiredMixin,generic.RedirectView):
    """Remove the logged-in user from a group, then bounce back to its page."""

    def get_redirect_url(self, *args, **kwargs):
        return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})

    def get(self, request, *args, **kwargs):
        try:
            # Direct .get(...) replaces the round-about .filter(...).get()
            # of the original; DoesNotExist semantics are identical.
            membership = models.GroupMember.objects.get(
                user=self.request.user,
                group__slug=self.kwargs.get('slug')
            )
        except models.GroupMember.DoesNotExist:
            messages.warning(self.request, 'Sorry. You are not in this group')
        else:
            membership.delete()
            messages.success(self.request, "You have successfully left this group")
        return super().get(request, *args, **kwargs)
def DeleteGroup(request, pk):
    """Delete a group if (and only if) the requester is its admin.

    Always returns a redirect to the group list; the original returned
    ``None`` (a server error) for non-GET requests and for non-admins.
    """
    # NOTE(review): destructive action on GET — should really be POST-only
    # with CSRF protection; kept as GET because templates may link to it.
    if request.method == 'GET':
        group = get_object_or_404(Group, pk=pk)
        if group.admin == request.user:
            group.delete()
        else:
            messages.warning(request, 'Only the group admin can delete this group')
    return HttpResponseRedirect(reverse('groups:all'))
| {"/simplesocial/posts/views.py": ["/simplesocial/posts/forms.py"]} |
44,982 | hyyc116/WOS_data_processing | refs/heads/master | /basic_attr_fetcher.py | #coding:utf-8
from basic_config import *
def subjnum_dis():
    """Plot the distribution of subject counts per paper.

    Reads data/pid_subjects.json and writes fig/subj_num_dis.png.
    """
    pid_subjs = json.loads(open('data/pid_subjects.json').read())
    # Histogram: number of subjects -> number of papers with that many.
    num_dis = defaultdict(int)
    for subjects in pid_subjs.values():
        num_dis[len(subjects)] += 1
    xs = sorted(num_dis)
    ys = [num_dis[num] for num in xs]
    plt.figure(figsize=(5, 4))
    plt.plot(xs, ys)
    plt.tight_layout()
    plt.savefig('fig/subj_num_dis.png', dpi=400)
def fetch_subjects():
    """Dump per-paper subjects and map them to top-level subjects.

    Writes:
        data/pid_subjects.json    pid -> list of raw WoS subjects
        data/pid_topsubjs.json    pid -> list of distinct top-level subjects
        data/missing_subjects.txt raw subjects with no top-level mapping
    """
    pid_subjects = defaultdict(list)
    ## query database wos_summary
    query_op = dbop()
    num_with_subject = 0
    sql = 'select id,subject from wos_core.wos_subjects'
    progress = 0
    for pid, subject in query_op.query_database(sql):
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress {:}, {:} papers within subjects ...'.format(
                progress, num_with_subject))
        pid_subjects[pid].append(subject)
    query_op.close_db()
    logging.info('{:} papers have subject'.format(len(pid_subjects.keys())))
    open('data/pid_subjects.json', 'w').write(json.dumps(pid_subjects))

    # Load the raw-subject -> top-level-subject mapping from subjects.txt,
    # where a line starting with '=====' introduces a new top-level subject
    # and the following lines are its raw subjects.
    logging.info('loading mapping relations to top subject ...')
    top_subject = None
    subject_2_top = {}
    for line in open('subjects.txt'):
        line = line.strip()
        if line == '' or line is None:
            continue
        if line.startswith('====='):
            top_subject = line[5:]
        else:
            subject_2_top[line.lower()] = top_subject
            if ',' in line.lower():
                for subj in line.split(','):
                    # BUGFIX: strip each comma-separated part so the key
                    # matches the stripped+lowered form used for lookups.
                    subject_2_top[subj.strip().lower()] = top_subject
            if '&' in line.lower():
                # BUGFIX: the '&'-less alias must be lower-cased too,
                # otherwise the .lower() lookups below can never hit it.
                subject_2_top[line.lower().replace('&', '')] = top_subject
    logging.info('%d subjects are loaded ..' % len(subject_2_top.keys()))

    ## map every paper onto its set of top-level subjects
    logging.info('paper top subjs ....')
    nums_top_subjs = []
    _ids_top_subjs = {}
    progress = 0
    error_subjs = []
    topsubj_num = defaultdict(int)
    # BUGFIX: total_paper_num was an undefined name (NameError whenever a
    # progress message fired); define it from the data just collected.
    total_paper_num = len(pid_subjects)
    for _id in pid_subjects.keys():
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress %d/%d ...' % (progress, total_paper_num))
        top_subjs = []
        for subj in pid_subjects[_id]:
            top_subj = subject_2_top.get(subj.strip().lower(), None)
            if top_subj is None:
                error_subjs.append(subj)
                logging.info('error subj %s' % subj)
            else:
                top_subjs.append(top_subj)
                topsubj_num[top_subj] += 1
        top_subjs = list(set(top_subjs))
        nums_top_subjs.append(len(top_subjs))
        _ids_top_subjs[_id] = top_subjs
    open('data/missing_subjects.txt',
         'w').write('\n'.join(list(set(error_subjs))))
    open('data/pid_topsubjs.json', 'w').write(json.dumps(_ids_top_subjs))
    logging.info('pid_topsubjs.json saved')
def fetch_pubyear():
    """Dump paper id -> publication year to data/pid_pubyear.json."""
    pid_pubyear = {}
    query_op = dbop()
    progress = 0
    for pid, pubyear in query_op.query_database(
            'select id,pubyear from wos_core.wos_summary'):
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress {:} ...'.format(progress))
        pid_pubyear[pid] = pubyear
    query_op.close_db()
    logging.info('{:} cited ids have year'.format(len(pid_pubyear.keys())))
    open('data/pid_pubyear.json', 'w').write(json.dumps(pid_pubyear))
def fetch_doctype():
    """Dump paper id -> document type to data/pid_doctype.json."""
    pid_doctype = {}
    query_op = dbop()
    progress = 0
    for pid, doctype in query_op.query_database(
            'select id,doctype from wos_core.wos_doctypes'):
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress {:} ...'.format(progress))
        pid_doctype[pid] = doctype
    query_op.close_db()
    saved_path = 'data/pid_doctype.json'
    open(saved_path, 'w').write(json.dumps(pid_doctype))
def fetch_teamsize():
    """Dump paper id -> number of distinct authors to data/pid_teamsize.json."""
    query_op = dbop()
    pid_names = defaultdict(set)
    progress = 0
    for _id, name_id, addr_id in query_op.query_database(
            'select id,name_id,addr_id from wos_core.wos_address_names'):
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress {} ...'.format(progress))
        pid_names[_id].add(name_id)
    # Team size = number of distinct author name ids per paper.
    pid_ts = {pid: len(names) for pid, names in pid_names.items()}
    open('data/pid_teamsize.json', 'w').write(json.dumps(pid_ts))
    logging.info('{} data saved to data/pid_teamsize.json'.format(len(pid_ts)))
def fetch_titles():
    """Dump paper id -> title (title_id=6) to data/pid_titles.json."""
    sql = 'select id,title from wos_core.wos_titles where title_id=6'
    query_op = dbop()
    progress = 0
    pid_title = {}
    for pid, title in query_op.query_database(sql):
        progress += 1
        if progress % 1000000 == 0:
            logging.info('progress {} ...'.format(progress))
        pid_title[pid] = title
    # BUGFIX: the original wrote to data/pid_teamsize.json, clobbering the
    # output of fetch_teamsize(); titles now get their own file.
    open('data/pid_titles.json', 'w').write(json.dumps(pid_title))
    logging.info('{} data saved to data/pid_titles.json'.format(
        len(pid_title)))
# Fetch WoS paper abstracts (获取WOS的摘要)
def fectch_abs():
    """Dump abstract paragraphs to data/wos_abs.txt.

    Each output line is ``pid#$#abstract_id#$#paragraph_id#$#paragraph_text``.
    (The misspelled name is kept because callers reference it.)
    """
    sql = 'select id,abstract_id,paragraph_id,paragraph_text from wos_core.wos_abstract_paragraphs'
    query_op = dbop()
    progress = 0
    lines = []
    # BUGFIX: the original never closed the output file; a context manager
    # guarantees it is flushed and closed even on error.
    with open('data/wos_abs.txt', 'w+') as f:
        for pid, abstract_id, paragraph_id, paragraph_label in query_op.query_database(
                sql):
            progress += 1
            lines.append(
                f"{pid}#$#{abstract_id}#$#{paragraph_id}#$#{paragraph_label}")
            if len(lines) == 100000:  # flush in batches to bound memory
                logging.info(f'progress {progress} ...')
                f.write('\n'.join(lines) + '\n')
                lines = []
        if len(lines) > 0:
            f.write('\n'.join(lines) + '\n')
    logging.info('Done')
def featch_author_info():
    """Dump address rows to data/wos_address.txt as 'id,addr_id,full_address'.

    NOTE(review): full_address may itself contain commas, so the output is
    not safely parseable as CSV — confirm what downstream consumers expect.
    (The misspelled name is kept because callers reference it.)
    """
    sql = 'select id,addr_id,full_address from wos_core.wos_addresses'
    query_op = dbop()
    lines = []
    # BUGFIX: the original never closed the output file; a context manager
    # guarantees it is flushed and closed even on error.
    with open("data/wos_address.txt", 'w') as f:
        for id, addr_id, full_address in query_op.query_database(sql):
            lines.append(f'{id},{addr_id},{full_address}')
            if len(lines) > 100000:  # flush in batches to bound memory
                f.write('\n'.join(lines) + '\n')
                lines = []
        if len(lines) > 0:
            f.write('\n'.join(lines) + '\n')
    logging.info('DONE')
if __name__ == '__main__':
    # Uncomment the fetcher you want to run; each dumps one JSON/txt file.
    # fetch_teamsize()
    # fetch_titles()
    # subjnum_dis()
    # fectch_abs()
    featch_author_info()
| {"/basic_attr_fetcher.py": ["/basic_config.py"], "/WOS_data_processing.py": ["/basic_config.py", "/basic_attr_fetcher.py"], "/attr_stats.py": ["/basic_config.py", "/WOS_data_processing.py"]} |
44,983 | hyyc116/WOS_data_processing | refs/heads/master | /basic_config.py | #coding:utf-8
import os
import sys
import json
from collections import defaultdict
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import math
import numpy as np
import random
import logging
import networkx as nx
from itertools import combinations
import pylab
import itertools
from mpl_toolkits.mplot3d import Axes3D
# from scipy.interpolate import spline
from multiprocessing.dummy import Pool as ThreadPool
from networkx.algorithms import isomorphism
from matplotlib import cm as CM
from collections import Counter
from scipy.signal import wiener
import matplotlib as mpl
from matplotlib.patches import Circle
# from matplotlib.patheffects import withStroke
import matplotlib.colors as colors
from matplotlib.colors import LogNorm
from matplotlib.colors import LinearSegmentedColormap
from networkx.algorithms.core import core_number
from networkx.algorithms.core import k_core
import psycopg2
from cycler import cycler
# import statsmodels.api as sm
# lowess = sm.nonparametric.lowess
import scipy
# plt.style.use('ggplot')
# from gini import gini
# --- Global logging / matplotlib configuration applied on import ---
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                    level=logging.INFO)
# Allow very long line paths to be rendered by the Agg backend.
mpl.rcParams['agg.path.chunksize'] = 10000
# 20-color palette installed below as matplotlib's default color cycle.
color_sequence = [
    '#1f77b4', '#ffbb78', '#2ca02c', '#d62728', '#ff9896', '#9467bd',
    '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#dbdb8d', '#17becf',
    '#ff7f0e', '#aec7e8', '#9edae5', '#98df8a', '#c5b0d5', '#c49c94',
    '#f7b6d2', '#c7c7c7'
]
mpl.rcParams['axes.prop_cycle'] = cycler('color', color_sequence)
# color = plt.cm.viridis(np.linspace(0.01,0.99,6)) # This returns RGBA; convert:
# hexcolor = map(lambda rgb:'#%02x%02x%02x' % (rgb[0]*255,rgb[1]*255,rgb[2]*255),
# tuple(color[:,0:-1]))
# mpl.rcParams['axes.prop_cycle'] = cycler('color', hexcolor)
# Default font sizes shared by all figures produced from this module.
params = {
    'legend.fontsize': 10,
    'axes.labelsize': 10,
    'axes.titlesize': 15,
    'xtick.labelsize': 10,
    'ytick.labelsize': 10
}
pylab.rcParams.update(params)
# from paths import *
def circle(ax, x, y, radius=0.15):
    """Draw an annotation circle of ``radius`` centered at (x, y) on ``ax``."""
    # BUGFIX: the withStroke import at the top of the file is commented
    # out, so calling this function raised NameError; import it locally.
    from matplotlib.patheffects import withStroke
    circle = Circle((x, y),
                    radius,
                    clip_on=False,
                    zorder=10,
                    linewidth=1,
                    edgecolor='black',
                    facecolor=(0, 0, 0, .0125),
                    path_effects=[withStroke(linewidth=5, foreground='w')])
    ax.add_artist(circle)
def power_low_func(x, a, b):
    """Power law: b * x**(-a)."""
    return b / (x ** a)
def exponential_func(x, a, b):
    """Exponential decay: b * exp(-a * x)."""
    return b * np.exp(-(a * x))
def square_x(x, a, b, c):
    """Quadratic polynomial: a*x**2 + b*x + c."""
    return a * x ** 2 + b * x + c
def autolabel(rects, ax, total_count=None, step=1):
    """Attach a text label above every ``step``-th bar showing its height.

    If ``total_count`` is given, the height's fraction of the total is
    appended on a second line.
    """
    for index in range(0, len(rects), step):
        rect = rects[index]
        height = rect.get_height()
        if total_count is None:
            label = '{:}'.format(int(height))
        else:
            label = '{:}\n({:.6f})'.format(int(height),
                                           height / float(total_count))
        ax.text(rect.get_x() + rect.get_width() / 2.,
                1.005 * height,
                label,
                ha='center',
                va='bottom')
class dbop:
    """Small psycopg2 helper for querying and batch-inserting WoS data.

    NOTE(review): connection credentials are hard-coded below — they
    should be moved to configuration or environment variables.
    """
    def __init__(self, insert_index=0):
        # insert_index seeds the running row counter used by batch_insert().
        self._insert_index = insert_index
        self._insert_values = []  # rows buffered but not yet flushed
        logging.debug("connect database with normal cursor.")
        self._db = psycopg2.connect(database='core_data',
                                    user="buyi",
                                    password="ruth_hardtop_isthmus_bubbly")
        self._cursor = self._db.cursor()
    def query_database(self, sql):
        """Execute ``sql`` on a fresh cursor and return it (iterable)."""
        self._cursor.close()
        self._cursor = self._db.cursor()
        self._cursor.execute(sql)
        logging.debug("query database with sql {:}".format(sql))
        return self._cursor
    def insert_database(self, sql, values):
        """Run ``executemany(sql, values)`` on a fresh cursor and commit."""
        self._cursor.close()
        self._cursor = self._db.cursor()
        self._cursor.executemany(sql, values)
        logging.debug("insert data to database with sql {:}".format(sql))
        self._db.commit()
    def batch_insert(self, sql, row, step, is_auto=True, end=False):
        """Buffer ``row`` and flush every ``step`` rows via insert_database().

        When ``is_auto`` is true, ``row[0]`` is overwritten with the running
        counter. Call once more with ``end=True`` to flush the remainder.
        """
        if end:
            if len(self._insert_values) != 0:
                logging.info(
                    "insert {:}th data into database,final insert.".format(
                        self._insert_index))
                self.insert_database(sql, self._insert_values)
        else:
            self._insert_index += 1
            if is_auto:
                row[0] = self._insert_index
            self._insert_values.append(tuple(row))
            if self._insert_index % step == 0:
                logging.info("insert {:}th data into database".format(
                    self._insert_index))
                self.insert_database(sql, self._insert_values)
                self._insert_values = []
    def get_insert_count(self):
        """Return how many rows batch_insert() has processed so far."""
        return self._insert_index
    def execute_del_update(self, sql):
        """Execute a DELETE/UPDATE statement and commit."""
        self._cursor.execute(sql)
        self._db.commit()
        logging.debug("execute delete or update sql {:}.".format(sql))
    def execute_sql(self, sql):
        """Execute an arbitrary statement and commit."""
        self._cursor.execute(sql)
        self._db.commit()
        logging.debug("execute sql {:}.".format(sql))
    def close_db(self):
        """Close the underlying database connection."""
        self._db.close()
def plot_line_from_data(fig_data, ax=None):
    """Plot one line described by a fig_data dict.

    Expected keys: x, y, title, xlabel, ylabel, marker; optional
    xscale/yscale (default 'linear'). Draws on ``ax`` when given,
    otherwise on the current pyplot figure.
    """
    xscale = fig_data.get('xscale', 'linear')
    yscale = fig_data.get('yscale', 'linear')
    if ax is None:
        plt.plot(fig_data['x'], fig_data['y'], fig_data['marker'])
        plt.xlabel(fig_data['xlabel'])
        plt.ylabel(fig_data['ylabel'])
        plt.xscale(xscale)
        plt.yscale(yscale)
        plt.title(fig_data['title'])
        plt.tight_layout()
    else:
        ax.plot(fig_data['x'], fig_data['y'], fig_data['marker'])
        ax.set_xlabel(fig_data['xlabel'])
        ax.set_ylabel(fig_data['ylabel'])
        ax.set_title(fig_data['title'])
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
def plot_bar_from_data(fig_data, ax=None):
    """Bar-chart twin of plot_line_from_data (same fig_data keys, no marker)."""
    xscale = fig_data.get('xscale', 'linear')
    yscale = fig_data.get('yscale', 'linear')
    if ax is None:
        plt.bar(fig_data['x'], fig_data['y'], align='center')
        plt.xlabel(fig_data['xlabel'])
        plt.ylabel(fig_data['ylabel'])
        plt.xscale(xscale)
        plt.yscale(yscale)
        plt.title(fig_data['title'])
        plt.tight_layout()
    else:
        ax.bar(fig_data['x'], fig_data['y'], align='center')
        ax.set_xlabel(fig_data['xlabel'])
        ax.set_ylabel(fig_data['ylabel'])
        ax.set_title(fig_data['title'])
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
def plot_multi_lines_from_data(fig_data, ax=None):
    """Plot several lines that share one x axis.

    fig_data keys: x, ys, markers, labels (the last three are parallel
    lists), plus title/xlabel/ylabel and optional xscale/yscale.
    """
    xs = fig_data['x']
    xscale = fig_data.get('xscale', 'linear')
    yscale = fig_data.get('yscale', 'linear')
    if ax is None:
        for i, ys in enumerate(fig_data['ys']):
            plt.plot(xs, ys, fig_data['markers'][i], label=fig_data['labels'][i])
        plt.xlabel(fig_data['xlabel'])
        plt.ylabel(fig_data['ylabel'])
        plt.xscale(xscale)
        plt.yscale(yscale)
        plt.title(fig_data['title'])
        plt.legend()
        plt.tight_layout()
    else:
        for i, ys in enumerate(fig_data['ys']):
            ax.plot(xs, ys, fig_data['markers'][i], label=fig_data['labels'][i])
        ax.set_xlabel(fig_data['xlabel'])
        ax.set_ylabel(fig_data['ylabel'])
        ax.set_title(fig_data['title'])
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
        ax.legend()
def plot_multi_lines_from_two_data(fig_data, ax=None):
    """Plot several lines, each with its OWN x values.

    Like plot_multi_lines_from_data but with a per-series 'xs' list
    instead of a shared 'x'.
    """
    xscale = fig_data.get('xscale', 'linear')
    yscale = fig_data.get('yscale', 'linear')
    if ax is None:
        for i, ys in enumerate(fig_data['ys']):
            plt.plot(fig_data['xs'][i], ys, fig_data['markers'][i], label=fig_data['labels'][i])
        plt.xlabel(fig_data['xlabel'])
        plt.ylabel(fig_data['ylabel'])
        plt.xscale(xscale)
        plt.yscale(yscale)
        plt.title(fig_data['title'])
        plt.legend()
        plt.tight_layout()
    else:
        for i, ys in enumerate(fig_data['ys']):
            ax.plot(fig_data['xs'][i], ys, fig_data['markers'][i], label=fig_data['labels'][i])
        ax.set_xlabel(fig_data['xlabel'])
        ax.set_ylabel(fig_data['ylabel'])
        ax.set_title(fig_data['title'])
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
        ax.legend()
def hist_2_bar(data, bins=50):
    """Histogram ``data`` via plt.hist and return ([left bin edges], [counts])."""
    counts, edges, _patches = plt.hist(data, bins=bins)
    return list(edges[:-1]), list(counts)
def gini(array):
    """Calculate the Gini coefficient of a numpy array (or array-like).

    Based on the bottom equation at:
    http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    from:
    http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    All values are treated equally; input is flattened to 1d.
    """
    # Work on a float64 copy: the epsilon shift below raises a numpy casting
    # error on integer input, and we must never touch the caller's buffer.
    # np.asarray also generalizes the function to plain Python sequences.
    array = np.asarray(array, dtype=np.float64).flatten()
    if np.amin(array) < 0:
        # Values cannot be negative:
        array -= np.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = np.sort(array)
    # 1-based rank of each value:
    index = np.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
if __name__ == '__main__':
    # test color theme
    # Smoke test: plot five random series with the default color cycle and
    # save the figure so the theme can be inspected by eye.
    xs = range(5)
    ys = np.random.random((5, 5))
    plt.figure()
    plt.plot(xs, ys)
    plt.tight_layout()
    plt.savefig('fig/test_color.jpg')
| {"/basic_attr_fetcher.py": ["/basic_config.py"], "/WOS_data_processing.py": ["/basic_config.py", "/basic_attr_fetcher.py"], "/attr_stats.py": ["/basic_config.py", "/WOS_data_processing.py"]} |
44,984 | hyyc116/WOS_data_processing | refs/heads/master | /WOS_data_processing.py | #coding:utf-8
'''
对wos数据库中的数据进行一系列的基本处理,以便于其他项目使用。
包括:
---基础数据
每篇论文的年份: pid_pubyear.json
每篇论文每年的被引次数: pid_year_citnum.json
每篇论文对应的作者数量: pid_teamsize.json
--- subject相关
每篇论文对应的subjects: pid_subjects.json pid_topsubjs.json
--- 用于计算学科之间的相似度
topsubject以及subject的参考文献数量,以及相互之间的引用数量: subj_refnum.json subj_subj_refnum.json topsubj_refnum.json topsubj_topsubj_refnum.json
每个学科非本地引用的数量: subj_outrefnum.json
---统计数据
论文总数量,引用关系总数量,及在六个学科中的总数量
随着时间论文总数量的变化以及六个顶级学科的论文数量变化
六个顶级学科相互之间的引用矩阵
六个学科的引用次数分布 CCDF以及使用powlaw进行拟合的系数
'''
from basic_config import *
from basic_attr_fetcher import *
def stat_basic_attr():
    """Run the four attribute fetchers from basic_attr_fetcher.

    Presumably each one dumps its per-paper attribute file under data/
    (the files later read by load_basic_data) -- see basic_attr_fetcher.
    """
    fetch_subjects()
    fetch_pubyear()
    fetch_doctype()
    fetch_teamsize()
def load_basic_data(isStat=False):
    """Load the four per-paper attribute maps dumped under data/.

    Returns a tuple (pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize),
    each a dict keyed by paper id.  With isStat=True, additionally logs how
    many papers carry all four attributes.
    """
    logging.info('======== LOADING BASIC DATA =============')
    logging.info('======== ================== =============')

    logging.info('loading paper pubyear ...')
    with open('data/pid_pubyear.json') as f:
        pid_pubyear = json.load(f)
    logging.info('{} papers has year label.'.format(len(pid_pubyear.keys())))

    logging.info('loading paper subjects ...')
    with open('data/pid_subjects.json') as f:
        pid_subjects = json.load(f)
    logging.info('{} papers has subject label.'.format(len(
        pid_subjects.keys())))

    logging.info('loading paper top subjects ...')
    with open('data/pid_topsubjs.json') as f:
        pid_topsubjs = json.load(f)
    logging.info('{} papers has top subject label.'.format(
        len(pid_topsubjs.keys())))

    logging.info('loading paper teamsize ...')
    with open('data/pid_teamsize.json') as f:
        pid_teamsize = json.load(f)
    logging.info('{} papers has teamsize label.'.format(
        len(pid_teamsize.keys())))

    if isStat:
        # FIX: the original intersected pid_topsubjs with itself and never
        # included pid_subjects, so the "all four attrs" count was wrong.
        interset = set(pid_pubyear.keys()) & set(pid_teamsize.keys()) & set(
            pid_subjects.keys()) & set(pid_topsubjs.keys())
        logging.info('{} papers has both four attrs.'.format(len(interset)))

    logging.info('======== LOADING BASIC DATA DONE =============')
    logging.info('======== ======================= =============')
    return pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize
def stats_from_pid_cits():
    """Single streaming pass over the citation-pair file data/pid_cits_ALL.txt.

    Each line is "cited_pid<TAB>citing_pid".  The pass accumulates, per
    subject and per top subject: total reference counts, per-year reference
    counts, per-year references that leave the local dataset, and
    subject-to-subject citation matrices -- plus per-paper per-year citation
    counts.  Every accumulator is dumped to its own JSON file under data/.
    """
    ## basic per-paper attribute maps
    pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize = load_basic_data()

    ## cross-subject citation counters
    subj_refnum = defaultdict(int)
    subj_subj_refnum = defaultdict(lambda: defaultdict(int))
    topsubj_refnum = defaultdict(int)
    topsubj_topsubj_refnum = defaultdict(lambda: defaultdict(int))

    ## references pointing outside the local dataset, per year
    subj_year_outrefnum = defaultdict(lambda: defaultdict(int))
    topsubj_year_outrefnum = defaultdict(lambda: defaultdict(int))

    subj_year_refnum = defaultdict(lambda: defaultdict(int))
    topsubj_year_refnum = defaultdict(lambda: defaultdict(int))

    ## per-paper citation count by citing year
    pid_year_citnum = defaultdict(lambda: defaultdict(int))

    progress = 0
    lines = []  # NOTE(review): unused leftover
    for line in open('data/pid_cits_ALL.txt'):
        progress += 1
        if progress % 100000000 == 0:
            logging.info('reading %d citation relations....' % progress)

        line = line.strip()
        pid, citing_id = line.split("\t")

        # skip self-citations
        if pid == citing_id:
            continue

        cited_year = pid_pubyear.get(pid, None)
        cited_subjs = pid_subjects.get(pid, None)
        cited_topsubjs = pid_topsubjs.get(pid, None)

        citing_year = pid_pubyear.get(citing_id, None)
        citing_subjs = pid_subjects.get(citing_id, None)
        citing_topsubjs = pid_topsubjs.get(citing_id, None)

        ## skip pairs whose citing paper has no attribute data
        if citing_year is None or citing_subjs is None or citing_topsubjs is None:
            continue

        ## the cited paper may live outside the local database: count the
        ## reference as "outgoing" for the citing side and move on
        if cited_year is None or cited_subjs is None or cited_topsubjs is None:

            for subj in citing_subjs:
                subj_year_outrefnum[subj][citing_year] += 1

            for topsubj in citing_topsubjs:
                topsubj_year_outrefnum[topsubj][citing_year] += 1

            continue

        for subj in citing_subjs:
            subj_year_refnum[subj][citing_year] += 1
            subj_refnum[subj] += 1
            for cited_subj in cited_subjs:
                subj_subj_refnum[subj][cited_subj] += 1

        for topsubj in citing_topsubjs:
            topsubj_year_refnum[topsubj][citing_year] += 1
            topsubj_refnum[topsubj] += 1
            for cited_topsubj in cited_topsubjs:
                topsubj_topsubj_refnum[topsubj][cited_topsubj] += 1

        pid_year_citnum[pid][citing_year] += 1

    open("data/pid_year_citnum.json", 'w').write(json.dumps(pid_year_citnum))
    logging.info('data saved to data/pid_year_citnum.json')

    open("data/subj_refnum.json", 'w').write(json.dumps(subj_refnum))
    logging.info('data saved to data/subj_refnum.json')

    open("data/subj_year_refnum.json", 'w').write(json.dumps(subj_year_refnum))
    logging.info('data saved to data/subj_year_refnum.json')

    open("data/subj_year_outrefnum.json",
         'w').write(json.dumps(subj_year_outrefnum))
    logging.info('data saved to data/subj_year_outrefnum.json')

    open("data/topsubj_refnum.json", 'w').write(json.dumps(topsubj_refnum))
    logging.info('data saved to data/topsubj_refnum.json')

    open("data/topsubj_year_refnum.json",
         'w').write(json.dumps(topsubj_year_refnum))
    logging.info('data saved to data/topsubj_year_refnum.json')

    open("data/topsubj_year_outrefnum.json",
         'w').write(json.dumps(topsubj_year_outrefnum))
    logging.info('data saved to data/topsubj_year_outrefnum.json')

    open("data/subj_subj_refnum.json", 'w').write(json.dumps(subj_subj_refnum))
    logging.info('data saved to data/subj_subj_refnum.json')

    open("data/topsubj_topsubj_refnum.json",
         'w').write(json.dumps(topsubj_topsubj_refnum))
    logging.info('data saved to data/topsubj_topsubj_refnum.json')
## Statistics and visualization of some of the accumulated data
def stat_and_visualize_data():
    """Count papers per top subject and per year, write a markdown table
    (subj_paper_num.md) and plot the yearly curves (fig/subj_year_num.png).
    """
    ## per-subject paper counts and their evolution over time
    pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize = load_basic_data()

    subj_year_num = defaultdict(lambda: defaultdict(int))
    subj_totalnum = defaultdict(int)
    years = set([])
    allsubjs = set([])
    for pid in pid_pubyear:
        subjs = pid_topsubjs.get(pid, None)
        pubyear = pid_pubyear[pid]
        years.add(pubyear)
        # papers without a top-subject label are counted in no subject
        if subjs is None:
            continue
        for subj in subjs:
            allsubjs.add(subj)
            subj_year_num[subj][pubyear] += 1
            subj_totalnum[subj] += 1

    # Markdown table: one row per year, one column per subject (sorted), plus
    # a final "total" row.
    tableLines = ['|year|{}|'.format('|'.join(sorted(list(allsubjs))))]
    tableLines.append('|:---:|{}|'.format('|'.join([':---:'] * len(allsubjs))))
    for year in sorted(years, key=lambda x: int(x)):
        line = []
        line.append(year)
        for subj in sorted(subj_year_num.keys()):
            line.append('{:,}'.format(subj_year_num[subj][year]))
        tableLines.append('|{}|'.format('|'.join(line)))

    totalline = ['total']
    for subj in sorted(subj_totalnum.keys()):
        totalline.append('{:,}'.format(subj_totalnum[subj]))
    tableLines.append('|{}|'.format('|'.join(totalline)))

    open("subj_paper_num.md", 'w').write('\n'.join(tableLines))
    logging.info('paper num saved to subj_paper_num.md')

    # One log-scale publication-count curve per subject.
    plt.figure(figsize=(7, 5))

    for subj in sorted(subj_year_num.keys()):

        year_num = subj_year_num[subj]

        xs = []
        ys = []

        for year in sorted(year_num.keys(), key=lambda x: int(x)):
            xs.append(int(year))
            ys.append(year_num[year])

        plt.plot(xs, ys, label=subj)

    plt.yscale('log')
    plt.xlabel('year')
    plt.ylabel('number of publications')
    plt.legend()
    plt.tight_layout()
    plt.savefig('fig/subj_year_num.png', dpi=400)
    logging.info('fig saved to fig/subj_year_num.png.')
def subj_sim_cal(data_path, out_path):
    """Compute a symmetric citation-based similarity between subjects.

    data_path: JSON file mapping subj -> {cited_subj: #references}.
    out_path:  JSON output mapping subj -> {subj: similarity}, with
               sim(i, j) = (R_ij + R_ji) / sqrt((TC_i + TR_i) * (TC_j + TR_j))
               where TR is total outgoing and TC total incoming references.
    """
    with open(data_path) as f:
        subj_subj_refnum = json.load(f)

    # Totals of outgoing (TR) and incoming (TC) references per subject.
    subj_refT = defaultdict(int)
    subj_citT = defaultdict(int)
    for subj in subj_subj_refnum.keys():
        for cited_subj in subj_subj_refnum[subj].keys():
            refnum = subj_subj_refnum[subj][cited_subj]
            subj_refT[subj] += refnum
            subj_citT[cited_subj] += refnum

    # FIX: dict views cannot be indexed in Python 3 -- the original
    # `subjs = subj_refT.keys()` made `subjs[i]` raise TypeError.
    subjs = list(subj_refT.keys())
    print(subjs)

    subj_subj_sim = defaultdict(dict)
    for i in range(len(subjs)):
        subj1 = subjs[i]
        for j in range(len(subjs)):
            subj2 = subjs[j]

            R_ij = subj_subj_refnum[subj1].get(subj2, 0)
            R_ji = subj_subj_refnum[subj2].get(subj1, 0)

            TR_i = subj_refT[subj1]
            TR_j = subj_refT[subj2]

            TC_i = subj_citT[subj1]
            TC_j = subj_citT[subj2]

            sim = (R_ji + R_ij) / (np.sqrt((TC_i + TR_i) * (TC_j + TR_j)))

            print(subj1, subj2, sim, R_ij, R_ji, TR_i, TC_i, TR_j, TC_j)

            subj_subj_sim[subj1][subj2] = sim

    with open(out_path, 'w') as f:
        f.write(json.dumps(subj_subj_sim))
    logging.info('data saved to {}.'.format(out_path))
def plot_subj_sim_matrix():
    """Render data/topsubj_topsubj_sim.json as a markdown table in
    sim_matrix.md (rows and columns sorted by subject name)."""
    subj_subj_sim = json.loads(open('data/topsubj_topsubj_sim.json').read())

    names = sorted(subj_subj_sim.keys())
    rows = ['|subj|{}|'.format('|'.join(names)),
            '|:---:|{}|'.format('|'.join([':---:'] * len(subj_subj_sim.keys())))]
    for row_subj in names:
        cells = [row_subj] + ['{:.5f}'.format(subj_subj_sim[row_subj][col])
                              for col in names]
        rows.append('|{}|'.format('|'.join(cells)))

    open('sim_matrix.md', 'w').write('\n'.join(rows))
    logging.info('data saved to sim_matrix.md')
def stat_refs():
    """Second pass over data/pid_cits_ALL.txt: collect each paper's reference
    list (restricted to papers with full attribute data) and each paper's raw
    reference count; dump them to data/pid_refs.txt and data/pid_refnum.json.
    """
    ## basic per-paper attribute maps
    pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize = load_basic_data()

    ## per-paper reference lists / counts
    pid_refs = defaultdict(list)
    pid_refnum = defaultdict(int)

    progress = 0
    lines = []  # NOTE(review): unused leftover
    for line in open('data/pid_cits_ALL.txt'):
        progress += 1
        if progress % 100000000 == 0:
            logging.info('reading %d citation relations....' % progress)

        line = line.strip()
        pid, citing_id = line.split("\t")
        # NOTE(review): pid_refnum counts every line (including self-citations
        # and out-of-dataset references), while pid_refs below is filtered --
        # confirm this asymmetry is intentional.
        pid_refnum[citing_id] += 1

        cited_year = pid_pubyear.get(pid, None)
        cited_subjs = pid_subjects.get(pid, None)
        cited_topsubjs = pid_topsubjs.get(pid, None)

        citing_year = pid_pubyear.get(citing_id, None)
        citing_subjs = pid_subjects.get(citing_id, None)
        citing_topsubjs = pid_topsubjs.get(citing_id, None)

        ## skip pairs whose citing paper has no attribute data
        if citing_year is None or citing_subjs is None or citing_topsubjs is None:
            continue

        ## the cited paper may live outside the local database
        if cited_year is None or cited_subjs is None or cited_topsubjs is None:
            continue

        if citing_id == pid:
            continue

        pid_refs[citing_id].append(pid)

    logging.info('there are {:,} paper has refs.'.format(len(pid_refs)))

    open('data/pid_refnum.json', 'w').write(json.dumps(pid_refnum))

    ## store the reference lists in chunks: one JSON object of up to
    ## 100000 papers per line of data/pid_refs.txt
    saved_dict = {}
    savenum = 0
    out_path = 'data/pid_refs.txt'
    of = open(out_path, 'w')
    for pid in pid_refs.keys():
        # deduplicate each paper's reference list
        saved_dict[pid] = list(set(pid_refs[pid]))

        savenum += 1

        if savenum % 100000 == 0:
            of.write(json.dumps(saved_dict) + '\n')
            saved_dict = {}

    # flush the final partial chunk
    if len(saved_dict) > 0:
        of.write(json.dumps(saved_dict) + '\n')

    of.close()
    logging.info('data saved to {}.'.format(out_path))
def stat_paper_cN():
    """Aggregate each paper's yearly citation counts into cumulative windows:
    C2/C5/C10 (citations within 2/5/10 years of publication) and CN (total).
    Results are dumped to data/pid_c{2,5,10,n}.json.
    """
    logging.info('loading data pid year citenum ....')
    pid_year_citnum = json.loads(open("data/pid_year_citnum.json").read())

    logging.info('loading paper pubyear ...')
    pid_pubyear = json.loads(open('data/pid_pubyear.json').read())
    logging.info('{} papers has year label.'.format(len(pid_pubyear.keys())))

    pid_c2 = defaultdict(int)
    pid_c5 = defaultdict(int)
    pid_c10 = defaultdict(int)
    pid_cn = defaultdict(int)

    for pid in pid_year_citnum.keys():
        # every pid in pid_year_citnum was filtered to have a pubyear when
        # stats_from_pid_cits built it, so direct indexing is safe here
        pubyear = int(pid_pubyear[pid])

        for year in pid_year_citnum[pid].keys():
            # the windows are nested: a citation within 2 years also counts
            # toward C5, C10 and CN
            if int(year) - int(pubyear) <= 2:
                pid_c2[pid] += pid_year_citnum[pid][year]

            if int(year) - int(pubyear) <= 5:
                pid_c5[pid] += pid_year_citnum[pid][year]

            if int(year) - int(pubyear) <= 10:
                pid_c10[pid] += pid_year_citnum[pid][year]

            pid_cn[pid] += pid_year_citnum[pid][year]

    open('data/pid_c2.json', 'w').write(json.dumps(pid_c2))
    logging.info('data saved to data/pid_c2.json')

    open('data/pid_c5.json', 'w').write(json.dumps(pid_c5))
    logging.info('data saved to data/pid_c5.json')

    open('data/pid_c10.json', 'w').write(json.dumps(pid_c10))
    logging.info('data saved to data/pid_c10.json')

    open('data/pid_cn.json', 'w').write(json.dumps(pid_cn))
    logging.info('data saved to data/pid_cn.json')
if __name__ == '__main__':
    # Full pipeline: scan citation pairs, visualize per-subject counts,
    # compute subject-similarity matrices, then per-paper reference lists
    # and cumulative citation counts (C2/C5/C10/CN).
    # stat_basic_num()
    stats_from_pid_cits()

    stat_and_visualize_data()

    subj_sim_cal('data/subj_subj_refnum.json', 'data/subj_subj_sim.json')
    subj_sim_cal('data/topsubj_topsubj_refnum.json',
                 'data/topsubj_topsubj_sim.json')

    plot_subj_sim_matrix()

    stat_refs()

    stat_paper_cN()
    # stat_ref_num()
| {"/basic_attr_fetcher.py": ["/basic_config.py"], "/WOS_data_processing.py": ["/basic_config.py", "/basic_attr_fetcher.py"], "/attr_stats.py": ["/basic_config.py", "/WOS_data_processing.py"]} |
44,985 | hyyc116/WOS_data_processing | refs/heads/master | /attr_stats.py | #coding:utf-8
from basic_config import *
from WOS_data_processing import load_basic_data
import seaborn as sns
def citation_distribution():
    """Plot, per top subject, (a) the CCDF of total citation counts and
    (b) the average citation count by publication year.

    Reads data/pid_cn.json; writes fig/subj_cit_dis.png and
    fig/subj_year_averagecn.png.
    """
    logging.info('loading attrs ...')
    pid_pubyear, pid_subjects, pid_topsubjs, pid_teamsize = load_basic_data()

    logging.info('loading pid cn ...')
    pid_cn = json.loads(open('data/pid_cn.json').read())
    logging.info('{} paper citations loaded.'.format(len(pid_cn.keys())))

    subj_cit_dis = defaultdict(lambda: defaultdict(int))
    subj_year_cits = defaultdict(lambda: defaultdict(list))
    for pid in pid_cn.keys():
        pubyear = int(pid_pubyear[pid])
        cn = pid_cn[pid]
        for subj in pid_topsubjs[pid]:
            subj_cit_dis[subj][cn] += 1
            subj_year_cits[subj][pubyear].append(cn)

    ## plot each subject's citation-count CCDF (log-log)
    plt.figure(figsize=(5.5, 4))
    for subj in sorted(subj_cit_dis.keys()):

        xs = []
        ys = []

        for cit in sorted(subj_cit_dis[subj].keys()):
            xs.append(cit)
            ys.append(subj_cit_dis[subj][cit])

        # tail sums turn the histogram into a complementary CDF
        ys = [np.sum(ys[i:]) / float(np.sum(ys)) for i in range(len(ys))]

        plt.plot(xs, ys, label=subj)

    plt.xlabel('number of citations')
    plt.ylabel('percentage')

    plt.xscale('log')
    plt.yscale('log')

    plt.legend()

    plt.tight_layout()

    plt.savefig('fig/subj_cit_dis.png', dpi=400)
    logging.info('fig saved to fig/subj_cit_dis.png.')

    ### average number of citations per subject as a function of pub year
    plt.figure(figsize=(5.5, 4))
    for subj in sorted(subj_year_cits.keys()):

        xs = []
        ys = []

        for year in sorted(subj_year_cits[subj].keys()):
            avgC = np.mean(subj_year_cits[subj][year])

            xs.append(year)
            ys.append(avgC)

        plt.plot(xs, ys, label=subj)

    # vertical reference line at year 2004
    plt.plot([2004] * 10, np.linspace(1, 42, 10), '--', label='$x=2004$')

    plt.xlabel('year')
    plt.ylabel('average number of citations')

    plt.legend()
    # plt.yscale('l)

    plt.tight_layout()

    plt.savefig('fig/subj_year_averagecn.png', dpi=400)
    logging.info('fig saved to fig/subj_year_averagecn.png.')
def reference_distribution():
    """Plot, per top subject, the distribution of reference counts --
    first using the raw counts from data/pid_refnum.json, then using the
    filtered within-dataset reference lists from data/pid_refs.txt.

    Writes fig/subj_all_refnum_dis.png and fig/subj_refnum_dis.png.
    """
    logging.info('loading paper subjects ...')
    pid_topsubjs = json.loads(open('data/pid_topsubjs.json').read())
    logging.info('{} papers has subject label.'.format(len(
        pid_topsubjs.keys())))

    pid_refnum = json.loads(open('data/pid_refnum.json').read())

    ## distribution of raw reference counts per subject
    subj_refnum_dis = defaultdict(lambda: defaultdict(int))
    for pid in pid_refnum.keys():

        if pid_topsubjs.get(pid, None) is None:
            continue

        for subj in pid_topsubjs[pid]:
            subj_refnum_dis[subj][pid_refnum[pid]] += 1

    sns.set_theme(style='ticks')

    plt.figure(figsize=(5.5, 4))
    ## one normalized curve per subject
    for subj in sorted(subj_refnum_dis.keys()):

        xs = []
        ys = []

        for refnum in sorted(subj_refnum_dis[subj].keys()):
            xs.append(refnum)
            ys.append(subj_refnum_dis[subj][refnum])

        # normalize counts to fractions
        ys = np.array(ys) / float(np.sum(ys))

        plt.plot(xs, ys, label=subj)

    sns.despine()

    plt.xscale('log')
    # plt.yscale('log')

    plt.xlabel('number of references')
    plt.xlim(0.9, 200)
    # plt.ylim()
    plt.ylabel('percentage')

    plt.legend()

    plt.tight_layout()

    plt.savefig('fig/subj_all_refnum_dis.png', dpi=400)
    logging.info('fig saved to fig/subj_all_refnum_dis.png')

    # data/pid_refs.txt holds one JSON chunk per line (see stat_refs)
    pid_refs = {}
    for line in open('data/pid_refs.txt'):
        line = line.strip()

        pid_refs.update(json.loads(line))

    ## distribution of within-dataset reference-list lengths per subject
    subj_refnum_dis = defaultdict(lambda: defaultdict(int))
    for pid in pid_refs.keys():
        for subj in pid_topsubjs[pid]:
            subj_refnum_dis[subj][len(pid_refs[pid])] += 1

    plt.figure(figsize=(5.5, 4))
    ## one normalized curve per subject
    for subj in sorted(subj_refnum_dis.keys()):

        xs = []
        ys = []

        for refnum in sorted(subj_refnum_dis[subj].keys()):
            xs.append(refnum)
            ys.append(subj_refnum_dis[subj][refnum])

        # normalize counts to fractions
        ys = np.array(ys) / float(np.sum(ys))

        plt.plot(xs, ys, label=subj)

    plt.xscale('log')
    # plt.yscale('log')

    plt.xlabel('number of references')
    plt.xlim(0.9, 200)
    # plt.ylim()
    plt.ylabel('percentage')

    plt.legend()

    sns.despine()

    plt.tight_layout()

    plt.savefig('fig/subj_refnum_dis.png', dpi=400)
    logging.info('fig saved to fig/subj_refnum_dis.png')
if __name__ == '__main__':
    # Entry point: run the reference-count plots (citation plots disabled).
    # citation_distribution()
    reference_distribution()
| {"/basic_attr_fetcher.py": ["/basic_config.py"], "/WOS_data_processing.py": ["/basic_config.py", "/basic_attr_fetcher.py"], "/attr_stats.py": ["/basic_config.py", "/WOS_data_processing.py"]} |
45,001 | Sithlord-dev/rock_paper_scissors | refs/heads/main | /game.py | import random
def play(player1, player2, num_games, verbose=False):
    """Run num_games rounds between two players and return player 1's win rate.

    player1, player2: callables taking the opponent's previous move
    ('R'/'P'/'S', or '' on the first round) and returning their own move.
    verbose: if True, print every round's moves and outcome.
    Returns player 1's win percentage over decided (non-tie) games.

    Raises ValueError on an invalid move; the original silently skipped such
    rounds (and could reference an unbound `winner` in verbose mode).
    """
    beats = {"P": "R", "R": "S", "S": "P"}  # move -> the move it defeats
    p1_prev_play = ""
    p2_prev_play = ""
    results = {"P1_win": 0, "P2_win": 0, "Tie": 0}
    for _ in range(num_games):
        p1_play = player1(p2_prev_play)
        p2_play = player2(p1_prev_play)
        if p1_play not in beats or p2_play not in beats:
            raise ValueError(
                "invalid move: {!r} / {!r}".format(p1_play, p2_play))
        if p1_play == p2_play:
            results["Tie"] += 1
            winner = "Tie."
        elif beats[p1_play] == p2_play:
            results["P1_win"] += 1
            winner = "Player 1 wins."
        else:
            results["P2_win"] += 1
            winner = "Player 2 wins."
        if verbose:
            print("Player 1: {} | Player 2: {}\n {}\n".format(p1_play, p2_play, winner))
        p1_prev_play = p1_play
        p2_prev_play = p2_play
    games_won = results['P1_win'] + results['P2_win']
    # Win rate is over decided games only; 0 when every round tied.
    if games_won == 0:
        win_rate = 0
    else:
        win_rate = results['P1_win'] / games_won * 100
    print("Final results:", results)
    print("Player 1 win rate: {:.2f}%".format(win_rate))
    return win_rate
def cyril(prev_play, count=[0]):
    """ Pretty dumb, will always play a set of moves regardless of what you play """
    # NOTE: the mutable default is intentional -- it keeps the round counter
    # alive between calls so the fixed cycle keeps advancing.
    cycle = ("P", "S", "S", "R", "P")
    count[0] += 1
    return cycle[count[0] % len(cycle)]
def christal(prev_opp_play):
    """ a bit smarter than cyril, sniffs glue but also analyses your last move and try to guess on that """
    # Counter the opponent's previous move; the very first round (empty
    # string) is treated as if they had played Rock.
    counter_move = {'P': 'S', 'R': 'P', 'S': 'R'}
    last_move = 'R' if prev_opp_play == '' else prev_opp_play
    return counter_move[last_move]
def lana(prev_opp_play, opp_history=list()):
    """ Smart, always carry TEC-nines and will somehow analyse your moves and try to deduce your most frequent out of
    the last 10 moves """
    # NOTE: the mutable default list deliberately persists between calls --
    # it accumulates the opponent's move history.
    opp_history.append(prev_opp_play)
    window = opp_history[-10:]
    favourite = max(set(window), key=window.count)
    if favourite == '':
        favourite = "S"
    counter_move = {'P': 'S', 'R': 'P', 'S': 'R'}
    return counter_move[favourite]
def dr_krieger(prev_opp_play,
               opp_history=list(),
               play_order=[{"RR": 0, "RP": 0, "RS": 0, "PR": 0, "PP": 0, "PS": 0, "SR": 0, "SP": 0, "SS": 0, }]):
    """Smartest bot: predicts the opponent's next move from the frequency of
    their two-move sequences and plays the counter.

    NOTE: the mutable defaults persist between calls on purpose -- they hold
    the opponent's history and the sequence-frequency table.
    """
    if not prev_opp_play:
        prev_opp_play = 'R'
    opp_history.append(prev_opp_play)

    # Record the two-move sequence that just completed.
    pair = "".join(opp_history[-2:])
    if len(pair) == 2:
        play_order[0][pair] += 1

    # Score the three sequences that could follow the opponent's last move
    # and take the historically most frequent continuation.
    candidates = [prev_opp_play + move for move in ("R", "P", "S")]
    freq = {seq: play_order[0][seq] for seq in candidates if seq in play_order[0]}
    prediction = max(freq, key=freq.get)[-1:]

    counter_move = {'P': 'S', 'R': 'P', 'S': 'R'}
    return counter_move[prediction]
def human(prev_opp_play):
    """ Us, humans"""
    # Prompt until the user types a valid move; the opponent's previous move
    # is ignored (humans can read the game log themselves).
    while True:
        inp = input("[R]ock, [P]aper, [S]cissors? ")
        if inp in ["R", "P", "S"]:
            print(inp)
            move = inp
            break
        else:
            print("I did not understand that, can you try again?")
            continue
    return move
def random_player(prev_opp_play):
    """ random player for test """
    # Uniformly pick one of the three moves; the opponent's move is ignored.
    moves = ('R', 'P', 'S')
    return moves[random.randrange(3)]
| {"/main.py": ["/game.py", "/AI_player.py"]} |
45,002 | Sithlord-dev/rock_paper_scissors | refs/heads/main | /main.py | from game import play, cyril, christal, lana, dr_krieger, human, random_player
from AI_player import AI_player
# Entry point: wire two players together and run a match via play().
# Uncomment one of these to benchmark the AI player against each bot:
# play(AI_player, cyril, 1000)
# play(AI_player, christal, 1000)
# play(AI_player, lana, 1000)
# play(AI_player, dr_krieger, 1000)
# Play interactively against one of the bots:
play(human, cyril, 20, verbose=True)
# Bot-vs-bot match over many games:
# play(cyril, dr_krieger, 100000)
| {"/main.py": ["/game.py", "/AI_player.py"]} |
45,003 | Sithlord-dev/rock_paper_scissors | refs/heads/main | /AI_player.py | # The example function below keeps track of the opponent's history and plays whatever the opponent played two plays ago.
# It is not a very good player so you will need to change the code to pass the challenge.
import numpy as np

# ideal response: move -> the move that beats it
perfect_ans = {"P": "S", "R": "P", "S": "R"}

# reset variable
# Module-level state shared across calls; reset() restores all of it.
AI_moves = ["R"]  # our own move history
opp_history = []  # the opponent's move history
strategy = [0, 0, 0, 0]  # correct-prediction count for each of 4 strategies
AI_strategy_guess = ["", "", "", ""]  # our counter-move per strategy
opp_guess = ["", "", "", ""]  # predicted opponent move per strategy
AI_play_order = {}  # frequency table of our own 2-move sequences
opp_play_order = {}  # frequency table of the opponent's 3-move sequences
def AI_player(prev_play):
    """Pick the next move by running four prediction strategies and following
    whichever one has scored the most correct predictions so far.

    prev_play: the opponent's previous move ('R'/'P'/'S'); any other value
    (e.g. '' on the first round) resets all module-level state.
    """
    if prev_play in ["R", "P", "S"]:
        opp_history.append(prev_play)
        # Credit every strategy whose previous prediction came true.
        for i in range(0, 4):
            if opp_guess[i] == prev_play:
                strategy[i] += 1
    else:
        reset()

    # Strategies:
    # guess on the last move
    # (strategy 0: assume the opponent will counter our last move)
    if len(AI_moves) > 0:
        last_play = AI_moves[-1]
        opp_guess[0] = perfect_ans[last_play]
        AI_strategy_guess[0] = perfect_ans[opp_guess[0]]

    # guess on the most frequent move of the last 10 moves
    # (strategy 1: assume the opponent counters our recent favourite)
    last_10_moves = AI_moves[-10:]
    if len(last_10_moves) > 0:
        most_frequent_move = max(set(last_10_moves), key=last_10_moves.count)
        opp_guess[1] = perfect_ans[most_frequent_move]
        AI_strategy_guess[1] = perfect_ans[opp_guess[1]]

    # guess on opp move patterns
    # (strategy 2: predict from the opponent's 3-move sequence frequencies)
    if len(opp_history) >= 3:
        opp_guess[2] = predict_move(opp_history, 3, opp_play_order)
        AI_strategy_guess[2] = perfect_ans[opp_guess[2]]

    # guess on opp's counter-pattern-prediction
    # (strategy 3: assume the opponent predicts OUR 2-move patterns and
    # counters them)
    if len(AI_moves) >= 2:
        opp_guess[3] = perfect_ans[predict_move(AI_moves, 2, AI_play_order)]
        AI_strategy_guess[3] = perfect_ans[opp_guess[3]]

    # Follow the historically best-performing strategy; fall back to 'S'
    # when that strategy has not produced a guess yet.
    best_strategy = np.argmax(strategy)
    guess = AI_strategy_guess[best_strategy]
    if guess == "":
        guess = "S"
    AI_moves.append(guess)
    return guess
def predict_move(history, n, play_order):
    """Predict the next move from a history and a sequence-frequency table.

    Increments the count of the just-seen length-n sequence in *play_order*
    (mutating it in place), then returns the last character of the most
    frequent length-n sequence extending the current tail.
    """
    recent = "".join(history[-n:])
    play_order[recent] = play_order.get(recent, 0) + 1

    # Candidate continuations of the current (n-1)-move tail.
    candidates = ["".join(history[-(n - 1):]) + move for move in ["R", "P", "S"]]
    for candidate in candidates:
        play_order.setdefault(candidate, 0)

    best = max(candidates, key=lambda seq: play_order[seq])
    return best[-1]
def reset():
    """Restore all module-level strategy state to its initial values.

    Called by AI_player when the opponent's previous play is empty or
    invalid, i.e. at the start of a new match.
    """
    global AI_moves, opp_history, strategy, opp_guess, AI_strategy_guess, opp_play_order, AI_play_order
    AI_moves = ["R"]
    # NOTE(review): opp_history is cleared in place while every other name is
    # rebound -- both work here, but the asymmetry looks accidental.
    opp_history.clear()
    strategy = [0, 0, 0, 0]
    opp_guess = ["", "", "", ""]
    AI_strategy_guess = ["", "", "", ""]
    opp_play_order = {}
    AI_play_order = {}
| {"/main.py": ["/game.py", "/AI_player.py"]} |
45,010 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task3.py | from IMDB_task1 import movie_list
from pprint import pprint
# Task_3
def group_by_decade(movies):
movie_by_decade={}
for years in movies:
division=years['year']
decade=division // 10
divisi=decade*10
movie_by_decade[divisi]=[]
for de_year in movie_by_decade:
for movie in movies:
_division=movie['year']
_decade=_division // 10
_divisi=_decade*10
if de_year == divisi:
movie_by_decade[de_year].append(movie)
# pprint.pprint(movie_by_decade)
return movie_by_decade
group_by_decade_analysis=group_by_decade(movie_list)
# pprint(group_by_decade_analysis) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,011 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task13.py | from IMDB_task12 import All_movie_cast
from pprint import pprint
import os ,json
# Merge the scraped cast lists (task 12) back into the cached per-movie
# JSON files under IMD_data/.
# NOTE(review): All_movie_cast is consumed in os.listdir() order, which is
# arbitrary -- confirm it matches the order in which the casts were scraped.
files=os.listdir("./IMD_data")
files_count = 0
for file_name in files:
    if os.path.exists('IMD_data/'+file_name):
        # Read, attach the cast list, then rewrite the same file.
        with open('IMD_data/'+file_name,'r')as file:
            information = json.load(file)
            information['cast']=All_movie_cast[files_count]
            pprint(information)
        with open('IMD_data/'+file_name,'w')as data:
            json.dump(information,data)
        files_count+=1
    else:
        print("Error")
| {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,012 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task6.py | from pprint import pprint
from IMDB_task5 import get_movie_list_details
#Task_6
def analyse_movies_language(movies_list):
    """Count how many times each language appears across all movies.

    movies_list: list of movie dicts, each with a 'language' list.
    Returns {language: occurrence_count}, keys in first-appearance order.

    Rewritten as a single pass -- the original first collected the unique
    languages and then re-scanned every movie once per language (O(n * m)).
    The counts are identical, including duplicates within one movie's list.
    """
    language_dict = {}
    for movie in movies_list:
        for lang in movie['language']:
            language_dict[lang] = language_dict.get(lang, 0) + 1
    return language_dict
movie_10_language=analyse_movies_language(get_movie_list_details)
# pprint(movie_10_language) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,013 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task4.py | from IMDB_task1 import movie_list
from pprint import pprint
import requests
from bs4 import BeautifulSoup
def scrape_movie_details(movie_url):
    """Scrape a single IMDb title page into a details dict.

    movie_url: full IMDb title URL.
    Returns a dict with keys: name, director (list), country, language
    (list), poster_image_url, bio, runtime (total minutes), genre (list).

    FIX: hours_to_movie is now initialized to 0 -- for a runtime shorter than
    one hour (no 'h' token) the original raised UnboundLocalError at the
    `runtime=` line.
    """
    movie_downlod=requests.get(movie_url)
    suop=BeautifulSoup(movie_downlod.text,'html.parser')
    # Title: the h1 text ends with "(year)"; drop that trailing token.
    movie_title=suop.find('div',class_='title_bar_wrapper')
    movie_name=movie_title.find('h1').get_text()
    film_=movie_name.split()
    film_.pop()
    film_tital=' '.join(film_)
    # print(film_tital)
    # Directors: all links in the first credit_summary_item block.
    Director_div=suop.find('div',class_='plot_summary')
    second_div=Director_div.find('div',class_='credit_summary_item')
    Director_=second_div.find_all('a')
    Directors_list=[Director.text for Director in Director_]
    # print(Directors_list)
    movie_bio=Director_div.find('div',class_='summary_text').get_text().strip()
    # print(movie_bio)
    # movie_image = suop.find('div',class_='slate_wrapper')
    movie_postar = suop.find('div',class_='poster').a.img['src']
    # print(movie_postar)
    # Genres: links under the "Genres:" block of the story-line section.
    Genre_list=[]
    film_details=suop.find('div',attrs ={'class':'article','id':'titleStoryLine'})
    Genre=film_details.find_all('div',class_='see-more inline canwrap')
    for movie_g in Genre:
        if movie_g.find('h4').text == 'Genres:':
            gen=movie_g.find_all('a')
            for g in gen:
                Genre_list.append(g.text)
    # print(Genre_list)
    # Country and languages from the title-details block.
    # NOTE(review): `div_list.text in 'Country:'` is a substring test, not an
    # equality test -- it works only while the h4 text matches exactly.
    movie_detail=suop.find('div',{'class':'article','id':'titleDetails'})
    list_of_div=movie_detail.find_all('div',class_='txt-block')
    movie_Country=''
    movie_Language=[]
    for h4 in list_of_div:
        div=(h4.find_all('h4',class_='inline'))
        for div_list in div:
            if div_list.text in 'Country:':
                movie_Country=(h4.find('a').text)
            if div_list.text in 'Language:':
                movie_languags=(h4.find_all('a'))
                for language in movie_languags:
                    movie_Language.append(language.text)
    # print(movie_Country)
    # print(movie_Language)
    # Runtime: the <time> text looks like "2h 19min"; convert to minutes.
    movie_time=suop.find('div',class_='title_wrapper')
    time_data=movie_time.find('div',class_='subtext')
    data_time=time_data.find('time').get_text().split()
    minutes=0
    hours_to_movie=0  # FIX: stays 0 for sub-hour runtimes (was unbound)
    for minet in data_time:
        if 'h' in minet:
            hours_to_movie=int(minet.strip('h'))*60
            # print(hours_to_movie)
        elif 'min' in minet:
            minutes+=int(minet.strip('min'))
    runtime=hours_to_movie+minutes
    # print(runtime)
    movie_details_dict = {'name':'','director':'',"country":'',
                          "language":'',"poster_image_url":'',"bio":'',"runtime":'',"genre":''
                          }
    movie_details_dict['name']= film_tital
    movie_details_dict['director']= Directors_list
    movie_details_dict['country']= movie_Country
    movie_details_dict['language']= movie_Language
    movie_details_dict['poster_image_url']= movie_postar
    movie_details_dict['bio']= movie_bio
    movie_details_dict['runtime']= runtime
    movie_details_dict['genre']= Genre_list
    return movie_details_dict
# movie_details_ten=scrape_movie_details(movie_list[4]['url'])
# pprint(movie_details_ten) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,014 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task5.py | from IMDB_task1 import movie_list
from IMDB_task4 import scrape_movie_details
from pprint import pprint
# Task_5
def get_movie_list_details(movies_list):
    """Scrape full details for the first ten movies in *movies_list*.

    Each entry must carry a 'url' key; one scrape_movie_details() request is
    issued per movie and the resulting dicts are returned in order.
    """
    return [scrape_movie_details(entry['url']) for entry in movies_list[:10]]
get_movie_list_details = get_movie_list_details(movie_list)
# pprint(get_movie_list_details) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,015 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task8_9.py | from IMDB_task4 import scrape_movie_details
from IMDB_task1 import movie_list
from pprint import pprint
import os.path,json,time,random
# Task 8 and 9
timer = random.randint(1,3)
def movies_json():
    """Load cached per-movie detail JSONs from IMD_data/, scraping and
    caching (with a small random delay) any movie that has no file yet.

    Returns the list of detail dicts that were already cached.
    NOTE(review): freshly scraped movies are written to disk but NOT appended
    to the returned list, so a second run is needed to pick them up --
    confirm this is intended.
    """
    all_movies_dicts=[]
    for all_movies in movie_list:
        movie_url = all_movies['url']
        # Extract the title id that follows the 27-char URL prefix
        # (presumably "https://www.imdb.com/title/"), stopping at the
        # next '/'.
        ID=''
        for _id in movie_url[27:]:
            if '/' not in _id:
                ID+=_id
            else:
                break
        file_name = ID
        if os.path.exists('IMD_data/'+file_name+'.json'):
            # cache hit: read the stored details
            with open('IMD_data/'+file_name+'.json',"r")as open_file:
                data = json.load(open_file)
                all_movies_dicts.append(data)
                # pprint(data)
        else:
            # cache miss: throttle, scrape, and store for next time
            time.sleep(timer)
            movie_details_dict=scrape_movie_details(movie_url)
            with open('IMD_data/'+file_name+'.json','w')as data:
                json.dump(movie_details_dict,data)
    return all_movies_dicts
Movies_Dict=movies_json() | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,016 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDb_scraper.py | import requests
from bs4 import BeautifulSoup
import pprint,os.path
import time,random,json
from urllib.request import urlopen
url='https://www.imdb.com/india/top-rated-indian-movies/'
# NOTE: this GET runs at import time; `suop` is the parsed top-rated page
# shared by scrape_top_list() below.
movie=requests.get(url)
# print (movie.text)
suop=BeautifulSoup(movie.text,"html.parser")
# print (suop.title)
# Task_1
def scrape_top_list():
    """
    Parse IMDB's top-rated Indian movies table from the module-level soup.

    :return: list of dicts with keys 'name', 'url', 'position', 'rating', 'year'
    """
    listing = suop.find("div", class_='lister')
    rows = listing.find("tbody", class_='lister-list').find_all("tr")
    results = []
    for row in rows:
        title_cell = row.find("td", class_='titleColumn')
        # Cell text starts with "<position>." - take the part before the dot
        position_text = title_cell.get_text().strip().split(".")[0]
        anchor = title_cell.find("a")
        rating_text = row.find("td", class_="ratingColumn imdbRating").strong.get_text()
        results.append({
            'name': anchor.get_text(),
            'url': "https://www.imdb.com" + anchor.get("href"),
            'position': int(position_text),
            'rating': float(rating_text),
            'year': int(title_cell.find("span").get_text().strip("()")),
        })
    return results
movie_list=scrape_top_list()
# print (movie_list)
#Task_2
def group_by_year(movies):
    """
    Group movie dicts by their 'year' value.

    :param movies: list of movie dicts, each with an int 'year' key
    :return: dict mapping year -> list of movie dicts, keys in first-appearance order
    """
    movie_by_year = {}
    # Single pass replaces the old O(distinct_years * movies) double scan
    for movie in movies:
        movie_by_year.setdefault(movie['year'], []).append(movie)
    return movie_by_year
group_by_year_analysis=group_by_year(movie_list)
# print(group_by_year_analysis)
# Task_3
def group_by_decade(movies):
    """
    Group movie dicts by the decade of their 'year' value (e.g. 1994 -> 1990).

    :param movies: list of movie dicts, each with an int 'year' key
    :return: dict mapping decade start year -> list of movie dicts
    """
    movie_by_decade = {}
    # Bug fix: the old inner loop compared every decade key against "divisi",
    # the *last* movie's decade left over from the first loop, which dumped
    # every movie under a single key. Each movie now joins its own decade.
    for movie in movies:
        decade = movie['year'] // 10 * 10
        movie_by_decade.setdefault(decade, []).append(movie)
    return movie_by_decade
group_by_decade_analysis=group_by_decade(movie_list)
# print(group_by_decade_analysis)
#Task_12
def scrape_movie_cast(movie_caste_url):
    """
    Scrape a movie's full cast from its "full credits" page.

    :param movie_caste_url: str, full IMDB movie url
    :return: list of dicts with keys 'name' and 'imdb_id'
    """
    # Movie id is the path segment after "https://www.imdb.com/title/"
    ID=''
    for _id in movie_caste_url[27:]:
        if '/' not in _id:
            ID+=_id
        else:
            break
    film_name = ID
    # print(film_name)
    movie_caste=requests.get(movie_caste_url)
    suop_1=BeautifulSoup(movie_caste.text,"html.parser")
    All_details=suop_1.find_all('div',class_='see-more')
    casts_list=[]
    for i in All_details:
        # Follow only the "See full cast" link to the full credits page
        if (i.text).strip() == 'See full cast »':
            connect_url=urlopen('https://www.imdb.com/title/'+film_name+'/'+i.find('a').get('href'))
            soupa=BeautifulSoup(connect_url,"html.parser")
            caste_tabal=soupa.find('table',class_='cast_list')
            te_body=caste_tabal.find_all('tr')
            for td in te_body:
                actors_name=td.find_all('td',class_='')
                for actor in actors_name:
                    # assumes href is "/name/nm.../..." so chars 6:15 are the actor id - TODO confirm
                    imdb_id=actor.find('a').get('href')[6:15]
                    # print(imdb_id)
                    names=actor.find('a').get_text()
                    # print(names.text)
                    cast={'name':names.strip(),
                    'imdb_id':imdb_id}
                    casts_list.append(cast)
    # pprint.pprint(casts_list)
    return(casts_list)
# All_movie_cast=scrape_movie_cast(movie_list[0]['url'])
# pprint.pprint(All_movie_cast)
# Task_4
def scrape_movie_details(movie_url):
    """
    Scrape one movie's detail page, with JSON file caching under
    IMD_data/<imdb_id>.json; cached movies are returned without a network hit.

    :param movie_url: str, full IMDB movie url
    :return: dict with keys name, director, country, language,
        poster_image_url, bio, runtime, genre, cast
    """
    # Task 9: random per-call delay so we do not hammer the server
    timer = random.randint(1, 3)
    # Task 8: movie id is the path segment after "https://www.imdb.com/title/"
    ID = ''
    for _id in movie_url[27:]:
        if '/' not in _id:
            ID += _id
        else:
            break
    file_name = ID
    if os.path.exists('IMD_data/' + file_name + '.json'):
        with open('IMD_data/' + file_name + '.json', "r") as open_file:
            data = json.load(open_file)
        return data
    else:
        time.sleep(timer)
        movie_downlod = requests.get(movie_url)
        suop = BeautifulSoup(movie_downlod.text, 'html.parser')
        # Title: drop the trailing "(year)" token from the h1 text
        movie_title = suop.find('div', class_='title_bar_wrapper')
        film_ = movie_title.find('h1').get_text().split()
        film_.pop()
        film_tital = ' '.join(film_)
        # Directors come from the first credit summary block
        Director_div = suop.find('div', class_='plot_summary')
        second_div = Director_div.find('div', class_='credit_summary_item')
        Directors_list = [Director.text for Director in second_div.find_all('a')]
        movie_bio = Director_div.find('div', class_='summary_text').get_text().strip()
        movie_postar = suop.find('div', class_='poster').a.img['src']
        # Genres from the story line section
        Genre_list = []
        film_details = suop.find('div', attrs={'class': 'article', 'id': 'titleStoryLine'})
        for movie_g in film_details.find_all('div', class_='see-more inline canwrap'):
            if movie_g.find('h4').text == 'Genres:':
                for g in movie_g.find_all('a'):
                    Genre_list.append(g.text)
        # Country and languages from the details section
        movie_detail = suop.find('div', {'class': 'article', 'id': 'titleDetails'})
        movie_Country = ''
        movie_Language = []
        for h4 in movie_detail.find_all('div', class_='txt-block'):
            for div_list in h4.find_all('h4', class_='inline'):
                if div_list.text in 'Country:':
                    movie_Country = h4.find('a').text
                if div_list.text in 'Language:':
                    for language in h4.find_all('a'):
                        movie_Language.append(language.text)
        # Runtime in minutes, parsed from text like "2h 41min"
        movie_time = suop.find('div', class_='title_wrapper')
        data_time = movie_time.find('div', class_='subtext').find('time').get_text().split()
        # Bug fix: hours_to_movie was only assigned inside the 'h' branch,
        # raising NameError for movies shorter than one hour.
        hours_to_movie = 0
        minutes = 0
        for minet in data_time:
            if 'h' in minet:
                hours_to_movie = int(minet.strip('h')) * 60
            elif 'min' in minet:
                minutes += int(minet.strip('min'))
        runtime = hours_to_movie + minutes
        movie_details_dict = {
            'name': film_tital,
            'director': Directors_list,
            'country': movie_Country,
            'language': movie_Language,
            'poster_image_url': movie_postar,
            'bio': movie_bio,
            'runtime': runtime,
            'genre': Genre_list,
            'cast': scrape_movie_cast(movie_url),
        }
        with open('IMD_data/' + file_name + '.json', 'w') as data:
            json.dump(movie_details_dict, data)
        return movie_details_dict
# movie_details_ten=scrape_movie_details(movie_list[4]['url'])
# pprint.pprint(movie_details_ten)
# Task_5
def get_movie_list_details(movies_list):
    """
    Fetch detail dicts for up to the first 250 movies in the list.

    :param movies_list: list of movie dicts with a 'url' key
    :return: list of movie detail dicts
    """
    return [scrape_movie_details(entry['url']) for entry in movies_list[:250]]
get_movie_list_details=get_movie_list_details(movie_list)
pprint.pprint(get_movie_list_details)
#Task_6
def analyse_movies_language(movies_list):
    """
    Count for each language how many movies list it.

    :param movies_list: list of movie dicts, each with a 'language' list key
    :return: dict mapping language -> number of movies listing that language
    """
    from collections import Counter
    # One Counter pass replaces the old O(languages * movies) nested rescan
    return dict(Counter(
        language
        for movie in movies_list
        for language in movie['language']
    ))
# movie_10_language=analyse_movies_language(get_movie_list_details)
# pprint.pprint(movie_10_language)
#Task_7
def analyse_movies_directors(director_data):
    """
    Count how many movies each director has in the data.

    :param director_data: list of movie dicts, each with a 'director' list key
    :return: dict mapping director name -> number of movies
    """
    from collections import Counter
    # One Counter pass replaces the old O(directors * movies) nested rescan
    return dict(Counter(
        director
        for movie in director_data
        for director in movie['director']
    ))
# movie_10_director=analyse_movies_directors(get_movie_list_details)
# pprint.pprint(movie_10_director)
#Task_10
def analyse_language_and_directors(movies_list):
    """
    For every director, count the languages across that director's movies.

    :param movies_list: list of movie dicts with 'director' and 'language' list keys
    :return: dict mapping director -> {language: number of that director's movies in it}
    """
    Directors_All_movies = {}
    # Single pass replaces the old separate init-to-zero and increment passes
    for movie in movies_list:
        for director in movie['director']:
            language_counts = Directors_All_movies.setdefault(director, {})
            for language in movie['language']:
                language_counts[language] = language_counts.get(language, 0) + 1
    return Directors_All_movies
# Directors_All_movies_details=analyse_language_and_directors(get_movie_list_details)
# pprint.pprint(Directors_All_movies_details)
#Task_11
def analyse_movies_genre(movies_list):
    """
    Count how many movies belong to each genre.

    :param movies_list: list of movie dicts, each with a 'genre' list key
    :return: dict mapping genre -> number of movies with that genre
    """
    from collections import Counter
    # One Counter pass replaces the old collect-then-count loops
    return dict(Counter(
        genre
        for movie in movies_list
        for genre in movie['genre']
    ))
# All_movie_genre=analyse_movies_genre(get_movie_list_details)
# pprint.pprint(All_movie_genre)
#Task_14
def analyse_co_actors(movies_list):
    """
    Task 14: map each movie's lead actor (cast[0]) to frequent co-actors.

    NOTE(review): this function looks unfinished - the "return" below sits
    inside the first loop iteration, so only one lead actor's entry is ever
    returned, its 'frequent_co_actors' list is never filled, and the
    "complet" dict is built but never stored.

    :param movies_list: list of movie dicts with a 'cast' list of
        {'name', 'imdb_id'} dicts
    :return: dict {'name': lead actor name, 'frequent_co_actors': []} for the
        first lead actor only (see note above)
    """
    analyse_actors={}
    # Index every movie's lead actor (first cast entry) by imdb id
    for All_cast in movies_list:
        movie_IMDb=(All_cast['cast'][0]['imdb_id'])
        lead_actor=(All_cast['cast'][0]['name'])
        # print(movie_IMDb,"= ",lead_actor)
        analyse_actors[movie_IMDb]={'name':lead_actor,'frequent_co_actors':[]}
    # pprint.pprint(analyse_actors)
    for actors_key in analyse_actors:
        five_actors=[]
        # Collect the next five cast members from every movie this actor leads
        for All_film_cast in movies_list:
            only_cast=All_film_cast['cast']
            for Actor_imdb_id in All_film_cast['cast']:
                if actors_key == Actor_imdb_id['imdb_id']:
                    index=1
                    while index <= 5:
                        five_actors.append(only_cast[index])
                        index+=1
        for five_actors_list in five_actors:
            actor_id=(five_actors_list['imdb_id'])
            actor_name=(five_actors_list['name'])
            complet={"imdb_id":actor_id,"name":actor_name}
        return(analyse_actors[actors_key])
    # for All_analyse_actors in analyse_actors:
    #     print(All_analyse_actors)
    # if actors_key ==
# movie_actors=analyse_co_actors(get_movie_list_details)
# pprint.pprint(movie_actors)
#Task_15
def film_analyse_actors(movies_list):
    """
    Task 15: for each movie's top-five cast members, count how many movies
    (cast appearances) each one has across the whole dataset.

    NOTE(review): the "else" below belongs to the "for value" loop (for/else)
    and always runs, appending the *same* analyse_actors_dict reference once
    per top-five actor - the returned list contains many aliases of one dict.
    Verify this is intended before relying on the output shape.

    :param movies_list: list of movie dicts with a 'cast' list of
        {'name', 'imdb_id'} dicts
    :return: list of references to a dict mapping imdb_id ->
        {'name': ..., 'num_movies': ...}
    """
    All_movies_details=[]
    new_list=[]
    analyse_actors_dict={}
    # Flat list of every cast member name across all movies (with duplicates)
    for cast_movie in movies_list:
        for All_movies_cast in cast_movie['cast']:
            new_list.append(All_movies_cast['name'])
    # return(new_list)
    for cast_dict in movies_list:
        cast_list=(cast_dict['cast'])
        index=0
        while index < 5:
            top_five_name=(cast_list[index]['name'])
            total_count=0
            # print(top_five_name)
            top_five_ID=(cast_list[index]['imdb_id'])
            # print(top_five_ID)
            index+=1
            # Count this actor's appearances by name across all casts
            for value in new_list:
                if top_five_name == value:
                    total_count+=1
                    analyse_actors_dict[top_five_ID]={'name':top_five_name,'num_movies':total_count}
            else:
                All_movies_details.append(analyse_actors_dict)
    return(All_movies_details)
# movie_analyse_actors=film_analyse_actors(get_movie_list_details)
# pprint.pprint(movie_analyse_actors) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,017 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task7.py | from IMDB_task5 import get_movie_list_details
from pprint import pprint
#Task_7
def analyse_movies_directors(director_data):
    """
    Count how many movies each director has in the data.

    :param director_data: list of movie dicts, each with a 'director' list key
    :return: dict mapping director name -> number of movies
    """
    from collections import Counter
    # One Counter pass replaces the old O(directors * movies) nested rescan
    return dict(Counter(
        director
        for movie in director_data
        for director in movie['director']
    ))
movie_10_director=analyse_movies_directors(get_movie_list_details)
# pprint(movie_10_director)
| {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,018 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task1.py | import requests
from bs4 import BeautifulSoup
from pprint import pprint
url='https://www.imdb.com/india/top-rated-indian-movies/'
movie=requests.get(url)
suop=BeautifulSoup(movie.text,"html.parser")
# Task_1
def scrape_top_list():
    """
    Parse IMDB's top-rated Indian movies table from the module-level soup.

    :return: list of dicts with keys 'name', 'url', 'position', 'rating', 'year'
    """
    listing = suop.find("div", class_='lister')
    rows = listing.find("tbody", class_='lister-list').find_all("tr")
    results = []
    for row in rows:
        title_cell = row.find("td", class_='titleColumn')
        # Cell text starts with "<position>." - take the part before the dot
        position_text = title_cell.get_text().strip().split(".")[0]
        anchor = title_cell.find("a")
        rating_text = row.find("td", class_="ratingColumn imdbRating").strong.get_text()
        results.append({
            'name': anchor.get_text(),
            'url': "https://www.imdb.com" + anchor.get("href"),
            'position': int(position_text),
            'rating': float(rating_text),
            'year': int(title_cell.find("span").get_text().strip("()")),
        })
    return results
movie_list=scrape_top_list()
# pprint(movie_list) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,019 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task11.py | from IMDB_task8_9 import Movies_Dict
from pprint import pprint
# Task_11
def analyse_movies_genre(movie_data):
    """
    Count how many movies belong to each genre.

    :param movie_data: list of movie dicts, each with a 'genre' list key
    :return: dict mapping genre -> number of movies with that genre
    """
    from collections import Counter
    # One Counter pass replaces the old collect-then-count loops
    return dict(Counter(
        genre
        for movie in movie_data
        for genre in movie['genre']
    ))
All_movie_genre=analyse_movies_genre(Movies_Dict)
# pprint(All_movie_genre) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,020 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task2.py | from IMDB_task1 import movie_list
from pprint import pprint
#Task_2
def group_by_year(movies):
    """
    Group movie dicts by their 'year' value.

    :param movies: list of movie dicts, each with an int 'year' key
    :return: dict mapping year -> list of movie dicts, keys in first-appearance order
    """
    movie_by_year = {}
    # Single pass replaces the old O(distinct_years * movies) double scan
    for movie in movies:
        movie_by_year.setdefault(movie['year'], []).append(movie)
    return movie_by_year
group_by_year_analysis=group_by_year(movie_list)
# pprint(group_by_year_analysis) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,021 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task10.py | from IMDB_task8_9 import Movies_Dict
import json
from pprint import pprint
# Task 10
def analyse_language_and_directors(Dict):
    """
    For every director, count the languages across that director's movies.

    :param Dict: list of movie dicts with 'director' and 'language' list keys
    :return: dict mapping director -> {language: number of that director's movies in it}
    """
    director_dict = {}
    # Single pass replaces the old collect-then-recount nested loops
    for movie in Dict:
        for director in movie["director"]:
            language_counts = director_dict.setdefault(director, {})
            for language in movie["language"]:
                language_counts[language] = language_counts.get(language, 0) + 1
    return director_dict
# pprint(analyse_language_and_directors(Movies_Dict)) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,022 | prakash-simhandri/IMDB_scraper | refs/heads/master | /IMDB_task12.py | from IMDB_task1 import movie_list
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
from pprint import pprint
# Task 12
def scrape_movie_cast(movie_caste_url):
    """
    Scrape the full cast of every movie in the list from its "full credits" page.

    :param movie_caste_url: list of movie dicts with a 'url' key
    :return: list (one entry per movie) of lists of {'name', 'imdb_id'} dicts
    """
    All_movies_cast_list = []
    count=0
    for caste_url in movie_caste_url:
        M_caste_url=caste_url['url']
        # Movie id is the path segment after "https://www.imdb.com/title/"
        ID=''
        for _id in M_caste_url[27:]:
            if '/' not in _id:
                ID+=_id
            else:
                break
        film_name = ID
        movie_caste=requests.get(M_caste_url)
        suop_1=BeautifulSoup(movie_caste.text,"html.parser")
        All_details=suop_1.find_all('div',class_='see-more')
        casts_list=[]
        for i in All_details:
            # Follow only the "See full cast" link to the full credits page
            if (i.text).strip() == 'See full cast »':
                connect_url=urlopen('https://www.imdb.com/title/'+film_name+'/'+i.find('a').get('href'))
                soupa=BeautifulSoup(connect_url,"html.parser")
                caste_tabal=soupa.find('table',class_='cast_list')
                te_body=caste_tabal.find_all('tr')
                for td in te_body:
                    actors_name=td.find_all('td',class_='')
                    for actor in actors_name:
                        # assumes href is "/name/nm.../..." so chars 6:15 are the actor id - TODO confirm
                        imdb_id=actor.find('a').get('href')[6:15]
                        # print(imdb_id)
                        names=actor.find('a').get_text()
                        # print(names.text)
                        cast={'name':names.strip(),
                        'imdb_id':imdb_id}
                        casts_list.append(cast)
        All_movies_cast_list.append(casts_list)
        # print(casts_list)
        # Progress indicator: number of movies processed so far
        count+=1
        print(count)
    return(All_movies_cast_list)
All_movie_cast=scrape_movie_cast(movie_list)
# pprint(All_movie_cast) | {"/IMDB_task3.py": ["/IMDB_task1.py"], "/IMDB_task13.py": ["/IMDB_task12.py"], "/IMDB_task6.py": ["/IMDB_task5.py"], "/IMDB_task4.py": ["/IMDB_task1.py"], "/IMDB_task5.py": ["/IMDB_task1.py", "/IMDB_task4.py"], "/IMDB_task8_9.py": ["/IMDB_task4.py", "/IMDB_task1.py"], "/IMDB_task7.py": ["/IMDB_task5.py"], "/IMDB_task11.py": ["/IMDB_task8_9.py"], "/IMDB_task2.py": ["/IMDB_task1.py"], "/IMDB_task10.py": ["/IMDB_task8_9.py"], "/IMDB_task12.py": ["/IMDB_task1.py"]} |
45,027 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /tests/test_ml.py | """
Tests for net.ml module
"""
import numpy as np
import tensorflow as tf
import net.ml
def test_get_distance_matrix_op():
    """
    Test computing a matrix of distances between all rows permutations of a matrix.
    """
    # Three 4D row vectors
    inputs_matrix = np.array([
        [1, 3, 5, 7],
        [2, 2, 4, 4],
        [1.5, -2, -4, 0]
    ], dtype=np.float32)
    # Hand-computed squared euclidean distances between row pairs, square-rooted
    expected = np.sqrt(np.array([
        [0, 12, 155.25],
        [12, 0, 96.25],
        [155.25, 96.25, 0]
    ]))
    actual = net.ml.get_distances_matrix_op(
        matrix_op=tf.constant(inputs_matrix)
    ).numpy()
    # Our distance function sets minimum distances to epsilon to prevent infinite derivatives
    assert np.all(np.isclose(expected, actual, atol=1e-5))
def test_get_vector_elements_equalities_matrix_op():
    """
    Test net.get_vector_elements_equalities_matrix_op
    """
    vector = np.array([1, 4, 3, 1, 4, 3])
    # expected[i][j] is 1 where vector[i] == vector[j], else 0
    expected = np.array([
        [1, 0, 0, 1, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 1, 0, 0, 1],
        [1, 0, 0, 1, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 1, 0, 0, 1]
    ])
    actual = net.ml.get_vector_elements_equalities_matrix_op(vector_op=tf.constant(vector)).numpy()
    assert np.all(expected == actual)
def test_average_ranking_position():
    """
    Test average_ranking_position function
    """
    labels = tf.reshape(tf.constant([1, 2, 3, 1, 2], dtype=tf.float32), (-1, 1))
    embeddings = tf.constant([
        [1, 1],
        [2, 2],
        [3, 3],
        [2.5, 2.5],
        [10, 10]
    ], dtype=tf.float32)
    # Hand-computed ranking position of same-label samples for each embedding
    expected = np.mean([1, 2, 0, 1.5, 1.5])
    actual = net.ml.average_ranking_position(
        labels=labels,
        embeddings=embeddings
    )
    assert np.isclose(actual, expected)
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,028 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/analysis.py | """
Module with analysis logic
"""
import numpy as np
import scipy.spatial.distance
import tensorflow as tf
import tqdm
def get_indices_of_k_most_similar_vectors(vectors, k):
    """
    For every row vector of a 2D matrix, return indices of the k most similar
    rows under euclidean distance.

    Each vector's distance to itself is inflated above every real distance, so
    its own index only shows up when k >= number of vectors.

    :param vectors: 2D numpy array [n x m], one vector per row
    :param k: int, number of neighbour indices to return per vector
    :return: 2D numpy array [n x k] of row indices
    """
    pairwise_distances = scipy.spatial.distance.squareform(
        scipy.spatial.distance.pdist(vectors, metric="euclidean"))
    # Push self-distances above the current maximum so they sort last
    self_mask = np.eye(pairwise_distances.shape[0])
    pairwise_distances = pairwise_distances + (pairwise_distances.max() + 1) * self_mask
    # Smallest distances first; keep only the k nearest indices per row
    return np.argsort(pairwise_distances, axis=1)[:, :k]
def get_recall_at_k_score(vectors, labels, k):
    """
    Compute recall@k: fraction of vectors for which at least one of the k
    nearest vectors (euclidean distance) shares the vector's label.

    :param vectors: [m x n] numpy array, one vector per row
    :param labels: 1D numpy array of ints, one label per vector
    :param k: int, neighbourhood size
    :return: float, mean recall across all vectors
    """
    neighbour_indices = get_indices_of_k_most_similar_vectors(vectors, k)
    neighbour_labels = labels[neighbour_indices]
    # A hit means any of the k neighbours carries the query's own label
    hits = np.any(neighbour_labels == labels.reshape(-1, 1), axis=1)
    return np.mean(hits)
def get_samples_embeddings(data_loader, prediction_model, verbose):
    """
    Given data loader and prediction model, iterate over whole dataset, predict embeddings for all samples,
    and return (embeddings, labels) tuple
    :param data_loader: data loader that yields (images, labels) batches
    :param prediction_model: keras.Model instance
    :param verbose: bool, if True then progress bar is shown
    :return: tuple (embeddings, labels), embeddings is a 2D numpy array with one embedding per row,
    labels is a 1D array of ints
    """
    all_embeddings = []
    all_labels = []
    # Wrap the loader in a tf.data pipeline so batches can be prefetched
    # while the model is busy predicting
    tf_dataset = tf.data.Dataset.from_generator(
        generator=lambda: iter(data_loader),
        output_types=(tf.float32, tf.float32),
        output_shapes=(tf.TensorShape([None, None, None, 3]), tf.TensorShape([None]))
    ).prefetch(32)
    data_iterator = iter(tf_dataset)
    # Iterate over dataset to obtain embeddings and labels
    for _ in tqdm.tqdm(range(len(data_loader)), disable=not verbose):
        images, labels = next(data_iterator)
        # presumably the model's first output head holds the embeddings - matches
        # the predict(...)[0] usage in ImageRankingLogger; verify against the model
        embeddings = prediction_model.predict(images)[0]
        all_embeddings.extend(embeddings)
        all_labels.extend(labels)
    return np.array(all_embeddings), np.array(all_labels)
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,029 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/constants.py | """
Module with constants
"""
import enum
class DatasetMode(enum.Enum):
    """
    Enum distinguishing the training dataset from the validation dataset
    """

    TRAINING = 0
    VALIDATION = 1
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,030 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/processing.py | """
Module with processing code
"""
import cv2
import numpy as np
def get_image_padded_to_square_size(image):
    """
    Zero-pad an image along its shorter dimension so it becomes square.

    Padding is split evenly between the two sides; for odd totals the extra
    pixel goes to the bottom/right.

    :param image: 3D numpy array
    :return: 3D numpy array with equal height and width
    """
    height, width = image.shape[:2]
    side = max(height, width)
    vertical_total = side - height
    horizontal_total = side - width
    paddings = [
        (vertical_total // 2, vertical_total - vertical_total // 2),
        (horizontal_total // 2, horizontal_total - horizontal_total // 2),
        (0, 0),
    ]
    return np.pad(
        array=image,
        pad_width=paddings,
        mode="constant",
        constant_values=0
    )
class ImageProcessor:
    """
    Thin namespace for image resizing, normalization and denormalization routines
    """

    @staticmethod
    def get_resized_image(image, target_size):
        """
        Bring image to a common square format: pad to square size, then
        resize to target_size x target_size.

        :param image: 3D numpy array
        :param target_size: int, edge length of the output image
        :return: 3D numpy array
        """
        squared = get_image_padded_to_square_size(image)
        return cv2.resize(squared, (target_size, target_size))

    @staticmethod
    def get_normalized_image(image):
        """
        Map image from [0, 255] range to [-0.5, 0.5] float range

        :param image: numpy array
        :return: numpy array
        """
        return np.float32(image / 255.0) - 0.5

    @staticmethod
    def get_denormalized_image(image):
        """
        Map normalized image back to original [0, 255] uint8 range

        :param image: numpy array
        :return: numpy array
        """
        return np.uint8(255 * (image + 0.5))
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,031 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/logging.py | """
Module with logging utilities
"""
import os
import random
import cv2
import numpy as np
import vlogging
import net.analysis
import net.processing
class ImageRankingLogger:
"""
Class for visualizing image ranking results
"""
    def __init__(self, logger, prediction_model):
        """
        Constructor
        :param logger: logging.Logger instance the visual records are written to
        :param prediction_model: keras model instance used to compute image embeddings
        """
        self.logger = logger
        self.prediction_model = prediction_model
    def log_ranking_on_batch(self, query_image, query_label, images, labels):
        """
        Log ranking result for query image against all provided images
        :param query_image: 3D numpy array, image to query against
        :param query_label: int, label of query image
        :param images: numpy array of images to rank w.r.t. query
            (NOTE(review): must support fancy indexing, so a numpy array, not a plain list)
        :param labels: numpy array of ints, labels for all images
        """
        # First output head of the model is taken as the embedding
        query_embedding = self.prediction_model.predict(np.array([query_image]))[0][0]
        embeddings = self.prediction_model.predict(images)[0]
        # Compute distances between query embedding and other embeddings
        distances = np.linalg.norm(embeddings - query_embedding, axis=1)
        indices_sorted_by_distances = np.argsort(distances)
        # Images ordered from most to least similar, converted back to uint8
        ranked_images = [
            net.processing.ImageProcessor.get_denormalized_image(image)
            for image in images[indices_sorted_by_distances]]
        labels_sorted_by_distances = labels[indices_sorted_by_distances]
        # Mark every image that has the same label as the query with a filled
        # green circle (not a frame)
        for image, label in zip(ranked_images, labels_sorted_by_distances):
            if label == query_label:
                cv2.circle(
                    img=image,
                    center=(127, 200),
                    radius=10,
                    color=(5, 220, 5),
                    thickness=-1
                )
        # Mark the query image itself with a blue circle
        query_image = net.processing.ImageProcessor.get_denormalized_image(query_image)
        cv2.circle(
            img=query_image,
            center=(127, 200),
            radius=10,
            color=(255, 0, 0),
            thickness=-1
        )
        self.logger.info(
            vlogging.VisualRecord(
                title="query image",
                imgs=query_image,
                footnotes=f"label: {query_label}"
            )
        )
        self.logger.info(
            vlogging.VisualRecord(
                title="ranked images",
                imgs=ranked_images
            )
        )
        # Compute average position of images with same label as query label - the lower the number, the better
        # the ranking model
        self.logger.info("<h3>Average position of images with same label as query image: {:.3f}<h3></br><hr>".format(
            np.mean(np.where(labels_sorted_by_distances == query_label)[0])
        ))
    def log_ranking_on_dataset(self, data_loader, queries_count, logged_top_matches_count, image_size):
        """
        Log ranking results on a few random queries. For each query ranking is done across whole dataset.
        :param data_loader: net.data.Cars196AnalysisDataLoader instance
        :param queries_count: int, number of queries to run ranking on
        :param logged_top_matches_count: int, number of top matches to log for each query
        :param image_size: int, size to which images should be resized before logging
        """

        # Get embeddings and labels for whole dataset
        embeddings_matrix, labels_array = net.analysis.get_samples_embeddings(
            data_loader=data_loader,
            prediction_model=self.prediction_model,
            verbose=True)

        # Get images paths
        images_paths = \
            [os.path.join(data_loader.data_dir, annotation.filename) for annotation in data_loader.annotations]

        # Get indices of top k matched vectors for each vector
        top_k_indices_matrix = net.analysis.get_indices_of_k_most_similar_vectors(
            vectors=embeddings_matrix,
            k=logged_top_matches_count)

        ranking_data = []

        # For each query index - log query image and top matches
        for query_index in random.sample(population=range(len(labels_array)), k=queries_count):

            query_image = net.processing.ImageProcessor.get_resized_image(
                image=cv2.imread(images_paths[query_index]),
                target_size=image_size)

            # Mark the query image with a filled blue circle (OpenCV uses BGR channel order)
            query_image = cv2.circle(
                img=query_image,
                center=(127, 200),
                radius=10,
                color=(255, 0, 0),
                thickness=-1
            )

            query_label = labels_array[query_index]

            matched_images = [
                net.processing.ImageProcessor.get_resized_image(
                    image=cv2.imread(images_paths[match_index]),
                    target_size=image_size)
                for match_index in top_k_indices_matrix[query_index]
            ]

            # Mark correct matches (same label as query) with a filled green circle
            matched_images = [
                cv2.circle(
                    img=image,
                    center=(127, 200),
                    radius=10,
                    color=(5, 220, 5),
                    thickness=-1
                ) if labels_array[match_index] == query_label else image
                for image, match_index in zip(matched_images, top_k_indices_matrix[query_index])
            ]

            ranking_data.append(
                {
                    "query_image": query_image,
                    "matched_images": matched_images
                }
            )

        self.logger.info(
            vlogging.VisualRecord(
                title="ranking collage",
                imgs=[self._get_ranking_images_collage(ranking_data, image_size)]
            )
        )
def _get_ranking_images_collage(self, ranking_data, image_size):
rows_count = len(ranking_data)
columns_count = len(ranking_data[0]["matched_images"]) + 1
# Draw all images onto one large image
canvas = np.zeros(shape=(rows_count * image_size, columns_count * image_size, 3))
for row_index in range(rows_count):
canvas[row_index * image_size: (row_index + 1) * image_size, 0: image_size] = \
ranking_data[row_index]["query_image"]
for matching_image_index, matching_image in enumerate(ranking_data[row_index]["matched_images"]):
canvas[
row_index * image_size: (row_index + 1) * image_size,
(matching_image_index + 1) * image_size: (matching_image_index + 2) * image_size
] = matching_image
return canvas
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,032 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/data.py | """
Module with data related code
"""
import collections
import functools
import os
import random
import cv2
import imgaug
import numpy as np
import scipy.io
import net.constants
import net.processing
class Cars196Annotation:
    """
    Class for representing one sample of Cars196 dataset
    """

    def __init__(self, annotation_matrix, categories_names):
        """
        Constructor
        :param annotation_matrix: numpy array, annotation for a single sample loaded from the
        official matlab annotations file
        :param categories_names: list of single-element arrays, each holding a string with a category label
        """

        self.filename = str(annotation_matrix[0][0])

        # Matlab annotations use 1-based category ids - convert to 0-based
        self.category_id = int(annotation_matrix[-2][0][0] - 1)
        self.category = categories_names[self.category_id][0]

        # Categories with ids 0-98 form the training split, all remaining ones the validation split
        if self.category_id <= 98:
            self.dataset_mode = net.constants.DatasetMode.TRAINING
        else:
            self.dataset_mode = net.constants.DatasetMode.VALIDATION
def get_cars_196_annotations_map(annotations_path, dataset_mode):
    """
    Read cars 196 annotations into a category_id: list of Cars196Annotation map and return it
    :param annotations_path: path to annotations data
    :type annotations_path: str
    :param dataset_mode: net.constants.DatasetMode instance,
    indicates annotations for which dataset (train/validation) should be loaded
    :return: map {category_id: numpy array of Cars196Annotation}
    :rtype: dict
    """

    annotations_data_map = scipy.io.loadmat(annotations_path)
    categories_names = annotations_data_map["class_names"].flatten()

    grouped_annotations = collections.defaultdict(list)

    # Build annotations, keeping only those that belong to the requested train/validation split,
    # grouped by category id
    for annotation_matrix in annotations_data_map["annotations"].flatten():

        annotation = Cars196Annotation(
            annotation_matrix=annotation_matrix,
            categories_names=categories_names)

        if annotation.dataset_mode == dataset_mode:
            grouped_annotations[annotation.category_id].append(annotation)

    # Convert lists of samples to np.arrays of samples
    return {category_id: np.array(samples) for category_id, samples in grouped_annotations.items()}
class Cars196AnalysisDataLoader:
    """
    Data loader for cars 196 dataset.
    This data loader loads and yields batches of (images, labels) in order they are read from disk.
    It returns each sample exactly once, but makes no attempt to shuffle or balance categories returned in each batch.
    """

    def __init__(self, config, dataset_mode):
        """
        Constructor
        :param config: dictionary with data loader configuration
        :param dataset_mode: net.constants.DatasetMode instance,
        indicates which dataset (train/validation) loader should load
        """

        self.data_dir = config["data_dir"]
        self.image_size = config["image_size"]

        categories_ids_samples_map = get_cars_196_annotations_map(
            annotations_path=config["annotations_path"],
            dataset_mode=dataset_mode)

        # Extract a flat list of samples from categories_ids_samples_map
        self.annotations = []

        for annotations_for_single_category in categories_ids_samples_map.values():
            self.annotations.extend(annotations_for_single_category)

        self.batch_size = 32

    def __iter__(self):
        """
        Yield (images, labels) batches in the order samples were read from disk
        """

        # Iterate with a for loop instead of repeatedly calling next() inside "while True" -
        # under PEP 479 (Python 3.7+) a StopIteration escaping a generator body is turned into
        # a RuntimeError, so the old next()-based loop crashed once the underlying iterator
        # was exhausted
        for _, images_batch, labels_batch in self.get_verbose_iterator():
            yield images_batch, labels_batch

    def get_verbose_iterator(self):
        """
        Get iterator that yields (images_paths, images, labels) batches
        """

        index = 0

        while index < len(self.annotations):

            yield self._get_verbose_batch(index)
            index += self.batch_size

    def _get_verbose_batch(self, start_index):
        """
        Build a single (images_paths, images, labels) batch starting at given annotation index
        :param start_index: int, index of first annotation to include in the batch
        :return: tuple (list of str, numpy array of images, numpy array of category ids)
        """

        annotations_batch = self.annotations[start_index: start_index + self.batch_size]

        images_paths_batch = [os.path.join(self.data_dir, annotation.filename) for annotation in annotations_batch]

        images_batch = [
            net.processing.ImageProcessor.get_resized_image(
                image=cv2.imread(image_path),
                target_size=self.image_size)
            for image_path in images_paths_batch
        ]

        images_batch = [net.processing.ImageProcessor.get_normalized_image(image) for image in images_batch]

        categories_batch = [annotation.category_id for annotation in annotations_batch]

        return images_paths_batch, np.array(images_batch), np.array(categories_batch)

    def __len__(self):
        """
        Number of full batches in the dataset.
        NOTE(review): floor division means a trailing partial batch is not counted here,
        although get_verbose_iterator() does yield it - confirm this asymmetry is intended.
        """

        return len(self.annotations) // self.batch_size
class Cars196TrainingLoopDataLoader:
    """
    Data loader class for cars 196 dataset
    """

    def __init__(self, config, dataset_mode):
        """
        Constructor
        :param config: dictionary with data loader configuration
        :param dataset_mode: net.constants.DatasetMode instance,
        indicates which dataset (train/validation) loader should load
        """

        self.data_dir = config["data_dir"]

        self.categories_ids_samples_map = get_cars_196_annotations_map(
            annotations_path=config["annotations_path"],
            dataset_mode=dataset_mode)

        self.dataset_mode = dataset_mode
        self.image_size = config["image_size"]

        # Augmentations are only applied in training mode; validation data is left untouched
        self.augmentations_pipeline = imgaug.augmenters.Sequential(
            children=[
                # Apply up to two of the listed augmentations, in random order
                imgaug.augmenters.SomeOf(
                    n=(0, 2),
                    children=[
                        imgaug.augmenters.Grayscale(alpha=(0.2, 1)),
                        imgaug.augmenters.Affine(scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}),
                        imgaug.augmenters.Affine(translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}),
                        imgaug.augmenters.Affine(rotate=(-15, 15)),
                        imgaug.augmenters.Affine(shear=(-10, 10))
                    ],
                    random_order=True),
                # Left-right flip
                imgaug.augmenters.Fliplr(0.5)]
        ) if dataset_mode is net.constants.DatasetMode.TRAINING else None

        # A builder is stored (instead of a single drawer instance) because __iter__ creates
        # a fresh drawer for every pass over the data
        self.samples_batches_drawer_builder = functools.partial(
            SamplesBatchesDrawer,
            categories_samples_map=self.categories_ids_samples_map,
            categories_per_batch=config["train"]["categories_per_batch"],
            samples_per_category=config["train"]["samples_per_category"],
            dataset_mode=self.dataset_mode,
            samples_per_category_per_epoch_percentile=100
        )

    def __iter__(self):
        """
        Yield (images, labels) batches indefinitely - a new epoch starts automatically
        once the underlying samples batches drawer is exhausted
        """

        while True:

            samples_batches_drawer = self.samples_batches_drawer_builder()

            for samples_batch, categories_batch in samples_batches_drawer:

                images_batch = [
                    net.processing.ImageProcessor.get_resized_image(
                        image=cv2.imread(os.path.join(self.data_dir, sample.filename)),
                        target_size=self.image_size)
                    for sample in samples_batch
                ]

                if self.dataset_mode is net.constants.DatasetMode.TRAINING:
                    images_batch = self.augmentations_pipeline(images=images_batch)

                images_batch = [net.processing.ImageProcessor.get_normalized_image(image) for image in images_batch]

                yield np.array(images_batch), np.array(categories_batch)

    def __len__(self):
        """
        Number of batches per epoch, as reported by the samples batches drawer
        """

        samples_batches_drawer = self.samples_batches_drawer_builder()
        return len(samples_batches_drawer)
class SamplesBatchesDrawer:
    """
    Class for drawing samples batches from a dictionary with {category: samples} structure.
    It yields (samples_batch, categories_batch) tuples.
    Input data might contain different number of samples for different categories,
    but generator will try to yield from all categories as evenly as possible given input parameters.
    In case of imbalanced number of samples between categories some samples may be yielded more than once
    per epoch.
    """

    def __init__(
            self, categories_samples_map, categories_per_batch, samples_per_category,
            dataset_mode, samples_per_category_per_epoch_percentile):
        """
        Constructor
        :param categories_samples_map: dict, data to draw samples from, map with format {category: samples}
        :param categories_per_batch: int, number of categories to be included in a batch
        :param samples_per_category: int, number of samples for each category to be included in a batch
        :param dataset_mode: net.constants.DatasetMode instance. If training mode is used,
        both categories and samples are shuffled randomly. Otherwise only categories are shuffled and
        constant random seed is used, so that results are repeatable across runs.
        :param samples_per_category_per_epoch_percentile: int, used to decide number of samples to be yielded
        for each category per epoch. Samples count from all categories in categories_samples_map will be computed,
        and percentile from this data will be used to compute samples per epoch for each category in yielded data.
        """

        self.categories_samples_map = categories_samples_map
        self.categories_per_batch = categories_per_batch
        self.samples_per_category = samples_per_category
        self.dataset_mode = dataset_mode

        # Random number generator. Use random seed if we are in training mode, otherwise use constant seed
        self.random = random.Random() if dataset_mode is net.constants.DatasetMode.TRAINING else random.Random(0)

        self.samples_per_category_per_epoch = int(np.percentile(
            [len(samples) for samples in self.categories_samples_map.values()],
            samples_per_category_per_epoch_percentile))

    def __len__(self):
        """
        Number of batches yielded per epoch
        """

        # Compute into how many smaller, independent subsets can we divide data
        categories_count = len(self.categories_samples_map.keys())
        independent_subdatasets_count = categories_count // self.categories_per_batch

        draws_per_category = self.samples_per_category_per_epoch // self.samples_per_category

        return independent_subdatasets_count * draws_per_category

    def _get_categories_indices_map(self):
        """
        Based on {category: samples} dataset instance is initialized with,
        compute a {category: samples indices} dictionary.
        If instance was initialized in training mode, samples indices are shuffled
        """

        categories_samples_indices_map = {
            category: np.arange(len(samples))
            for category, samples in self.categories_samples_map.items()}

        if self.dataset_mode is net.constants.DatasetMode.TRAINING:

            # Shuffle samples indices - we will then draw from shuffled indices list sequentially to simulate
            # shuffling samples
            for samples_indices in categories_samples_indices_map.values():
                self.random.shuffle(samples_indices)

        return categories_samples_indices_map

    def __iter__(self):

        categories_samples_indices_map = self._get_categories_indices_map()

        # Set of categories to be used for drawing samples
        primary_categories_pool = set(categories_samples_indices_map.keys())

        # If there aren't enough categories in primary pool to satisfy batch requirements,
        # reused categories pool will be used
        reused_categories_pool = set()

        for _ in range(len(self)):

            # Pick categories for the batch.
            # Pools are sorted before sampling because random.sample() no longer accepts sets
            # (deprecated in Python 3.9, removed in 3.11); sorting also makes the draws depend
            # only on the RNG seed, not on set iteration order.
            # If there aren't enough categories available in primary categories pool, draw only as many as we can
            categories_to_draw = self.random.sample(
                population=sorted(primary_categories_pool),
                k=min(self.categories_per_batch, len(primary_categories_pool))
            )

            # If primary categories pool couldn't supply enough categories,
            # draw remainder from reused categories pool
            if len(categories_to_draw) < self.categories_per_batch:

                categories_to_draw.extend(
                    self.random.sample(
                        population=sorted(reused_categories_pool),
                        k=self.categories_per_batch - len(categories_to_draw)
                    )
                )

            samples_batch = []
            categories_labels_batch = []

            # Pick samples for categories in the batch
            for category in categories_to_draw:

                samples_indices = categories_samples_indices_map[category]

                # Pick a batch of samples indices, remove it from the samples indices list
                samples_indices_batch = samples_indices[:self.samples_per_category]
                categories_samples_indices_map[category] = samples_indices[self.samples_per_category:]

                # Using samples indices pick samples, store them in batch
                samples_batch.extend(self.categories_samples_map[category][samples_indices_batch])
                categories_labels_batch.extend([category] * self.samples_per_category)

                # If we already drew max number of batches from this category
                if len(categories_samples_indices_map[category]) < self.samples_per_category:

                    # If this category is currently in primary categories pool, remove it from it and add it
                    # to reused categories pool instead
                    if category in primary_categories_pool:

                        primary_categories_pool.remove(category)
                        reused_categories_pool.add(category)

                    # Replenish samples indices for this category
                    samples_indices = np.arange(len(self.categories_samples_map[category]))

                    if self.dataset_mode is net.constants.DatasetMode.TRAINING:
                        self.random.shuffle(samples_indices)

                    categories_samples_indices_map[category] = samples_indices

            yield samples_batch, categories_labels_batch
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,033 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /tasks.py | """
Module with invoke tasks
"""
import invoke
import net.invoke.analysis
import net.invoke.docker
import net.invoke.tests
import net.invoke.ml
import net.invoke.visualize
# Default invoke collection
ns = invoke.Collection()

# Register task collections defined in other modules
for tasks_module in (
        net.invoke.analysis,
        net.invoke.docker,
        net.invoke.tests,
        net.invoke.ml,
        net.invoke.visualize):

    ns.add_collection(tasks_module)
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,034 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/invoke/visualize.py | """
Module with visualization related tasks
"""
import invoke
@invoke.task
def visualize_data(_context, config_path):
    """
    Visualize data
    :param _context: context
    :type _context: invoke.Context
    :param config_path: path to configuration file
    :type config_path: str
    """

    import tqdm
    import vlogging

    # Import net.constants explicitly - previously it was only reachable as a side effect
    # of "import net.data" setting the submodule attribute on the net package
    import net.constants
    import net.data
    import net.processing
    import net.utilities

    config = net.utilities.read_yaml(config_path)

    logger = net.utilities.get_logger(
        path=config["log_path"]
    )

    data_loader = net.data.Cars196TrainingLoopDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.TRAINING
    )

    iterator = iter(data_loader)

    # Log a few batches of (denormalized) images together with their labels
    for _ in tqdm.tqdm(range(4)):

        images_batch, labels_batch = next(iterator)

        logger.info(
            vlogging.VisualRecord(
                title="data",
                imgs=[net.processing.ImageProcessor.get_denormalized_image(image) for image in images_batch],
                footnotes=str(labels_batch)
            )
        )
@invoke.task
def visualize_predictions_on_batches(_context, config_path):
    """
    Visualize image similarity ranking predictions on a few batches of data
    :param _context: invoke.Context instance
    :param config_path: str, path to configuration file
    """

    import random

    import tensorflow as tf
    import tqdm

    # Import net.constants explicitly - previously it was only reachable as a side effect
    # of "import net.data" setting the submodule attribute on the net package
    import net.constants
    import net.data
    import net.logging
    import net.ml
    import net.utilities

    config = net.utilities.read_yaml(config_path)

    logger = net.utilities.get_logger(
        path=config["log_path"]
    )

    data_loader = net.data.Cars196TrainingLoopDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.VALIDATION
    )

    prediction_model = tf.keras.models.load_model(
        filepath=config["model_dir"],
        compile=False,
        custom_objects={'average_ranking_position': net.ml.average_ranking_position})

    image_ranking_logger = net.logging.ImageRankingLogger(
        logger=logger,
        prediction_model=prediction_model
    )

    data_iterator = iter(data_loader)

    # For each batch pick a random query image and log how all batch images rank against it
    for _ in tqdm.tqdm(range(4)):

        images, labels = next(data_iterator)

        query_index = random.choice(range(len(images)))

        image_ranking_logger.log_ranking_on_batch(
            query_image=images[query_index],
            query_label=labels[query_index],
            images=images,
            labels=labels
        )
@invoke.task
def visualize_predictions_on_dataset(_context, config_path):
    """
    Visualize image similarity ranking predictions on a few
    query images. For each query image all images in dataset are ranked against it,
    and best matches are logged together with query image.
    :param _context: invoke.Context instance
    :param config_path: str, path to configuration file
    """

    import tensorflow as tf

    # Import net.constants explicitly - previously it was only reachable as a side effect
    # of "import net.data" setting the submodule attribute on the net package
    import net.constants
    import net.data
    import net.logging
    import net.ml
    import net.utilities

    config = net.utilities.read_yaml(config_path)

    logger = net.utilities.get_logger(
        path=config["log_path"]
    )

    data_loader = net.data.Cars196AnalysisDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.VALIDATION
    )

    prediction_model = tf.keras.models.load_model(
        filepath=config["model_dir"],
        compile=False,
        custom_objects={'average_ranking_position': net.ml.average_ranking_position})

    image_ranking_logger = net.logging.ImageRankingLogger(
        logger=logger,
        prediction_model=prediction_model
    )

    image_ranking_logger.log_ranking_on_dataset(
        data_loader=data_loader,
        queries_count=8,
        logged_top_matches_count=8,
        image_size=config["image_size"]
    )
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,035 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/invoke/docker.py | """
Module with docker related commands
"""
import invoke
@invoke.task
def run(context, config_path):
    """
    Run docker container for the app
    :param context: invoke.Context instance
    :param config_path: str, path to configuration file
    """

    import os

    import net.utilities

    config = net.utilities.read_yaml(config_path)

    os.makedirs(os.path.dirname(config["log_path"]), exist_ok=True)

    # Don't like this line, but necessary to let container write to volume shared with host and host
    # to be able to read that data
    context.run(f'sudo chmod -R 777 {os.path.dirname(config["log_path"])}', echo=True)
    context.run('sudo chmod -R 777 $PWD/../../data', echo=True)

    # Also need to give container access to .git repository if we want it to run insertions count check against it
    context.run('sudo chmod -R 777 .git', echo=True)

    # Define run options that need a bit of computations
    run_options = {
        # Use gpu runtime if host has cuda installed.
        # Use .get() so a missing PATH environment variable doesn't raise KeyError
        "gpu_capabilities": "--gpus all" if "/cuda/" in os.environ.get("PATH", "") else ""
    }

    command = (
        "docker run -it --rm "
        "{gpu_capabilities} "
        "-v $PWD:/app:delegated "
        "-v $PWD/../../data:/data:delegated "
        "-v /tmp/logs:/tmp/logs:delegated "
        "puchatek_w_szortach/combination_of_multiple_global_descriptors:latest /bin/bash"
    ).format(**run_options)

    context.run(command, pty=True, echo=True)
@invoke.task
def build_app_container(context):
    """
    Build app container
    :param context: invoke.Context instance
    """

    # Build the app image from its dockerfile, tagging it as latest
    context.run(
        "docker build "
        "--tag puchatek_w_szortach/combination_of_multiple_global_descriptors:latest "
        "-f ./docker/app.Dockerfile .",
        echo=True)
@invoke.task
def build_app_base_container(context, tag):
    """
    Build app base container
    :param context: invoke.Context instance
    :param tag: str, tag for the image
    """

    # Build the base image the app image builds on top of, tagging it with the requested tag
    context.run(
        "docker build "
        f"--tag puchatek_w_szortach/combination_of_multiple_global_descriptors_base:{tag} "
        "-f ./docker/app_base.Dockerfile .",
        echo=True)
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,036 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /tests/test_analysis.py | """
Tests for analysis module
"""
import numpy as np
import net.analysis
def test_get_indices_of_k_most_similar_vectors():
    """
    Test get_indices_of_k_most_similar_vectors
    """

    input_vectors = np.array([
        [1, 1],
        [10, 10],
        [2.5, 2.5],
        [5, 5],
        [8, 8]
    ])

    # For each vector - indices of its 3 nearest neighbours, closest first, excluding the vector itself
    expected_indices = np.array([
        [2, 3, 4],
        [4, 3, 2],
        [0, 3, 4],
        [2, 4, 0],
        [1, 3, 2]
    ])

    actual_indices = net.analysis.get_indices_of_k_most_similar_vectors(
        vectors=input_vectors,
        k=3
    )

    assert np.array_equal(expected_indices, actual_indices)
def test_get_recall_at_k_score():
    """
    Test for get_recall_at_k_score
    """

    input_vectors = np.array([
        [1, 1],
        [10, 10],
        [2.5, 2.5],
        [5, 5],
        [8, 8]
    ])

    input_labels = np.array([1, 2, 1, 3, 3])

    # A sample counts as a hit when any of its 2 nearest neighbours shares its label
    expected_score = np.mean([True, False, True, True, True])

    actual_score = net.analysis.get_recall_at_k_score(
        vectors=input_vectors,
        labels=input_labels,
        k=2
    )

    assert np.isclose(expected_score, actual_score)
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,037 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/invoke/analysis.py | """
Module with analysis tasks
"""
import invoke
@invoke.task
def analyze_model_performance(_context, config_path):
    """
    Analyze model performance
    :param _context: invoke.Context instance
    :param config_path: str, path to configuration file
    """

    import tensorflow as tf

    import net.analysis
    import net.constants
    import net.data
    import net.ml
    import net.utilities

    config = net.utilities.read_yaml(config_path)

    validation_data_loader = net.data.Cars196AnalysisDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.VALIDATION
    )

    prediction_model = tf.keras.models.load_model(
        filepath=config["model_dir"],
        compile=False,
        custom_objects={'average_ranking_position': net.ml.average_ranking_position})

    embeddings_matrix, labels_array = net.analysis.get_samples_embeddings(
        data_loader=validation_data_loader,
        prediction_model=prediction_model,
        verbose=True)

    # Report retrieval quality at several neighbour-count cutoffs
    for neighbours_count in (1, 2, 4, 8):

        recall = net.analysis.get_recall_at_k_score(
            vectors=embeddings_matrix,
            labels=labels_array,
            k=neighbours_count
        )

        print(f"Recall at {neighbours_count} is: {recall:.3f}")
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,038 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/ml.py | """
Module with machine learning code
"""
import tensorflow as tf
def get_auxiliary_categorization_head(x, categories_count):
    """
    Get a simple categorization head
    :param x: 2D tensor op, batch of 1D vectors
    :param categories_count: int, number of categories for auxiliary categorization head's output
    :return: 2D tensor op, final layer uses softmax activation
    """

    dense_features = tf.keras.layers.Dense(units=categories_count, activation=tf.nn.swish)(x)
    normalized_features = tf.keras.layers.BatchNormalization(name="logits")(dense_features)

    return tf.keras.layers.Dense(
        units=categories_count,
        activation=tf.nn.softmax,
        name="softmax_predictions")(normalized_features)
class ImagesSimilarityComputer:
    """
    Class for computing similarity between images.
    """

    @staticmethod
    def get_model(image_size, categories_count):
        """
        Model builder
        :param image_size: int, height and width of input image for the model
        :param categories_count: int, number of categories for auxiliary categorization head's output
        :return: keras.Model instance, compiled with a two-headed loss
        (embeddings ranking loss + auxiliary categorization loss)
        """

        base_model = tf.keras.applications.ResNet50(
            include_top=False,
            weights="imagenet",
            input_shape=(image_size, image_size, 3)
        )

        input_op = base_model.input

        # Squeeze channels down with 1x1 convolutions before flattening,
        # keeping the following dense layer's parameter count manageable
        x = tf.keras.layers.Conv2D(filters=512, kernel_size=(1, 1), activation=tf.nn.swish)(base_model.output)
        x = tf.keras.layers.Conv2D(filters=128, kernel_size=(1, 1), activation=tf.nn.swish)(x)

        x = tf.keras.layers.Flatten()(x)

        x = tf.keras.layers.Dense(units=1024, activation=tf.nn.swish)(x)
        x = tf.keras.layers.BatchNormalization()(x)

        embeddings_head = tf.keras.layers.Dense(units=512, activation=None)(x)

        # Auxiliary classifier is built on top of the embeddings themselves
        auxiliary_categorization_head = \
            get_auxiliary_categorization_head(x=embeddings_head, categories_count=categories_count)

        embeddings_head_name = "embeddings"
        auxiliary_categorization_head_name = "auxiliary_categorization_head"

        # Identity lambdas only give the two output tensors stable names,
        # so losses and metrics can be attached to them by name in compile() below
        embeddings_head = tf.keras.layers.Lambda(
            lambda x: x,
            name=embeddings_head_name)(embeddings_head)

        auxiliary_categorization_head = tf.keras.layers.Lambda(
            lambda x: x,
            name=auxiliary_categorization_head_name)(auxiliary_categorization_head)

        model = tf.keras.models.Model(
            inputs=input_op,
            outputs=[embeddings_head, auxiliary_categorization_head]
        )

        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
            loss={
                embeddings_head_name: get_hard_aware_point_to_set_loss_op,
                auxiliary_categorization_head_name:
                    get_auxiliary_head_categorization_loss(temperature=0.5)
            },
            metrics={
                embeddings_head_name: average_ranking_position,
                auxiliary_categorization_head_name: "accuracy"
            }
        )

        return model
class CGDImagesSimilarityComputer:
    """
    Class for computing similarity between images based on Combination of Multiple Global Descriptors model
    """

    @staticmethod
    def get_model(image_size, categories_count):
        """
        Model builder
        :param image_size: int, height and width of input image for the model
        :param categories_count: int, number of categories for auxiliary categorization head's output
        :return: keras.Model instance, compiled with a two-headed loss
        (embeddings ranking loss + auxiliary categorization loss)
        """

        base_model = tf.keras.applications.ResNet50(
            include_top=False,
            weights="imagenet",
            input_shape=(image_size, image_size, 3)
        )

        input_op = base_model.input

        # Tap ResNet50 at an intermediate feature map instead of its final output
        x = [layer for layer in base_model.layers if layer.name == "conv4_block3_out"][0].output

        # Three global descriptor branches, each projected to 512 dimensions and l2-normalized
        sum_of_pooling_convolutions_features = CGDImagesSimilarityComputer._get_normalized_branch(
            x=CGDImagesSimilarityComputer._get_sum_of_pooling_convolutions_head(x),
            target_size=512)

        maximum_activations_of_convolutions_features = CGDImagesSimilarityComputer._get_normalized_branch(
            x=CGDImagesSimilarityComputer._get_maximum_activation_of_convolutions_head(x),
            target_size=512)

        generalized_mean_pooling_features = CGDImagesSimilarityComputer._get_normalized_branch(
            x=CGDImagesSimilarityComputer._get_generalized_mean_pooling_head(x),
            target_size=512)

        # Final descriptor is the concatenation of all three branches
        combination_of_multiple_global_descriptors = tf.concat(
            [
                sum_of_pooling_convolutions_features,
                maximum_activations_of_convolutions_features,
                generalized_mean_pooling_features
            ],
            axis=1)

        embeddings_head_name = "embeddings"

        # Identity lambda only gives the output tensor a stable name,
        # so losses and metrics can be attached to it by name in compile() below
        embeddings_head = tf.keras.layers.Lambda(
            lambda x: x,
            name=embeddings_head_name)(l2_normalize_batch_of_vectors(combination_of_multiple_global_descriptors))

        auxiliary_categorization_head_name = "auxiliary_categorization_head"

        # Auxiliary classifier is attached to the sum-of-pooling branch only
        auxiliary_categorization_head = \
            get_auxiliary_categorization_head(
                x=sum_of_pooling_convolutions_features,
                categories_count=categories_count)

        auxiliary_categorization_head = tf.keras.layers.Lambda(
            lambda x: x,
            name=auxiliary_categorization_head_name)(auxiliary_categorization_head)

        model = tf.keras.models.Model(
            inputs=input_op,
            outputs=[embeddings_head, auxiliary_categorization_head]
        )

        model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
            loss={
                embeddings_head_name: get_hard_aware_point_to_set_loss_op,
                auxiliary_categorization_head_name:
                    get_auxiliary_head_categorization_loss(temperature=0.5)
            },
            metrics={
                embeddings_head_name: average_ranking_position,
                auxiliary_categorization_head_name: "accuracy"
            }
        )

        return model

    @staticmethod
    def _get_normalized_branch(x, target_size):
        """
        Project features to target_size dimensions and l2-normalize them
        :param x: 2D tensor op, batch of feature vectors
        :param target_size: int, dimensionality of projected features
        :return: 2D tensor op
        """

        x = tf.keras.layers.Dense(units=target_size, activation=None)(x)
        return l2_normalize_batch_of_vectors(x)

    @staticmethod
    def _get_sum_of_pooling_convolutions_head(x):
        """
        Sum-of-pooling-convolutions style descriptor - here computed as a mean over spatial dimensions
        :param x: 4D tensor op (batch, height, width, channels)
        :return: 2D tensor op (batch, channels)
        """

        return tf.reduce_mean(x, axis=(1, 2))

    @staticmethod
    def _get_maximum_activation_of_convolutions_head(x):
        """
        Maximum-activation-of-convolutions descriptor - max over spatial dimensions
        :param x: 4D tensor op (batch, height, width, channels)
        :return: 2D tensor op (batch, channels)
        """

        return tf.reduce_max(x, axis=(1, 2))

    @staticmethod
    def _get_generalized_mean_pooling_head(x):
        """
        Generalized-mean pooling descriptor with a fixed power of 3
        :param x: 4D tensor op (batch, height, width, channels)
        :return: 2D tensor op (batch, channels)
        """

        # Compute mean pooling by first raising elements to power 3, computing mean, then taking cubic root of result
        scaled_up_elements = tf.math.pow(x, 3)
        channels_means = tf.reduce_mean(scaled_up_elements, axis=(1, 2))

        return tf.math.pow(channels_means, 1.0 / 3.0)
class HardAwarePointToSetLossBuilder:
    """
    Helper with ops used to build the hard-aware point-to-set loss
    """

    @staticmethod
    def get_points_to_sets_losses_op(distances_matrix_op, mask_op, power_constant):
        """
        Get points to sets losses vector op for points/sets specified by mask_op.

        :param distances_matrix_op: 2D tensor op with distances from queries to images in batch,
        each row represents distances from one query to all images in a batch
        :param mask_op: 2D tensor with 1s for elements that should be used in computations and 0s for elements
        that should be masked
        :param power_constant: float, value by which distances are scaled for weights computations
        :return: 1D tensor of weighted point to scale distances, each element represents weighted sum of distances
        between a query and all the non-masked elements from image set
        """
        # Cap the base at 100 before exponentiation so large distances can't
        # overflow to infinity when raised to power_constant
        capped_bases_op = tf.minimum(distances_matrix_op + 1.0, 100)
        weights_op = tf.pow(capped_bases_op, power_constant) * mask_op

        weighted_distances_sums_op = tf.math.reduce_sum(distances_matrix_op * weights_op, axis=1)
        weights_sums_op = tf.math.reduce_sum(weights_op, axis=1)

        # Epsilon in the denominator guards against division by zero when a row is fully masked
        return weighted_distances_sums_op / (weights_sums_op + 1e-6)
@tf.function
def get_hard_aware_point_to_set_loss_op(labels, embeddings):
    """
    Implementation of loss from
    "Hard-aware point-to-set deep metric for person re-identification" paper

    :param labels: 1D tensor, batch of labels for embeddings
    :param embeddings: 2D tensor, batch of embeddings
    :return: loss tensor
    """
    if has_any_nan_elements(embeddings):
        tf.print("\nNaN embeddings detected!")

    # Keras adds a redundant batch dimension to labels - drop it
    labels_vector = tf.reshape(labels, shape=(-1,))

    distances_op = get_distances_matrix_op(embeddings)
    positives_mask_with_diagonal = get_vector_elements_equalities_matrix_op(labels_vector)
    identity_matrix_op = tf.eye(num_rows=tf.shape(labels_vector)[0], dtype=tf.float32)

    # Subtract the identity so a sample's zero distance to itself doesn't
    # contribute to the positive side of the loss
    positives_losses_op = HardAwarePointToSetLossBuilder.get_points_to_sets_losses_op(
        distances_matrix_op=distances_op,
        mask_op=positives_mask_with_diagonal - identity_matrix_op,
        power_constant=10.0)

    # Invert the mask so only different-label pairs contribute
    negatives_losses_op = HardAwarePointToSetLossBuilder.get_points_to_sets_losses_op(
        distances_matrix_op=distances_op,
        mask_op=1.0 - positives_mask_with_diagonal,
        power_constant=-20.0)

    # Soft margin formulation instead of hinge loss, as per "In defence of the triplet loss" paper
    return tf.reduce_mean(tf.math.log1p(tf.math.exp(positives_losses_op - negatives_losses_op)))
@tf.function
def get_batch_hard_triplets_loss_op(labels, embeddings):
    """
    Implementation of batch-hard triplets loss from
    "In Defense of the Triplet Loss for Person Re-Identification" paper

    :param labels: 1D tensor, batch of labels for embeddings
    :param embeddings: 2D tensor, batch of embeddings
    :return: loss tensor
    """
    if has_any_nan_elements(embeddings):
        tf.print("\nNaN embeddings detected!")

    # Keras adds a redundant batch dimension to labels - drop it
    labels_vector = tf.reshape(labels, shape=(-1,))

    distances_op = get_distances_matrix_op(embeddings)
    same_labels_mask = get_vector_elements_equalities_matrix_op(labels_vector)
    identity_matrix_op = tf.eye(num_rows=tf.shape(labels_vector)[0], dtype=tf.float32)

    # Hardest positive per anchor: the largest distance to another same-label
    # sample (diagonal removed so the anchor can't be paired with itself)
    hardest_positives_op = tf.reduce_max(
        distances_op * (same_labels_mask - identity_matrix_op), axis=1)

    # Hardest negative per anchor: the smallest distance to a different-label
    # sample. Same-label entries are pushed above the global maximum distance
    # so reduce_min skips over them.
    offset_distances_op = distances_op + (same_labels_mask * tf.reduce_max(distances_op))
    hardest_negatives_op = tf.reduce_min(offset_distances_op, axis=1)

    # Soft margin formulation instead of hinge loss, as per "In defence of the triplet loss" paper
    return tf.reduce_mean(tf.math.log1p(tf.math.exp(hardest_positives_op - hardest_negatives_op)))
def get_distances_matrix_op(matrix_op):
    """
    Given a 2D matrix tensor, return euclidean distance between each row vector.
    Identical rows are handled so their (zero) distance still yields stable gradients.

    :param matrix_op: 2D tensor of row vectors
    :return: 2D [n x n] tensor of pairwise distances
    """
    rows_count_op = tf.shape(matrix_op)[0]

    # Pair every row with every other row:
    # the left operand repeats each individual row n times in place
    # (row 0 n times, then row 1 n times, ...), while the right operand tiles
    # the whole matrix n times (whole matrix, whole matrix, ...), so matching
    # positions enumerate all row combinations.
    left_operands_op = tf.repeat(matrix_op, repeats=rows_count_op, axis=0)
    right_operands_op = tf.tile(matrix_op, multiples=(rows_count_op, 1))

    # Clamp norms from below: the derivative of a zero norm is infinite.
    # Credit for noticing this goes to Olivier Moindrot:
    # https://omoindrot.github.io/triplet-loss
    epsilon_op = tf.constant(1e-6, tf.float32)
    pairwise_norms_op = tf.maximum(tf.norm(left_operands_op - right_operands_op, axis=1), epsilon_op)

    # Fold the flat distances vector back into an [n x n] matrix
    return tf.reshape(tensor=pairwise_norms_op, shape=(rows_count_op, rows_count_op))
def l2_normalize_batch_of_vectors(x):
    """
    Given a matrix representing a group of vectors, one vector per row, l2-normalize each vector.
    The result is clamped from below at a small epsilon so that a zero vector
    doesn't produce infinite gradients through the normalization.

    :param x: 2D tensorflow op
    :return: 2D tensorflow op
    """
    minimum_value = 1e-6
    normalized_op = tf.math.l2_normalize(x, axis=1)
    return tf.maximum(normalized_op, minimum_value)
def get_vector_elements_equalities_matrix_op(vector_op):
    """
    Given a vector_op, return a square matrix such that element (i, j) is 1 if
    vector_op[i] == vector_op[j] and 0 otherwise.

    :param vector_op: 1D tensor of ints
    :return: 2D float matrix
    """
    length_op = tf.shape(vector_op)[0]

    # Match every element against every other element:
    # one operand repeats each element n times in place, the other tiles the
    # whole vector n times, so matching positions enumerate all pairs
    element_wise_repeats_op = tf.repeat(vector_op, repeats=length_op)
    vector_wise_repeats_op = tf.tile(vector_op, multiples=[length_op])

    equalities_op = tf.cast(element_wise_repeats_op == vector_wise_repeats_op, tf.float32)

    # Fold the flat equalities vector back into a square matrix
    return tf.reshape(equalities_op, shape=(length_op, length_op))
def has_any_nan_elements(x):
    """
    Check if tensor contains any NaN values

    :param x: tensor
    :rtype: boolean tensor
    """
    nan_flags_op = tf.math.is_nan(x)
    return tf.math.reduce_any(nan_flags_op)
def has_any_inf_elements(x):
    """
    Check if tensor contains any inf values

    :param x: tensor
    :rtype: boolean tensor
    """
    inf_flags_op = tf.math.is_inf(x)
    return tf.math.reduce_any(inf_flags_op)
def has_near_zero_element(x):
    """
    Check if tensor contains any value with magnitude below a small epsilon

    :param x: tensor
    :rtype: boolean tensor
    """
    threshold = 1e-6
    return tf.math.reduce_any(tf.abs(x) < threshold)
def average_ranking_position(labels, embeddings):
    """
    Compute average ranking position of correct label images for each query image.

    :param labels: [n x 1] tensor with labels
    :param embeddings: 2D tensor with embeddings, each row represents embeddings for a single input
    """
    # Keras adds a redundant batch dimension to labels - drop it
    labels_vector = tf.reshape(labels, shape=(-1,))

    distances_op = get_distances_matrix_op(embeddings)
    same_labels_mask = get_vector_elements_equalities_matrix_op(labels_vector)

    # Double argsort turns distances into per-row ranks: the first argsort
    # yields sorted order, the second yields each element's position in that
    # order, i.e. its rank within the row.
    ranks_op = tf.cast(tf.argsort(tf.argsort(distances_op, axis=1), axis=1), tf.float32)

    # Zero out ranks of different-label samples so only positives contribute
    positive_ranks_op = (same_labels_mask) * ranks_op

    # Average rank per query = sum of same-label ranks divided by the count of
    # same-label samples; zeroed negatives don't affect the sum
    per_query_average_rank_op = \
        tf.reduce_sum(positive_ranks_op, axis=1) / \
        tf.reduce_sum(same_labels_mask, axis=1)

    return tf.reduce_mean(per_query_average_rank_op)
def get_temperature_scaled_softmax_cross_entropy_loss_function(temperature):
    """
    Function that builds a softmax cross entropy loss with specified temperature scaling

    :param temperature: float, temperature to use for temperature scaling
    :return: loss function that accepts two parameters, labels and predictions, and returns scalar loss
    """
    def get_loss(labels, predictions_op):
        # Recover the logits feeding the softmax, then apply temperature scaling
        logits = predictions_op.op.inputs[0]
        return tf.keras.losses.sparse_categorical_crossentropy(
            labels, logits / temperature, from_logits=True, axis=-1)

    return get_loss
def get_auxiliary_head_categorization_loss(temperature):
    """
    Function that builds a softmax cross entropy loss with temperature scaling and label smoothing

    :param temperature: float, temperature to use for temperature scaling
    :return: loss function that accepts two parameters, labels and predictions, and returns scalar loss
    """
    def get_loss(labels, predictions_op):
        # Recover the logits feeding the softmax, then apply temperature scaling
        scaled_logits = predictions_op.op.inputs[0] / temperature

        # One-hot encode labels so categorical_crossentropy can apply label smoothing
        categories_count = predictions_op.shape[-1]
        one_hot_encoded_labels = tf.one_hot(tf.cast(tf.squeeze(labels), tf.int32), categories_count)

        return tf.keras.losses.categorical_crossentropy(
            y_true=one_hot_encoded_labels,
            y_pred=scaled_logits,
            from_logits=True,
            label_smoothing=0.1)

    return get_loss
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,039 | fatejzz/combination_of_multiple_global_descriptors_for_image_retrieval | refs/heads/master | /net/invoke/ml.py | """
Module with machine learning tasks
"""
import invoke
@invoke.task
def train(_context, config_path):
    """
    Train model

    Builds training/validation data generators, wraps them in tf.data
    pipelines, constructs the CGD similarity model and runs a Keras training
    loop with checkpointing, early stopping and learning rate scheduling.

    :param _context: invoke.Context instance
    :param config_path: str, path to configuration file
    """
    # Heavy imports are kept local to the task so the invoke CLI stays fast
    import tensorflow as tf
    import net.constants
    import net.data
    import net.logging
    import net.ml
    import net.utilities
    config = net.utilities.read_yaml(config_path)
    # Generators yielding (images batch, labels batch) tuples
    training_data_loader = net.data.Cars196TrainingLoopDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.TRAINING
    )
    validation_data_loader = net.data.Cars196TrainingLoopDataLoader(
        config=config,
        dataset_mode=net.constants.DatasetMode.VALIDATION
    )
    # Wrap the generators in tf.data pipelines; prefetch keeps batches ready
    training_dataset = tf.data.Dataset.from_generator(
        generator=lambda: iter(training_data_loader),
        output_types=(tf.float32, tf.float32),
        output_shapes=(tf.TensorShape([None, 224, 224, 3]), tf.TensorShape([None]))
    ).prefetch(32)
    validation_dataset = tf.data.Dataset.from_generator(
        generator=lambda: iter(validation_data_loader),
        output_types=(tf.float32, tf.float32),
        output_shapes=(tf.TensorShape([None, 224, 224, 3]), tf.TensorShape([None]))
    ).prefetch(32)
    model = net.ml.CGDImagesSimilarityComputer.get_model(
        image_size=config["image_size"],
        categories_count=config["categories_count"]
    )
    # Checkpointing, early stopping and LR decay are all driven off the
    # validation ranking metric of the embeddings head
    metric_to_monitor = "val_embeddings_average_ranking_position"
    model.fit(
        x=training_dataset,
        epochs=200,
        steps_per_epoch=len(training_data_loader),
        validation_data=validation_dataset,
        validation_steps=len(validation_data_loader),
        callbacks=[
            tf.keras.callbacks.ModelCheckpoint(
                monitor=metric_to_monitor,
                filepath=config["model_dir"],
                save_best_only=True,
                save_weights_only=False,
                verbose=1),
            tf.keras.callbacks.EarlyStopping(
                monitor=metric_to_monitor,
                patience=12,
                verbose=1),
            tf.keras.callbacks.ReduceLROnPlateau(
                monitor=metric_to_monitor,
                factor=0.1,
                patience=4,
                verbose=1),
            tf.keras.callbacks.CSVLogger(
                filename=config["training_metrics_log_path"]
            )
        ]
    )
| {"/tests/test_ml.py": ["/net/ml.py"], "/net/logging.py": ["/net/analysis.py", "/net/processing.py"], "/net/data.py": ["/net/constants.py", "/net/processing.py"], "/tasks.py": ["/net/invoke/analysis.py", "/net/invoke/docker.py", "/net/invoke/ml.py", "/net/invoke/visualize.py"], "/net/invoke/visualize.py": ["/net/data.py", "/net/processing.py", "/net/logging.py", "/net/ml.py"], "/tests/test_analysis.py": ["/net/analysis.py"], "/net/invoke/analysis.py": ["/net/analysis.py", "/net/constants.py", "/net/data.py", "/net/ml.py"], "/net/invoke/ml.py": ["/net/constants.py", "/net/data.py", "/net/logging.py", "/net/ml.py"]} |
45,040 | shalman13091994/lapnew | refs/heads/master | /newbee/firstapp/admin.py | from django.contrib import admin
from.models import person
# Make the person model manageable through the Django admin interface
admin.site.register(person)
| {"/newbee/firstapp/urls.py": ["/newbee/firstapp/views.py"]} |
45,041 | shalman13091994/lapnew | refs/heads/master | /newbee/firstapp/urls.py | from django.urls import path
from.views import homepage,welcome,password
# URL routes for the firstapp views; route names are used for reverse lookups
urlpatterns = [
    path('', homepage, name='home'),
    path('welcome', welcome, name='welcome'),
    path('password', password, name='pass'),
    path('home', homepage, name='home'),
]
45,042 | shalman13091994/lapnew | refs/heads/master | /newbee/firstapp/views.py | from django.shortcuts import render
from .models import person
from django.http import HttpResponse
def homepage(request):
    """Render the home page with every stored person entry."""
    posts = person.objects.all()
    return render(request, 'home.html', {'posts': posts})
def welcome(request):
    """Read the posted username and password and echo them on the welcome page."""
    combined = request.POST['uname'] + ' ' + request.POST['psw']
    return render(request, 'welcome.html', {'final': combined})
def password(request):
    """Render the static password page."""
    return render(request, 'password.html')
| {"/newbee/firstapp/urls.py": ["/newbee/firstapp/views.py"]} |
45,056 | simonno/FinalProject | refs/heads/master | /createMalware.py | import os
import createImages
import badImage
def main():
    """
    Generate the image set, then embed a payload file into each of the first
    500 images.

    createImages.createImages() is called for its side effect of writing the
    image files; its return value is not needed here.
    """
    createImages.createImages()

    payload_files = os.listdir("train50")

    for i in range(500):
        # str(i), not i.__str__() - same result, idiomatic form
        image_path = "Images/image" + str(i) + ".png"
        # Cycle through the available payload samples
        payload_path = "train50/" + payload_files[i % len(payload_files)]
        badImage.createBadImage(image_path, payload_path)
if __name__ == "__main__":
    # Script entry point
    main()
| {"/createMalware.py": ["/badImage.py"]} |
45,057 | simonno/FinalProject | refs/heads/master | /createCSV.py | import csv
import random
from sklearn.model_selection import train_test_split
from PIL import Image
import numpy as np
import pandas as pd
import xgboost as xgb
with open("classification_data.csv", "wb") as csv_file:
writer = csv.writer(csv_file)
newArray = []
for i in range(1000):
image = Image.open("Images/image" + str(i) + ".png")
image_array = np.array(image)
output_string = ""
for row in image_array:
for column in row:
value_string = ""
try:
for value in column:
if value < 10:
value_string += "00" + str(value)
elif value < 100:
value_string += "0" + str(value)
else:
value_string += str(value)
except:
value_string += str(column)
output_string += value_string + ","
output_string += ""
if i < 500:
output_string += "1"
else:
output_string += "0"
newArray_I = output_string.split(',')
newArray.append(newArray_I)
print i
random.shuffle(newArray)
for i in range(1000):
writer.writerow(newArray[i])
| {"/createMalware.py": ["/badImage.py"]} |
45,058 | simonno/FinalProject | refs/heads/master | /badImage.py | import cv2 # OpenCV 2
from random import randint
from os import listdir
from os.path import isfile, join
def to_bit_generator(msg):
    """Yield the bits of *msg* one at a time, least significant bit of each
    character first."""
    for character in msg:
        code = ord(character)
        for bit_position in range(8):
            yield (code >> bit_position) & 1
def createBadImage(path_image, path_malware):
    """
    Hide the bytes of the payload file inside the least significant bits of
    the grayscale image at path_image, overwriting the image in place.

    :param path_image: path of the carrier image (read and then overwritten)
    :param path_malware: path of the payload file to embed
    """
    # BUG FIX: the hidden message must come from the payload file - previously
    # the carrier image itself was read and path_malware was never used.
    # The payload is repeated 10x to reduce the chance of the bit generator
    # being exhausted before every pixel is written (TODO: verify coverage
    # for large images).
    hidden_message = to_bit_generator(open(path_malware, "rb").read() * 10)

    # Read the original image; shape is (rows, columns) for a grayscale image
    img = cv2.imread(path_image, cv2.IMREAD_GRAYSCALE)
    rows, columns = img.shape
    for h in range(rows):
        for w in range(columns):
            # Replace the least significant bit of the pixel with a payload bit
            bit = next(hidden_message)
            img[h][w] = (img[h][w] & ~1) | bit

    # Write out the image with the hidden message
    cv2.imwrite(path_image, img)
| {"/createMalware.py": ["/badImage.py"]} |
45,059 | simonno/FinalProject | refs/heads/master | /machineLearning.py | import csv
import random
from sklearn.model_selection import train_test_split
from PIL import Image
import numpy as np
import pandas as pd
import xgboost as xgb
# Train and evaluate an XGBoost classifier on the generated pixel dataset
class_data = pd.read_csv("classification_data.csv")
# Last column is the binary label, everything before it are pixel features
X, y = class_data.iloc[:, :-1], class_data.iloc[:, -1]
# Fixed random_state for a reproducible 80/20 train/test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
xg_cl = xgb.XGBClassifier(objective='binary:logistic', n_estimators=10, seed=123)
xg_cl.fit(X_train, y_train)
preds = xg_cl.predict(X_test)
# Fraction of correctly classified test samples
accuracy = float(np.sum(preds == y_test)) / y_test.shape[0]
print("accuracy: %f" % (accuracy))
| {"/createMalware.py": ["/badImage.py"]} |
45,061 | dmkaraba/green-house | refs/heads/master | /test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
from modules.greenhouse.objects import Lifecycle
from web_interface.web_events.config_requests import CreateLifecycle, Timer, Conditions
from datetime import datetime
from handlers.jobs import watch_for_soilmoisture_a, watch_for_fans, watch_for_lights
from modules.greenhouse.controllers import PumpA, PumpB
from modules.greenhouse.sensors import SoilMoistureA, SoilMoistureB, SoilMoistureSensors
# Sample timer: daily 07:00-20:00 window active between 2017-03-03 and
# 2017-03-15 (the date part of start_time/end_time is a placeholder)
T = Timer({
    'start_time': datetime(2017, 1, 1, 7, 0, 0),
    'end_time': datetime(2017, 1, 1, 20, 0, 0),
    'start_date': datetime(2017, 3, 3, 0, 0, 0),
    'end_date': datetime(2017, 3, 15, 0, 0, 0),
})
# Sample conditions: keep the monitored value between 30 and 70
C = Conditions({'min_value': 30, 'max_value': 70})
# Lifecycle creation payload driven by conditions rather than a timer
data = {
    'by_time': False,
    'type': 'soil_moisture_b',
    # 'timer': T,
    'conditions': C
}
def test_soilMo():
    """Smoke-test the soil moisture sensors by reading and printing values."""
    print SoilMoistureA().read().moisture
    print SoilMoistureB().read().moisture
    # Combined read of all soil moisture sensors; the result is discarded
    SoilMoistureSensors().read()
if __name__ == '__main__':
    # Manual smoke-test entry point; the checks below are kept for reference
    test_soilMo()
    # lc = CreateLifecycle(data)
    # Lifecycle.create(**lc)
    # watch_for_fans()
    # watch_for_lights()
    # watch_for_soilmoisture_a()
# watch_for_soilmoisture_b() | {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,062 | dmkaraba/green-house | refs/heads/master | /config/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
base_dir = os.path.join(os.path.dirname(__file__), '..')
class Config(object):
    """
    Read-only wrapper that exposes a configuration mapping's keys as attributes.
    """

    # Directories derived from the repository root
    temp_dir = os.path.join(base_dir, 'tmp')
    log_dir = os.path.join(base_dir, 'logs')
    camera_dir = temp_dir

    def __init__(self, conf):
        self._data = conf

    def __getattr__(self, name):
        # Only called for attributes not found the normal way - look the name
        # up in the wrapped mapping instead
        if name in self._data:
            return self._data[name]
        raise AttributeError('Config has not {0} attribute.'.format(str(name)))
# The GH_STAGE environment variable selects which YAML file under stages/ to load
stage_name = os.getenv('GH_STAGE')
if not stage_name:
    raise Exception('GH_STAGE environment wasn\'t set.')
relative_path = '{0}.yaml'.format(os.path.join('stages', stage_name))
abs_path = os.path.join(base_dir, relative_path)
with open(abs_path, 'r') as fh:
    # safe_load instead of load: plain yaml.load without a Loader is
    # deprecated and can construct arbitrary Python objects from the file
    data = yaml.safe_load(fh)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,063 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/objects.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from modules.data.db import DBSharedProperty
from modules.greenhouse.models import SensorResultsDoc, \
SoilConditions, SoilMoistureConditions,\
AirInsideConditions, AirOutsideConditions, \
LifecycleDoc, TimerDoc, ConditionDoc
class Lifecycle(object):
    """
    Domain wrapper around a LifecycleDoc record describing one automated
    greenhouse routine: whether it is timer or condition driven, its schedule
    window and its sensor value thresholds.
    """
    ModelClass = LifecycleDoc
    TimerModelClass = TimerDoc
    ConditionModelClass = ConditionDoc

    # Attributes proxied straight to the underlying mongo document
    type = DBSharedProperty('type')
    by_time = DBSharedProperty('by_time')
    state = DBSharedProperty('state')
    active = DBSharedProperty('active')
    timer = DBSharedProperty('timer')
    conditions = DBSharedProperty('conditions')
    last_event = DBSharedProperty('last_event')

    def __init__(self, type=None, model=None):
        # Either wrap an existing document or look one up by its type
        assert type or model, 'type or model are required'
        self.model = model or self.ModelClass.objects.get(type=type)

    @classmethod
    def create(cls, **kwargs):
        """Create and persist a new lifecycle document from keyword data."""
        new_model = cls.ModelClass()
        new_model = cls.populate(new_model, **kwargs)
        new_model.save()

    @classmethod
    def populate(cls, model, type, by_time, timer, conditions, last_event):
        """
        Copy request data onto a document instance and return it.

        NOTE(review): last_event is accepted but never stored on the model -
        confirm whether that is intentional.
        """
        model.type = type
        model.by_time = by_time
        if timer:
            model.timer = cls.TimerModelClass(start_date=timer.start_date, end_date=timer.end_date,
                                              start_time=timer.start_time, end_time=timer.end_time)
        if conditions:
            model.conditions = cls.ConditionModelClass(min_value=conditions.min_value,
                                                       max_value=conditions.max_value)
        return model

    def save(self):
        """Persist the wrapped document."""
        self.model.save()
class SensorResults(object):
    """
    Domain wrapper that maps raw sensor read results onto a persisted
    SensorResultsDoc measurement record.
    """
    ModelClass = SensorResultsDoc
    SoilCondModelClass = SoilConditions
    SoilMoistureModelClass = SoilMoistureConditions
    AirInsideCondModelDoc = AirInsideConditions
    AirOutsideCondModelDoc = AirOutsideConditions

    def __init__(self, model=None):
        assert model, 'model is required'
        self.model = model

    @classmethod
    def create(cls, **kwargs):
        """Create and persist a new measurement document from sensor results."""
        new_model = cls.ModelClass()
        new_model = cls.populate(new_model, **kwargs)
        new_model.save()

    @classmethod
    def populate(cls, model, DS18B20_air, DS18B20_soil, BH1750, DHT22,
                 Soil_moisture_a, Soil_moisture_b, Soil_moisture_c, Soil_moisture_d, **kwargs):
        """
        Fill a measurement document from the individual sensor result objects
        (each exposing attributes such as .temperature, .moisture, .humidity).
        """
        # Four soil moisture probes, one value each
        soil_moisture = cls.SoilMoistureModelClass(a=Soil_moisture_a.moisture,
                                                   b=Soil_moisture_b.moisture,
                                                   c=Soil_moisture_c.moisture,
                                                   d=Soil_moisture_d.moisture)
        model.soil = cls.SoilCondModelClass(temperature=DS18B20_soil.temperature,
                                            moisture=soil_moisture)
        model.air_outside = cls.AirOutsideCondModelDoc(temperature=DS18B20_air.temperature)
        model.air_inside = cls.AirInsideCondModelDoc(temperature=DHT22.temperature,
                                                     luminosity=BH1750.luminosity,
                                                     humidity=DHT22.humidity)
        return model
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,064 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from modules.data.db import DBDocument
from mongoengine import EmbeddedDocument, EmbeddedDocumentField, StringField,\
IntField, BooleanField, ListField, MapField, DynamicField, \
ObjectIdField, DateTimeField, FloatField, ReferenceField
### LIFECYCLE ###################################
class TimerDoc(EmbeddedDocument):
    """Schedule window for a lifecycle: active date span plus daily time span."""
    start_date = DateTimeField()
    end_date = DateTimeField()
    start_time = DateTimeField(required=True)
    end_time = DateTimeField(required=True)
class ConditionDoc(EmbeddedDocument):
    """Value thresholds attached to a lifecycle; only min_value is mandatory."""
    min_value = IntField(required=True)
    max_value = IntField()
class LifecycleDoc(DBDocument):
    """One automated greenhouse routine: its type, schedule, thresholds and state."""
    meta = {'collection': 'lifecycle'} # TODO: index - type
    type = StringField(required=True)
    by_time = BooleanField(required=True)  # True when driven by timer rather than conditions
    active = BooleanField(default=True)
    state = BooleanField(default=False)
    timer = EmbeddedDocumentField(TimerDoc)
    conditions = EmbeddedDocumentField(ConditionDoc)
    last_event = DateTimeField()
### SENSOR RESULTS ##############################
class SoilMoistureConditions(EmbeddedDocument):
    """Moisture readings from the four soil probes (a through d)."""
    a = FloatField()
    b = FloatField()
    c = FloatField()
    d = FloatField()
class SoilConditions(EmbeddedDocument):
    """Soil temperature plus per-probe moisture readings."""
    temperature = FloatField()
    moisture = EmbeddedDocumentField(SoilMoistureConditions)
class AirOutsideConditions(EmbeddedDocument):
    """Outside air measurements (temperature only)."""
    temperature = FloatField()
class AirInsideConditions(EmbeddedDocument):
    """Inside air measurements: temperature, luminosity and humidity."""
    temperature = FloatField()
    luminosity = FloatField()
    humidity = FloatField()
class SensorResultsDoc(DBDocument):
    """One timestamped snapshot of all greenhouse sensor measurements."""
    meta = {'collection': 'measures'}
    soil = EmbeddedDocumentField(SoilConditions)
    air_outside = EmbeddedDocumentField(AirOutsideConditions)
    air_inside = EmbeddedDocumentField(AirInsideConditions)
    datetime = DateTimeField(default=datetime.datetime.now)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,065 | dmkaraba/green-house | refs/heads/master | /modules/mqtt_interaction/handlers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import signal
class SensorsMQTTDmn(object):
    """
    Daemon loop that reads all sensors every 5 seconds and publishes the values
    on MQTT topics until SIGTERM/SIGINT is received.
    """
    def __init__(self):
        # Flag flipped by the signal handler to leave the publish loop
        self.kill_now = False

    def exit_pub_loop(self, signum, frame):
        """Signal handler: request loop exit and drop the MQTT connection."""
        # print 'exit_pub_loop'
        self.kill_now = True
        self.mqtt_connection.disconnect()

    def run(self):
        """Run the publish loop until a stop signal arrives."""
        # Imports kept local so importing this module has no hardware/broker deps
        from utils.sensors.reader import read_all
        from modules.mqtt_interaction.base import Base_GHMQTT
        self.mqtt_connection = Base_GHMQTT()
        #TODO: missing ONE message while disconnect-reconnect
        while not self.kill_now:
            signal.signal(signal.SIGTERM, self.exit_pub_loop)
            signal.signal(signal.SIGINT, self.exit_pub_loop)
            results = read_all()
            if results:
                data = results['result']
                # Map each measured value to its MQTT topic
                msgs = [
                    ('conditions/soil/temperature', data['soil']['temperature']),
                    ('conditions/soil/moisture', data['soil']['moisture']),
                    ('conditions/air/outside/temperature', data['air_outside']['temperature']),
                    ('conditions/air/inside/temperature', data['air_inside']['temperature']),
                    ('conditions/air/inside/humidity', data['air_inside']['humidity']),
                    ('conditions/air/inside/luminosity', data['air_inside']['luminosity']),
                ]
                self.mqtt_connection.pub(msgs)
            # print 'Steel in while loop'
            time.sleep(5)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,066 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/sensors.py | #!/usr/bin/python
import time
import Adafruit_ADS1x15
import Adafruit_DHT as dht
import smbus
import utils.logger as logger
from config import config
from modules.greenhouse.sensor_answers import DS18B20Result, BH1750Result, DHT22Result, SoilMoistureResult
class BaseSensor(object):
    """
    Common interface for greenhouse sensors.

    NOTE(review): read() silently does nothing here while set_up/tear_down
    raise NotImplementedError - subclasses are expected to override read();
    confirm whether the asymmetry is intentional.
    """
    NAME = None  # human readable sensor name, set by subclasses

    @classmethod
    def set_up(cls):
        raise NotImplementedError

    @classmethod
    def tear_down(cls):
        raise NotImplementedError

    @classmethod
    def read(cls):
        pass
class BH1750(BaseSensor):
    """I2C ambient-light sensor (BH1750) for luminosity inside the greenhouse.

    Readings are taken in one-shot high-resolution mode; a warm-up read is
    issued before the real one (see read()).  A failed bus transfer is
    reported as rc=66 instead of raising.
    """
    NAME = 'Air inside luminosity'
    # Define some constants from the datasheet
    DEVICE = 0x23 # Default device I2C address
    POWER_DOWN = 0x00 # No active state
    POWER_ON = 0x01 # Power on
    RESET = 0x07 # Reset data register value
    # Start measurement at 4lx resolution. Time typically 16ms.
    CONTINUOUS_LOW_RES_MODE = 0x13
    # Start measurement at 1lx resolution. Time typically 120ms
    CONTINUOUS_HIGH_RES_MODE_1 = 0x10
    # Start measurement at 0.5lx resolution. Time typically 120ms
    CONTINUOUS_HIGH_RES_MODE_2 = 0x11
    # Start measurement at 1lx resolution. Time typically 120ms
    # Device is automatically set to Power Down after measurement.
    ONE_TIME_HIGH_RES_MODE_1 = 0x20
    # Start measurement at 0.5lx resolution. Time typically 120ms
    # Device is automatically set to Power Down after measurement.
    ONE_TIME_HIGH_RES_MODE_2 = 0x21
    # Start measurement at 1lx resolution. Time typically 120ms
    # Device is automatically set to Power Down after measurement.
    ONE_TIME_LOW_RES_MODE = 0x23
    # The bus object is created at class-definition time, so importing this
    # module requires working I2C hardware.
    # bus = smbus.SMBus(0) # Rev 1 Pi uses 0
    bus = smbus.SMBus(1) # Rev 2 Pi uses 1
    def read(self):
        """Return a BH1750Result with 'luminosity' (lux, 1 decimal),
        or rc=66 when the I2C transfer fails."""
        try:
            self.readLight() # warming up
            time.sleep(0.5)
            luminosity = self.readLight()
            logger.info('BH1750| L: {}'.format(luminosity))
            return BH1750Result({'luminosity': float("%.1f" % luminosity)})
        except IOError:
            # rc=66 is this project's "sensor read failed" marker.
            return BH1750Result({'rc': 66})
    def convertToNumber(self, data):
        # Simple function to convert 2 bytes of data
        # into a decimal number; /1.2 converts counts to lux per datasheet.
        return ((data[1] + (256 * data[0])) / 1.2)
    def readLight(self, addr=DEVICE):
        """One raw one-shot high-res measurement from the device at *addr*."""
        data = self.bus.read_i2c_block_data(addr, self.ONE_TIME_HIGH_RES_MODE_1)
        return self.convertToNumber(data)
class DHT22(BaseSensor):
    """Combined air humidity/temperature sensor (DHT22 on a GPIO pin)."""

    NAME = 'Air inside humidity/temperature'
    DHT22_PIN = config.sensors['gpio_pins']['DHT22']

    def read(self):
        """Poll the sensor and return a DHT22Result.

        Returns rc=66 (the project's read-failure marker, as used by
        BH1750) when Adafruit_DHT.read_retry gives up and returns
        (None, None); the old code crashed on the float formatting then.
        """
        h, t = dht.read_retry(dht.DHT22, self.DHT22_PIN, delay_seconds=3)
        logger.info('DHT22| T: {}, H: {}'.format(t, h))
        if t is None or h is None:
            return DHT22Result({'rc': 66})
        # Bug fix: the old code also assigned the readings onto the
        # DHT22Result *class* itself, mutating the shared model type as a
        # side effect of every read.
        temperature = float("%.1f" % t)
        humidity = float("%.1f" % h)
        return DHT22Result({'temperature': temperature, 'humidity': humidity})
class DS18B20(BaseSensor):
    """1-Wire temperature probe.

    Concrete subclasses point SENSOR_FILE at their /sys/bus/w1 slave file
    and set NAME; read() parses the kernel-provided w1_slave contents.
    """

    NAME = None
    SENSOR_FILE = None

    def read(self):
        """Return a DS18B20Result with the probe temperature (degrees C)."""
        temperature = self.__read_temp(self.SENSOR_FILE)
        logger.info('DS18B20| T: {}'.format(temperature))
        return DS18B20Result({'temperature': temperature})

    def __read_temp_raw(self, sensor_file):
        # One raw snapshot of the w1_slave file contents.
        with open(sensor_file, 'r') as handle:
            return handle.readlines()

    def __read_temp(self, sensor_file):
        # Re-read until the kernel reports a valid CRC ('YES' ends line 1).
        lines = self.__read_temp_raw(sensor_file)
        while not lines[0].strip().endswith('YES'):
            time.sleep(0.2)
            lines = self.__read_temp_raw(sensor_file)
        # The reading follows 't=' on the second line, in milli-degrees.
        marker = lines[1].find('t=')
        if marker != -1:
            milli_degrees = lines[1].strip()[marker + 2:]
            return float("%.1f" % (float(milli_degrees) / 1000))
class DS18B20_Air(DS18B20):
    # Outdoor-air probe; the 1-Wire device id 'ds18b20_b' comes from config.
    NAME = 'Air outside temperature'
    SENSOR_FILE = "/sys/bus/w1/devices/{}/w1_slave".format(config.sensors['ids']['ds18b20_b'])
class DS18B20_Soil(DS18B20):
    # In-soil probe; the 1-Wire device id 'ds18b20_a' comes from config.
    NAME = 'Soil inside temperature'
    SENSOR_FILE = "/sys/bus/w1/devices/{}/w1_slave".format(config.sensors['ids']['ds18b20_a'])
class SoilMoistureSensors(BaseSensor):
    """Four soil-moisture probes behind an ADS1115 ADC.

    Raw ADC readings are mapped linearly from the calibration window
    [MIN_VOLTS, MAX_VOLTS] onto a 0-100 percent scale.  Readings outside
    the window yield percentages outside 0-100 (no clamping is done).
    """

    NAME = 'Soil moisture'
    GAIN = 1
    # Calibration window for the linear volts->percent mapping.
    MIN_VOLTS = 4700
    MAX_VOLTS = 18000
    # Shared ADC instance, created at class-definition time (needs hardware).
    adc = Adafruit_ADS1x15.ADS1115()

    def read_one_raw(self, sens_num):
        """Raw ADC value from one channel, *sens_num* in [0..3]."""
        return self.adc.read_adc(sens_num, gain=self.GAIN)

    def read_all_raw(self):
        """Raw ADC values from all four channels, in channel order."""
        return [self.adc.read_adc(channel, gain=self.GAIN) for channel in range(4)]

    def do_average(self, values):
        """Arithmetic mean of *values*."""
        return sum(values) / len(values)

    def volts_to_percents(self, value):
        """Map one raw ADC reading onto the 0-100 moisture scale."""
        window = self.MAX_VOLTS - self.MIN_VOLTS
        scaled = float(value - self.MIN_VOLTS) / float(window)
        # Round the 0-1 ratio to three decimals before expanding to percent,
        # matching the original calibration behaviour.
        return float("%.3f" % scaled) * 100

    def read(self):
        """Average moisture over all four probes."""
        percents = [self.volts_to_percents(raw) for raw in self.read_all_raw()]
        return SoilMoistureResult({'moisture': self.do_average(percents)})

    def read_one(self, num):
        """Moisture from the single probe on channel *num*."""
        percent = self.volts_to_percents(self.read_one_raw(num))
        return SoilMoistureResult({'moisture': percent})
class SoilMoistureA(SoilMoistureSensors):
    """Bed A moisture: mean of ADC channels 0 and 1."""

    def read(self):
        readings = [self.read_one(channel).moisture for channel in (0, 1)]
        return SoilMoistureResult({'moisture': sum(readings) / 2})
class SoilMoistureB(SoilMoistureSensors):
    """Bed B moisture: mean of ADC channels 2 and 3."""

    def read(self):
        readings = [self.read_one(channel).moisture for channel in (2, 3)]
        return SoilMoistureResult({'moisture': sum(readings) / 2})
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,067 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/lifecycle.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
from datetime import date as dt
from datetime import time as tm
from modules.greenhouse.objects import Lifecycle
from utils.mixins import DateComparison, TimeComparison
from modules.const import PERFORMERS, SENSORS
from utils.logger import info
def compare_logic(a, b, op):
    """Apply the comparison named by *op* to *a* and *b*.

    Supported op names: 'eq', 'gt', 'lt', 'gte', 'lte'.  Any other name
    raises Exception, exactly like the original if/elif chain.
    """
    comparisons = {
        'eq': lambda x, y: x == y,
        'gt': lambda x, y: x > y,
        'lt': lambda x, y: x < y,
        'gte': lambda x, y: x >= y,
        'lte': lambda x, y: x <= y,
    }
    if op not in comparisons:
        raise Exception('Unsupported operation %s' % op)
    return comparisons[op](a, b)
class BaseWatchdog(object):
    """Shared scheduling skeleton for the timer/condition watchdogs.

    Subclasses populate the attributes in __init__; the celery beat jobs
    call watch() periodically, which switches the performer on/off and
    mirrors the transition into the persisted Lifecycle record.
    """
    def __init__(self):
        self.sensor = None          # sensor instance (condition watchdogs only)
        self.performer = None       # relay controller class (PERFORMERS entry)
        self.lifecycle_obj = None   # persisted Lifecycle record (state/last_event)
        self.timer = None           # schedule window: start/end date and time
        self.last_event = None      # datetime of the last recorded transition
        self.active = None          # master enable flag for this watchdog
    def satisfy_time(self): # TODO: rebuild using dt/tm
        # True while the wall clock is inside [start_time, end_time), at
        # minute resolution.  NOTE(review): a window that crosses midnight
        # (end_time < start_time) can never be satisfied here -- confirm
        # that timers are always same-day.
        time = TimeComparison(datetime.datetime.now().hour, datetime.datetime.now().minute)
        timer_start_time = TimeComparison(self.timer.start_time.hour, self.timer.start_time.minute)
        timer_end_time = TimeComparison(self.timer.end_time.hour, self.timer.end_time.minute)
        return timer_end_time > time >= timer_start_time
    def satisfy_date(self):
        # True when today falls inside the optional [start_date, end_date]
        # window; a timer without dates is treated as always valid.
        if self.timer.start_date and self.timer.end_date:
            timer_start_date = DateComparison(self.timer.start_date.year, self.timer.start_date.month, self.timer.start_date.day)
            timer_end_date = DateComparison(self.timer.end_date.year, self.timer.end_date.month, self.timer.end_date.day)
            return timer_end_date >= DateComparison.today() >= timer_start_date
        else:
            return True
    # def satisfied_last_event(self):
    #     time = TimeComparison(datetime.datetime.now().hour, datetime.datetime.now().minute)
    #     date = DateComparison(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
    #     if self.last_event:
    #         last_event_time = TimeComparison(self.last_event.hour, self.last_event.minute)
    #         last_event_date = DateComparison(self.last_event.year, self.last_event.month, self.last_event.day)
    #         if date > last_event_date:
    #             return True
    #         else:
    #             return time <= last_event_time
    #     else:
    #         return True
    def watch(self):
        """One scheduling pass: switch on inside the window, off outside it,
        persisting every state change with a timestamp."""
        print '>>> Timer watch: satisfy_date:{} satisfy_time:{}'.\
            format(self.satisfy_date(), self.satisfy_time())
        if self.satisfy_date() and self.satisfy_time() and self.active:
            # Inside the window: turn on only on a False->True transition.
            if not self.lifecycle_obj.state:
                self.performer.set_up()
                self.performer.on()
                self.lifecycle_obj.state = True
                self.lifecycle_obj.last_event = datetime.datetime.now()
                self.lifecycle_obj.save()
                info('TimerWatchdog turned on')
        elif not self.satisfy_time() or not self.satisfy_date():
            # Outside the window: turn off only if currently on.
            if self.lifecycle_obj.state:
                self.performer.set_up()
                self.performer.off()
                self.lifecycle_obj.state = False
                self.lifecycle_obj.last_event = datetime.datetime.now()
                self.lifecycle_obj.save()
                info('TimerWatchdog turned off')
class TimerWatchdog(BaseWatchdog):
    """Watchdog driven purely by the performer's Lifecycle timer window."""

    def __init__(self, performer):
        """*performer* is a key of modules.const.PERFORMERS ('light', 'fan', ...)."""
        super(TimerWatchdog, self).__init__()
        self.performer = PERFORMERS[performer]
        # Fix: load the lifecycle record once -- the old code constructed
        # four separate Lifecycle(type=performer) instances, one per
        # attribute it wanted to read.
        lifecycle = Lifecycle(type=performer)
        self.lifecycle_obj = lifecycle
        self.timer = lifecycle.timer
        self.last_event = lifecycle.last_event
        self.active = lifecycle.active
class ConditinsWatchdog(BaseWatchdog):
def __init__(self, condition, performer):
super(ConditinsWatchdog, self).__init__()
self.sensor = SENSORS[condition]()
self.performer = PERFORMERS[performer]
self.lifecycle_obj = Lifecycle(type=condition)
self.conditions = Lifecycle(type=condition).conditions
self.last_event = Lifecycle(type=condition).last_event
self.active = Lifecycle(type=condition).active
def satisfy_conditions(self):
value = self.sensor.read().moisture
goal = self.conditions
print 'value avg:{} goal is:{}'.format(value, goal.min_value)
return value < goal.min_value
def satisfied_last_event(self, shift_back=180):
# date = DateComparison(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
# time_shifted_obj = datetime.datetime.now() - datetime.timedelta(minutes=shift_back)
# time_shifted = TimeComparison(time_shifted_obj.hour, time_shifted_obj.minute)
# if self.last_event:
# last_event_date = DateComparison(self.last_event.year, self.last_event.month, self.last_event.day)
# last_event_time = TimeComparison(self.last_event.hour, self.last_event.minute)
# print '---shifted', time_shifted
# print '---last_ev', last_event_time
# print '---boolean', time_shifted > last_event_time
# if date > last_event_date:
# return True
# else:
# return time_shifted > last_event_time
date = dt(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
time_shifted_obj = datetime.datetime.now() - datetime.timedelta(minutes=shift_back)
time_shifted = tm(time_shifted_obj.hour, time_shifted_obj.minute)
if self.last_event:
last_event_date = dt(self.last_event.year, self.last_event.month, self.last_event.day)
last_event_time = tm(self.last_event.hour, self.last_event.minute)
if date > last_event_date:
return True
else:
return time_shifted >= last_event_time
def watch(self):
print '>>> Condition watch: satisfy_conditions:{} satisfied_last_event:{}'.\
format(self.satisfy_conditions(), self.satisfied_last_event())
if self.satisfy_conditions() and self.satisfied_last_event() and self.active:
self.performer.pulse(4)
self.lifecycle_obj.last_event = datetime.datetime.now()
self.lifecycle_obj.save()
info('water turned on')
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,068 | dmkaraba/green-house | refs/heads/master | /handlers/jobs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from modules.greenhouse.camera import Camera
from modules.greenhouse.lifecycle import TimerWatchdog, ConditinsWatchdog
from modules.greenhouse.objects import SensorResults
from utils.sensors.reader import pull_data
def insert_all_conditions():
print '>>> insert all conditions <<<'
answer = pull_data()
SensorResults.create(**answer)
def watch_for_lights():
print '>>> perform lights scenario <<<'
TimerWatchdog('light').watch()
def watch_for_fans():
print '>>> perform fans scenario <<<'
TimerWatchdog('fan').watch()
def watch_for_soilmoisture_a():
print '>>> perform soilmoisture A <<<'
ConditinsWatchdog('soil_moisture_a', 'pump_a').watch()
# def watch_for_soilmoisture_b():
# print '>>> perform soilmoisture B <<<'
# ConditinsWatchdog('soil_moisture_b', 'pump_b').watch()
# def shoot_frame():
# print '>>> shoot frame <<<'
# Camera.shoot() | {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,069 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/sensor_answers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from schematics.models import Model
from schematics.types.compound import ModelType
from schematics import types
class BaseSensorResult(Model):
    """Common base for all sensor answer models."""
    # Return code: 0 = success; sensors report 66 for a failed read.
    rc = types.IntType(default=0)
class BH1750Result(BaseSensorResult):
    """Luminosity reading from the BH1750 light sensor."""
    # Ambient light level; rounded to one decimal by the sensor code.
    luminosity = types.FloatType()
class DHT22Result(BaseSensorResult):
    """Air temperature/humidity reading from the DHT22 sensor."""
    # Both values rounded to one decimal by the sensor code.
    temperature = types.FloatType()
    humidity = types.FloatType()
class DS18B20Result(BaseSensorResult):
    """Temperature reading from a DS18B20 1-Wire probe."""
    # Degrees Celsius, rounded to one decimal by the sensor code.
    temperature = types.FloatType()
class SoilMoistureResult(BaseSensorResult):
    """Moisture reading from the soil-moisture probes."""
    # Percent on the calibrated 0-100 scale (not clamped by the sensor code).
    moisture = types.FloatType()
class AllSensorsResult(BaseSensorResult):
    """Aggregate answer bundling one reading per physical sensor."""
    BH1750 = ModelType(BH1750Result)
    DHT22 = ModelType(DHT22Result)
    DS18B20_air = ModelType(DS18B20Result)
    DS18B20_soil = ModelType(DS18B20Result)
    # Four moisture channels; only a/b appear to be wired up elsewhere --
    # c/d slots are reserved (TODO confirm).
    Soil_moisture_a = ModelType(SoilMoistureResult)
    Soil_moisture_b = ModelType(SoilMoistureResult)
    Soil_moisture_c = ModelType(SoilMoistureResult)
    Soil_moisture_d = ModelType(SoilMoistureResult)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,070 | dmkaraba/green-house | refs/heads/master | /handlers/tasks.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from config import config
from utils.deploy.celeryd import app as celeryd_app
from utils.deploy.celerybeat import app as celerybeat_app
@celerybeat_app.task(ignore_result=True, queue='main')
def insert_all_conditions():
    """Beat task: sample all sensors and store the readings.

    The job is imported lazily so the worker does not touch sensor
    hardware at module-import time.
    """
    from handlers.jobs import insert_all_conditions as job
    job()
@celerybeat_app.task(ignore_result=True, queue='main')
def light_watchdog():
    """Beat task: run the light timer watchdog (lazy import, see above tasks)."""
    from handlers.jobs import watch_for_lights as job
    job()
@celerybeat_app.task(ignore_result=True, queue='main')
def fan_watchdog():
    """Beat task: run the fan timer watchdog."""
    from handlers.jobs import watch_for_fans as job
    job()
@celerybeat_app.task(ignore_result=True, queue='main')
def soilmoisture_a_watchdog():
    """Beat task: run the bed-A soil-moisture watchdog."""
    from handlers.jobs import watch_for_soilmoisture_a as job
    job()
@celerybeat_app.task(ignore_result=True, queue='main')
def soilmoisture_b_watchdog():
    # NOTE(review): watch_for_soilmoisture_b is commented out in
    # handlers/jobs.py, so this task raises ImportError every time the
    # beat fires it -- either restore the job or disable this task.
    from handlers.jobs import watch_for_soilmoisture_b
    watch_for_soilmoisture_b()
# @celerybeat_app.task(ignore_result=True, queue='main')
# def shoot_frame():
# from handlers.jobs import shoot_frame
# shoot_frame()
# @celeryd_app.task(ignore_result=True, queue='mqtt')
# def fans():
# from modules.mqtt_interaction.base import FansMQTTClass
# FansMQTTClass().sub(config.mqtt_topics_sub['fans'])
#
# @celeryd_app.task(ignore_result=True, queue='mqtt')
# def lights():
# from modules.mqtt_interaction.base import LightMQTTClass
# LightMQTTClass().sub(config.mqtt_topics_sub['lights'])
#
# @celeryd_app.task(ignore_result=True, queue='mqtt')
# def pumps():
# from modules.mqtt_interaction.base import PumpsMQTTClass
# PumpsMQTTClass().sub(config.mqtt_topics_sub['pumps'])
#
# @celeryd_app.task(ignore_result=True, queue='mqtt')
# def sensors():
# from modules.mqtt_interaction.handlers import SensorsMQTTDmn
# SensorsMQTTDmn().run()
#
#
# @celeryd_app.task(ignore_result=True, queue='gh.incoming')
# def push_event():
# pass
#
# @celeryd_app.task(ignore_result=True, queue='gh.outcoming')
# def push_action():
# pass
# class BaseQueue(object):
# pass
#
#
# class IncomingQueue(BaseQueue):
# def push(self, obj):
# push_event.apply_async(args=[obj])
#
#
# class OutcomingQueue(BaseQueue):
# def push(self, obj):
# push_action.apply_async(args=[obj])
#
#
# fans.apply_async()
# lights.apply_async()
# pumps.apply_async()
# sensors.apply_async()
# TODO: add lights on/off task
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,071 | dmkaraba/green-house | refs/heads/master | /utils/deploy/celerybeat.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import timedelta
from celery import Celery
from celery.schedules import crontab
from config import config
# Materialise the schedule spec from config into real celery schedule
# objects: each entry's 'schedule' mapping holds either
# {'timedelta': {...kwargs...}} or {'crontab': {...kwargs...}}.
schedules = {}
for name, entry in config.celerybeat['CELERYBEAT_SCHEDULE'].items():
    spec = entry['schedule']
    if 'timedelta' in spec:
        entry['schedule'] = timedelta(**spec['timedelta'])
    elif 'crontab' in spec:
        entry['schedule'] = crontab(**spec['crontab'])
    schedules[name] = entry
config.celerybeat['CELERYBEAT_SCHEDULE'] = schedules
app = Celery()
app.conf.update(config.celerybeat)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,072 | dmkaraba/green-house | refs/heads/master | /utils/video/sequence.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, glob, json
from moviepy.editor import ImageClip, TextClip, CompositeVideoClip, ImageSequenceClip
from configer import ConfigCreator, ConfigDefaults
class SequenceProcessing(object):
    """Stamps rendered frame sequences and assembles them into videos.

    PATH and STAMP_LOGO come from the user config when set, otherwise
    from ConfigDefaults.  The moviepy work in make_stamp/make_video is
    currently commented out -- both methods only print what they would do.
    """
    def __init__(self):
        config = ConfigCreator()
        config_default = ConfigDefaults()
        # Prefer user-configured values, falling back to the defaults.
        for i in ['PATH', 'STAMP_LOGO']:
            if config.get_value(i):
                setattr(self, i, config.get_value(i))
            else:
                setattr(self, i, getattr(config_default, i))
    def make_stamp(self, task_id):
        '''
        Stamp every frame of the task's sequence with shot metadata
        (shot number/version pulled from the scene file name).
        :param task_id: id of a render task in config/renderData.json
        :return: None (frames are overwritten in place when enabled)
        '''
        # NOTE(review): _find_task returns None for an unknown id, which
        # makes the subscriptions below raise TypeError -- confirm ids are
        # always valid before calling.
        task = self._find_task(task_id)
        sequence_folder = task['sequence_path']
        # Scene file is named like '<shot>.<version>.<ext>'.
        stamp_shot_number = os.path.split(task['scene_path'])[-1].split('.')[0]
        stamp_shot_version = os.path.split(task['scene_path'])[-1].split('.')[1]
        # stamp_focal_length = json.load(open(os.path.join(self.PATH, stamp_shot_number, 'shotInfo.json')))['focalLength']
        # frames = os.listdir(sequence_folder)
        # stamp_logo = ImageClip(str(self.STAMP_LOGO), transparent=True)
        # for frame in frames:
        #     if os.path.splitext(frame)[-1] in ['.jpeg']:
        #         image = ImageClip(str(os.path.join(sequence_folder, frame)))
        #         stamp_frame_number = frame.split('.')[1]
        #         txt_clip1 = TextClip(stamp_shot_number, color='white', fontsize=20)
        #         txt_clip2 = TextClip('version: {}'.format(stamp_shot_version[1:]), color='white', fontsize=15)
        #         txt_clip3 = TextClip('frame: {}'.format(stamp_frame_number), color='white', fontsize=15)
        #         txt_clip4 = TextClip('focalLength: {}'.format(stamp_focal_length), color='white', fontsize=15)
        #         result = CompositeVideoClip([image, txt_clip1.set_position((5, 5)),
        #                                      txt_clip2.set_position((5, 25)),
        #                                      txt_clip3.set_position((5, 40)),
        #                                      txt_clip4.set_position((5, 55)),
        #                                      stamp_logo.set_position(("left", "bottom"))])
        #         result.save_frame(os.path.join(sequence_folder, frame))
        print 'STAMP#################'
        print stamp_shot_number, stamp_shot_version, self.STAMP_LOGO, self.PATH
        print '######################'
    def make_video(self, task_id):
        '''
        Assemble the task's jpeg frames into a 25fps video named after
        the shot number.
        :param task_id: id of a render task in config/renderData.json
        :return: None (video written under PATH when enabled)
        '''
        task = self._find_task(task_id)
        sequence_folder = task['sequence_path']
        sequence_frames = glob.glob(os.path.join(sequence_folder, '*.jpeg'))
        stamp_shot_number = os.path.split(task['scene_path'])[-1].split('.')[0]
        # clip = ImageSequenceClip(sequence_frames, fps=25)
        # if not os.path.exists(os.path.join(self.PATH, 'videos')):
        #     os.mkdir(os.path.join(self.PATH, 'videos'))
        # clip.write_videofile(os.path.join(self.PATH, 'mov', stamp_shot_number+'.mp4'), fps=25)
        print 'VIDEO#################'
        print sequence_folder, sequence_frames[0], stamp_shot_number, self.PATH
        print '######################'
    def _find_task(self, task_id):
        # Linear scan of config/renderData.json (relative to the script
        # location); returns None when no task matches task_id.
        tasks_data = os.path.join(os.path.dirname(sys.argv[0]), 'config/renderData.json')
        tasks = json.load(open(tasks_data))
        for task in tasks:
            if task['id'] == task_id:
                return task
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,073 | dmkaraba/green-house | refs/heads/master | /utils/mixins.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date, time
class DateComparison(date):
    """date subclass whose rich comparisons treat a falsy operand
    (e.g. None) as never matching, instead of raising."""

    def _as_date(self):
        # Compare as a plain datetime.date value.
        return date(self.year, self.month, self.day)

    def __ge__(self, y):
        return bool(y) and self._as_date() >= y

    def __le__(self, y):
        return bool(y) and self._as_date() <= y

    def __gt__(self, y):
        return bool(y) and self._as_date() > y

    def __lt__(self, y):
        return bool(y) and self._as_date() < y

    def __eq__(self, y):
        return bool(y) and self._as_date() == y
class TimeComparison(time):
    """time subclass whose rich comparisons treat a falsy operand as
    never matching.

    NOTE(review): on Python 2 (and <3.5) time(0, 0) itself is falsy, so
    comparing against midnight returns False -- behaviour preserved from
    the original guard clauses.
    """

    def _as_time(self):
        # Compare as a plain datetime.time value (minute resolution).
        return time(self.hour, self.minute)

    def __ge__(self, y):
        return bool(y) and self._as_time() >= y

    def __le__(self, y):
        return bool(y) and self._as_time() <= y

    def __gt__(self, y):
        return bool(y) and self._as_time() > y

    def __lt__(self, y):
        return bool(y) and self._as_time() < y

    def __eq__(self, y):
        return bool(y) and self._as_time() == y
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,074 | dmkaraba/green-house | refs/heads/master | /app/test_all.py | #!/usr/bin/python
from modules.greenhouse.sensors import DS18B20_Air, DS18B20_Soil
from modules.greenhouse.sensors import BH1750, DHT22, SoilMoistureSensors
from modules.greenhouse.controllers import Light, Fan, Pump, Servo
from time import sleep
def test_sensors():
    """Hardware smoke test: read every sensor once and print NAME + answer."""
    sensors = (DS18B20_Air, DS18B20_Soil, BH1750,
               DHT22, SoilMoistureSensors)
    for sensor in sensors:
        name, answer = sensor.NAME, sensor().read()
        print name, answer
        # if answer['status'] == 'success':
        #     value = answer['result']
        #     print 'OK   | {0:<33} | {1}'.format(name, value)
        # else:
        #     print 'FAIL | {0:<33} | {1}'.format(name, answer['status'])
def test_relay_performers():
    """Hardware smoke test: toggle each relay performer on/off with 1s pauses."""
    performers = (Light, Fan) #, Pump)
    for perf in performers:
        print perf.on()
        sleep(1)
        print perf.off()
        sleep(1)
def test_servo():
    """Hardware smoke test: sweep the servo closed -> half -> open -> half."""
    print Servo.set_state(0)
    sleep(1)
    print Servo.set_state(5)
    sleep(1)
    print Servo.set_state(10)
    sleep(1)
    print Servo.set_state(5)
    sleep(1)
if __name__ == '__main__':
    # Manual smoke-test entry point; only the sensor sweep is enabled.
    # test_relay_performers()
    # test_servo()
    test_sensors()
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,075 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/controllers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import RPi.GPIO as GPIO
import utils.logger as logger
from config import config
class RelayBase(object):
    """
    Controlling relay performers here.

    Relays are wired active-low: on() drives the GPIO LOW (relay locked,
    device ON) and off() drives it HIGH (relay released, device OFF).

    Usage (lights for example):
        # If we want to switch lights on
        Light.on()
        # If we want to switch lights off
        Light.off()
        # We cat reverse lights state
        Light.switch()
        # We can ask for lights state
        Light.get_state()
        # We can set up lights GPIO pins before turning on.
        # But it's not necessary. Just turning on or switching
        # checks for GPIOs for been seted up.
        # And if they are not yet it sets them up.
    """
    RELAY = None        # BCM pin number (or list of pins -- see Pumps)
    SETED_UP = False    # lazy per-class GPIO initialisation flag
    @classmethod
    def on(cls):
        """Energise the relay (GPIO LOW), setting up the pin if needed."""
        if not cls.SETED_UP:
            cls.set_up()
        GPIO.output(cls.RELAY, GPIO.LOW)
        return {'status': 'success', 'result': True}
    @classmethod
    def off(cls):
        """Release the relay (GPIO HIGH); a no-op if never set up."""
        if cls.SETED_UP:
            GPIO.output(cls.RELAY, GPIO.HIGH)
            return {'status': 'success', 'result': False, 'msg': 'turned off'} # TODO: rebuild to answer objects
        else:
            return {'status': 'success', 'result': False, 'msg': 'was not seted up'}
    @classmethod
    def switch(cls):
        """Invert the relay state; returns the new human-readable state."""
        if not cls.SETED_UP:
            cls.set_up()
        switched_state = not GPIO.input(cls.RELAY)
        GPIO.output(cls.RELAY, switched_state)
        # result is negated again because the pin level is active-low.
        return {'status': 'success', 'result': not switched_state}
    @classmethod
    def get_state(cls):
        """Report whether the device is currently on (never raises)."""
        if cls.SETED_UP:
            # We get the state of relay here. <not> is used
            # because of getting human readable result:
            # GPIO.LOW -> relay is locked (ON) -> True
            # GPIO.HIGH -> relay is unlocked (OFF) -> False
            state = not GPIO.input(cls.RELAY)
            return {'status': 'success', 'result': state}
        else:
            return {'status': 'success', 'result': False}
    @classmethod
    def set_up(cls):
        """Claim the GPIO pin(s), starting HIGH (relay released / device off)."""
        if not cls.SETED_UP:
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(cls.RELAY, GPIO.OUT, initial=GPIO.HIGH)
            cls.SETED_UP = True
            return {'status': 'success',
                    'info': 'GPIO {} seted up'.format(cls.RELAY)}
        else:
            return {'status': 'success',
                    'info': 'GPIO {} is already seted up'.format(cls.RELAY)}
    @classmethod
    def tear_down(cls):
        """Release the GPIO pin(s) and reset the lazy-init flag."""
        # TODO: not to call cleanup at all. Only at system shotdown
        GPIO.cleanup(cls.RELAY)
        cls.SETED_UP = False
        return {'status': 'success', 'info': 'GPIOs cleaned up'}
class Light(RelayBase):
    # Grow-light relay; pin number comes from config.
    RELAY = config.relays['gpio_pins']['lights']
class Fan(RelayBase):
    # Ventilation-fan relay; pin number comes from config.
    RELAY = config.relays['gpio_pins']['fans']
class Pumps(RelayBase):
    # Both pump relays driven together: RELAY is a *list* of pins, which
    # GPIO.setup/GPIO.output accept.
    # NOTE(review): the inherited switch()/get_state() call
    # GPIO.input(cls.RELAY) with this list -- RPi.GPIO expects a single
    # channel there, so those methods likely fail for Pumps; confirm
    # before relying on them (PumpA/PumpB override RELAY with one pin).
    RELAY = [
        config.relays['gpio_pins']['pump_a'],
        config.relays['gpio_pins']['pump_b']
    ]
    @classmethod
    def pulse(cls, duration=1):
        # Run the pump(s) for *duration* seconds, then release the GPIOs.
        cls.on()
        time.sleep(duration)
        cls.off()
        cls.tear_down()
class PumpA(Pumps):
    # Bed-A pump only (single pin).
    RELAY = config.relays['gpio_pins']['pump_a']
class PumpB(Pumps):
    # Bed-B pump only (single pin).
    RELAY = config.relays['gpio_pins']['pump_b']
class Servo(object):
    """Vent servo driven by 50Hz PWM.

    *position* is an abstract 0..10 scale mapped linearly onto the duty
    cycle range [min_position, max_position].
    """

    SERVO_PIN = config.relays['gpio_pins']['servo']
    min_position = 3.0    # duty cycle at position 0
    max_position = 12.4   # duty cycle at position 10

    @staticmethod
    def get_state():
        # No positional feedback from the hardware; not implemented.
        pass

    @classmethod
    def set_state(cls, position=None, min=min_position, max=max_position):
        """Move the servo to *position* in [0, 10].

        Returns an answer dict: {'status': 'success', 'result': position}
        on success, or {'status': 'fail', 'info': ...} when position is
        missing or out of range.  (*min*/*max* keep their original names
        for keyword-caller compatibility even though they shadow builtins.)
        """
        answer = dict()
        # Fix: compare against None with 'is not', not '!=' (PEP 8 E711).
        if position is not None:
            # Fix: idiomatic chained range check instead of 'x <= 10 and x >= 0'.
            if 0 <= position <= 10:
                new_pos = min+position*(max-min)/10
                GPIO.setmode(GPIO.BCM)
                GPIO.setup(cls.SERVO_PIN, GPIO.OUT)
                pwm = GPIO.PWM(cls.SERVO_PIN, 50)
                pwm.start(5)
                pwm.ChangeDutyCycle(new_pos)
                time.sleep(1.5)  # give the servo time to travel
                pwm.stop()
                GPIO.cleanup()
                answer.update({'status': 'success', 'result': position})
                logger.info('Servo.set_state: new position is {}'.format(position))
            else:
                answer.update({'status': 'fail', 'info': 'Position out of range'})
                logger.warning('Servo.set_state: position {} not in [0:10]'.format(position))
        else:
            answer.update({'status': 'fail', 'info': 'No position provided'})
            logger.warning('Servo.set_state: no position provided')
        return answer
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,076 | dmkaraba/green-house | refs/heads/master | /modules/const.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from modules.greenhouse.controllers import PumpA, PumpB, Light, Fan
from modules.greenhouse.sensors import BH1750, DHT22, DS18B20_Air, DS18B20_Soil,\
SoilMoistureA, SoilMoistureB
# Registry mapping sensor slugs (as referenced by configs/requests) to reader classes.
SENSORS = {
    'soil_moisture_a': SoilMoistureA, # TODO: add sensors
    'soil_moisture_b': SoilMoistureB,
}
# Registry mapping actuator slugs to their controller classes.
PERFORMERS = {
    'light': Light,
    'pump_a': PumpA,
    'pump_b': PumpB,
    'fan': Fan
}
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,077 | dmkaraba/green-house | refs/heads/master | /app/tear_down.py | #!/usr/bin/python
import RPi.GPIO as GPIO
def cleanup():
    """Release every GPIO pin this process has claimed."""
    GPIO.cleanup()
if __name__ == '__main__':
    cleanup()
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,078 | dmkaraba/green-house | refs/heads/master | /web_interface/web_events/config_requests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from schematics import types
from schematics.models import Model
from schematics.types.compound import ModelType
### LIFECYCLE ###################################
class Timer(Model):
    """Schedule window for a time-driven lifecycle rule."""
    # Optional calendar bounds for when the timer applies at all.
    start_date = types.DateTimeType()
    end_date = types.DateTimeType()
    # Daily switch-on/off moments; presumably time-of-day despite the
    # DateTimeType -- TODO confirm against the web client payloads.
    start_time = types.DateTimeType(required=True)
    end_time = types.DateTimeType(required=True)
class Conditions(Model):
    """Sensor thresholds for a condition-driven lifecycle rule."""
    # Units presumably depend on the sensor type -- verify with callers.
    min_value = types.IntType(required=True)
    max_value = types.IntType()
class CreateLifecycle(Model):
    """Request schema for creating a lifecycle rule (timer- or sensor-driven)."""
    type = types.StringType(required=True)
    # True -> rule driven by `timer`; otherwise by `conditions` (presumed; confirm).
    by_time = types.BooleanType(required=True)
    timer = ModelType(Timer)
    conditions = ModelType(Conditions)
    # Timestamp of the last time this rule fired.
    last_event = types.DateTimeType()
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,079 | dmkaraba/green-house | refs/heads/master | /modules/mqtt_interaction/base.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import signal
import paho.mqtt.client as mqtt
from config import config
from modules.greenhouse.controllers import Light, Fan, Pump
class Base_GHMQTT(mqtt.Client):
    """Base MQTT client bound to the cloud broker configured in ``config``.

    Subclasses define a publish topic and override ``on_message`` to act on
    incoming commands; SIGTERM/SIGINT trigger a clean disconnect.
    """
    USERNAME = config.cloud_mqtt_cred['user']
    PASSWORD = config.cloud_mqtt_cred['pass']
    SERVER = config.cloud_mqtt_cred['server']
    def __init__(self):
        super(Base_GHMQTT, self).__init__()
        self.username_pw_set(self.USERNAME, self.PASSWORD)
        # NOTE(review): broker port 10990 is hardcoded here rather than taken
        # from config -- confirm it matches the cloud broker settings.
        self.connect(self.SERVER, 10990, 60)
        # Install signal handlers so a service stop / Ctrl-C disconnects cleanly.
        signal.signal(signal.SIGTERM, self.cleanup)
        signal.signal(signal.SIGINT, self.cleanup)
    def cleanup(self, signum, frame):
        # Signal handler: drop the broker connection before the process exits.
        # print signum, frame
        self.disconnect()
        print '>>> Base_GHMQTT:cleanup'
    def on_connect(self, mqttc, obj, flags, rc):
        # paho callback: rc == 0 means the broker accepted the connection.
        print("Connected rc: "+str(rc))
    def on_disconnect(self, mqttc, userdata, rc):
        print("Dissconnected rc: " + str(rc))
        if rc != 0:
            # Non-zero rc is an unexpected drop -- try to get back online.
            self.reconnect()
            print "Reconnected"
    def on_message(self, mqttc, obj, msg):
        # Default handler only logs the message; subclasses override this.
        print("Message "+msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
    def on_publish(self, mqttc, obj, mid):
        print("Published mid: "+str(mid))
    def on_subscribe(self, mqttc, obj, mid, granted_qos):
        print("Subscribed: "+str(mid)+" "+str(granted_qos))
    def on_log(self, mqttc, obj, level, string):
        print(string)
    def perform(self, mqttc, obj, msg):
        # Hook for subclasses that separate command handling from on_message.
        raise NotImplementedError
    def pub(self, msgs):
        """Publish an iterable of (topic, payload) pairs, JSON-encoding payloads."""
        for topic, payload in msgs:
            self.publish(topic, json.dumps(payload))
    def sub(self, topic):
        """Subscribe to *topic* and block in the network loop until it errors."""
        print '>>>sub(topic={})'.format(topic)
        self.subscribe(topic, 0)
        rc = 0
        while rc == 0:
            rc = self.loop()
        return rc
class LightMQTTClass(Base_GHMQTT):
    """MQTT client that switches the grow lights on broker command."""
    topic_pub = config.mqtt_topics_pub['lights']
    def on_message(self, mqttc, obj, msg):
        # Accept exactly 'on'/'off'; anything else is acknowledged as an error.
        command = msg.payload
        if command in ('on', 'off'):
            getattr(Light, command)()
            reply = {'status': 'ok'}
        else:
            reply = {'status': 'error'}
        self.publish(self.topic_pub, json.dumps(reply))
class FansMQTTClass(Base_GHMQTT):
    """MQTT client that switches the ventilation fans on broker command."""
    topic_pub = config.mqtt_topics_pub['fans']
    def on_message(self, mqttc, obj, msg):
        # Accept exactly 'on'/'off'; anything else is acknowledged as an error.
        command = msg.payload
        if command in ('on', 'off'):
            getattr(Fan, command)()
            reply = {'status': 'ok'}
        else:
            reply = {'status': 'error'}
        self.publish(self.topic_pub, json.dumps(reply))
class PumpsMQTTClass(Base_GHMQTT):
    """MQTT client that drives the irrigation pump on broker command."""
    topic_pub = config.mqtt_topics_pub['pumps']
    def on_message(self, mqttc, obj, msg):
        command = msg.payload
        if command == 'on':
            # Fixed pulse length of 5 (units defined by Pump.pulse -- confirm).
            Pump.pulse(5)
            reply = {'status': 'ok'}
        elif command == 'off':
            Pump.off()
            reply = {'status': 'ok'}
        else:
            reply = {'status': 'error'}
        self.publish(self.topic_pub, json.dumps(reply))
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,080 | dmkaraba/green-house | refs/heads/master | /modules/data/db.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from mongoengine import *
from config import config
from modules.data.connections import mongo_connection
class DBSharedProperty(object):
    """Descriptor that proxies attribute access to the owner's ``model`` mapping."""
    def __init__(self, name):
        # Key under which the value lives inside the owner's ``model`` dict.
        self.name = name
    def __get__(self, instance, owner=None):
        # Class-level access returns the descriptor itself, per convention.
        if instance is None:
            return self
        return instance.model[self.name]
    def __set__(self, instance, value):
        instance.model[self.name] = value
class DBQuerySet(QuerySet):
    """QuerySet class installed on DBDocument via its ``meta``.

    NOTE(review): both overrides call ``super(QuerySet, self)`` -- i.e. they
    deliberately (?) skip QuerySet's own get/with_id and dispatch straight to
    QuerySet's base class.  If that bypass is unintended, the super() target
    should be ``DBQuerySet`` -- confirm against mongoengine's class hierarchy.
    """
    def get(self, *q_objs, **query):
        return super(QuerySet, self).get(*q_objs, **query)
    def with_id(self, object_id):
        return super(QuerySet, self).with_id(object_id)
class DBDocument(Document):
    """Abstract base document routing all models to the configured database."""
    meta = {
        'abstract': True,
        'queryset_class': DBQuerySet,
    }
    @classmethod
    def _get_db(cls):
        # Override mongoengine's default connection lookup with the shared
        # connection and the database name taken from config.
        return mongo_connection[config.mongodb['db_name']]
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,081 | dmkaraba/green-house | refs/heads/master | /utils/logger/__init__.py | #!/usr/bin/python
import os
import logging
from config import Config
config_dir = Config.log_dir
_FORMAT = "%(asctime)s %(levelname)s: %(message)s"
_DATEFMT = '%d-%m-%Y %I:%M:%S'


def _file_logger(name):
    """Return a logger writing to '<name>.log' in config_dir, creating it once.

    Bug fixed: ``logging.basicConfig`` configures the root logger only on its
    FIRST call -- every later call with a different ``filename`` was a silent
    no-op, so whichever level logged first captured ALL subsequent records and
    the per-level files never worked.  Dedicated named loggers, each with its
    own FileHandler, restore the intended one-file-per-level behaviour.
    """
    logger = logging.getLogger('greenhouse.' + name)
    if not logger.handlers:
        handler = logging.FileHandler(os.path.join(config_dir, name + '.log'))
        handler.setFormatter(logging.Formatter(_FORMAT, datefmt=_DATEFMT))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False  # keep records out of the root logger
    return logger


def debug(msg):
    """Append *msg* to debug.log."""
    _file_logger('debug').debug(msg)


def info(msg):
    """Append *msg* to info.log."""
    _file_logger('info').info(msg)


def warning(msg):
    """Append *msg* to warning.log."""
    _file_logger('warning').warning(msg)


def error(msg):
    """Append *msg* to error.log."""
    _file_logger('error').error(msg)
45,082 | dmkaraba/green-house | refs/heads/master | /modules/greenhouse/camera.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import subprocess
from config import config
camera_dir = config.camera_dir
class Camera(object):
    """Captures webcam frames into config.camera_dir via the ``streamer`` CLI."""
    @classmethod
    def shoot(cls):
        """Build a timestamped frame name and (eventually) capture it.

        NOTE(review): the actual capture is commented out below, so currently
        this only prints the target path and returns the name of a frame that
        was never written -- confirm whether that is intentional.  Also note
        the commented call uses ``bashCommand`` while the local variable is
        ``bash_command``; it would NameError if simply uncommented.
        """
        frame_name = '.'.join((str(int(time.time())), 'jpg'))
        frame_dir = os.path.join(camera_dir, frame_name)
        print frame_dir
        # Capture command kept ready for when capture is re-enabled:
        bash_command = 'streamer -f jpeg -s 640x480 -j 100 -o {}'.format(frame_dir)
        # process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
        # output, error = process.communicate()
        return frame_name
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,083 | dmkaraba/green-house | refs/heads/master | /utils/deploy/celeryd.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from celery import Celery
from config import config
# app = Celery('main_app',
# broker='amqp://guest@localhost//',
# include=['handlers.tasks'])
#
# app.conf.timezone = 'Europe/Minsk'
# Celery app configured entirely from config.celeryd
# (broker URL, task includes, timezone, etc.).
app = Celery()
app.conf.update(config.celeryd)
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,084 | dmkaraba/green-house | refs/heads/master | /handlers/__init__.py | #!/usr/bin/python
# All measurement logic lives here.
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,085 | dmkaraba/green-house | refs/heads/master | /app/set_up.py | # GPIO setup must be here | {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
45,086 | dmkaraba/green-house | refs/heads/master | /utils/sensors/reader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from modules.greenhouse.sensor_answers import AllSensorsResult
from modules.greenhouse.sensors import DS18B20_Air, DS18B20_Soil, \
BH1750, DHT22, SoilMoistureSensors
def pull_data():
    """Take one reading from every sensor and bundle them into AllSensorsResult."""
    moisture = SoilMoistureSensors()
    readings = {
        'DS18B20_air': DS18B20_Air().read(),
        'DS18B20_soil': DS18B20_Soil().read(),
        'BH1750': BH1750().read(),
        'DHT22': DHT22().read(),
        'Soil_moisture_a': moisture.read_one(0),
        'Soil_moisture_b': moisture.read_one(1),
        'Soil_moisture_c': moisture.read_one(2),
        'Soil_moisture_d': moisture.read_one(3),
    }
    return AllSensorsResult(readings)
if __name__=='__main__':
    print(pull_data())
| {"/modules/greenhouse/objects.py": ["/modules/data/db.py", "/modules/greenhouse/models.py"], "/modules/greenhouse/models.py": ["/modules/data/db.py"], "/modules/mqtt_interaction/handlers.py": ["/utils/sensors/reader.py", "/modules/mqtt_interaction/base.py"], "/modules/greenhouse/sensors.py": ["/utils/logger/__init__.py", "/config/__init__.py", "/modules/greenhouse/sensor_answers.py"], "/handlers/tasks.py": ["/config/__init__.py", "/utils/deploy/celeryd.py", "/utils/deploy/celerybeat.py", "/handlers/jobs.py"], "/utils/deploy/celerybeat.py": ["/config/__init__.py"], "/modules/greenhouse/controllers.py": ["/utils/logger/__init__.py", "/config/__init__.py"], "/modules/const.py": ["/modules/greenhouse/controllers.py", "/modules/greenhouse/sensors.py"], "/modules/data/db.py": ["/config/__init__.py"], "/utils/logger/__init__.py": ["/config/__init__.py"], "/utils/deploy/celeryd.py": ["/config/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.