index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
27,293,963
|
Agamiru/online_store
|
refs/heads/master
|
/app_admin/models.py
|
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.utils.translation import gettext_lazy as _
# Create your models here.
class UserManager(BaseUserManager):
    """Manager for the custom email-based User model (no username field)."""
    # use_in_migrations = True

    def _create_user(self, email, password, **extra_fields):
        """
        Create and save a user with the given email and password.

        Raises:
            ValueError: if no email is supplied.
        """
        if not email:
            # BUG FIX: message said "username" although the check is on email.
            raise ValueError('The given email must be set')
        email = self.normalize_email(email)
        user = self.model(email=email, **extra_fields)
        user.set_password(password)
        # BUG FIX: save on the manager's database so multi-db setups work
        # (mirrors django.contrib.auth.models.UserManager._create_user).
        user.save(using=self._db)
        return user

    def create_user(self, email=None, password=None, **extra_fields):
        """Create a regular (non-staff, non-superuser) user."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(email, password, **extra_fields)

    def create_superuser(self, email=None, password=None, **extra_fields):
        """Create a superuser; is_staff/is_superuser must not be forced False."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        extra_fields.setdefault("is_active", True)
        # setdefault above only fills missing keys, so these guards catch a
        # caller explicitly passing is_staff=False / is_superuser=False.
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
    """Custom user model that authenticates with an email address instead of
    a username."""

    username = None  # remove AbstractUser's username field entirely
    email = models.EmailField(_('email address'), unique=True)

    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []  # only email/password prompted by createsuperuser

    objects = UserManager()

    class Meta(AbstractUser.Meta):
        # BUG FIX: verbose_name is the singular form; it was "users",
        # identical to verbose_name_plural.
        verbose_name = "user"
        verbose_name_plural = "users"
        db_table = "users"
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,964
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/apps.py
|
from django.apps import AppConfig
class TelegramAppConfig(AppConfig):
    """App configuration for the telegram_app Django application."""
    name = 'telegram_app'
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,965
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0019_auto_20201214_1509.py
|
# Generated by Django 3.1 on 2020-12-14 14:09
from django.db import migrations, models
import products.utils.model_utils
class Migration(migrations.Migration):
    # Auto-generated migration: attaches CrossModelUniqueNameValidator to
    # Category.name. NOTE(review): the validator CLASS (not an instance) is
    # referenced here, while the tests instantiate it with a model argument —
    # confirm the model field definition this was generated from.

    dependencies = [
        ('products', '0018_product_features_alias'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator]),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,966
|
Agamiru/online_store
|
refs/heads/master
|
/products/tests/test_model_utils.py
|
from django.test import TestCase
from django.db.models import ForeignKey
from django.core.exceptions import ValidationError
from ..utils.test_utils import CreateProduct, SubCategory1
from ..utils.model_utils import GetMainFeatures as gmf
from ..utils.model_utils import CrossModelUniqueNameValidator
from ..models import UniqueCategory
from django.forms import models
class TestGetMainFeatures(TestCase):
    """Tests for GetMainFeatures (imported as ``gmf``): the helper that picks
    the category whose ``main_features`` apply to a product and maps those
    features onto the product's ``specs`` dict."""

    def setUp(self) -> None:
        # Fresh product factory for every test; each test builds the
        # category/brand/product objects it needs through this helper.
        self.product = CreateProduct()

    def test_return_appropriate_category_instance(self):
        # Only Category given
        cat_kwargs = {"name": "Guitars", "main_features": ["type", "year"]}
        self.product.create_products_w_defaults(cat_kwargs, "Ibanez")
        main_f_obj = gmf(self.product.prod_instance)
        self.assertEqual(
            main_f_obj.return_appropriate_category_instance(),
            self.product.cat_obj
        )
        # Subcategory given but with no main_features
        # (expected to fall back to the parent category)
        subcat_1_kwargs = {
            "cat_id": self.product.cat_obj, "name": "Electric Guitars",
        }
        self.product.subcat_1_id(**subcat_1_kwargs)
        self.product.model_name(brand_id=self.product.brand_obj, name="Roadstar")
        self.product.create_product()
        main_f_obj = gmf(self.product.prod_instance)
        self.assertEqual(
            main_f_obj.return_appropriate_category_instance(),
            self.product.cat_obj
        )
        # Subcategory given and with main_features
        # (the subcategory itself should win over the parent category)
        subcat_2_kwargs = {
            "subcat_1_id": self.product.subcat_1_obj, "name": "Bass Guitar",
            "main_features": ["strings", "eq"]
        }
        self.product.subcat_2_id(**subcat_2_kwargs)
        self.product.model_name(brand_id=self.product.brand_obj, name="Reels")
        self.product.create_product()
        main_f_obj = gmf(self.product.prod_instance)
        self.assertEqual(
            main_f_obj.return_appropriate_category_instance(),
            self.product.subcat_2_obj
        )

    def test_specs_key_switcher(self):
        # specs keys use short aliases; switcher should rename them to the
        # full main-feature names, preserving values.
        specs = {
            "key": ["Really dope stuff"],
            "midi": ["Nice Midi"], "fishes": ["Swim a lot"],
            "world": ["Messed up"], "os": ["Nice os", "Good os", "Lovely Os"]
        }
        main_f = [
            "Keyboard", "MIDI Control Surfaces", "OS Compatibility"
        ]
        alias_f = ["key", "midi", "os"]
        # Create product_instance class
        # (a throwaway type() stand-in — no DB objects needed for this path)
        prod_inst = type("prod_inst", (), {"specs": specs, "features_alias": alias_f})
        main_f_obj = gmf(prod_inst)
        main_f_obj.specs_key_switcher(main_f, alias_f)
        self.assertEqual(
            main_f_obj.product_instance.specs["Keyboard"],
            ["Really dope stuff"]
        )
        self.assertEqual(
            main_f_obj.product_instance.specs["OS Compatibility"],
            ["Nice os", "Good os", "Lovely Os"]
        )

    def test_set_custom_alias(self):
        # Set data
        specs = {
            "key": ["Really dope stuff"],
            "midi": ["Nice Midi"], "fishes": ["Swim a lot"],
            "world": ["Messed up"], "os": ["Nice os", "Good os", "Lovely Os"]
        }
        main_f = [
            "Keyboard", "MIDI Control Surfaces", "OS Compatibility"
        ]
        alias_f = ["key", "midi", "os"]
        # Create product_instance class
        prod_inst = type("prod_inst", (), {"specs": specs, "features_alias": alias_f, "specs_from_bhpv": False})
        approp_cat = type("approp_cat", (), {"main_features": main_f})
        # Instantiate GMF class with custom_alias list
        # (deliberately one element short of main_f to trigger the assertion)
        main_f_obj = gmf(prod_inst, ["shell", "camp"])
        main_f_obj.return_appropriate_category_instance = lambda: approp_cat # set callable
        self.assertEqual(main_f_obj.return_appropriate_category_instance(), approp_cat)
        # Make sure AssertionError is raised
        self.assertRaisesMessage(
            AssertionError, "Lists must be of equal lengths", main_f_obj.final
        )
        # Pad custom_alias to the right length; final() should now succeed.
        main_f_obj.custom_alias.append("owerri")
        main_f_obj.final()
        self.assertEqual(
            main_f_obj.product_instance.specs["shell"], ['Really dope stuff']
        )
        self.assertEqual(
            main_f_obj.product_instance.specs["owerri"], ['Nice os', 'Good os', 'Lovely Os']
        )

    def test_features(self):
        # "fishes" (index 1) and "clowns" (index 4) are not in the default
        # specs, so features() is expected to record them as skipped.
        cat_kwargs = {
            "name": "Midi Keyboard",
            "main_features": [
                "Keyboard", "fishes", "MIDI Control Surfaces",
                "OS Compatibility", "clowns"
            ]
        }
        self.product.create_products_w_defaults(cat_kwargs, "Alesis")
        main_f_obj = gmf(self.product.prod_instance)
        main_f_obj.features()
        print(f"features dict:\n{main_f_obj.to_string()}\n")
        self.assertEqual(main_f_obj.skipped, [1, 4])

    def test_features_alias(self):
        # When specs use aliased keys, features_dict should resolve the full
        # feature name ("OS Compatibility") to the aliased entry ("os").
        specs = {
            "key": ["Really dope stuff"],
            "midi": ["Nice Midi"], "fishes": ["Swim a lot"],
            "world": ["Messed up"], "os": ["Nice os", "Good os", "Lovely Os"]
        }
        cat_kwargs = {
            "name": "Midi Keyboard",
            "main_features": [
                "Keyboard", "MIDI Control Surfaces", "OS Compatibility"
            ]
        }
        self.product.create_products_w_defaults(cat_kwargs, "M-Audio")
        self.product.model_name(brand_id=self.product.brand_obj, name="Rockstar")
        self.product.specs_from_bhpv(False)
        self.product.features_alias(["key", "midi", "os"])
        self.product.kwargs["specs"] = specs
        self.product.create_product()
        main_f_obj = gmf(self.product.prod_instance)
        print(f"features dict:\n{main_f_obj.to_string()}\n")
        self.assertEqual(
            main_f_obj.features_dict.get("OS Compatibility"),
            specs.get("os")
        )

    def test_get_main_features_w_all_invalid_keys(self):
        # None of the main_features exist in the specs: all three skipped.
        cat_kwargs = {
            "name": "M Keyboard",
            "main_features": ["shell", "camp", "koun"]
        }
        self.product.create_products_w_defaults(cat_kwargs, "Motu")
        main_f_obj = gmf(self.product.prod_instance)
        main_f_obj.features()
        print(f"features dict:\n{main_f_obj.to_string()}\n")
        self.assertEqual(len(main_f_obj.skipped), 3)

    # def test_print_specs(self):
    #     self.product.print_specs()
class FakeModelForm(models.ModelForm):
    """Minimal ModelForm over SubCategory1, used to exercise the model's
    field validators through the form-validation machinery."""

    class Meta:
        exclude = ["id"]
        model = SubCategory1
class TestCrossModelUniqueNameValidator(TestCase):
    """Verifies the name-uniqueness validator across three entry points:
    direct call, model-level save, and a ModelForm."""

    def setUp(self) -> None:
        self.prod = CreateProduct()

    def test_cross_model_unique_name_validator(self):
        # A Category named "M Keyboard" already exists after this call.
        cat_kwargs = {"name": "M Keyboard", "main_features": ["shell"]}
        self.prod.cat_id(**cat_kwargs)
        # Test validator in isolation
        validator = CrossModelUniqueNameValidator(UniqueCategory)
        self.assertRaises(ValidationError, validator, "M Keyboard")
        # Test validator in model
        # (creating a SubCategory1 with the same name must be rejected)
        cat_kwargs["cat_id"] = self.prod.cat_obj
        cat_kwargs["alias"] = ["koun"]
        self.assertRaises(
            ValidationError,
            self.prod.subcat_1_id, **cat_kwargs
        )
        # Test validator in model form
        form_instance = FakeModelForm(data=cat_kwargs)
        self.assertFalse(form_instance.is_valid())
        self.assertEqual(
            "Category object with name 'M Keyboard' already exists",
            form_instance.errors.get("name")[0]
        )
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,967
|
Agamiru/online_store
|
refs/heads/master
|
/products/managers.py
|
from typing import Union, Type, Tuple
from django.db import models
from django.db.models import ObjectDoesNotExist as doesnt_exist
from django.contrib.postgres.search import TrigramSimilarity, TrigramDistance
from django.db.models import CharField
from django.db.models.functions import Cast
from django.db.models import QuerySet, Func, F
from .utils.manager_utils import SearchResult
# Types — module-level aliases used in manager annotations below.
queryset = Union[Type[QuerySet], QuerySet]  # Can be Subtype or Instance of Queryset
django_model = Type[models.Model]  # Subtype of models.Model
class ProductManager(models.Manager):
    """Custom manager adding trigram-based search helpers for Product
    (requires the PostgreSQL pg_trgm extension)."""

    def simple_search(self, value: str):
        """Cheap trigram-similar lookup on full_name (default 0.3 similarity)."""
        results = self.filter(full_name__trigram_similar=value)
        return SearchResult(results, "full_name", "trigram", similarity=0.3)

    def trigram_similarity_search(
            self, value, sim_value: float = 0.5, field="full_name"
    ) -> SearchResult:
        """Rank rows whose trigram similarity to *value* on *field* exceeds
        ``sim_value`` (must lie in [0, 1]), best matches first."""
        if sim_value > 1 or sim_value < 0:
            raise ValueError("similarity must be greater > 0 but <= 1")
        res = self.annotate(
            similarity=TrigramSimilarity(field, value),
        ).filter(similarity__gt=sim_value).order_by("-similarity")
        return SearchResult(res, field, "trigram", similarity=sim_value)

    def trigram_distance_search(
            self, value, dist_value: float = 0.7, field="full_name"
    ) -> SearchResult:
        """
        Returns a queryset of values not really similar to query but not totally DISIMILAR.
        """
        if dist_value > 1 or dist_value < 0:
            raise ValueError("distance must be greater > 0 but <= 1")
        # BUG FIX: lookups were spelled ``distance_gt``/``distance_lt`` (single
        # underscore) — Django treats those as unknown field names and raises
        # FieldError; the lookup separator is a double underscore.
        res = self.annotate(
            distance=TrigramDistance(field, value),
        ).filter(distance__gt=dist_value).filter(distance__lt=1).order_by("-distance")
        return SearchResult(res, field, "trigram", distance=dist_value)

    # Basically a way to get a single object without a try/except block
    def get_obj_or_none(self, value, field="full_name") -> Union[django_model, None]:
        """Return the single object matching {field: value}, or None."""
        dict_ = {field: value}
        try:
            obj = self.get(**dict_)
        except doesnt_exist:
            return None
        else:
            return obj

    @staticmethod
    def filter_available(query_set: queryset) -> queryset:
        """Restrict a queryset to items flagged available=True."""
        return query_set.filter(available=True)

    def full_search(self, value) -> Union[SearchResult, None]:
        """Cascading search: exact match -> icontains -> trigram similarity at
        decreasing thresholds -> loose suggestions. Returns None if every tier
        comes up empty."""
        # Search for exact object if user is knowledgeable of item
        res = self.filter_available(self.filter(full_name=value))
        if res:
            return SearchResult(res, "full_name")
        # Search for objects that contain values
        res = self.filter(full_name__icontains=value)
        if res:
            return SearchResult(self.filter_available(res), "full_name", "icontains")
        # Search for similar items
        sim_values = (0.75, 0.5, 0.3)
        for sim_val in sim_values:
            res = self.trigram_similarity_search(value, sim_val)
            if res:
                query_set = self.filter_available(res.results)
                return SearchResult(query_set, res.query_field, "trigram", similarity=sim_val)
        # Suggestions: reuse the last (loosest, 0.3) pass above and surface
        # anything with a non-zero similarity as "did you mean" results.
        res = res.results.exclude(similarity=0)
        if res:
            return SearchResult(self.filter_available(res), "full_name", "trigram", suggestions=True)
        return None  # explicit: nothing matched at any tier
class CategoryManagers(models.Manager):
    """Custom manager adding trigram search over category names and their
    alias arrays (PostgreSQL pg_trgm + unnest)."""

    def trigram_similarity_search(
            self, value, sim_value: float = 0.5, field="name"
    ) -> SearchResult:
        """Rank rows whose trigram similarity to *value* on *field* exceeds
        ``sim_value`` (must lie in [0, 1]), best matches first."""
        if sim_value > 1 or sim_value < 0:
            raise ValueError("similarity must be greater > 0 but <= 1")
        res = self.annotate(
            similarity=TrigramSimilarity(field, value),
        ).filter(similarity__gt=sim_value).order_by("-similarity")
        return SearchResult(res, field, "trigram", similarity=sim_value)

    def trigram_distance_search(
            self, value, dist_value: float = 0.7, field="name"
    ) -> SearchResult:
        """
        Returns a queryset of values not really similar to query but not totally DISIMILAR.
        """
        if dist_value > 1 or dist_value < 0:
            raise ValueError("distance must be greater > 0 but <= 1")
        # BUG FIX: ``distance_gt``/``distance_lt`` (single underscore) are not
        # valid lookups and raise FieldError; use ``distance__gt``/``distance__lt``.
        res = self.annotate(
            distance=TrigramDistance(field, value),
        ).filter(distance__gt=dist_value).filter(distance__lt=1).order_by("-distance")
        return SearchResult(res, field, "trigram", distance=dist_value)

    # Basically a way to get a single object without a try/except block
    def get_obj_or_none(self, value, field="name"):
        """Return the single object matching {field: value}, or None."""
        dict_ = {field: value}
        try:
            obj = self.get(**dict_)
        except doesnt_exist:
            return None
        else:
            return obj

    def full_search(self, value) -> Union[SearchResult, None]:
        """Cascading search: exact -> icontains -> trigram on name -> trigram
        over the alias array, with sub-0.3 alias hits returned as suggestions."""
        # Search for exact object if user is knowledgeable of item
        res = self.filter(name=value)
        if res:
            return SearchResult(res, "name")
        # Search for objects that contain values
        res = self.filter(name__icontains=value)
        if res:
            return SearchResult(res, "name", "icontains")
        # Search for similar items
        sim_values = (0.75, 0.5, 0.3)
        for sim_val in sim_values:
            res = self.trigram_similarity_search(value, sim_val)
            if res:
                return res
        # Search Alias
        # Empty Array fields are empty lists = [""], exclude such objects
        res = self.exclude(alias=list()) \
            .annotate(unnest=Func(F('alias'), function='unnest')) \
            .annotate(similar=TrigramSimilarity('unnest', value))
        # Manually filter queryset objects
        similar, suggestions = [], []
        if res:
            for obj in res:
                # BUG FIX: the original looped over every threshold in
                # sim_values per object, appending an object once per threshold
                # it cleared (up to 3 duplicates) and re-appending each
                # suggestion once per threshold. Classify each object once:
                # clearing the lowest threshold (0.3) is equivalent to clearing
                # "any" of them.
                if obj.similar >= min(sim_values):
                    similar.append(obj)
                elif 0 < obj.similar < 0.3:
                    suggestions.append(obj)
        # Suggestions
        if similar:
            return SearchResult(similar, "alias", "trigram")
        else:
            return SearchResult(suggestions, "alias", "trigram", suggestions=True) \
                if suggestions else None
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,968
|
Agamiru/online_store
|
refs/heads/master
|
/app_admin/views.py
|
from django.shortcuts import render
# Create your views here.
# Static HTML snippet: a card-summary table. NOTE(review): "NAME", "NAMEn"
# and "1234" look like placeholders to be substituted before rendering —
# confirm against whichever view/template consumes html_data.
html_data = """
<table>
<tr>
<td>Card balance</td>
<td>$18.30</td>
</tr>
<tr>
<td>Card name</td>
<td>NAMEn</td>
</tr>
<tr>
<td>Account holder</td>
<td>NAME</td>
</tr>
<tr>
<td>Card number</td>
<td>1234</td>
</tr>
<tr>
<td>Status</td>
<td>Active</td>
</tr>
</table>
"""
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,969
|
Agamiru/online_store
|
refs/heads/master
|
/products/utils/admin_utils.py
|
import json
import zlib
from django.forms import fields
from django import forms
from django.forms.widgets import Textarea
from django.core.exceptions import ValidationError
from django.forms.models import BaseModelForm, ModelFormMetaclass
from ..utils.general_utils import BhphotovideoTableConverter
class FormSpecsField(fields.JSONField):
    """JSON form field for product specs pasted as HTML tables."""
    # Check for 3 possibilities:
    # 1. Value wasn't filled, return None
    # 2. Value comes in HTML, convert to JSON compatible python type and return.
    # 3. Value is string from database, return "in_database".

    widget = Textarea(attrs={
        "placeholder": "Insert HTML specs here",
    })

    def to_python(self, value):
        """Normalize raw form input per the three cases documented above.

        Raises:
            ValidationError: if the HTML converts to a non-JSON-serializable type.
        """
        if value in self.empty_values:
            return None
        value = str(value)
        if value.startswith("<") and value.endswith(">"):  # html check
            converter_obj = BhphotovideoTableConverter(value)
            python_data = converter_obj.to_python_dict()
            # print(f"python_data: {python_data}")
            if isinstance(python_data, (list, dict, int, float, fields.JSONString)):
                return python_data
            else:
                raise ValidationError(
                    self.error_messages['invalid'],
                    code='invalid',
                    params={'value': value},
                )
        # Non-empty, non-HTML input is treated as already stored (case 3).
        return "in_database"

    def prepare_value(self, value):
        """Serialize a Python value back to JSON text for widget rendering."""
        if value is None:
            return
        if isinstance(value, forms.fields.InvalidJSONInput):
            return value
        return json.dumps(value, cls=self.encoder)
class FormCommaNewLineSeparatedField(fields.JSONField):
    """JSON form field that accepts comma- or newline-separated text and
    normalizes it to a list of stripped strings."""

    widget = Textarea(attrs={
        "placeholder": "Comma or new line separated values",
    })

    def to_python(self, value):
        """Convert raw textarea input to a list of strings.

        Empty input maps to [""] — the project's "empty array" convention
        (see the exclude(alias=list()) handling in the category manager).
        """
        if value in self.empty_values:
            return [""]
        by_newline = value.split("\n")  # For lists delimited by a new line character
        by_comma = value.split(",")  # For lists delimited by a comma
        # The longer of the two splits wins: whichever delimiter actually
        # occurs in the input produces more pieces.
        items = by_newline if len(by_newline) > len(by_comma) else by_comma
        # BUG FIX: the former isinstance()/ValidationError branch was dead
        # code — str.split always returns a list, so the "invalid" path could
        # never fire. Strip each piece (also removes stray '\r') and return.
        return [item.strip() for item in items]

    def prepare_value(self, value):
        """Render the stored list back as comma-separated text."""
        if value is None:
            return
        if isinstance(value, forms.fields.InvalidJSONInput):
            return value
        return ', '.join(value)
class AbstractJoinForm(BaseModelForm, metaclass=ModelFormMetaclass):
    """
    A model form maker where categories and subcategories join models can inherit
    special form cleaning and validation abilities on the admin page.
    """

    def clean(self):
        self._validate_unique = True
        first_field_name, second_field_name = self.get_field_names()
        # BUG FIX: use .get() — a field that failed its own validation is
        # absent from cleaned_data, and subscripting raised KeyError here.
        obj_1 = self.cleaned_data.get(first_field_name)  # Category or Subcategory Object
        obj_2 = self.cleaned_data.get(second_field_name)  # Category or Subcategory Object
        if obj_1 is not None and obj_2 is not None:
            if obj_1.id == obj_2.id:  # A category item cannot be an accessory to itself
                self.add_error(
                    f"{second_field_name}", f"{first_field_name} & {second_field_name} cannot have the same values"
                )
        return self.cleaned_data

    # Get the two important model field names
    def get_field_names(self):  # Tuple[str, str]
        # Actually second and third field names considering the index/id
        # field, but for readability let it be first and second.
        # Read field metadata off the model class directly — no need to
        # instantiate a throwaway model (the original called self._meta.model()).
        model_fields = self._meta.model._meta.fields
        return model_fields[1].name, model_fields[2].name
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,970
|
Agamiru/online_store
|
refs/heads/master
|
/products/tests/test_managers.py
|
from django.test import TestCase
from django.db.models.expressions import Value as V
from django.db.models.functions import Cast, Greatest, Coalesce
from django.db.models import CharField, F, Func, Case, When, Value, Q, IntegerField as I
from django.contrib.postgres.search import TrigramSimilarity, SearchVector
from ..models import Product, search_all_categories, Category
class TestProductManager(TestCase):
    """Smoke test for the custom product/category search entry points.

    NOTE(review): this test only prints results and asserts nothing, so it
    can never fail on wrong search output — consider adding assertions once
    the expected fixture results are pinned down.
    """

    # Pre-built Product/Category rows the search queries run against.
    fixtures = ["product_fixtures.json"]

    def setUp(self) -> None:
        # No per-test state needed; the fixture provides all data.
        pass

    def test_full_search(self):
        value = "soundcard"
        # Product-level search via the custom manager on Product.objects.
        prod_search = Product.objects.full_search(value)
        # Category-level search spanning all category models.
        cat_search = search_all_categories(value)
        if cat_search:
            # Results without a 'suggestions' attribute expose a query_type
            # describing how the match was made; otherwise they are treated
            # as suggestion lists. (Shape of the result object is defined in
            # the managers module — not visible here.)
            print(f"Cat_list: {(cat_search, cat_search.query_type) if not hasattr(cat_search, 'suggestions') else (cat_search, 'These are suggestions')}")
        else:
            print("No Category items found")
        if prod_search:
            print(f"Prod_list: {(prod_search, prod_search.query_type) if not hasattr(prod_search, 'suggestions') else (prod_search, 'These are suggestions')}")
        else:
            print("No Products found")
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,971
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0024_auto_20201216_1635.py
|
# Generated by Django 3.1 on 2020-12-16 15:35
from django.db import migrations, models
import products.models
import products.utils.model_utils
class Migration(migrations.Migration):
    """Re-declare the `name` field on Category, SubCategory1 and SubCategory2.

    Each `name` keeps max_length=100 / unique=True and carries a
    CrossModelUniqueNameValidator bound to UniqueCategory, enforcing name
    uniqueness across all three category models rather than per-table only.
    """

    dependencies = [
        ('products', '0023_auto_20201216_1630'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
        ),
        migrations.AlterField(
            model_name='subcategory1',
            name='name',
            field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
        ),
        migrations.AlterField(
            model_name='subcategory2',
            name='name',
            field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,972
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0004_auto_20201210_0835.py
|
# Generated by Django 3.1 on 2020-12-10 07:35
from django.db import migrations
import products.models
import products.utils.model_utils
class Migration(migrations.Migration):
    """Switch Category.alias and Category.main_features to ListJSONField.

    Both fields use the project's custom ListJSONField with
    products.models.json_default as their default factory.
    """

    dependencies = [
        ('products', '0003_auto_20201210_0802'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='alias',
            field=products.utils.model_utils.ListJSONField(default=products.models.json_default),
        ),
        migrations.AlterField(
            model_name='category',
            name='main_features',
            field=products.utils.model_utils.ListJSONField(default=products.models.json_default),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,973
|
Agamiru/online_store
|
refs/heads/master
|
/products/signals.py
|
from django.db.models import ObjectDoesNotExist as doesnt_exist
from .models import UniqueCategory
# For post_save signals
def save_or_update_unique_category(sender, instance, created, **kwargs):
    """post_save receiver: mirror a category instance into UniqueCategory.

    On creation, inserts a UniqueCategory row recording the instance's name,
    source model class name, and id. On update, syncs the mirrored name.

    Args:
        sender: the model class that sent the signal (unused; Django API).
        instance: the saved Category/SubCategory1/SubCategory2 instance.
        created: True when this save created the row.
    """
    inst_name, inst_id = instance.name, instance.id
    model_name = instance.__class__.__name__
    if created:
        UniqueCategory.objects.create(name=inst_name, model_name=model_name, cat_id=inst_id)
    else:
        # cat_id and model_name are unique together, so this matches at most
        # one row. get_or_create (instead of a bare .get()) also repairs a
        # missing mirror row — e.g. for instances saved before these signals
        # were connected — where .get() would raise DoesNotExist.
        unique_obj, made = UniqueCategory.objects.get_or_create(
            cat_id=inst_id, model_name=model_name,
            defaults={"name": inst_name},
        )
        # Only write when the name actually changed, to avoid a no-op UPDATE.
        if not made and unique_obj.name != inst_name:
            unique_obj.name = inst_name
            unique_obj.save(update_fields=["name"])
# For post_delete signals
def delete_unique_category(sender, instance, **kwargs):
    """post_delete receiver: remove the UniqueCategory mirror row.

    Args:
        sender: the model class that sent the signal (unused; Django API).
        instance: the deleted Category/SubCategory1/SubCategory2 instance.
    """
    # Filter on BOTH cat_id and model_name: cat_id alone is only unique per
    # source model (ids from Category/SubCategory1/SubCategory2 can collide),
    # so the old .get(cat_id=...) could raise MultipleObjectsReturned — an
    # exception the previous DoesNotExist handler did not catch. A queryset
    # delete is also a no-op when no mirror row exists, removing the need
    # for try/except entirely.
    UniqueCategory.objects.filter(
        cat_id=instance.id,
        model_name=instance.__class__.__name__,
    ).delete()
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,974
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0028_auto_20210110_2338.py
|
# Generated by Django 3.1 on 2021-01-10 22:38
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare Category.alias as a Postgres ArrayField.

    alias is an ArrayField of CharField(max_length=50) entries declared
    blank=True/null=True, defaulting to an empty list, with no size limit.
    """

    dependencies = [
        ('products', '0027_auto_20210110_2249'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='alias',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50, null=True), default=list, size=None),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,975
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0012_auto_20201211_0641.py
|
# Generated by Django 3.1 on 2020-12-11 05:41
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Category fields: new_alias -> alias, new_features -> main_features."""

    dependencies = [
        ('products', '0011_auto_20201211_0641'),
    ]

    operations = [
        migrations.RenameField(
            model_name='category',
            old_name='new_alias',
            new_name='alias',
        ),
        migrations.RenameField(
            model_name='category',
            old_name='new_features',
            new_name='main_features',
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,976
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0025_auto_20210110_2037.py
|
# Generated by Django 3.1 on 2021-01-10 19:37
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Convert alias/main_features to Postgres ArrayFields on all category models.

    Every field becomes an ArrayField of CharField(max_length=50). All default
    to an empty list with blank entries allowed, EXCEPT Category.main_features,
    which is declared required (no blank=True, no default).
    """

    dependencies = [
        ('products', '0024_auto_20201216_1635'),
    ]

    operations = [
        migrations.AlterField(
            model_name='category',
            name='alias',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='category',
            name='main_features',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=50), size=None),
        ),
        migrations.AlterField(
            model_name='subcategory1',
            name='alias',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='subcategory1',
            name='main_features',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='subcategory2',
            name='alias',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), default=list, size=None),
        ),
        migrations.AlterField(
            model_name='subcategory2',
            name='main_features',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), default=list, size=None),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,977
|
Agamiru/online_store
|
refs/heads/master
|
/products/tests/test_models.py
|
from django.test import TestCase
from ..models import Category
from django.db.utils import ProgrammingError
class TestModels(TestCase):
    """Model tests for the products app.

    setUp only prepares shared keyword arguments for building a Category;
    the actual test methods (if any) live outside this excerpt.
    """

    def setUp(self) -> None:
        # Sample Category construction kwargs shared by the tests.
        self.params = dict(
            name="cables",
            alias=["wires", "cords"],
            main_features=["type", "gender"],
        )
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,978
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/utils.py
|
from .settings import bot, webhook_url
class IncrementString:
    """Callable that returns its base string suffixed with an incrementing integer.

    The k-th call returns ``string + str(start_num + k)``:

        inc = IncrementString("msg")   # start_num defaults to 0
        inc()  # -> "msg1"
        inc()  # -> "msg2"
    """

    def __init__(self, string, start_num=0):
        self.string = string
        self.start_num = start_num
        # None marks "never called yet"; set on the first __call__.
        self.next_num = None

    def __call__(self, *args, **kwargs):
        # Explicit None check: the previous truthiness test
        # (`if not self.next_num`) re-initialised the counter whenever it
        # reached 0 (e.g. start_num=-1), repeating the same suffix forever.
        if self.next_num is None:
            self.next_num = self.start_num + 1
        else:
            self.next_num += 1
        return self.string + str(self.next_num)
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,979
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0020_auto_20201216_0745.py
|
# Generated by Django 3.1 on 2020-12-16 06:45
from django.db import migrations, models
import products.models
import products.utils.model_utils
class Migration(migrations.Migration):
dependencies = [
('products', '0019_auto_20201214_1509'),
]
operations = [
migrations.CreateModel(
name='UniqueCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('model_name', models.CharField(max_length=200)),
],
options={
'db_table': 'unique_category',
},
),
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
),
migrations.AlterField(
model_name='subcategory1',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
),
migrations.AlterField(
model_name='subcategory2',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator(products.models.UniqueCategory)]),
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,980
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0034_auto_20210112_1320.py
|
# Generated by Django 3.1 on 2021-01-12 12:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace Product.mo_name with a model_name ForeignKey to ModelName.

    Drops the old mo_name column, then adds model_name as a nullable FK
    (SET_NULL on delete, related_name='product') to products.ModelName.
    """

    dependencies = [
        ('products', '0033_remove_product_model_name'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='mo_name',
        ),
        migrations.AddField(
            model_name='product',
            name='model_name',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product', to='products.modelname'),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,981
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/urls.py
|
from django.urls import path, include
from .views import telegram_view_dispatcher, set_webhook
from .settings import BOT_TOKEN
urlpatterns = [
    # Telegram webhook endpoint. Embedding the bot token in the path keeps
    # the URL unguessable (standard Telegram webhook practice) — only
    # Telegram, which knows the token, can reach the dispatcher.
    path(f"{BOT_TOKEN}/", telegram_view_dispatcher),
    # One-shot endpoint to register the webhook with Telegram.
    path("set-webhook/", set_webhook)
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,982
|
Agamiru/online_store
|
refs/heads/master
|
/common_app/__init__.py
|
# Points Django at the app's AppConfig so its ready() hooks run.
# NOTE(review): default_app_config is deprecated since Django 3.2 — apps with
# a single AppConfig in apps.py are auto-discovered; confirm Django version
# before removing.
default_app_config = "common_app.apps.CommonAppConfig"
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,983
|
Agamiru/online_store
|
refs/heads/master
|
/products/admin.py
|
from django.contrib import admin
from .models import (
Product, Brand, Category, SubCategory1,
SubCategory2, ModelName, CategoryAccessoryJoin,
CategoryBoughtTogetherJoin, Subcat1AccessoryJoin,
Subcat1BoughtTogetherJoin, Subcat2AccessoryJoin,
Subcat2BoughtTogetherJoin,
)
from django import forms
from django.forms.widgets import TextInput
from django.core.exceptions import ValidationError, ObjectDoesNotExist as doesntExist
from django.forms.models import ModelForm
from .utils.admin_utils import (
FormSpecsField, FormCommaNewLineSeparatedField, AbstractJoinForm
)
################### PRODUCT ######################
class ProductForm(forms.ModelForm):
    """
    Product admin form.

    Auto-fills 'weight' and 'package_dimensions' from the specs payload and
    handles the intricacies of retrieving and saving specs from the database
    (including the "in_database" sentinel submitted for unchanged fields).
    """
    specs = FormSpecsField()
    in_the_box = FormCommaNewLineSeparatedField()
    features_alias = FormCommaNewLineSeparatedField()

    def clean(self):
        self._validate_unique = True
        # 'specs' can be the sentinel string "in_database", a JSON dict or None.
        specs = self.cleaned_data.get("specs")
        if not specs:  # Field validation should normally have caught this already.
            raise ValidationError("This field receives 'None' as value")
        id_ = self.instance.id
        pd = self.cleaned_data.get("package_dimensions")  # Package Dimensions
        w = self.cleaned_data.get("weight")  # Weight
        try:
            # Updating an existing product.
            obj = Product.objects.get(pk=id_)
            instance_specs = obj.specs
            instance_itb = obj.in_the_box
            if specs == "in_database":
                # Bound data sometimes picks up display artifacts; prefer the
                # values stored on the instance itself when saving.
                self.cleaned_data["specs"] = instance_specs
                # Derive package dimensions and weight from the stored specs.
                instance_specs_pd = instance_specs.get("Box Dimensions (LxWxH)")
                instance_specs_pd = instance_specs_pd[0] if instance_specs_pd else None
                instance_specs_w = instance_specs.get("Package Weight")
                instance_specs_w = instance_specs_w[0] if instance_specs_w else None
                # If the dimension/weight fields were left empty on update,
                # fall back to the values recorded in the stored specs.
                if not pd and instance_specs_pd is not None:
                    self.cleaned_data["package_dimensions"] = instance_specs_pd
                if not w and instance_specs_w is not None:
                    self.cleaned_data["weight"] = instance_specs_w
            # Reuse the stored in_the_box when the sentinel was submitted.
            if self.cleaned_data.get("in_the_box") == "in_database":
                self.cleaned_data["in_the_box"] = instance_itb
        except doesntExist:
            # Newly created product: derive dimensions/weight from the specs.
            try:
                package_dims = specs["Box Dimensions (LxWxH)"]
                # Use the filled package dimensions if provided, else take from specs.
                if not pd:
                    self.cleaned_data["package_dimensions"] = package_dims[0]
            except (KeyError, TypeError, IndexError) as e:
                # KeyError: specs lack the key; TypeError: specs is the string
                # "null"; IndexError: the value list is empty.
                # Todo: Should display a message notifying the user there are no package_dimensions
                self.add_error(
                    "package_dimensions",
                    f"No package dimensions provided for reason '{e}'",
                )
            try:
                weight = specs["Package Weight"]
                # Use the filled weight if provided, else take from specs.
                if not w:
                    self.cleaned_data["weight"] = weight[0]
            except (KeyError, TypeError, IndexError) as e:
                # Todo: Should display a message notifying the user there is no weight
                self.add_error("weight", f"No weight provided for reason '{e}'")
        return self.cleaned_data

    class Meta:
        model = Product
        exclude = ["variants"]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Product changelist: price and short description editable inline."""
    list_display = ("product_name", "price", "short_desc",)
    # list_display_links = ("brand", "model_name",)
    list_editable = ("price", "short_desc")
    form = ProductForm
###################### CATEGORY #######################
class CategoryForm(ModelForm):
    """Admin form for Category; list-style fields accept comma/newline input."""
    alias = FormCommaNewLineSeparatedField()
    main_features = FormCommaNewLineSeparatedField()

    class Meta:
        model = Category
        fields = "__all__"
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Registers Category in the admin with its custom form."""
    form = CategoryForm
class CategoryAccessoryJoinForm(AbstractJoinForm):
    """Join form linking a category to its accessory entries."""
    class Meta:
        model = CategoryAccessoryJoin
        fields = "__all__"
class CategoryBoughtTogetherJoinForm(AbstractJoinForm):
    """Join form linking a category to its bought-together entries."""
    class Meta:
        model = CategoryBoughtTogetherJoin
        fields = "__all__"
@admin.register(CategoryAccessoryJoin)
class CategoryAccessoryJoinAdmin(admin.ModelAdmin):
    """Admin for the category/accessory join table."""
    form = CategoryAccessoryJoinForm
@admin.register(CategoryBoughtTogetherJoin)
class CategoryBoughtTogetherJoinAdmin(admin.ModelAdmin):
    """Admin for the category/bought-together join table."""
    form = CategoryBoughtTogetherJoinForm
################# SUB CATEGORY 1 #########################
class SubCategory1Form(ModelForm):
    """Admin form for SubCategory1; mirrors CategoryForm's list-input fields."""
    alias = FormCommaNewLineSeparatedField()
    main_features = FormCommaNewLineSeparatedField()

    class Meta:
        model = SubCategory1
        fields = "__all__"
@admin.register(SubCategory1)
class Subcat1Admin(admin.ModelAdmin):
    """Registers SubCategory1 in the admin with its custom form."""
    form = SubCategory1Form
class Subcat1AccessoryJoinForm(AbstractJoinForm):
    """Join form linking a level-1 subcategory to its accessory entries."""
    class Meta:
        model = Subcat1AccessoryJoin
        fields = "__all__"
class Subcat1BoughtTogetherJoinForm(AbstractJoinForm):
    """Join form linking a level-1 subcategory to its bought-together entries."""
    class Meta:
        model = Subcat1BoughtTogetherJoin
        fields = "__all__"
@admin.register(Subcat1AccessoryJoin)
class Subcat1AccessoryJoinAdmin(admin.ModelAdmin):
    """Admin for the subcategory-1/accessory join table."""
    form = Subcat1AccessoryJoinForm
@admin.register(Subcat1BoughtTogetherJoin)
class Subcat1BoughtTogetherJoinAdmin(admin.ModelAdmin):
    """Admin for the subcategory-1/bought-together join table."""
    form = Subcat1BoughtTogetherJoinForm
################# SUB CATEGORY 2 #########################
class SubCategory2Form(ModelForm):
    """Admin form for SubCategory2; mirrors CategoryForm's list-input fields."""
    alias = FormCommaNewLineSeparatedField()
    main_features = FormCommaNewLineSeparatedField()

    class Meta:
        model = SubCategory2
        fields = "__all__"
@admin.register(SubCategory2)
class Subcat2Admin(admin.ModelAdmin):
    """Registers SubCategory2 in the admin with its custom form."""
    form = SubCategory2Form
class Subcat2AccessoryJoinForm(AbstractJoinForm):
    """Join form linking a level-2 subcategory to its accessory entries."""
    class Meta:
        model = Subcat2AccessoryJoin
        fields = "__all__"
class Subcat2BoughtTogetherJoinForm(AbstractJoinForm):
    """Join form linking a level-2 subcategory to its bought-together entries."""
    class Meta:
        model = Subcat2BoughtTogetherJoin
        fields = "__all__"
@admin.register(Subcat2AccessoryJoin)
class Subcat2AccessoryJoinAdmin(admin.ModelAdmin):
    """Admin for the subcategory-2/accessory join table."""
    form = Subcat2AccessoryJoinForm
@admin.register(Subcat2BoughtTogetherJoin)
class Subcat2BoughtTogetherJoinAdmin(admin.ModelAdmin):
    """Admin for the subcategory-2/bought-together join table."""
    form = Subcat2BoughtTogetherJoinForm
# Simple models that need no custom form use the default ModelAdmin.
admin.site.register(Brand)
admin.site.register(ModelName)
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,984
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0014_auto_20201211_0655.py
|
# Generated by Django 3.1 on 2020-12-11 05:55
from django.db import migrations
class Migration(migrations.Migration):
    """
    Drops 'alias' and 'main_features' from both subcategory models.

    Auto-generated and already applied — do not edit operations in place.
    """

    dependencies = [
        ('products', '0013_auto_20201211_0653'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='subcategory1',
            name='alias',
        ),
        migrations.RemoveField(
            model_name='subcategory1',
            name='main_features',
        ),
        migrations.RemoveField(
            model_name='subcategory2',
            name='alias',
        ),
        migrations.RemoveField(
            model_name='subcategory2',
            name='main_features',
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,985
|
Agamiru/online_store
|
refs/heads/master
|
/products/utils/model_utils.py
|
from typing import List
import json
from django.db.models.fields.json import JSONField
from django.db.models import ObjectDoesNotExist as doesnt_exist
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
@deconstructible
class CrossModelUniqueNameValidator:
    """
    Check that a model 'name' attribute is unique amongst other category
    models with the same attribute name.

    :param model: Model backend to perform the lookup on.
    :param message: Optional override for the error message template.
    :param code: Optional override for the error code.
    """
    message = "%(category_name)s object with name '%(value)s' already exists"
    code = "invalid"

    def __init__(self, model, message=None, code=None):
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        self.model = model

    def __call__(self, value):
        value = str(value)
        try:
            unique_val = self.model.objects.get(name=value)
        except doesnt_exist:
            # Name is unused: valid. (EAFP — a single lookup, no race.)
            return
        # Clash found; report which category model already owns the name.
        # NOTE(review): assumes the fetched object exposes 'model_name' —
        # confirm against the category models.
        raise ValidationError(
            message=self.message, code=self.code,
            params={"category_name": unique_val.model_name, "value": value}
        )

    def __eq__(self, other):
        # Fix: compare the bound model as well. Django uses __eq__ on
        # deconstructible validators to detect field changes; ignoring
        # 'model' made validators bound to different models compare equal.
        return (
            isinstance(other, self.__class__) and
            self.model == other.model and
            self.message == other.message and
            self.code == other.code
        )
# Todo: Add a hook for changing the final features keys to custom ones
# Todo: This hook might be unique to this class or might be persisted to db.
class GetMainFeatures:
    """
    Uses the main_features attribute of different product categories
    to return the appropriate features for the product.
    It will skip keys it doesnt find in specs. In the future, this should
    warn the user.
    """
    def __init__(self, product_instance, custom_alias: list = None):
        self.product_instance = product_instance
        self.custom_alias = custom_alias
        self.has_features = False
        self.features_list = None       # Feature keys pulled from the category
        self.values_list = []           # Spec values matching features_list
        self.features_dict = None  # Final product
        self.skipped: List[int] = []  # List of index of skipped features

    def final(self):
        """
        Main hook to get the final dict, do not use 'self.features_dict'
        """
        # The loop runs features() at most once: if values_list is still
        # empty on the second pass there is nothing to extract.
        count = 0
        while not self.values_list:
            count += 1
            if count > 1:
                return {}
            self.features()
        if self.custom_alias:
            return self.set_custom_alias(self.custom_alias)
        return self.features_dict

    # Refactoring this function causes dynamic references
    ### Do not Refactor name###
    def features(self):
        """
        Creates the main_features final dict
        """
        self.set_features_list()
        specs = self.product_instance.specs
        skipped: List[int] = []
        self.features_dict = {}
        count = 0
        for feat in self.features_list:
            try:
                self.features_dict.update({feat: specs[feat]})
                self.values_list.append(specs[feat])
            # If for some reason specs has no such features
            except KeyError:
                skipped.append(count)
            count += 1
        if skipped:
            self.skipped = skipped

    def set_custom_alias(self, custom_alias: list):
        # Rewrites the instance specs in place using the caller's aliases,
        # then returns the rewritten specs.
        self.specs_key_switcher(
            custom_alias, self.features_list, self.features_dict
        )
        return self.product_instance.specs

    def set_features_list(self):
        """
        Sets features list which will be used to create features_dict.
        """
        approp_cat = self.return_appropriate_category_instance()
        # NOTE(review): specs_from_bhpv False presumably means the specs use
        # aliased keys that must be mapped back to main_features — confirm.
        if not self.product_instance.specs_from_bhpv:
            main_f = approp_cat.main_features
            alias_f = self.product_instance.features_alias
            self.specs_key_switcher(main_f, alias_f)
        self.features_list = approp_cat.main_features

    def specs_key_switcher(self, main_f, alias_f, specs=None):
        """
        In the case of products with features_alias (i.e. specs_from_bhpv = False),
        format the main_features specs to use the keys provided in the
        main_features list, but have the values provided in the actual specs.
        It will skip missing key_values where necessary.
        """
        assert len(main_f) == len(alias_f), "Lists must be of equal lengths"
        specs = self.product_instance.specs if specs is None else specs
        new_specs, specs_keys = {}, specs.keys()
        count = 0
        for feature in alias_f:
            if feature in specs_keys:
                new_specs[main_f[count]] = specs.get(feature)
            # count always advances so main_f and alias_f stay index-aligned.
            count += 1
        self.product_instance.specs = new_specs

    def return_appropriate_category_instance(self):
        """
        Returns lowest_level subcategory with valid main_features else,
        returns Category obj which will always have valid main_features attr.
        """
        if self.product_instance.subcat_2_id \
                and self.product_instance.subcat_2_id.main_features:
            return self.product_instance.subcat_2_id
        elif self.product_instance.subcat_1_id \
                and self.product_instance.subcat_1_id.main_features:
            return self.product_instance.subcat_1_id
        else:
            approp_cat = self.product_instance.cat_id
            return approp_cat

    def to_string(self):
        """
        Pretty print results of main_features.
        """
        # Check if values_list has been generated, attempt to generate it if it hasn't.
        # If values_list is still empty return empty string
        if not self.final():
            return ""
        final_string = ""
        skipped_count = 0  # Count to check for skipped items
        values_count = 0
        for feat in self.features_list:
            # Check if feature was skipped
            if skipped_count in self.skipped:
                skipped_count += 1
                continue
            skipped_count += 1
            count = 0  # Count to format indented items
            final_string += f"{feat}: "
            indent_length = len(feat)
            for inner_feat in self.values_list[values_count]:
                count += 1
                if count > 1:
                    final_string += " " * (indent_length + 2) + f"{inner_feat}\n"
                else:
                    final_string += f"{inner_feat}\n"
            values_count += 1
        return final_string
# Not Useful, kept because removal causes migration errors
class ListJSONField(JSONField):
    """
    Return a list or none rather than string.

    Kept only because removing it breaks historical migrations that
    reference it; not used by current models.
    """
    def value_from_object(self, obj):
        # Fix: removed a leftover debug print ("I ran") that fired on
        # every form render.
        # Stored value is a JSON string; decode it so callers get a list.
        str_obj = getattr(obj, self.attname)
        list_obj = json.loads(str_obj)
        return list_obj
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,986
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/views.py
|
import telegram
import json
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from rest_framework import status
from .settings import bot, webhook_url
from .messages import *
def set_webhook(request):
    """
    Register this app's webhook URL with the Telegram Bot API.

    Returns a 200 confirmation on success; raises if Telegram rejects
    the registration.
    """
    registered = bot.set_webhook(webhook_url)
    print(f"Webhook url: {webhook_url}")
    if registered:
        return HttpResponse("Webhook Set", status=status.HTTP_200_OK)
    # Todo: Change to Raise Configuration Error
    raise ValueError("Webhook not set")
@csrf_exempt
def telegram_view_dispatcher(request):
    """
    Webhook entry point: parses an incoming Telegram update and dispatches
    it to the matching command handler.

    Always returns an HTTP 200 so Telegram does not keep re-sending
    updates we choose not to handle.
    """
    json_body = json.loads(request.body)
    print(f"update: {json_body}")
    update = telegram.Update.de_json(json_body, bot)
    # Fix: updates without a message or text (edited messages, channel
    # posts, stickers, ...) crashed with AttributeError; acknowledge them.
    if update.message is None or update.message.text is None:
        return HttpResponse(status=status.HTTP_200_OK)
    text = update.message.text.encode("utf-8").decode()
    if text == "/start":
        return start(update)
    # Fix: returning None from a Django view raises ValueError; answer 200
    # for unrecognized commands so Telegram stops retrying.
    return HttpResponse(status=status.HTTP_200_OK)
def start(update):
    """Reply to the /start command with the canned welcome message."""
    incoming = update.message
    bot.send_message(
        chat_id=incoming.chat.id,
        text=start_message,
        reply_to_message_id=incoming.message_id,
        parse_mode="MarkdownV2",
    )
    return HttpResponse(status=status.HTTP_200_OK)
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,987
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0032_product_mo_name.py
|
# Generated by Django 3.1 on 2021-01-12 12:07
from django.db import migrations, models
import django.db.models.deletion
def copy_model_references(apps, schema_editor):
    """
    Copies model_name id's to new field mo_name, field will be renamed later
    """
    # Use the historical model state, never the live import.
    prod = apps.get_model("products", "Product")
    # Row-by-row copy: loads every product and saves each one individually;
    # acceptable for a small table, slow for a large one.
    for product in prod.objects.all():
        product.mo_name = product.model_name
        product.save()
class Migration(migrations.Migration):
    """
    Adds a temporary 'mo_name' FK and copies model_name references into it.

    Already applied — do not edit operations in place. NOTE(review): the
    RunPython has no reverse_code, so this migration is irreversible.
    """

    dependencies = [
        ('products', '0031_auto_20210112_1303'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='mo_name',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pro', to='products.modelname'),
        ),
        # Python routine to copy model_name id's
        migrations.RunPython(copy_model_references),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,988
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0008_category_test.py
|
# Generated by Django 3.1 on 2020-12-10 09:02
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
    """
    Adds a 'test' JSONField to Category with a callable default.

    Auto-generated and already applied — do not edit operations in place.
    """

    dependencies = [
        ('products', '0007_auto_20201210_0948'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='test',
            field=models.JSONField(default=products.models.json_default),
        ),
    ]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,989
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0021_uniquecategory_cat_id.py
|
# Generated by Django 3.1 on 2020-12-16 10:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0020_auto_20201216_0745'),
]
operations = [
migrations.AddField(
model_name='uniquecategory',
name='cat_id',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,990
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0001_initial.py
|
# Generated by Django 3.1 on 2020-12-09 06:32
from django.db import migrations, models
import django.db.models.deletion
import products
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Generic', max_length=100, unique=True)),
],
options={
'db_table': 'brands',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('alias', models.JSONField(default=products.models.json_default)),
('main_features', models.JSONField(default=products.models.json_default)),
],
options={
'verbose_name_plural': 'categories',
'db_table': 'categories',
},
),
migrations.CreateModel(
name='ModelName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('brand_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='model', to='products.brand')),
],
options={
'db_table': 'model_name',
},
),
migrations.CreateModel(
name='SubCategory1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('alias', models.JSONField(default=products.models.json_default)),
('main_features', models.JSONField(default=products.models.json_default)),
('cat_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subcategory_1', to='products.category')),
],
options={
'verbose_name_plural': 'subcategories_1',
'db_table': 'subcategory_1',
},
),
migrations.CreateModel(
name='SubCategory2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('alias', models.JSONField(default=products.models.json_default)),
('main_features', models.JSONField(default=products.models.json_default)),
('subcat_1_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subcategory_2', to='products.subcategory1')),
],
options={
'verbose_name_plural': 'subcategories_2',
'db_table': 'subcategory_2',
},
),
migrations.CreateModel(
name='Subcat2BoughtTogetherJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('bought_together_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_join', to='products.subcategory2')),
('subcat_2_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_together_join', to='products.subcategory2')),
],
options={
'verbose_name_plural': 'subcategory_2_bought_together',
'db_table': 'subcategory_2_bought_together',
},
),
migrations.CreateModel(
name='Subcat2AccessoryJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('accessory_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_join', to='products.subcategory2')),
('subcat_2_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accessory_join', to='products.subcategory2')),
],
options={
'verbose_name_plural': 'subcategory_2_accessories',
'db_table': 'subcategory_2_accessories',
},
),
migrations.CreateModel(
name='Subcat1BoughtTogetherJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('bought_together_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_join', to='products.subcategory1')),
('subcat_1_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_together_join', to='products.subcategory1')),
],
options={
'verbose_name_plural': 'subcategory_1_bought_together',
'db_table': 'subcategory_1_bought_together',
},
),
migrations.CreateModel(
name='Subcat1AccessoryJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('accessory_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_join', to='products.subcategory1')),
('subcat_1_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accessory_join', to='products.subcategory1')),
],
options={
'verbose_name_plural': 'subcategory_1_accessories',
'db_table': 'subcategory_1_accessories',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comes_in_pairs', models.BooleanField(default=False)),
('image', models.ImageField(blank=True, null=True, upload_to=products.models.storage_dir)),
('short_desc', models.CharField(max_length=200, verbose_name='Short Description')),
('price', models.FloatField()),
('available', models.BooleanField(default=True)),
('in_the_box', models.JSONField(default=products.models.json_default)),
('specs', models.JSONField(default=products.models.json_default)),
('package_dimensions', models.CharField(blank=True, max_length=200, null=True)),
('weight', models.CharField(blank=True, max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now=True)),
('brand', models.ForeignKey(default='Generic', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product', to='products.brand')),
('cat_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.category')),
('model_name', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='product', to='products.modelname', to_field='name')),
('subcat_1_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.subcategory1')),
('subcat_2_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='products.subcategory2')),
],
options={
'db_table': 'products',
},
),
migrations.CreateModel(
name='CategoryBoughtTogetherJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('bought_together_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_join', to='products.category')),
('cat_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bought_together_join', to='products.category')),
],
options={
'verbose_name_plural': 'category_bought_together',
'db_table': 'category_bought_together',
},
),
migrations.CreateModel(
name='CategoryAccessoryJoin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hash_field', models.IntegerField(blank=True, unique=True)),
('accessory_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_join', to='products.category')),
('cat_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accessory_join', to='products.category')),
],
options={
'verbose_name_plural': 'category_accessories',
'db_table': 'category_accessories',
},
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,991
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0023_auto_20201216_1630.py
|
# Generated by Django 3.1 on 2020-12-16 15:30
from django.db import migrations, models
import products.models
import products.utils.model_utils
class Migration(migrations.Migration):
dependencies = [
('products', '0022_auto_20201216_1156'),
]
operations = [
migrations.AlterField(
model_name='category',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator([products.models.UniqueCategory])]),
),
migrations.AlterField(
model_name='subcategory1',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator([products.models.UniqueCategory])]),
),
migrations.AlterField(
model_name='subcategory2',
name='name',
field=models.CharField(max_length=100, unique=True, validators=[products.utils.model_utils.CrossModelUniqueNameValidator([products.models.UniqueCategory])]),
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,992
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0016_auto_20201211_2319.py
|
# Generated by Django 3.1 on 2020-12-11 22:19
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0015_auto_20201211_0655'),
]
operations = [
migrations.AlterField(
model_name='category',
name='main_features',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=20), default=list, size=None),
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,993
|
Agamiru/online_store
|
refs/heads/master
|
/online_store/settings/staging.py
|
import os
from decouple import Csv
from dj_database_url import parse as dburl
from .common import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = config("ALLOWED_HOSTS", cast=Csv())
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': config("DATABASE_URL", default="", cast=dburl)
}
if os.environ.get('GITHUB_WORKFLOW'):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'github_actions',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,994
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0006_auto_20201210_0946.py
|
# Generated by Django 3.1 on 2020-12-10 08:46
import django.contrib.postgres.fields.jsonb
from django.db import migrations
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0005_auto_20201210_0937'),
]
operations = [
migrations.AlterField(
model_name='category',
name='alias',
field=django.contrib.postgres.fields.jsonb.JSONField(default=products.models.json_default),
),
migrations.AlterField(
model_name='category',
name='main_features',
field=django.contrib.postgres.fields.jsonb.JSONField(default=products.models.json_default),
),
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,995
|
Agamiru/online_store
|
refs/heads/master
|
/products/migrations/0030_auto_20210111_1820.py
|
# Generated by Django 3.1 on 2021-01-11 17:20
from django.db import migrations
from django.contrib.postgres.operations import (
HStoreExtension, TrigramExtension
)
class Migration(migrations.Migration):
dependencies = [
('products', '0029_auto_20210111_0120'),
]
operations = [
HStoreExtension(),
TrigramExtension()
]
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,996
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/settings.py
|
from decouple import config
import telegram
BOT_TOKEN = config("BOT_TOKEN")
webhook_url = f"{config('BASE_URL')}/{config('BOT_TOKEN')}/"
bot = telegram.Bot(token=BOT_TOKEN)
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,293,997
|
Agamiru/online_store
|
refs/heads/master
|
/telegram_app/messages.py
|
start_message = "Hello, I can help you search and make orders for audio gears in Nigeria.\n\n" \
"Type the name of an item you want to search for -\n" \
"*e.g Focusrite Scarlett 2i2*\n\n" \
"We'll check if we have it in stock"
|
{"/api/scripts.py": ["/api/models.py"], "/api/model_utils.py": ["/api/models.py"], "/api/serializer.py": ["/api/models.py"], "/api/models.py": ["/api/model_utils.py"], "/api/migrations/0001_initial.py": ["/api/models.py"], "/api/admin.py": ["/api/models.py"], "/api/migrations/0008_auto_20200922_0213.py": ["/api/models.py"], "/products/models.py": ["/products/utils/model_utils.py", "/products/utils/manager_utils.py", "/products/managers.py"], "/products/migrations/0035_auto_20210115_0122.py": ["/products/models.py"], "/products/migrations/0031_auto_20210112_1303.py": ["/products/models.py"], "/products/apps.py": ["/products/models.py", "/products/signals.py"], "/app_admin/admin.py": ["/app_admin/models.py"], "/products/migrations/0019_auto_20201214_1509.py": ["/products/utils/model_utils.py"], "/products/tests/test_model_utils.py": ["/products/utils/model_utils.py", "/products/models.py"], "/products/managers.py": ["/products/utils/manager_utils.py"], "/products/tests/test_managers.py": ["/products/models.py"], "/products/migrations/0024_auto_20201216_1635.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/migrations/0004_auto_20201210_0835.py": ["/products/models.py", "/products/utils/model_utils.py"], "/products/signals.py": ["/products/models.py"], "/products/tests/test_models.py": ["/products/models.py"], "/telegram_app/utils.py": ["/telegram_app/settings.py"], "/products/migrations/0020_auto_20201216_0745.py": ["/products/models.py", "/products/utils/model_utils.py"], "/telegram_app/urls.py": ["/telegram_app/views.py", "/telegram_app/settings.py"], "/products/admin.py": ["/products/models.py", "/products/utils/admin_utils.py"], "/telegram_app/views.py": ["/telegram_app/settings.py", "/telegram_app/messages.py"], "/products/migrations/0008_category_test.py": ["/products/models.py"], "/products/migrations/0023_auto_20201216_1630.py": ["/products/models.py", "/products/utils/model_utils.py"], 
"/products/migrations/0006_auto_20201210_0946.py": ["/products/models.py"]}
|
27,323,448
|
buseskorkmaz/MyTicket-Website-with-Flask-and-SQL
|
refs/heads/main
|
/db_operations.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 11:10:45 2020
@author: bkorkmaz
"""
import psycopg2
from db import *
def register_to_db(email,password,city,theaterFan,cinemaFan,music):
connection = connect()
cursor = connection.cursor()
isOrganizer = False
postgres_insert_query = """ INSERT INTO auth (email_address, password, isorganizer) VALUES (%s,%s,%s)"""
record_to_insert = (email,password,isOrganizer)
try:
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
auth_id_query = """SELECT MAX(auth_id) from auth"""
cursor.execute(auth_id_query)
auth_id = cursor.fetchone()[0]
except:
error = 'Could not insert'
return error
postgres_insert_query = """ INSERT INTO app_user (city, favourite_music_type, istheaterfan, iscinemafan) VALUES (%s,%s,%s,%s)"""
record_to_insert = (city,music,theaterFan, cinemaFan)
try:
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
user_id_query = """SELECT MAX(user_id) from app_user"""
cursor.execute(user_id_query)
user_id = cursor.fetchone()[0]
except:
error = 'Could not insert'
return error
if(isOrganizer == False):
organizer_id = 0
postgres_insert_query = """ INSERT INTO id_table (user_id, auth_id, organizer_id) VALUES (%s,%s,%s)"""
record_to_insert = (user_id,auth_id,organizer_id)
try:
cursor.execute(postgres_insert_query, record_to_insert)
connection.commit()
except:
error = 'Could not insert'
return error
if(connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
return True
def check_auth_db(email, password):
    """Return True when an auth row matches email+password, else 'Could not found'.

    When no row matches, ``fetchone()`` returns None and the ``[0]`` raises,
    which the except turns into the failure string.
    """
    connection = connect()
    cursor = connection.cursor()
    try:
        sql_select_query = """SELECT auth_id from auth WHERE email_address = %s and password = %s"""
        cursor.execute(sql_select_query, (email, password))
        record = cursor.fetchone()[0]
        if connection:
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
        return True
    except Exception:
        return 'Could not found'
def get_events_from_db():
    """Return [name, ?, ?, place, city, country, ?, event_id] per event.

    Resolves each event's venue and city; returns 'Could not found' on error.
    """
    connection = connect()
    cursor = connection.cursor()
    try:
        cursor.execute("""SELECT * FROM event """)
        record = cursor.fetchall()
        events = []
        for r in record:
            place_id = r[5]
            # BUG FIX: parameters must be a tuple — (str(place_id)) is just a
            # string, which psycopg2 iterates character by character.
            cursor.execute("""SELECT place,city_id FROM place WHERE place_id = %s""",
                           (place_id,))
            places = cursor.fetchone()
            place = places[0]
            city_id = places[1]
            cursor.execute("""SELECT city,country FROM city WHERE city_id = %s""",
                           (city_id,))
            city_records = cursor.fetchone()
            city = city_records[0]
            country = city_records[1]
            events.append([r[1], r[3], r[4], place, city, country, r[6], r[0]])
        if connection:
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
        return events
    except Exception:
        return 'Could not found'
def get_event_from_db(event_id):
    """Return a single event's display row, or 'Could not found'.

    Row shape: [name, image, description, price, place, "city/country",
    time, "80"] — the trailing "80" is a hard-coded basket placeholder.
    """
    connection = connect()
    cursor = connection.cursor()
    try:
        cursor.execute("""SELECT * FROM event WHERE event_id=%s""", (event_id,))
        r = cursor.fetchone()
        place_id = r[5]
        # BUG FIX: parameters must be a tuple — (str(place_id)) is a bare
        # string, not a one-element parameter sequence.
        cursor.execute("""SELECT place,city_id FROM place WHERE place_id = %s""",
                       (place_id,))
        places = cursor.fetchone()
        place = places[0]
        city_id = places[1]
        cursor.execute("""SELECT city,country FROM city WHERE city_id = %s""",
                       (city_id,))
        city_records = cursor.fetchone()
        city = city_records[0]
        country = city_records[1]
        location = city + "/" + country
        event = [r[1], r[8], r[3], r[6], place, location, r[4], "80"]
        if connection:
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
        return event
    except Exception:
        return 'Could not found'
|
{"/authentication.py": ["/db_operations.py"], "/db_operations.py": ["/db.py"]}
|
27,323,449
|
buseskorkmaz/MyTicket-Website-with-Flask-and-SQL
|
refs/heads/main
|
/authentication.py
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, redirect, url_for, request
from db_operations import *
app = Flask(__name__, static_url_path='', static_folder='static')
@app.route("/")
def hello():
    """Trivial liveness endpoint."""
    return 'Hello World!'
@app.route("/event", methods=['GET', 'POST'])
def event():
    """Render the product page for the event named by the ?event_id= arg."""
    event_id = request.args['event_id']
    event = get_event_from_db(event_id)
    if event == 'Could not found':
        return "Selected event is not available"
    # db row layout: event_name, image_link, event_description, ticket_prices,
    # place, location, time, total_basket
    print(event_id)
    print(event[1])
    if request.method == "POST":
        ticket_amount = request.form['ticket_amount']
        # TODO: send the selected amount to the basket
    return render_template('productpage.html', data=event)
@app.route("/myaccount", methods=['GET', 'POST'])
def myaccount():
    """Placeholder account page."""
    return "My Account Informations"
@app.route("/basket", methods=['GET', 'POST'])
def basket():
    """Placeholder basket page."""
    return "My Basket Informations"
@app.route("/homepage", methods=['GET', 'POST'])
def homepage():
    """List all events; a POST selects one and redirects to its page."""
    error = None
    events = get_events_from_db()
    basket = 85
    if request.method == 'POST':
        event_id = request.form["event_id"]
        return redirect(url_for("event", event_id=event_id))  # encrypt it
    return render_template('tickets_for_events_with_dummy.html', error=error, data=events)
# Route for handling the login page logic
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Sign-in form; valid credentials redirect to the homepage."""
    error = None
    if request.method == 'POST':
        registered = check_auth_db(request.form['email'], request.form['password'])
        if registered != 'Could not found':
            return redirect(url_for('homepage'))
        error = "Check for credentials or register!"
    return render_template('signin.html', error=error)
# Route for handling the registration page logic
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration form; on success persist the user and go to login."""
    error = None
    if request.method == 'POST':
        try:
            email = request.form['email']
        except:
            return 'There is an error in email'
        try:
            password = request.form['password']
        except:
            return 'There is an error in password'
        try:
            repeatpasword = request.form['repeatpassword']
        except:
            return 'There is an error in password repeat'
        try:
            city = request.form['city']
        except:
            return 'There is an error in city'
        try:
            # Checkboxes arrive as "Yes"/absent; normalize to bool.
            theaterFan = request.form.get('theater') == "Yes"
        except:
            return 'There is an error in theater'
        try:
            cinemaFan = request.form.get('cinema') == "Yes"
        except:
            return 'There is an error in cinema'
        try:
            music = request.form.get("music")
            if music == "":
                music = None
        except:
            return 'There is an error in music'
        if repeatpasword == password:
            success = register_to_db(email, password, city, theaterFan, cinemaFan, music)
            if success:
                return redirect(url_for('login'))
            error = "Database error"
        else:
            error = "Passwords did not match"
    return render_template('register.html', error=error)
if __name__ == '__main__':
    # Listen on all interfaces so the dev server is reachable externally.
    app.run(host='0.0.0.0')
|
{"/authentication.py": ["/db_operations.py"], "/db_operations.py": ["/db.py"]}
|
27,344,945
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/abstract_class.py
|
class AbstractItem:
    """Base class for the Trello-like hierarchy (Team, Board, Table, Card).

    Holds common identity/name/description/order state, an ordered list of
    child elements, a member list, and persistence helpers that delegate all
    database work to a query-manager object.
    """

    def __init__(self):
        self.id = None                 # database id; None until first save()
        self.name = str()
        self.description = str()
        self.order = int()             # position among siblings
        self._elements_list = list()   # ordered child items
        self._members = list()         # users attached to this item
        self.model_class = None        # peewee model class, set by subclasses

    def __contains__(self, element):
        return element in self._elements_list

    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; confirm no caller puts these in sets/dict keys.
        return self.id == other.id

    def set_id(self, id):
        self.id = id

    def get_id(self):
        return self.id

    def _add_element(self, query_manager, element, order=None):
        """Persist `element` as a child of self, insert it at `order`.

        :param query_manager: object providing create/update/delete
        :param element: child AbstractItem to attach
        :param order: insertion index; defaults to the end of the list
        :return: the updated elements list
        """
        # BUG FIX: `order if order else ...` treated an explicit order of 0
        # as "not given"; compare against None instead.
        if order is None:
            order = len(self._elements_list)
        element.set_order(order)
        element_model = element.save(query_manager, self)
        element.set_id(element_model.id)
        self._elements_list.insert(order, element)
        self._reorder_elements(element, order)
        return self._elements_list

    def _remove_element(self, query_manager, element):
        """Remove `element` from both the database and the child list.

        :return: the updated list, or None when `element` was not a child
        """
        try:
            self._elements_list.remove(element)
            element.delete(query_manager)  # delete from database
            del element                    # drop the local reference
        except ValueError:  # element does not exist in elements_list
            return None
        return self._elements_list

    def _move_element(self, query_manager, element, parent_element, order=None):
        """Detach `element` from self and attach it to `parent_element`."""
        self._remove_element(query_manager, element)
        parent_element._add_element(query_manager, element, order)

    def _reorder_elements(self, element, index: int):
        # Hook for subclasses that need to renumber siblings after an insert.
        pass

    def set_name(self, name):
        self.name = name

    def get_name(self):
        return self.name

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description

    def set_order(self, order):
        self.order = order

    def get_order(self):
        return self.order

    def add_member(self, member):
        self._members.append(member)

    def remove_member(self, member):
        """Remove `member`; returns the member list, or None if absent."""
        try:
            self._members.remove(member)
        except ValueError:  # member does not exist in members
            return None
        return self._members

    def _get_elements_list(self):
        return self._elements_list

    def save(self, query_manager, parent_element=None):
        """Create or update this object's database row.

        Creates when it has no id yet (passing `parent_element` when given),
        otherwise updates the existing row. Returns the resulting model/row.
        """
        if not self.id:  # no row in the db yet -> create
            if parent_element:
                element = query_manager.create_object(self, parent_element)
            else:
                element = query_manager.create_object(self)
        else:  # already persisted -> update
            element = query_manager.update_object(self)
        return element

    def delete(self, query_manager):
        """Delete this object's database row (if any) and clear its id."""
        if self.id:
            query_manager.delete_object(self)
            self.id = None
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,946
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/table.py
|
from card import Card
from models import TableModel
from abstract_class import AbstractItem
class Table(AbstractItem):
    """A column on a board; its child elements are cards."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_class = TableModel

    @property
    def cards(self):
        """Cards currently on this table, in order."""
        return self._get_elements_list()

    def add_card(self, query_manager, card: Card, order=None):
        return self._add_element(query_manager, card, order)

    def remove_card(self, query_manager, card: Card):
        return self._remove_element(query_manager, card)

    def move_card(self, query_manager, card: Card, table, order=None):
        self._move_element(query_manager, card, table, order)
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,947
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/model_map.py
|
from models import *
# Maps each peewee model class to the field names the query layer
# reads from / writes to the corresponding domain object.
db_map = {
CardModel: ["name", "order", "description", "table"],
TableModel: ["name", "board"],
BoardModel: ["name", "team"],
TeamModel: ["name"],
UserModel: ["name", "username", "email", "password"]
}
# Field names that are foreign keys to the item's parent in the hierarchy.
parents = ["table", "board", "team"]
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,948
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/card.py
|
from abstract_class import AbstractItem
from models import CardModel
class Card(AbstractItem):
    """A single task card — the leaf of the hierarchy, with no sub-items."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_class = CardModel
        self._elements_list = None  # card has no list of any sub-items
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,949
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/user.py
|
class User:
    """A registered user account (not part of the AbstractItem hierarchy)."""

    def __init__(self):
        self.id = None
        self.username = str()
        self.email = str()
        self.name = str()
        self.__password = str()  # name-mangled; only exposed via accessors

    def set_password(self, password):
        self.__password = password

    def __get_password(self):
        return self.__password

    def set_username(self, username):
        self.username = username

    def get_username(self):
        return self.username

    def set_name(self, name):
        self.name = name

    def get_name(self):
        # BUG FIX: previously returned self.username (copy-paste slip).
        return self.name

    def set_email(self, email):
        self.email = email

    def get_email(self):
        return self.email

    def match_user(self, user):
        """True when username, name and email all match `user`."""
        # BUG FIX: the email term previously compared self.email == self.name.
        return self.username == user.username and \
            self.name == user.name and \
            self.email == user.email

    def sign_up(self, name, username, email, password):
        pass  # TODO: not implemented

    def sign_in(self, username, password):
        pass  # TODO: not implemented
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,950
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/models.py
|
from peewee import *
import datetime
import random
db = SqliteDatabase('trello.sqlite')
class BaseModel(Model):
    """Binds every model in this module to the shared SQLite database."""
    class Meta:
        database = db
class UserModel(BaseModel):
    # Account row; username and email are unique login identifiers.
    name = CharField(default='')
    username = CharField(unique=True)
    email = CharField(unique=True)
    password = CharField()
class TeamModel(BaseModel):
    # Top of the hierarchy: a team owns boards.
    name = CharField()
    description = TextField(default='')
class BoardModel(BaseModel):
    # A board belongs to one team and owns tables.
    name = CharField()
    team = ForeignKeyField(TeamModel, backref='boards')
    created_at = DateTimeField(default=datetime.datetime.now)
class TableModel(BaseModel):
    # A table (column) belongs to one board and owns cards.
    name = CharField()
    board = ForeignKeyField(BoardModel, backref='tables')
    created_at = DateTimeField(default=datetime.datetime.now)
class CardModel(BaseModel):
    # A card belongs to one table; `order` is its position in that table.
    name = CharField()
    order = IntegerField()
    description = TextField(default='')
    table = ForeignKeyField(TableModel, backref='cards')
    created_at = DateTimeField(default=datetime.datetime.now)
class MemberCardRelation(BaseModel):
    # Many-to-many: which users are assigned to which cards.
    member = ForeignKeyField(UserModel, backref='memberships')
    card = ForeignKeyField(CardModel, backref='member_relations', null=False)
class MemberBoardRelation(BaseModel):
    # Many-to-many: which users participate in which boards.
    member = ForeignKeyField(UserModel, backref='memberships')
    board = ForeignKeyField(BoardModel, backref='member_relations', null=False)
class MemberTeamRelation(BaseModel):
    # Many-to-many: which users belong to which teams.
    member = ForeignKeyField(UserModel, backref='memberships')
    team = ForeignKeyField(TeamModel, backref='member_relations', null=False)
def initialize():
    """Open the database and create all tables (safe=True skips existing)."""
    db.connect()
    db.create_tables([UserModel, TeamModel, BoardModel, TableModel, CardModel,
                      MemberCardRelation, MemberBoardRelation, MemberTeamRelation],
                     safe=True)
    # db.close()
def add_some_users_and_teams():
    """Seed the database with demo users and two demo teams."""
    users = [
        {'name': 'gholam', 'email': 'a@b.com', 'username': 'gholiGhollak', 'password': '123qweasd'},
        {'name': 'sheykh pashmeddin', 'email': 'aa@b.com', 'username': 'furryPashmak', 'password': 'asdfqwer'},
        {'name': 'sirish sefat', 'email': 'a@db.com', 'username': 'sooriSirish', 'password': 'erydfgh'},
        {'name': 'ghelghelak mirza', 'email': 'aadf@b.com', 'username': 'ghelGheli', 'password': 'xcvbsdfg'},
        {'name': 'ververe jadoo', 'email': 'a@sfb.com', 'username': 'veriVerVere', 'password': '1qaz2wsx'},
        {'name': 'kopol chorool', 'email': 'aasdf@basf.com', 'username': 'golabiPorHajm', 'password': 'edcrfv'},
        {'name': 'pakhmak-o-ddole', 'email': 'aasdf@bsdg.com', 'username': 'sidneySweet', 'password': 'tgbyhn'},
        {'name': 'mashangak', 'email': 'asdg@sdfb.com', 'username': 'stupidMashang', 'password': 'ujmik,'},
    ]
    teams = [
        {'name': 'the-A-team',
         'description': 'a team that actually does nothing and just about exist which constantly make idiot conversations'},
        {'name': 'translators',
         'description': 'some people saying none-sense jubarish in other languages witch means abso-bloody-lutely nothing'},
    ]
    created_users = []
    for user in users:
        created_users.append(UserModel.create(
            name=user['name'],
            email=user['email'],
            username=user['username'],
            password=user['password'],
        ))
    print(created_users)
    for team in teams:
        TeamModel.create(name=team['name'], description=team['description'])
def add_some_tables():
    """Seed 30 tables, each attached to a random existing board."""
    boards = BoardModel.select()
    created = []
    for i in range(30):
        created.append(TableModel.create(
            name='some table %d' % i,
            board=boards[random.randrange(0, len(boards))]))
    print(created)
def add_some_card():
    """Seed every table with 5-9 ordered demo cards."""
    tables = TableModel.select()
    created = []
    for table in tables:
        for i in range(random.randrange(5, 10)):
            created.append(CardModel.create(
                name='some card %d' % i,
                order=i,
                description='some looong loong description about what this card does or wants and whats needs to be done in order for this card to leave us the hell alone',
                table=table,
            ))
    print(created)
def add_some_members():
    """Placeholder: member seeding is not implemented yet."""
    pass
def add_some_boards():
    """Seed a few boards, each attached to a random existing team."""
    teams = TeamModel.select()
    boards = [
        {'name': 'product backlog', 'team': ''},
        {'name': 'technical team', 'team': ''},
        {'name': 'lets do some tasks', 'team': ''},
        {'name': 'lets do nothing', 'team': ''},
        {'name': 'some useless board', 'team': ''},
        {'name': 'some cool board', 'team': ''},
        {'name': 'a board full of stars', 'team': ''},
    ]
    created = []
    for board in boards:
        created.append(BoardModel.create(
            name=board['name'],
            team=teams[random.randrange(0, len(teams))]))
    print(created)
if __name__ == '__main__':
    # Build the schema, then seed it top-down (teams -> boards -> tables -> cards).
    initialize()
    add_some_users_and_teams()
    add_some_boards()
    add_some_tables()
    add_some_card()
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,951
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/tests.py
|
from board import Board
from team import Team
from user import User
from card import Card
from table import Table
def create_card(all_teams, query_manager):
    """Ad-hoc smoke test: add one demo card to a known table.

    NOTE(review): exec'ing main.py re-runs its module body purely for side
    effects; importing it would be cleaner.
    """
    exec(open("./main.py").read())
    table = all_teams[0].boards[1].tables[3]
    c = Card()
    c.name = 'some name'
    c.description = 'dasdfsaf asdf asfg afg asdgawrgewqrg asdf'
    c.order = 121
    table.add_card(query_manager, c)
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,952
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/team.py
|
from user import User
from board import Board
from abstract_class import AbstractItem
from models import TeamModel
class Team(AbstractItem):
    """Top of the hierarchy; its child elements are boards."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model_class = TeamModel

    @property
    def boards(self):
        """Boards owned by this team, in order."""
        return self._get_elements_list()

    def add_board(self, query_manager, board: Board, order=None):
        return self._add_element(query_manager, board, order)

    def remove_board(self, query_manager, board: Board):
        return self._remove_element(query_manager, board)
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,953
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/db_interface.py
|
from abstract_class import AbstractItem
from model_map import db_map, parents
class QueryHandler:
    """Thin CRUD layer between AbstractItem objects and their peewee models.

    Field lists come from model_map.db_map; any field listed in
    model_map.parents is a foreign key filled from the parent item's id.
    """

    def create_object(self, obj: AbstractItem, parent: AbstractItem = None):
        """Insert a row for `obj`; parent FK fields take `parent`'s id.

        BUG FIX: `parent` now defaults to None — AbstractItem.save() calls
        create_object(self) without a parent for top-level items (teams),
        which previously raised TypeError. Parent-less models have no
        parent field in db_map, so the branch is never taken for them.
        """
        model_class = obj.model_class
        parameter_list = dict()
        for field in db_map[model_class]:
            if field in parents:
                parameter_list[field] = parent.get_id()
            else:
                parameter_list[field] = getattr(obj, field)
        return model_class.create(**parameter_list)

    def retrieve_object(self, obj: AbstractItem):
        """Fetch the first row whose non-None mapped fields equal obj's."""
        model_class = obj.model_class
        # BUG FIX: peewee's where() takes expression arguments, not keyword
        # arguments; build field == value expressions explicitly.
        conditions = [
            getattr(model_class, field) == getattr(obj, field)
            for field in db_map[model_class]
            if getattr(obj, field) is not None
        ]
        return model_class.select().where(*conditions).get()

    def delete_object(self, obj: AbstractItem):
        """Delete obj's row by primary key."""
        model_class = obj.model_class
        # BUG FIX: the delete query was built but never run — peewee queries
        # are lazy and require execute().
        model_class.delete().where(model_class.id == obj.id).execute()

    def update_object(self, obj: AbstractItem):
        """Overwrite all mapped fields of obj's row."""
        model_class = obj.model_class
        parameter_list = {field: getattr(obj, field) for field in db_map[model_class]}
        model_class.update(**parameter_list) \
            .where(model_class.id == obj.id).execute()
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,954
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/main.py
|
from models import *
from board import Board
from table import Table
from card import Card
from user import User
from team import Team
from db_interface import QueryHandler
def load_db():
    """Fetch every row of each table as (teams, boards, tables, cards, users)."""
    return (TeamModel.select(), BoardModel.select(), TableModel.select(),
            CardModel.select(), UserModel.select())
def convert_to_user_classes(users):
    """Wrap UserModel rows in domain User objects."""
    all_users = []
    for row in users:
        u = User()
        u.id = row.id
        u.set_name(row.name)
        u.set_username(row.username)
        u.set_email(row.email)
        u.set_password(row.password)
        all_users.append(u)
    return all_users
def convert_to_card_classes(cards):
    """Wrap CardModel rows in Card objects; also group them by table id.

    Returns (all_cards, {table_id: [Card, ...]}).
    """
    all_cards = []
    table_to_card_map = dict()
    for row in cards:
        c = Card()
        c.set_id(row.id)
        c.set_description(row.description)
        c.set_order(row.order)
        c.set_name(row.name)
        table_to_card_map.setdefault(row.table.id, []).append(c)
        all_cards.append(c)
    return all_cards, table_to_card_map
def convert_to_table_classes(tables, table_to_card_map):
    """Wrap TableModel rows in Table objects; also group them by board id.

    Returns (all_tables, {board_id: [Table, ...]}).
    """
    all_items = []
    board_to_table = dict()
    for model in tables:
        item = Table()
        item.set_id(model.id)
        item.set_name(model.name)
        # BUG FIX: a table with no cards had no entry in the map and raised
        # KeyError; default to an empty card list.
        item._elements_list = table_to_card_map.get(item.get_id(), [])
        board_to_table.setdefault(model.board.id, []).append(item)
        all_items.append(item)
    return all_items, board_to_table
def convert_to_board_classes(boards, board_to_table):
    """Wrap BoardModel rows in Board objects; also group them by team id.

    Returns (all_boards, {team_id: [Board, ...]}).
    """
    all_items = []
    team_to_board = dict()
    for model in boards:
        item = Board()
        item.set_id(model.id)
        item.set_name(model.name)
        # BUG FIX: a board with no tables raised KeyError; default to [].
        item._elements_list = board_to_table.get(item.get_id(), [])
        team_to_board.setdefault(model.team.id, []).append(item)
        all_items.append(item)
    return all_items, team_to_board
def convert_to_team_classes(teams, team_to_board):
    """Wrap TeamModel rows in Team objects, attaching their boards."""
    all_items = []
    for model in teams:
        item = Team()
        item.set_id(model.id)
        item.set_description(model.description)
        item.set_name(model.name)
        # BUG FIX: a team with no boards raised KeyError; default to [].
        item._elements_list = team_to_board.get(item.get_id(), [])
        all_items.append(item)
    return all_items
def refresh_from_db():
    """Load every table and rebuild the linked domain-object graph.

    Returns (all_cards, all_users, all_boards, all_tables, all_teams,
    query_manager).
    """
    teams, boards, tables, cards, users = load_db()
    all_users = convert_to_user_classes(users)
    all_cards, table_to_cards = convert_to_card_classes(cards)
    all_tables, board_to_table = convert_to_table_classes(tables, table_to_cards)
    all_boards, team_to_board = convert_to_board_classes(boards, board_to_table)
    all_teams = convert_to_team_classes(teams, team_to_board)
    query_manager = QueryHandler()
    return all_cards, all_users, all_boards, all_tables, all_teams, query_manager
if __name__ == "__main__":
    all_cards, all_users, all_boards, all_tables, all_teams, query_manager = refresh_from_db()
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,344,955
|
mrbahrani/oop-project-trello
|
refs/heads/main
|
/board.py
|
from table import Table
from models import BoardModel
from abstract_class import AbstractItem
class Board(AbstractItem):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_class = BoardModel
@property
def tables(self):
return self._get_elements_list()
def add_table(self, query_manager, table: Table, order=None):
return self._add_element(query_manager, table, order)
def remove_table(self, query_manager, table: Table):
return self._remove_element(query_manager, table)
|
{"/abstract_class.py": ["/db_interface.py"], "/tests.py": ["/board.py", "/team.py", "/user.py", "/card.py", "/table.py"], "/table.py": ["/card.py", "/models.py", "/abstract_class.py"], "/model_map.py": ["/models.py"], "/card.py": ["/abstract_class.py", "/models.py"], "/team.py": ["/user.py", "/board.py", "/abstract_class.py", "/models.py"], "/db_interface.py": ["/abstract_class.py", "/model_map.py"], "/main.py": ["/models.py", "/board.py", "/table.py", "/card.py", "/user.py", "/team.py", "/db_interface.py"], "/board.py": ["/table.py", "/models.py", "/abstract_class.py"]}
|
27,369,496
|
Vincent550102/todolist_linebot
|
refs/heads/main
|
/test.py
|
import json

# Quick manual check that the database file parses.
# BUG FIX: the file handle was opened and never closed; use a context manager.
with open('DataBase.json', encoding='utf-8') as f:
    db = json.load(f)
|
{"/main.py": ["/env.py", "/UserDataBaseTemplate.py"]}
|
27,369,497
|
Vincent550102/todolist_linebot
|
refs/heads/main
|
/main.py
|
from __future__ import unicode_literals
import os
from flask import Flask, request, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageSendMessage
from time import sleep
import configparser,requests,json
app = Flask(__name__)
# Public endpoint returning random cat image records.
CATPI = "https://api.thecatapi.com/v1/images/search"
# Basic LINE chat-bot credentials, read from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
line_bot_api = LineBotApi(config.get('line-bot', 'channel_access_token'))
handler = WebhookHandler(config.get('line-bot', 'channel_secret'))
# NOTE(review): hard-coded credential fallbacks were removed from comments —
# secrets should never live in source, even commented out.
def get_catimg():
    """Fetch the URL of a random cat image from thecatapi."""
    return requests.get(CATPI).json()[-1]['url']
# Receive webhook events from LINE
@app.route("/callback", methods=['POST'])
def callback():
    """Verify the LINE signature, then dispatch the webhook body."""
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    print(body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'
# 學你說話
@handler.add(MessageEvent, message=TextMessage)
def echo(event):
print(event)
mess = event.message.text.split(' ')
uid = event.source.user_id
if mess[0] == "我要貓咪圖片":
img = get_catimg()
line_bot_api.reply_message(
event.reply_token,
ImageSendMessage(
original_content_url=img,
preview_image_url=img
)
)
elif mess[0] == "加入":
db = json.load(open('DataBase.json', encoding='utf-8'))
result = ""
for part in mess:
result += part if part != mess[0] else ''
if uid in db:
db[uid]['todolist'].append(result)
else:
db[uid] = {
"uid" : uid,
"todolist" : [result],
"nickname" : "nickname"
}
print(db[uid]['todolist'])
line_bot_api.reply_message(
event.reply_token,
TextMessage(text='已加入 : "'+result+'"'+" 在 " +str(len(db[uid]['todolist'])))
)
with open('DataBase.json','w',encoding='utf-8') as f:
json.dump(db,f,indent=2,sort_keys=True,ensure_ascii=False)
elif mess[0] == "檢視":
result = '_ToDoList_\n'
db = json.load(open('DataBase.json', encoding='utf-8'))
print(db)
for idx,item in enumerate(db[uid]['todolist']):
result += '{}. {}\n'.format(str(idx+1),item)
print(db)
line_bot_api.reply_message(
event.reply_token,
TextMessage(text=result)
)
elif mess[0] == '刪除':
db = json.load(open('DataBase.json', encoding='utf-8'))
del db[uid]['todolist'][int(mess[1])-1]
print(db)
line_bot_api.reply_message(
event.reply_token,
TextMessage(text="已刪除 "+str(mess[1]))
)
with open('DataBase.json','w',encoding='utf-8') as f:
json.dump(db,f,indent=2,sort_keys=True,ensure_ascii=False)
else:
line_bot_api.reply_message(
event.reply_token,
TextMessage(text='我不知道你在說甚麼@@ : "'+event.message.text+'"')
)
if __name__ == "__main__":
    app.run()
|
{"/main.py": ["/env.py", "/UserDataBaseTemplate.py"]}
|
27,498,814
|
aashutoshPanda/Xmeme
|
refs/heads/main
|
/mysite/urls.py
|
from django.contrib import admin
from django.urls import path, include, re_path # <ADD> repath....
# from django.views.generic import TemplateView # <ADD>
# URL routing table: only the Django admin is wired up; the SPA catch-all
# route is intentionally left commented out.
urlpatterns = [
path('admin/', admin.site.urls),
# re_path(r'^.*', TemplateView.as_view(template_name='index.html')), # <ADD>
]
|
{"/meme/views.py": ["/meme/serializers.py", "/meme/models.py", "/meme/utils.py"], "/meme/urls.py": ["/meme/views.py"], "/meme/serializers.py": ["/meme/models.py"]}
|
27,626,979
|
suwan9/UDAML
|
refs/heads/master
|
/CMUML/utils/loss.py
|
import torch
import torch.nn.functional as F
def entropy(p):
    """Mean Shannon entropy of a batch of logits `p` (shape: batch x classes).

    The +1e-5 guards log(0) for near-one-hot distributions.
    """
    # Explicit dim=1 matches the class-dim sum below and silences the
    # implicit-dim softmax deprecation warning (behavior unchanged for 2-D input).
    p = F.softmax(p, dim=1)
    return -torch.mean(torch.sum(p * torch.log(p + 1e-5), 1))
def entropy_margin(p, value, margin=0.2, weight=None):
    """Negative mean hinge of |H(row) - value| over a batch of logits.

    Pushes each row's entropy toward `value`, ignoring deviations smaller
    than `margin`. `weight` is accepted for interface compatibility but
    unused.
    """
    # Explicit dim=1 matches the class-dim sum (behavior unchanged for 2-D input).
    p = F.softmax(p, dim=1)
    return -torch.mean(hinge(torch.abs(-torch.sum(p * torch.log(p + 1e-5), 1) - value), margin))


def hinge(input, margin=0.2):
    """Clamp `input` elementwise from below at `margin`."""
    return torch.clamp(input, min=margin)
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,980
|
suwan9/UDAML
|
refs/heads/master
|
/CMUML/main.py
|
from data import *
from net import *
from lib import *
import datetime
from tqdm import tqdm
if is_in_notebook():
from tqdm import tqdm_notebook as tqdm
from torch import optim
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
from eval import batch_hard_triplet_loss
from eval import batch_all_triplet_loss
from eval import convert_label_to_similarity
from eval import CircleLoss
# Let cuDNN pick fast algorithms, but keep kernel selection deterministic.
cudnn.benchmark = True
cudnn.deterministic = True
seed_everything()
# (old multi-GPU selection path kept for reference)
# if args.misc.gpus < 1:
#     import os
#
#     os.environ["CUDA_VISIBLE_DEVICES"] = ""
#     gpu_ids = []
#     device = torch.device('cpu')
# else:
#     # gpu_ids = select_GPUs(args.misc.gpus)
#     gpu_ids = [0]
#     device = gpu_ids[0]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# One timestamped log directory per run; the resolved config is snapshotted
# into it so the run is reproducible.
now = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_dir = f'{args.log.root_dir}/{now}'
logger = SummaryWriter(log_dir)
with open(join(log_dir, 'config.yaml'), 'w') as f:
    f.write(yaml.dump(save_config))
# Backbone registry, selected via args.model.base_model.
model_dict = {
    'resnet50': ResNet50Fc,
    'vgg16': VGG16Fc
}
class TotalNet(nn.Module):
    """Container for all sub-networks: backbone feature extractor, multi-head
    classifier, and the shared + separate adversarial domain discriminators."""

    def __init__(self):
        super(TotalNet, self).__init__()
        self.feature_extractor = model_dict[args.model.base_model](args.model.pretrained_model)
        classifier_output_dim = len(source_classes)
        self.classifier = CLS(self.feature_extractor.output_num(), classifier_output_dim, bottle_neck_dim=256)
        self.discriminator = AdversarialNetwork(256)
        # BUG FIX: forward() dereferences self.discriminator_separate, but this
        # attribute had been commented out, so any forward pass raised
        # AttributeError. Restore it so the module is actually usable.
        self.discriminator_separate = AdversarialNetwork(256)

    def forward(self, x):
        """Run the full pipeline on a batch *x*.

        Returns:
            y: class logits from the classifier head.
            d: shared-discriminator domain probability (from the bottleneck).
            d_0: separate-discriminator domain probability (same bottleneck).
        """
        f = self.feature_extractor(x)
        # classifier returns (features, bottleneck, logits-like, predictions);
        # `_` is the 256-d bottleneck fed to both discriminators.
        f, _, __, y = self.classifier(f)
        d = self.discriminator(_)
        d_0 = self.discriminator_separate(_)
        return y, d, d_0
# Instantiate the composite network and move each sub-module to the device.
# (Old nn.DataParallel wrapping kept for reference.)
totalNet = TotalNet()
# feature_extractor = nn.DataParallel(totalNet.feature_extractor, device_ids=gpu_ids, device=device).train(
#     True)
# classifier = nn.DataParallel(totalNet.classifier, device_ids=gpu_ids, device=device).train(True)
# # discriminator = nn.DataParallel(totalNet.discriminator, device_ids=gpu_ids, device=device).train(True)
# discriminator_separate = nn.DataParallel(totalNet.discriminator_separate, device_ids=gpu_ids,
#                                          device=device).train(True)
feature_extractor = totalNet.feature_extractor.to(device)
classifier = totalNet.classifier.to(device)
discriminator = totalNet.discriminator.to(device)
# discriminator_separate = totalNet.discriminator_separate.to(device)
# ---- Evaluation-only path: load a checkpoint, score the target test set with
# the entropy/consistency/confidence "share weight" plus an energy criterion,
# report common / outlier accuracy and the harmonic score, then exit.
if args.test.test_only:
    assert os.path.exists(args.test.resume_file)
    data = torch.load(open(args.test.resume_file, 'rb'))
    feature_extractor.load_state_dict(data['feature_extractor'])
    classifier.load_state_dict(data['classifier'])
    discriminator.load_state_dict(data['discriminator'])
    # discriminator_separate.load_state_dict(data['discriminator_separate'])
    feat_all = []
    # One AccuracyCounter per source class plus one for the "unknown" class.
    counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
    with TrainingModeManager([feature_extractor, classifier, discriminator], train=False) as mgr, \
            Accumulator(['feature', 'predict_prob', 'label', 'fc2_s',
                         'entropy', 'consistency', 'confidence']) as target_accumulator, \
            torch.no_grad():
        for i, (im, label) in enumerate(tqdm(target_test_dl, desc='testing ')):
            im = im.to(device)
            label = label.to(device)
            feature = feature_extractor.forward(im)
            feature, __, fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5, predict_prob = classifier.forward(feature)
            # domain_prob = discriminator_separate.forward(__)
            ss = feature.tolist()
            feat_all = list(feat_all) + list(ss)
            entropy = get_entropy(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5, domain_temperature=1.0,
                                  class_temperature=1.0)
            consistency = get_consistency(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5)
            confidence, indices = torch.max(predict_prob, dim=1)
            # predict_prob = get_predict_prob(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5)
            # Snapshot the per-batch tensors into module-level numpy arrays so
            # the Accumulator can collect them by name via globals().
            for name in target_accumulator.names:
                globals()[name] = variable_to_numpy(globals()[name])
            target_accumulator.updateData(globals())
    # Pull the accumulated full-dataset arrays back into module-level names.
    for x in target_accumulator:
        globals()[x] = target_accumulator[x]
    entropy = normalize_weight(torch.tensor(entropy))
    consistency = normalize_weight(torch.tensor(consistency))
    confidence = nega_weight(torch.tensor(confidence))
    #######################################################################
    # print(entropy.size())
    # print(consistency.size())
    # target_share_weight = (entropy + consistency) / 2
    #target_share_weight = (entropy + consistency + confidence) / 3
    # Higher weight => more likely a shared/common-class sample.
    target_share_weight = (confidence + 1 - consistency + 1 - entropy) / 3
    ############################################################################
    entropy_common = []
    entropy_private = []
    consistency_common = []
    consistency_private = []
    confidence_common = []
    confidence_private = []
    weight_common = []
    weight_private = []
    # Split statistics by ground truth; labels < 10 are treated as common
    # classes here (NOTE(review): hard-coded 10 — confirm against the split).
    for (each_entropy, each_consistency, each_confidence, each_weight, each_label) \
            in zip(entropy, consistency, confidence, target_share_weight, label):
        if each_label < 10:
            entropy_common.append(each_entropy)
            consistency_common.append(each_consistency)
            confidence_common.append(each_confidence)
            weight_common.append(each_weight)
        else:
            entropy_private.append(each_entropy)
            consistency_private.append(each_consistency)
            confidence_private.append(each_confidence)
            weight_private.append(each_weight)
    # for x in target_accumulator:
    # print(target_accumulator['target_share_weight'])
    # print(entropy.size())
    # hist, bin_edges = np.histogram(entropy_common, bins=10, range=(0, 1))
    # print(hist)
    # print(bin_edges)
    #
    # hist, bin_edges = np.histogram(entropy_private, bins=10, range=(0, 1))
    # print(hist)
    # print(bin_edges)
    hist, bin_edges = np.histogram(confidence_common, bins=10, range=(0, 1))
    #print(hist)
    #print(bin_edges)
    hist, bin_edges = np.histogram(confidence_private, bins=10, range=(0, 1))
    #print(hist)
    #print(bin_edges)
    #
    # hist, bin_edges = np.histogram(consistency, bins=20, range=(0, 1))
    # print(hist)
    # print(bin_edges)
    # ana = list(zip(entropy, consistency, confidence, target_share_weight, label))
    # array = sorted(ana, key=lambda x: x[0])
    # np.savetxt("ana.csv", array, delimiter=',')
    # print(array)
    #
    # a1, a2, a3 = zip(*array)
    # print(a1)
    # print(a2)
    # print(a3)
    '''
    estimate_counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
    for (each_predict_prob, each_label, each_target_share_weight) in zip(predict_prob, label, target_share_weight):
        each_pred_id = np.argmax(each_predict_prob)
        if each_target_share_weight < (args.test.w_0/2):
            estimate_counters[int(each_pred_id)].Npred += 1.0
    class_ratio = [x.Npred for x in estimate_counters]
    print(class_ratio)
    common_threshold = np.mean(class_ratio) / 4
    common_estimate = []
    for i in range(len(estimate_counters)):
        if estimate_counters[i].Npred > common_threshold:
            common_estimate.append(i)
    '''
    # print(common_estimate)
    # def outlier(each_target_share_weight, each_pred_id):
    #     return each_target_share_weight > args.test.w_0 or each_pred_id not in common_estimate
    #def outlier(each_target_share_weight, each_pred_id):
    #    return each_target_share_weight > args.test.w_0
    def outlier(each_target_share_weight):
        """Low share-weight => candidate "unknown"/outlier sample."""
        #return each_target_share_weight > args.test.w_0
        return each_target_share_weight < args.test.w_0
    ############################################################################################
    def calculate(list_val):
        """Free-energy style score: T * log(sum(list_val))."""
        total = 0
        T = 10
        for ele in range(0, len(list_val)):
            total = total + list_val[ele]
        return T*np.log(total)
    T = 10
    #i = 0
    #Ttfeat = np.array(Tfeatall)
    #Ttfeat = Ttfeat.cpu().numpy()
    for i in range(1031):
        list_logit=[np.exp(feat_all[i])/T]  #Tfeat_2f[i]
        #logit_t_energy = Tfeat_2f .detach().cpu().numpy()
        #print(logit_t_energy)
        # The larger -E(x), the more likely the sample is in-distribution;
        # smaller values indicate out-of-distribution samples.
        energy = [calculate(x) for x in enumerate(list_logit)]  #Tfeat_2f[i]
        #rr = a+b+c
        energy = energy/np.log(80)  #1031
        #energy = torch.Tensor(energy)
        #energy = energy.cpu()
        energye = energy[0]
        #print(energye,'8888')
    ################################################################################
    counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
    '''
    for (each_predict_prob, each_label, each_target_share_weight) in zip(predict_prob, label, target_share_weight):
        if each_label in source_classes:
            counters[each_label].Ntotal += 1.0
            each_pred_id = np.argmax(each_predict_prob)
            if not outlier(each_target_share_weight, each_pred_id):
                counters[int(each_pred_id)].Npred += 1.0
            if not outlier(each_target_share_weight, each_pred_id) and each_pred_id == each_label:
                counters[each_label].Ncorrect += 1.0
        else:
            counters[-1].Ntotal += 1.0
            each_pred_id = np.argmax(each_predict_prob)
            if outlier(each_target_share_weight, each_pred_id):
                counters[-1].Ncorrect += 1.0
                counters[-1].Npred += 1.0
            else:
                counters[int(each_pred_id)].Npred += 1.0
    # class_ratio = [x.Npred for x in counters]
    # print(class_ratio)
    acc_tests = [x.reportAccuracy() for x in counters if not np.isnan(x.reportAccuracy())]
    correct = [x.Ncorrect for x in counters]
    amount = [x.Ntotal for x in counters]
    common_acc = np.sum(correct[0:-1]) / np.sum(amount[0:-1])
    outlier_acc = correct[-1] / amount[-1]
    '''
    ln = 0
    ee1 = 0
    ee0 = 0
    ee = 0
    # NOTE(review): energye is the scalar left from the LAST iteration of the
    # range(1031) loop above; zipping over a scalar looks unintended (a
    # per-sample energy array was presumably meant) — confirm before relying
    # on this path.
    for (each_predict_prob, each_label, each_target_share_weight, each_energy) in zip(predict_prob, label, target_share_weight, energye):
    #for (each_predict_prob, each_label, each_target_share_weight) in zip(predict_prob, label, target_share_weight):
        # A sample is declared "unknown" only when BOTH criteria agree:
        # low share weight AND low energy.
        if outlier(each_target_share_weight) and each_energy < -1:
            ln = ln + 1
        if not outlier(each_target_share_weight) or each_energy >= -1:
            ee += 1
        each_pred_id = np.argmax(each_predict_prob)
        if not outlier(each_target_share_weight) or each_energy >= -1:
            if each_pred_id == 0:
                ee0 += 1
        if not outlier(each_target_share_weight) or each_energy >= -1:
            if each_pred_id == 1:
                ee1 += 1
        if each_label in source_classes:
            counters[each_label].Ntotal += 1.0
            each_pred_id = np.argmax(each_predict_prob)
            if not outlier(each_target_share_weight) or each_energy >= -1:
                if each_pred_id == each_label:
                    counters[each_label].Ncorrect += 1.0
        else:
            counters[-1].Ntotal += 1.0
            if outlier(each_target_share_weight) and each_energy < -1:
                counters[-1].Ncorrect += 1.0
    acc_tests = [x.reportAccuracy() for x in counters if not np.isnan(x.reportAccuracy())]
    correct = [x.Ncorrect for x in counters]
    amount = [x.Ntotal for x in counters]
    common_acc = np.sum(correct[0:-1]) / np.sum(amount[0:-1])
    outlier_acc = correct[-1] / amount[-1]
    print(ln,'eeeeeeeee')
    print(ee,'eeeeeeeee')
    print(counters[0].Ntotal,counters[0].Ncorrect,'11111111111')
    print(counters[1].Ntotal,counters[1].Ncorrect,'22222222222')
    print(counters[-1].Ntotal,counters[-1].Ncorrect,'3333333333')
    print(ee0,ee1,'44444444444')
    print('common_acc={}, outlier_acc={}'.format(common_acc, outlier_acc))
    # Harmonic mean of common and outlier accuracy (H-score).
    bscore = 2 / (1 / common_acc + 1 / outlier_acc)
    acc_test = torch.ones(1, 1) * np.mean(acc_tests)
    #print('common_acc={}, outlier_acc={}'.format(common_acc, outlier_acc))
    #bscore = 2 / (1 / common_acc + 1 / outlier_acc)
    print('hscore={}'.format(bscore))
    #acc_test = torch.ones(1, 1) * np.mean(acc_tests)
    print('perclass accuracy is {}'.format(acc_test.item()))
    exit(0)
# ===================optimizer
# All optimizers share an inverse-decay LR schedule. The backbone is
# fine-tuned at lr/10, the bottleneck at lr, the five classifier heads at
# 5x lr, and the discriminator at lr.
scheduler = lambda step, initial_lr: inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=10000)
optimizer_finetune = OptimWithSheduler(
    optim.SGD(feature_extractor.parameters(), lr=args.train.lr / 10.0, weight_decay=args.train.weight_decay,
              momentum=args.train.momentum, nesterov=True), scheduler)
optimizer_cls = OptimWithSheduler(
    optim.SGD(classifier.bottleneck.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay,
              momentum=args.train.momentum, nesterov=True), scheduler)
fc_para = [{"params": classifier.fc.parameters()}, {"params": classifier.fc2.parameters()},
           {"params": classifier.fc3.parameters()}, {"params": classifier.fc4.parameters()},
           {"params": classifier.fc5.parameters()}]
optimizer_fc = OptimWithSheduler(
    optim.SGD(fc_para, lr=args.train.lr * 5, weight_decay=args.train.weight_decay,
              momentum=args.train.momentum, nesterov=True), scheduler)
optimizer_discriminator = OptimWithSheduler(
    optim.SGD(discriminator.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay,
              momentum=args.train.momentum, nesterov=True), scheduler)
# optimizer_discriminator_separate = OptimWithSheduler(
#     optim.SGD(discriminator_separate.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay,
#               momentum=args.train.momentum, nesterov=True), scheduler)
# Training-loop state: step counters, best-checkpoint tracker, and the
# share-weight threshold updated after every evaluation round.
global_step = 0
best_acc = 0
total_steps = tqdm(range(args.train.min_step), desc='global step')
epoch_id = 0
threshold = torch.zeros(1).to(device)
# ==================== main training loop =====================================
# Alternates adversarial training with periodic evaluation. Pseudo-labels for
# the target set are exchanged with the evaluation phase through the text file
# "txt1/1 copy.txt" (one "<target index>,<pseudo label>" pair per line).
while global_step < args.train.min_step:
    #####################################################################################
    if global_step % args.test.test_interval == 0:
        #print('99999999')
        with open(join(log_dir, 'config.yaml'), 'w') as f:
            f.write(yaml.dump(save_config))
        # Re-read the pseudo-label file produced by the last evaluation round.
        f = open(r"txt1/1 copy.txt", "r")
        #with open("txt1/1.txt", "r") as f:
        #sourceInLines= f.readlines()
        #f.close()
        new = []  # collects one [index, label] pair (as strings) per line
        for line in f.readlines():
            temp1 = line.strip('\n')  # drop the trailing newline
            temp2 = temp1.split(',')  # split "index,label" on ','
            new.append(temp2)  # accumulate the parsed row
        f.close()
        #print(new)
        #numbers = list(map(int, new))
        #n = 0
        #j = 0
        new_id = []
        new_label = []
        for n in new:
            #print(n)
            new_id.append(int(n[0]))
            new_label.append(int(n[1]))
            #print(n)
        #new_id = new_id
        #new_id = np.array(new_id)
        #new_label = np.array(new_label)
        # new_t[i] = [target index, pseudo label]
        new_t = [[random.random() for _ in range(2)] for _ in range(np.size(new_id))]
        i = 0
        for i in range(np.size(new_id)):
            new_t[i][0] = new_id[i]
            new_t[i][1] = new_label[i]
            i = i + 1
        # r = running offset of the current target batch; e = batch counter.
        r = 0
        e = 0
    ###################################################################################
    iters = tqdm(
        zip(source_train_dl, source_train_dl2, source_train_dl3, source_train_dl4, source_train_dl5, target_train_dl),
        desc=f'epoch {epoch_id} ', total=min(len(source_train_dl), len(target_train_dl)))
    epoch_id += 1
    for i, ((im_source, label_source), (im_source2, label_source2), (im_source3, label_source3),
            (im_source4, label_source4), (im_source5, label_source5), (im_target, label_target)) in enumerate(iters):
        feature_extractor.train()
        classifier.train()
        save_label_target = label_target  # for debug usage
        label_source = label_source.to(device)
        label_source2 = label_source2.to(device)
        label_source3 = label_source3.to(device)
        label_source4 = label_source4.to(device)
        label_source5 = label_source5.to(device)
        label_target = label_target.to(device)
        # =========================forward pass
        im_source = im_source.to(device)
        im_source2 = im_source2.to(device)
        im_source3 = im_source3.to(device)
        im_source4 = im_source4.to(device)
        im_source5 = im_source5.to(device)
        im_target = im_target.to(device)
        fc1_s = feature_extractor.forward(im_source)
        fc1_s2 = feature_extractor.forward(im_source2)
        fc1_s3 = feature_extractor.forward(im_source3)
        fc1_s4 = feature_extractor.forward(im_source4)
        fc1_s5 = feature_extractor.forward(im_source5)
        fc1_t = feature_extractor.forward(im_target)
        Tfc1_s = fc1_s.tolist()
        Tfc1_t = fc1_t.tolist()
        #print(Tfc1_s, Tfc1_t, 'tttttttttttttttt')
        #Tfc1_s = fc1_s.cpu().detach().numpy()
        #Tfeat_0f[i][j] = Tfeat_0f[i][j].cpu().numpy()
        #Tfc1_t = fc1_t.cpu().detach().numpy()
        #Tfc1_s = np.array(Tfc1_s)
        #Tfc1_t = np.array(Tfc1_t)
        #new_id = []
        #new_label = []
        #print(Tfc1_s,Tfc1_t,np.size(Tfc1_s,axis=0),np.size(Tfc1_s,axis=1),np.size(Tfc1_t,axis=0),np.size(Tfc1_t,axis=1),'22222222')
        # Collect the target features in this batch (offsets r..r+35) whose
        # indices carry a pseudo label; pseudo label 2 ("unknown") features
        # are additionally appended to the source pool.
        #Tfeat_s = []
        Tfeat_s = Tfc1_s
        Tfeat_t = []
        Tfeat_t22 = []
        Tlable = []
        Tlable_t22 = label_source
        #Tlable = label_source
        Tlable_t22 = Tlable_t22.tolist()
        #print(label_target,'99999999')
        i = 0
        #for id in new_id:
        for i in range(np.size(new_id)):
            idd = r
            if e != 0:
                y = idd-(36*e)
            if e == 0:
                y = idd
            #y = idd-(32*e)
            for y in range(36):
                if idd == new_t[i][0]:
                    # x = position of this pseudo-labeled sample inside the
                    # current 36-sample batch.
                    if e != 0:
                        x = idd-(36*e)
                    if e == 0:
                        x = idd
                    #print(Tfc1_s[x])
                    #Tfeat_s += list([Tfc1_s[x]])
                    #print(new_t[i][0],new_t[i][1],'1111')
                    Tfeat_t += list([Tfc1_t[x]])
                    #new_label
                    Tlable.append(new_t[i][1])
                    if new_t[i][1] == 2:
                        Tfeat_t22 += list([Tfc1_t[x]])
                        Tlable_t22.append(new_t[i][1])
                    #print(Tfeat_s,Tfeat_t,Tlable,'333333333')
                    #print(idd,'11111111111')
                    #input()
                idd = idd + 1
                y = y + 1
        Tfeat_t = np.array(Tfeat_t)
        #input()
        Tfeat_s = list(Tfeat_s) + list(Tfeat_t22)
        Tfeat_s = np.array(Tfeat_s)
        Tlable = np.array(Tlable)
        Tlable_t22 = np.array(Tlable_t22)
        #print(label_target)
        #print(Tfeat_s,Tlable)
        #input()
        #print(Tfeat_s,Tfeat_t,Tlable,'333333333')
        #print(np.size(Tfeat_s,axis=1),np.size(Tfeat_t,axis=1),Tlable,'333333333')
        #print(idd,'11111111111')
        #input()
        #print(Tfeat_s,Tfeat_t,Tlable)
        #input()
        # Triplet losses on the augmented source pool and on the pseudo-labeled
        # target pool (margins 0.4 / 0.2); zero when a pool is empty.
        if Tfeat_s != []:
            Tloss_s = batch_all_triplet_loss(Tlable_t22, Tfeat_s, 0.4, False)
            #Tloss_s = batch_hard_triplet_loss(Tlable_t22, Tfeat_s, 0.4, False)#
        if Tfeat_t != []:
            Tloss_t = batch_all_triplet_loss(Tlable, Tfeat_t, 0.2, False)
            #Tloss_t = batch_hard_triplet_loss(Tlable, Tfeat_t, 0.2, False)#
        if Tfeat_s == []:
            Tloss_s = 0
        if Tfeat_t == []:
            Tloss_t = 0
        Tloss = Tloss_s + Tloss_t
        '''
        Tlable = torch.from_numpy(Tlable)
        Tfeat_s1 = torch.from_numpy(Tfeat_s)
        Tlable_t22 = torch.from_numpy(Tlable_t22)
        Tfeat_t1 = torch.from_numpy(Tfeat_t)
        if Tfeat_s != []:
            inp_sp, inp_sn = convert_label_to_similarity(Tfeat_s1, Tlable_t22)
            criterion = CircleLoss(m=0.1, gamma=256)
            circle_loss = criterion(inp_sp, inp_sn)
            Tloss_s = circle_loss
            Tloss_s = np.array(Tloss_s)
        if Tfeat_t != []:
            inp_sp, inp_sn = convert_label_to_similarity(Tfeat_t1, Tlable)
            criterion = CircleLoss(m=0.4, gamma=256)
            circle_loss = criterion(inp_sp, inp_sn)
            Tloss_t = circle_loss
            Tloss_t = np.array(Tloss_t)
        if Tfeat_s == []:
            Tloss_s = 0
        if Tfeat_t == []:
            Tloss_t = 0
        Tloss = Tloss_s + Tloss_t
        '''
        #########################################################################
        fc1_s, feature_source, fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5, predict_prob_source = classifier.forward(fc1_s)
        fc1_s2, feature_source2, fc2_s_2, fc2_s2_2, fc2_s3_2, fc2_s4_2, fc2_s5_2, predict_prob_source2 = \
            classifier.forward(fc1_s2)
        fc1_s3, feature_source3, fc2_s_3, fc2_s2_3, fc2_s3_3, fc2_s4_3, fc2_s5_3, predict_prob_source3 = \
            classifier.forward(fc1_s3)
        fc1_s4, feature_source4, fc2_s_4, fc2_s2_4, fc2_s3_4, fc2_s4_4, fc2_s5_4, predict_prob_source4 = \
            classifier.forward(fc1_s4)
        fc1_s5, feature_source5, fc2_s_5, fc2_s2_5, fc2_s3_5, fc2_s4_5, fc2_s5_5, predict_prob_source5 = \
            classifier.forward(fc1_s5)
        fc1_t, feature_target, fc2_t, fc2_t2, fc2_t3, fc2_t4, fc2_t5, predict_prob_target = classifier.forward(fc1_t)
        domain_prob_discriminator_source = discriminator.forward(feature_source)
        domain_prob_discriminator_target = discriminator.forward(feature_target)
        # Per-sample transferability weights (batch size hard-coded to 36).
        source_share_weight = get_label_weight(label_source, common_classes).view(36, 1).to(device)
        entropy = get_entropy(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5,
                              domain_temperature=1.0, class_temperature=1.0).detach()
        consistency = get_consistency(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5).detach()
        # confidence, indices = torch.max(predict_prob_target, dim=1)
        target_share_weight = get_target_weight(entropy, consistency, threshold).view(36, 1).to(device)
        # Warm-up: disable the weighted adversarial terms for the first 500 steps.
        if global_step < 500:
            source_share_weight = torch.zeros_like(source_share_weight)
            target_share_weight = torch.zeros_like(target_share_weight)
        # ==============================compute loss
        adv_loss = torch.zeros(1, 1).to(device)
        # adv_loss_separate = torch.zeros(1, 1).to(device)
        tmp = source_share_weight * nn.BCELoss(reduction='none')(domain_prob_discriminator_source,
                                                                 torch.ones_like(domain_prob_discriminator_source))
        adv_loss += torch.mean(tmp, dim=0, keepdim=True)
        tmp = target_share_weight * nn.BCELoss(reduction='none')(domain_prob_discriminator_target,
                                                                 torch.zeros_like(domain_prob_discriminator_target))
        adv_loss += torch.mean(tmp, dim=0, keepdim=True)
        # ============================== cross entropy loss, it receives logits as its inputs
        ce = nn.CrossEntropyLoss()(fc2_s, label_source)
        ce2 = nn.CrossEntropyLoss()(fc2_s2_2, label_source2)
        ce3 = nn.CrossEntropyLoss()(fc2_s3_3, label_source3)
        ce4 = nn.CrossEntropyLoss()(fc2_s4_4, label_source4)
        ce5 = nn.CrossEntropyLoss()(fc2_s5_5, label_source5)
        with OptimizerManager(
                [optimizer_finetune, optimizer_cls, optimizer_fc, optimizer_discriminator]):
            # [optimizer_finetune, optimizer_cls, optimizer_discriminator, optimizer_discriminator_separate]):
            # loss = ce + adv_loss + adv_loss_separate
            loss = (ce + ce2 + ce3 + ce4 + ce5) / 5 + adv_loss + Tloss
            loss.backward()
        global_step += 1
        total_steps.update()
        if global_step % args.log.log_interval == 0:
            counter = AccuracyCounter()
            counter.addOneBatch(variable_to_numpy(one_hot(label_source, len(source_classes))),
                                variable_to_numpy(predict_prob_source))
            acc_train = torch.tensor([counter.reportAccuracy()]).to(device)
            logger.add_scalar('adv_loss', adv_loss, global_step)
            logger.add_scalar('ce', ce, global_step)
            # logger.add_scalar('adv_loss_separate', adv_loss_separate, global_step)
            logger.add_scalar('acc_train', acc_train, global_step)
        # -------- periodic evaluation + pseudo-label refresh --------
        if global_step % args.test.test_interval == 0:
            feature_extractor.eval()
            classifier.eval()
            entropy = None
            consistency = None
            counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
            with TrainingModeManager([feature_extractor, classifier, discriminator], train=False) as mgr, \
                    Accumulator(['feature', 'predict_prob', 'label', 'entropy', 'consistency',
                                 'confidence']) as target_accumulator, torch.no_grad():
                #############################################################
                # txt_id[i] = [target index, assigned pseudo label, confidence];
                # txt_00/11/22 bucket the rows by assigned label 0/1/2.
                # NOTE(review): dataset size is hard-coded to 1031 — confirm.
                txt_id = [[0 for _ in range(3)] for _ in range(1031)]
                idd = [0 for _ in range(1031)]
                txt_00 = [[0 for _ in range(3)] for _ in range(1031)]
                txt_11 = [[0 for _ in range(3)] for _ in range(1031)]
                txt_22 = [[0 for _ in range(3)] for _ in range(1031)]
                for i in range(1031):
                    txt_id[i][0] = i
                    idd[i] = i
                    #print(txt_id[i][0],'111111')
                h = 0
                feat_all = []
                ##############################################################
                for i, (im, label) in enumerate(tqdm(target_test_dl, desc='testing')):
                    im = im.to(device)
                    label = label.to(device)
                    feature = feature_extractor.forward(im)
                    feature, __, fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5, predict_prob = classifier.forward(
                        feature)
                    entropy = get_entropy(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5,
                                          domain_temperature=1.0, class_temperature=1.0).detach()
                    consistency = get_consistency(fc2_s, fc2_s2, fc2_s3, fc2_s4, fc2_s5).detach()
                    confidence, indices = torch.max(predict_prob, dim=1)
                    ###################################################################
                    # Record the max class probability per sample as its confidence.
                    predict_prob1 = predict_prob.tolist()
                    #print(np.size(predict_prob1,0),'22222')
                    for j in range(np.size(predict_prob1,0)):
                        if predict_prob1[j][0] > predict_prob1[j][1]:
                            #print('u')
                            #print(predict_prob1[j][0],'333333')
                            txt_id[h][2] = predict_prob1[j][0]
                        else:
                            txt_id[h][2] = predict_prob1[j][1]
                        #print(txt_id[h],'22222')
                        h = h + 1
                    #input()
                    ss = feature.tolist()
                    feat_all = list(feat_all) + list(ss)
                    ##############################################################
                    for name in target_accumulator.names:
                        globals()[name] = variable_to_numpy(globals()[name])
                    target_accumulator.updateData(globals())
            for x in target_accumulator:
                globals()[x] = target_accumulator[x]
            entropy = normalize_weight(torch.tensor(entropy))
            consistency = normalize_weight(torch.tensor(consistency))
            #confidence = nega_weight(torch.tensor(confidence))
            confidence = nega_weight(torch.tensor(confidence))
            #target_share_weight = (entropy + consistency) / 2
            target_share_weight = (confidence + 1 - consistency + 1 - entropy) / 3
            # The mean share weight becomes the threshold used by training.
            threshold = torch.mean(target_share_weight).to(device)
            def outlier(each_target_share_weight):
                # Low share-weight => candidate "unknown"/outlier sample.
                return each_target_share_weight < args.test.w_0
            #################################################################################################
            def calculate(list_val):
                # Free-energy style score: T * log(sum(list_val)).
                total = 0
                T = 10
                for ele in range(0, len(list_val)):
                    total = total + list_val[ele]
                return T*np.log(total)
            T = 10
            for i in range(1031):
                list_logit=[np.exp(feat_all[i])/T]
                energy = [calculate(x) for x in enumerate(list_logit)]  #Tfeat_2f[i]
                energy = energy/np.log(80)  #1031
                energye = energy[0]
                #print(energye,'8888')
            ####################################################################################################################
            counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
            ln = 0
            a = 0
            b = 0
            c = 0
            # NOTE(review): energye is the scalar from the last range(1031)
            # iteration; zipping over it looks unintended — confirm.
            for (each_predict_prob, each_label, each_target_share_weight, each_energy, i) in zip(predict_prob, label,
                                                                                                target_share_weight, energye, idd):
                # Assign pseudo label 2 ("unknown") when both criteria agree.
                if outlier(each_target_share_weight) and each_energy < -3:
                    txt_id[i][1] = 2
                    ln = ln + 1
                else:
                    txt_id[i][1] = np.argmax(each_predict_prob)
                #print(txt_id[i])
                #input()
                if txt_id[i][1] == 2:
                    #txt_22[c].append(txt_id[i])
                    txt_22[c] = txt_id[i]
                    c = c + 1
                if txt_id[i][1] == 0:
                    #txt_00[a].append(txt_id[i])
                    txt_00[a] = txt_id[i]
                    a = a + 1
                if txt_id[i][1] == 1:
                    #txt_11[b].append(txt_id[i])
                    txt_11[b] = txt_id[i]
                    b = b + 1
                if each_label in source_classes:
                    counters[each_label].Ntotal += 1.0
                    each_pred_id = np.argmax(each_predict_prob)
                    if not outlier(each_target_share_weight) or each_energy >= -3:
                        if each_pred_id == each_label:
                            counters[each_label].Ncorrect += 1.0
                else:
                    counters[-1].Ntotal += 1.0
                    if outlier(each_target_share_weight) and each_energy < -3:
                        counters[-1].Ncorrect += 1.0
            # NOTE(review): sorted() returns a new list; these two results are
            # discarded, so txt_00/txt_11 stay unsorted — presumably
            # txt_00.sort(...) was intended. Confirm before changing.
            sorted(txt_00, key=lambda s: s[2], reverse=True)
            sorted(txt_11, key=lambda s: s[2], reverse=True)
            # Keep only the most confident 35% of label-0 and label-1 rows.
            a = int(a*0.35)
            b = int(b*0.35)
            txt_0 = [[random.random() for _ in range(2)] for _ in range(a)]
            txt_1 = [[random.random() for _ in range(2)] for _ in range(b)]
            txt_2 = [[random.random() for _ in range(2)] for _ in range(c)]
            txt_all = []
            for i in range(a):
                txt_0[i][0] = txt_00[i][0]
                txt_0[i][1] = txt_00[i][1]
            for i in range(b):
                txt_1[i][0] = txt_11[i][0]
                txt_1[i][1] = txt_11[i][1]
            for i in range(c):
                txt_2[i][0] = txt_22[i][0]
                txt_2[i][1] = txt_22[i][1]
            txt_all = list(txt_0) + list(txt_1) + list(txt_2)
            acc_tests = [x.reportAccuracy() for x in counters if not np.isnan(x.reportAccuracy())]
            correct = [x.Ncorrect for x in counters]
            amount = [x.Ntotal for x in counters]
            common_acc = np.sum(correct[0:-1]) / np.sum(amount[0:-1])
            outlier_acc = correct[-1] / amount[-1]
            print(ln,'eeeeeeeee')
            print(counters[0].Ntotal,counters[0].Ncorrect,'11111111111')
            print(counters[1].Ntotal,counters[1].Ncorrect,'22222222222')
            print(counters[-1].Ntotal,counters[-1].Ncorrect,'3333333333')
            print('common_acc={}, outlier_acc={}'.format(common_acc, outlier_acc))
            bscore = 2 / (1 / common_acc + 1 / outlier_acc)
            acc_test = torch.ones(1, 1) * np.mean(acc_tests)
            # Persist the refreshed pseudo-labels for the next training phase.
            with open("txt1/1 copy.txt", "w") as output:
                i = 0
                for i in range(a+b+c):
                    # Strip the list brackets, then quotes; append a newline.
                    s = str(txt_all[i]).replace('[','').replace(']','')
                    s = s.replace("'",'')+'\n'
                    output.write(s)
            logger.add_scalar('acc_test', acc_test, global_step)
            logger.add_scalar('bscore', bscore, global_step)
            # clear_output()
            data = {
                "feature_extractor": feature_extractor.state_dict(),
                'classifier': classifier.state_dict(),
                'discriminator': discriminator.state_dict() if not isinstance(discriminator, Nonsense) else 1.0,
                # 'discriminator_separate': discriminator_separate.state_dict(),
            }
            # Checkpoint: best-so-far plus a rolling "current" snapshot.
            if acc_test > best_acc:
                best_acc = acc_test
                with open(join(log_dir, 'best.pkl'), 'wb') as f:
                    torch.save(data, f)
            with open(join(log_dir, 'current.pkl'), 'wb') as f:
                torch.save(data, f)
        # Advance the target-batch offset / batch counter.
        r = r + 36
        e = e + 1
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,981
|
suwan9/UDAML
|
refs/heads/master
|
/UDAML/data.py
|
from config import *
from easydl import *
from collections import Counter
from torchvision.transforms.transforms import *
from torch.utils.data import DataLoader, WeightedRandomSampler
import os
import numpy as np
import tensorpack
import time
import random
import numbers
#from scipy.misc import imresize
from PIL import Image
import numpy as np
from imageio import imread
import tensorlayer as tl
from six.moves import cPickle
from utilities import *
import warnings
'''
assume classes across domains are the same.
[0 1 ..................................................................... N - 1]
|----common classes --||----source private classes --||----target private classes --|
'''
# Partition the label space: `a` common classes shared by both domains,
# `b` source-private classes, and the remaining `c` target-private classes.
a, b, c = args.data.dataset.n_share, args.data.dataset.n_source_private, args.data.dataset.n_total
c = c - a - b
common_classes = [i for i in range(a)]
source_private_classes = [i + a for i in range(b)]
target_private_classes = [i + a + b for i in range(c)]
source_classes = common_classes + source_private_classes
target_classes = common_classes + target_private_classes
# Random augmentation for training; deterministic center crop for testing.
train_transform = Compose([
    Resize(256),
    RandomCrop(224),
    RandomHorizontalFlip(),
    ToTensor()
])
test_transform = Compose([
    Resize(256),
    CenterCrop(224),
    ToTensor()
])
source_train_ds = FileListDataset(list_path=source_file, path_prefix=dataset.prefixes[args.data.dataset.source],
                                  transform=train_transform, filter=(lambda x: x in source_classes))
source_test_ds = FileListDataset(list_path=source_file, path_prefix=dataset.prefixes[args.data.dataset.source],
                                 transform=test_transform, filter=(lambda x: x in source_classes))
target_train_ds = FileListDataset(list_path=target_file, path_prefix=dataset.prefixes[args.data.dataset.target],
                                  transform=train_transform, filter=(lambda x: x in target_classes))
target_test_ds = FileListDataset(list_path=target_file, path_prefix=dataset.prefixes[args.data.dataset.target],
                                 transform=test_transform, filter=(lambda x: x in target_classes))
# Optionally re-balance source sampling by inverse class frequency.
classes = source_train_ds.labels
freq = Counter(classes)
class_weight = {x : 1.0 / freq[x] if args.data.dataloader.class_balance else 1.0 for x in freq}
source_weights = [class_weight[x] for x in source_train_ds.labels]
sampler = WeightedRandomSampler(source_weights, len(source_train_ds.labels))
source_train_dl = DataLoader(dataset=source_train_ds, batch_size=args.data.dataloader.batch_size,
                             sampler=sampler, num_workers=args.data.dataloader.data_workers, drop_last=True)
source_test_dl = DataLoader(dataset=source_test_ds, batch_size=args.data.dataloader.batch_size, shuffle=False,
                            num_workers=1, drop_last=False)
target_train_dl = DataLoader(dataset=target_train_ds, batch_size=args.data.dataloader.batch_size, shuffle=True,
                             num_workers=args.data.dataloader.data_workers, drop_last=True)
target_test_dl = DataLoader(dataset=target_test_ds, batch_size=args.data.dataloader.batch_size, shuffle=False,
                            num_workers=1, drop_last=False)
# --------------------------------- section break ---------------------------------
# Silence all warnings emitted by the legacy tensorpack/tensorlayer pipeline below.
warnings.filterwarnings('ignore', message='.*', category=Warning)
class CustomDataLoader(object):
    """Wrap a tensorpack DataFlow into a batched, multi-process prefetching loader."""

    def __init__(self, dataset, batch_size, num_threads=8, remainder=None):
        self.ds0 = dataset
        self.batch_size = batch_size
        self.num_threads = num_threads
        if not remainder:
            try:
                is_train = self.ds0.is_train
                remainder = False if is_train else True
                # if is_train, there is no need to keep the remainder batch
            except Exception as e:
                # self.ds0 maybe doesn't have is_train attribute, then it has no test mode, set remainder = False
                remainder = False
        # use_list=False, for each in data point, add a batch dimension (return in numpy array)
        self.ds1 = tensorpack.dataflow.BatchData(self.ds0, self.batch_size, remainder=remainder, use_list=False,)
        # BatchData stacks data points into batches: it yields the same number of
        # components as ds, but each component gains a leading batch dimension
        # (either a list of the originals or a stacked numpy array).
        # use 1 thread in test to avoid randomness (test should be deterministic)
        self.ds2 = tensorpack.dataflow.PrefetchDataZMQ(self.ds1, num_proc=self.num_threads if not remainder else 1)
        # PrefetchDataZMQ forks >=1 worker processes (each calls reset_state())
        # and collects data points from them over ZeroMQ IPC pipes; this is
        # usually faster than MultiProcessRunner.
        # required by tensorlayer package
        self.ds2.reset_state()

    def generator(self):
        """Return the batch generator of the underlying prefetching dataflow."""
        return self.ds2.get_data()
class BaseDataset(tensorpack.dataflow.RNGDataFlow):
    """Base dataset: subclasses populate self.datas / self.labels in _fill_data().

    Supports per-sample filtering (skip_pred), transformation (transform) and
    weighted sampling (sample_weight) during training.
    """

    def __init__(self, is_train=True, skip_pred=None, transform=None, sample_weight=None):
        self.is_train = is_train
        # skip_pred(data, label, is_train) -> True to drop the sample.
        self.skip_pred = skip_pred or (lambda data, label, is_train : False)
        # transform(data, label, is_train) -> (data, label) after augmentation.
        self.transform = transform or (lambda data, label, is_train : (data, label))
        # sample_weight: callable per-sample weight, or a precomputed sequence.
        self.sample_weight = sample_weight or (lambda data, label : 1.0)
        self.datas = []
        self.labels = []
        self._fill_data()
        self._post_init()

    def _fill_data(self):
        # Must be overridden by subclasses; calling the base version is an error.
        raise NotImplementedError("not implemented!")

    def _post_init(self):
        # Drop samples rejected by skip_pred, then build a normalized sampling
        # distribution over the remaining ones.
        tmp = [[data, label] for (data, label) in zip(self.datas, self.labels) if not self.skip_pred(data, label, self.is_train) ]
        self.datas = [x[0] for x in tmp]
        self.labels = [x[1] for x in tmp]
        if callable(self.sample_weight):
            # callable() reports whether the object can be invoked like a
            # function (note that classes are callable too).
            self._weight = [self.sample_weight(x, y) for (x, y) in zip(self.datas, self.labels)]
        else:
            self._weight = self.sample_weight
        self._weight = np.asarray(self._weight, dtype=np.float32).reshape(-1)
        assert len(self._weight) == len(self.datas), 'dimension not match!'
        # normalize so the weights form a probability distribution
        self._weight = self._weight / np.sum(self._weight)

    def size(self):
        """Number of samples after filtering."""
        return len(self.datas)

    def _get_one_data(self, data, label):
        # Must be overridden by subclasses to load a single sample.
        raise NotImplementedError("not implemented!")

    def get_data(self):
        # Generator over the dataset. During training, indices are drawn with
        # replacement according to self._weight (np.random.choice with p=);
        # during testing, samples are visited in order for determinism.
        size = self.size()
        ids = list(range(size))
        for _ in range(size):
            id = np.random.choice(ids, p=self._weight) if self.is_train else _
            data, label = self._get_one_data(self.datas[id], self.labels[id])
            data, label = self.transform(data, label, self.is_train)
            # Numeric labels are wrapped in a 1-element array for batching.
            yield np.asarray(data), np.asarray([label]) if isinstance(label, numbers.Number) else label
class BaseImageDataset(BaseDataset):#基本图像数据集
def __init__(self, imsize=224, is_train=True, skip_pred=None, transform=None, sample_weight=None):
self.imsize = imsize
super(BaseImageDataset, self).__init__(is_train, skip_pred, transform, sample_weight=sample_weight)
def _get_one_data(self, data, label):
im = imread(data, pilmode='RGB') #图像读取
if self.imsize:
'''
norm_map = imresize(raw_hm, (height, width))
#换成
norm_map = np.array(Image.fromarray(raw_hm).resize( (height, width)))
'''
#im = imresize(im, (self.imsize, self.imsize))
im = np.array(Image.fromarray(im).resize( (self.imsize, self.imsize)))
#输入固定大小调整shape
return im, label
def one_hot(n_class, index):
tmp = np.zeros((n_class,), dtype=np.float32)
tmp[index] = 1.0
return tmp
class FileListDataset1(BaseImageDataset):#文件列表数据集
def __init__(self, list_path, path_prefix='', imsize=224, is_train=True, skip_pred=None, transform=None, sample_weight=None):
self.list_path = list_path
self.path_prefix = path_prefix
super(FileListDataset1, self).__init__(imsize=imsize, is_train=is_train, skip_pred=skip_pred, transform=transform, sample_weight=sample_weight)
def _fill_data(self):
with open(self.list_path, 'r') as f:
data = [[line.split()[0], line.split()[1]] for line in f.readlines() if line.strip()]
# avoid empty lines 避免空行
# split() 通过指定分隔符对字符串进行切片
# readlines() 方法用于读取所有行(直到结束符 EOF)并返回列表
# strip() 方法用于移除字符串头尾指定的字符(默认为空格或换行符)或字符序列。
self.datas = [os.path.join(self.path_prefix, x[0]) for x in data]
#os.path.join()函数:连接两个或更多的路径名组件
try:
self.labels = [int(x[1]) for x in data]
except ValueError as e:
print('invalid label number, maybe there is space in image path?')
#标签号无效,可能是图像路径中有空格
raise e
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,982
|
suwan9/UDAML
|
refs/heads/master
|
/CMUML/models/basenet.py
|
from torchvision import models
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Function, Variable
class GradReverse(Function):
@staticmethod
def forward(ctx, x,lambd):
ctx.save_for_backward(lambd)
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
lambd=ctx.saved_tensors[0]
return grad_output.neg()*lambd, None
def grad_reverse(x,lambd=1.0):
return GradReverse.apply(x, Variable(torch.ones(1)*lambd).cuda())
class ResBase(nn.Module):
def __init__(self, option='resnet50', pret=True, unit_size=100):
super(ResBase, self).__init__()
self.dim = 2048
if option == 'resnet18':
model_ft = models.resnet18(pretrained=pret)
self.dim = 512
if option == 'resnet50':
model_ft = models.resnet50(pretrained=pret)
if option == 'resnet101':
model_ft = models.resnet101(pretrained=pret)
if option == 'resnet152':
model_ft = models.resnet152(pretrained=pret)
mod = list(model_ft.children())
mod.pop()
self.features = nn.Sequential(*mod)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), self.dim)
return x
class ResClassifier_MME(nn.Module):
def __init__(self, num_classes=12, input_size=2048, temp=0.05):
super(ResClassifier_MME, self).__init__()
self.fc = nn.Linear(input_size, num_classes, bias=False)
self.tmp = temp
def set_lambda(self, lambd):
self.lambd = lambd
def forward(self, x, dropout=False, return_feat=False, reverse=False):
if return_feat:
return x
x = F.normalize(x)
x = self.fc(x)/self.tmp
return x
def weight_norm(self):
w = self.fc.weight.data
norm = w.norm(p=2, dim=1, keepdim=True)
self.fc.weight.data = w.div(norm.expand_as(w))
def weights_init(self, m):
m.weight.data.normal_(0.0, 0.1)
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,983
|
suwan9/UDAML
|
refs/heads/master
|
/UDAML/main.py
|
from data import *
from net import *
from lib import *
import datetime
from tqdm import tqdm
if is_in_notebook():
from tqdm import tqdm_notebook as tqdm
from torch import optim
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
import numpy as np
import torch
import operator
from os import listdir
from eval import batch_hard_triplet_loss
from eval import batch_all_triplet_loss
cudnn.benchmark = True
cudnn.deterministic = True
seed_everything()
if args.misc.gpus < 1:
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '7'
gpu_ids = []
output_device = torch.device('cpu')
else:
# gpu_ids = select_GPUs(args.misc.gpus)
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '7'
gpu_ids = [0]
output_device = gpu_ids[0]
now = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
log_dir = f'{args.log.root_dir}/{now}'
logger = SummaryWriter(log_dir)
'''
with open(join(log_dir, 'config.yaml'), 'w') as f:
f.write(yaml.dump(save_config))
f = open(r"txt1/1.txt", "r")
#with open("txt1/1.txt", "r") as f: #文件bai为du123.txt
#sourceInLines= f.readlines() #按行读出文件zhidao
#f.close()
new = [] #定义一个空列表,zhuan用来存储结果
for line in f.readlines():
temp1 = line.strip('\n') #去掉每行最内后的换行符'\n'
temp2 = temp1.split(',') #以','为标志,将每容行分割成列表
new.append(temp2) #将上一步得到的列表添加到new中
f.close()
#print(new)
#numbers = list(map(int, new))
#n = 0
#j = 0
new_id = []
new_label = []
for n in new:
#print(n)
new_id.append(int(n[0]))
new_label.append(int(n[1]))
#print(n)
#new_id = new_id
#new_id = np.array(new_id)
#new_label = np.array(new_label)
new_t = [[random.random() for _ in range(2)]for _ in range(np.size(new_id))]
i = 0
for i in range(np.size(new_id)):
new_t[i][0] = new_id[i]
new_t[i][1] = new_label[i]
i = i + 1
#print(new_id,'11111')
#print(new_label,'77777')
#print(new_t,'22222')
'''
model_dict = {
'resnet50': ResNet50Fc,
'vgg16': VGG16Fc
}
class TotalNet(nn.Module):
def __init__(self):
super(TotalNet, self).__init__()
self.feature_extractor = model_dict[args.model.base_model](args.model.pretrained_model)
classifier_output_dim = len(source_classes)
self.classifier = CLS(self.feature_extractor.output_num(), classifier_output_dim, bottle_neck_dim=256)
self.discriminator = AdversarialNetwork(256)
self.discriminator_separate = AdversarialNetwork(256)
def forward(self, x):
f = self.feature_extractor(x)
f, _, __, y = self.classifier(f)
d = self.discriminator(_)
d_0 = self.discriminator_separate(_)
return y, d, d_0
totalNet = TotalNet()
feature_extractor = nn.DataParallel(totalNet.feature_extractor, device_ids=gpu_ids, output_device=output_device).train(True)
classifier = nn.DataParallel(totalNet.classifier, device_ids=gpu_ids, output_device=output_device).train(True)
discriminator = nn.DataParallel(totalNet.discriminator, device_ids=gpu_ids, output_device=output_device).train(True)
discriminator_separate = nn.DataParallel(totalNet.discriminator_separate, device_ids=gpu_ids, output_device=output_device).train(True)
if args.test.test_only:
assert os.path.exists(args.test.resume_file)
data = torch.load(open(args.test.resume_file, 'rb'))
feature_extractor.load_state_dict(data['feature_extractor'])
classifier.load_state_dict(data['classifier'])
discriminator.load_state_dict(data['discriminator'])
discriminator_separate.load_state_dict(data['discriminator_separate'])
feat_all = []
label_all = []
counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
with TrainingModeManager([feature_extractor, classifier, discriminator_separate], train=False) as mgr, \
Accumulator(['feature', 'predict_prob', 'label', 'domain_prob', 'before_softmax',
'target_share_weight']) as target_accumulator, \
torch.no_grad():
for i,(im, label) in enumerate(tqdm(target_test_dl, desc='testing ')):
#print(i,'uuuuuu')
#input()
im = im.to(output_device)
label = label.to(output_device)
#print(label)
#input()
label_all = list(label_all) + list(label)
feature = feature_extractor.forward(im)
feature, __, before_softmax, predict_prob = classifier.forward(feature)
domain_prob = discriminator_separate.forward(__)
ss = feature.tolist()
#print(ss,'9999')
#feattt = [[random.random() for _ in range(2048)]for _ in range(8)]
#print(feattt,'0000')
#for j in range(8):
#feattt[j] = feature[j].tolist()
feat_all = list(feat_all) + list(ss)
#print(feature,'qqqqq')
#print(before_softmax,'wwwww')
#print(predict_prob,'eeeeee')
#input()
target_share_weight = get_target_share_weight(domain_prob, before_softmax, domain_temperature=1.0,
class_temperature=1.0)
for name in target_accumulator.names:
globals()[name] = variable_to_numpy(globals()[name])
target_accumulator.updateData(globals())
#print(label_all,'qqqqq')
for x in target_accumulator:
globals()[x] = target_accumulator[x]
def outlier(each_target_share_weight):
return each_target_share_weight < args.test.w_0 #T未知
#################################################################################################
def calculate(list_val):
total = 0
T = 10
for ele in range(0, len(list_val)):
total = total + list_val[ele]
return T*np.log(total)
T = 10
#i = 0
#Ttfeat = np.array(Tfeatall)
#Ttfeat = Ttfeat.cpu().numpy()
for i in range(1031):
list_logit=[np.exp(feat_all[i])/T] #Tfeatall[i] for x in enumerate(logit_t_energy)Tfeat_2f[i]
#logit_t_energy = Tfeat_2f .detach().cpu().numpy()
#print(logit_t_energy)
#-E(X) 值越大,表示其越是分布内的样本,否则表示其越是分布外的样本
energy = [calculate(x) for x in enumerate(list_logit)]#Tfeat_2f[i]
#rr = a+b+c
energy = energy/np.log(80)#1031
#energy = torch.Tensor(energy)
#energy = energy.cpu()
energye = energy[0]
print(energye,'8888')
#i = 0
#j = 0
#x = 0
#lab = [0 for i in range(c+b+a)]
#lab = [0 for n in range(1031)]
#i = 0
#for i in range(1031):
#lab[i][0] = Tfeatal[i][0]
#for i in range(1031):
#if energy[0][i] < -4.5:
#lab[i] = 2 #Tfeat_2e[i][2] = 2
#x = x + 1
#print(x,'99999')
#else:
#lab[i][1] = 4
#################################################################################################
#print(label)
counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
ln = 0
#print(np.size(counters),'1111111')
for (each_predict_prob, each_label, each_target_share_weight, each_energy) in zip(predict_prob, label, target_share_weight, energye):
#print(each_energy)
#input()
if outlier(each_target_share_weight[0]) and each_energy < -4.55:
ln = ln + 1
if each_label in source_classes:
counters[each_label].Ntotal += 1.0
each_pred_id = np.argmax(each_predict_prob)
if not outlier(each_target_share_weight[0]) and each_pred_id == each_label or each_energy >= -4.55:
#print(each_target_share_weight[0],'777777')
#如果是已知类并且分类正确
counters[each_label].Ncorrect += 1.0
#print(counters[each_label].Ncorrect)
else:
counters[-1].Ntotal += 1.0
if outlier(each_target_share_weight[0]) and each_energy < -4.55:
counters[-1].Ncorrect += 1.0
#print(counters[-1].Ncorrect)
#print(counters,'333333333')
print(counters[0].Ntotal,counters[0].Ncorrect,'44444')
print(counters[1].Ntotal,counters[1].Ncorrect,'55555')
print(counters[-1].Ntotal,counters[-1].Ncorrect,'66666')
print(ln,'777777')
acc_tests = [x.reportAccuracy() for x in counters if not np.isnan(x.reportAccuracy())]
acc_test = torch.ones(1, 1) * np.mean(acc_tests)
#print(np.size(counters),'2222222')
print(f'test accuracy is {acc_test.item()}')
exit(0)
# ===================optimizer
scheduler = lambda step, initial_lr: inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=10000)
optimizer_finetune = OptimWithSheduler(
optim.SGD(feature_extractor.parameters(), lr=args.train.lr / 10.0, weight_decay=args.train.weight_decay, momentum=args.train.momentum, nesterov=True),
scheduler)
optimizer_cls = OptimWithSheduler(
optim.SGD(classifier.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay, momentum=args.train.momentum, nesterov=True),
scheduler)
optimizer_discriminator = OptimWithSheduler(
optim.SGD(discriminator.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay, momentum=args.train.momentum, nesterov=True),
scheduler)
optimizer_discriminator_separate = OptimWithSheduler(
optim.SGD(discriminator_separate.parameters(), lr=args.train.lr, weight_decay=args.train.weight_decay, momentum=args.train.momentum, nesterov=True),
scheduler)
global_step = 0
best_acc = 0
total_steps = tqdm(range(args.train.min_step),desc='global step')
epoch_id = 0
while global_step < args.train.min_step:
#id = 0
########################################################################################################
if global_step % args.test.test_interval == 0:
#print('99999999')
with open(join(log_dir, 'config.yaml'), 'w') as f:
f.write(yaml.dump(save_config))
f = open(r"txt1/1.txt", "r")
#with open("txt1/1.txt", "r") as f: #文件bai为du123.txt
#sourceInLines= f.readlines() #按行读出文件zhidao
#f.close()
new = [] #定义一个空列表,zhuan用来存储结果
for line in f.readlines():
temp1 = line.strip('\n') #去掉每行最内后的换行符'\n'
temp2 = temp1.split(',') #以','为标志,将每容行分割成列表
new.append(temp2) #将上一步得到的列表添加到new中
f.close()
#print(new)
#numbers = list(map(int, new))
#n = 0
#j = 0
new_id = []
new_label = []
for n in new:
#print(n)
new_id.append(int(n[0]))
new_label.append(int(n[1]))
#print(n)
#new_id = new_id
#new_id = np.array(new_id)
#new_label = np.array(new_label)
new_t = [[random.random() for _ in range(2)]for _ in range(np.size(new_id))]
i = 0
for i in range(np.size(new_id)):
new_t[i][0] = new_id[i]
new_t[i][1] = new_label[i]
i = i + 1
#print(new_id,'11111')
#print(new_label,'77777')
#print(new_t,'22222')
#########################################################################################################
r = 0
e = 0
iters = tqdm(zip(source_train_dl, target_train_dl), desc=f'epoch {epoch_id} ', total=min(len(source_train_dl), len(target_train_dl)))
epoch_id += 1
for i, ((im_source, label_source), (im_target, label_target)) in enumerate(iters):
#print(i, im_source, label_source, im_target, label_target,'111111111')
save_label_target = label_target # for debug usage
label_source = label_source.to(output_device)
label_target = label_target.to(output_device)
label_target = torch.zeros_like(label_target)
# =========================forward pass
im_source = im_source.to(output_device)
im_target = im_target.to(output_device)
fc1_s = feature_extractor.forward(im_source)
fc1_t = feature_extractor.forward(im_target)
Tfc1_s = fc1_s.tolist()
Tfc1_t = fc1_t.tolist()
#Tfc1_s = fc1_s.cpu().detach().numpy()
#Tfeat_0f[i][j] = Tfeat_0f[i][j].cpu().numpy()
#Tfc1_t = fc1_t.cpu().detach().numpy()
#Tfc1_s = np.array(Tfc1_s)
#Tfc1_t = np.array(Tfc1_t)
#new_id = []
#new_label = []
#print(Tfc1_s,Tfc1_t,np.size(Tfc1_s,axis=0),np.size(Tfc1_s,axis=1),np.size(Tfc1_t,axis=0),np.size(Tfc1_t,axis=1),'22222222')#################################
#Tfeat_s = []
Tfeat_s = Tfc1_s
Tfeat_t = []
Tfeat_t22 = []
Tlable = []
Tlable_t22 = label_source
#Tlable = label_source
Tlable_t22 = Tlable_t22.tolist()
#print(label_target,'99999999')
i = 0
#for id in new_id:
for i in range(np.size(new_id)):
idd = r
if e != 0:
y = idd-(32*e)
if e == 0:
y = idd
#y = idd-(32*e)
for y in range(32):
if idd == new_t[i][0]:
if e != 0:
x = idd-(32*e)
if e == 0:
x = idd
#print(Tfc1_s[x])
#Tfeat_s += list([Tfc1_s[x]])
#print(new_t[i][0],new_t[i][1],'1111')
Tfeat_t += list([Tfc1_t[x]])
#new_label
Tlable.append(new_t[i][1])
if new_t[i][1] == 2:
Tfeat_t22 += list([Tfc1_t[x]])
Tlable_t22.append(new_t[i][1])
#print(Tfeat_s,Tfeat_t,Tlable,'333333333')
#print(idd,'11111111111')
#input()
idd = idd + 1
y = y + 1
Tfeat_t = np.array(Tfeat_t)
#input()
Tfeat_s = list(Tfeat_s) + list(Tfeat_t22)
Tfeat_s = np.array(Tfeat_s)
Tlable = np.array(Tlable)
Tlable_t22 = np.array(Tlable_t22)
#print(label_target)
#print(Tfeat_s,Tlable)
#input()
#print(Tfeat_s,Tfeat_t,Tlable,'333333333')
#print(np.size(Tfeat_s,axis=1),np.size(Tfeat_t,axis=1),Tlable,'333333333')
#print(idd,'11111111111')
#input()
#print(Tfeat_s,Tfeat_t,Tlable)
#input()
if Tfeat_s != []:
Tloss_s = batch_all_triplet_loss(Tlable_t22, Tfeat_s, 0.4, False)#batch_hard_triplet_loss(Tlable, Tfeat_s, 0.3, False)#
if Tfeat_t != []:
Tloss_t = batch_all_triplet_loss(Tlable, Tfeat_t, 0.2, False)#batch_all_triplet_loss(Tlable, Tfeat_t, 0.3, False)#
if Tfeat_s == []:
Tloss_s = 0
if Tfeat_t == []:
Tloss_t = 0
Tloss = Tloss_s + Tloss_t
#print(Tloss)
#Tloss1 +=
#print(Tloss_s,Tloss_t,'888888')
#input()
fc1_s, feature_source, fc2_s, predict_prob_source = classifier.forward(fc1_s)
fc1_t, feature_target, fc2_t, predict_prob_target = classifier.forward(fc1_t)
#Tfeature_source = feature_source.cpu().detach().numpy()
#Tfeature_target = feature_target.cpu().detach().numpy()
#Tfeature_source = np.array(Tfeature_source)
#Tfeature_target = np.array(Tfeature_target)
#print(predict_prob_source,predict_prob_target,'333333333')
#print(fc1_s, fc2_s, predict_prob_source, fc1_t, fc2_t, predict_prob_target,'333333333')
#print(Tfeature_source, Tfeature_target, np.size(Tfeature_source,axis=0),np.size(Tfeature_target,axis=0),'444444444')
#for j in range(32):
#Tfeat[j] = fc1_s[j]
#r = 0
#for r in range(2048):
#Tfeat[j][r] =
#input()
domain_prob_discriminator_source = discriminator.forward(feature_source)
domain_prob_discriminator_target = discriminator.forward(feature_target)
domain_prob_discriminator_source_separate = discriminator_separate.forward(feature_source.detach())
domain_prob_discriminator_target_separate = discriminator_separate.forward(feature_target.detach())
source_share_weight = get_source_share_weight(domain_prob_discriminator_source_separate, fc2_s, domain_temperature=1.0, class_temperature=10.0)
source_share_weight = normalize_weight(source_share_weight)
target_share_weight = get_target_share_weight(domain_prob_discriminator_target_separate, fc2_t, domain_temperature=1.0, class_temperature=1.0)
target_share_weight = normalize_weight(target_share_weight)
# ==============================compute loss
adv_loss = torch.zeros(1, 1).to(output_device)
adv_loss_separate = torch.zeros(1, 1).to(output_device)
tmp = source_share_weight * nn.BCELoss(reduction='none')(domain_prob_discriminator_source, torch.ones_like(domain_prob_discriminator_source))
adv_loss += torch.mean(tmp, dim=0, keepdim=True)
#print(tmp,adv_loss)
tmp = target_share_weight * nn.BCELoss(reduction='none')(domain_prob_discriminator_target, torch.zeros_like(domain_prob_discriminator_target))
adv_loss += torch.mean(tmp, dim=0, keepdim=True)
#print(adv_loss,'111')
adv_loss_separate += nn.BCELoss()(domain_prob_discriminator_source_separate, torch.ones_like(domain_prob_discriminator_source_separate))
adv_loss_separate += nn.BCELoss()(domain_prob_discriminator_target_separate, torch.zeros_like(domain_prob_discriminator_target_separate))
#print(adv_loss_separate,'55555555')
# ============================== cross entropy loss
ce = nn.CrossEntropyLoss(reduction='none')(predict_prob_source, label_source)
#print(ce,'55555555')
ce = torch.mean(ce, dim=0, keepdim=True)
#print(ce,'eeeeeee')
with OptimizerManager(
[optimizer_finetune, optimizer_cls, optimizer_discriminator, optimizer_discriminator_separate]):
loss = ce + adv_loss + adv_loss_separate + Tloss
#print(loss,'7777777')
#input()
loss.backward()
global_step += 1
total_steps.update()
if global_step % args.log.log_interval == 0:
counter = AccuracyCounter()
counter.addOneBatch(variable_to_numpy(one_hot(label_source, len(source_classes))), variable_to_numpy(predict_prob_source))
acc_train = torch.tensor([counter.reportAccuracy()]).to(output_device)
logger.add_scalar('adv_loss', adv_loss, global_step)
logger.add_scalar('ce', ce, global_step)
logger.add_scalar('adv_loss_separate', adv_loss_separate, global_step)
logger.add_scalar('acc_train', acc_train, global_step)
if global_step % args.test.test_interval == 0:
counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
with TrainingModeManager([feature_extractor, classifier, discriminator_separate], train=False) as mgr, \
Accumulator(['feature', 'predict_prob', 'label', 'domain_prob', 'before_softmax', 'target_share_weight']) as target_accumulator, \
torch.no_grad():
txt_id = [[0 for _ in range(3)]for _ in range(1031)]
idd = [0 for _ in range(1031)]
txt_00 = [[0 for _ in range(3)]for _ in range(1031)]
txt_11 = [[0 for _ in range(3)]for _ in range(1031)]
txt_22 = [[0 for _ in range(3)]for _ in range(1031)]
for i in range(1031):
txt_id[i][0] = i
idd[i] = i
#print(txt_id[i][0],'111111')
h = 0
feat_all = []
for i, (im, label) in enumerate(tqdm(target_test_dl, desc='testing ')):
#print(i,'00000')
im = im.to(output_device)
label = label.to(output_device)
feature = feature_extractor.forward(im)
feature, __, before_softmax, predict_prob = classifier.forward(feature)
domain_prob = discriminator_separate.forward(__)
predict_prob1 = predict_prob.tolist()
#print(np.size(predict_prob1,0),'22222')
for j in range(np.size(predict_prob1,0)):
if predict_prob1[j][0] > predict_prob1[j][1]:
#print('u')
#print(predict_prob1[j][0],'333333')
txt_id[h][2] = predict_prob1[j][0]
else:
txt_id[h][2] = predict_prob1[j][1]
#print(txt_id[h],'22222')
h = h + 1
#input()
ss = feature.tolist()
feat_all = list(feat_all) + list(ss)
target_share_weight = get_target_share_weight(domain_prob, before_softmax, domain_temperature=1.0,
class_temperature=1.0)
for name in target_accumulator.names:
globals()[name] = variable_to_numpy(globals()[name])
target_accumulator.updateData(globals())
#print(txt_id,'22222')
for x in target_accumulator:
globals()[x] = target_accumulator[x]
def outlier(each_target_share_weight):
return each_target_share_weight < args.test.w_0
#################################################################################################
def calculate(list_val):
total = 0
T = 10
for ele in range(0, len(list_val)):
total = total + list_val[ele]
return T*np.log(total)
T = 10
for i in range(1031):
list_logit=[np.exp(feat_all[i])/T]
energy = [calculate(x) for x in enumerate(list_logit)]#Tfeat_2f[i]
energy = energy/np.log(80)#1031
energye = energy[0]
print(energye,'8888')
####################################################################################################################
counters = [AccuracyCounter() for x in range(len(source_classes) + 1)]
ln = 0
a = 0
b = 0
c = 0
for (each_predict_prob, each_label, each_target_share_weight, each_energy, i) in zip(predict_prob, label,
target_share_weight, energye, idd):
#print(i)
if outlier(each_target_share_weight[0]) and each_energy < -4.55:
txt_id[i][1] = 2
ln = ln + 1
else:
txt_id[i][1] = np.argmax(each_predict_prob)
#print(txt_id[i])
#input()
if txt_id[i][1] == 2:
#txt_22[c].append(txt_id[i])
txt_22[c] = txt_id[i]
c = c + 1
if txt_id[i][1] == 0:
#txt_00[a].append(txt_id[i])
txt_00[a] = txt_id[i]
a = a + 1
if txt_id[i][1] == 1:
#txt_11[b].append(txt_id[i])
txt_11[b] = txt_id[i]
b = b + 1
if each_label in source_classes:
counters[each_label].Ntotal += 1.0
each_pred_id = np.argmax(each_predict_prob)
if not outlier(each_target_share_weight[0]) and each_pred_id == each_label or each_energy >= -4.55:
counters[each_label].Ncorrect += 1.0
else:
counters[-1].Ntotal += 1.0
if outlier(each_target_share_weight[0]) and each_energy < -4.55:
counters[-1].Ncorrect += 1.0
#print(txt_00,txt_11,'wwwwwwwwwww')
#print(a,b,c,'zzzzzzzzzzz')
sorted(txt_00, key=lambda s: s[2], reverse=True)
sorted(txt_11, key=lambda s: s[2], reverse=True)
a = int(a*0.35)
b = int(b*0.35)
txt_0 = [[random.random() for _ in range(2)]for _ in range(a)]
txt_1 = [[random.random() for _ in range(2)]for _ in range(b)]
txt_2 = [[random.random() for _ in range(2)]for _ in range(c)]
txt_all = []
for i in range(a):
txt_0[i][0] = txt_00[i][0]
txt_0[i][1] = txt_00[i][1]
for i in range(b):
txt_1[i][0] = txt_11[i][0]
txt_1[i][1] = txt_11[i][1]
for i in range(c):
txt_2[i][0] = txt_22[i][0]
txt_2[i][1] = txt_22[i][1]
txt_all = list(txt_0) + list(txt_1) + list(txt_2)
#print(txt_0,'rrrrrrrr')
#print(txt_1,'qqqqqqqq')
#print(txt_2,'eeeeeeee')
print(counters[0].Ntotal,counters[0].Ncorrect,'44444')
print(counters[1].Ntotal,counters[1].Ncorrect,'55555')
print(counters[-1].Ntotal,counters[-1].Ncorrect,'66666')
print(ln,'777777')
acc_tests = [x.reportAccuracy() for x in counters if not np.isnan(x.reportAccuracy())]
acc_test = torch.ones(1, 1) * np.mean(acc_tests)
print(f'test accuracy is {acc_test.item()}')
#input()
with open("txt1/1.txt", "w") as output:
i = 0
for i in range(a+b+c):
s = str(txt_all[i]).replace('[','').replace(']','')#去除[],这两行按数据不同,可以选择
s = s.replace("'",'')+'\n' #去除单引号,逗号,每行末尾追加换行符 .replace(',','')
output.write(s)
logger.add_scalar('acc_test', acc_test, global_step)
clear_output()
data = {
"feature_extractor": feature_extractor.state_dict(),
'classifier': classifier.state_dict(),
'discriminator': discriminator.state_dict() if not isinstance(discriminator, Nonsense) else 1.0,
'discriminator_separate': discriminator_separate.state_dict(),
}
if acc_test > best_acc:
best_acc = acc_test
with open(join(log_dir, 'best.pkl'), 'wb') as f:
torch.save(data, f)
with open(join(log_dir, 'current.pkl'), 'wb') as f:
torch.save(data, f)
#input()
r = r + 32
e = e + 1
#print(r,e)
#input()
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,984
|
suwan9/UDAML
|
refs/heads/master
|
/CMUML/utils/xray_obda.py
|
import os
import random
import sys
source = sys.argv[1]
target = sys.argv[2]
p_path = os.path.join('research/masaito/Xray/source')
dir_list = os.listdir(p_path)
dir_list.sort()
source_list = dir_list[:2]
target_list = dir_list
print(source_list)
print(target_list)
path_source = "../txt/source.txt"
path_target = "../txt/target.txt"
write_source = open(path_source,"w")
write_target = open(path_target,"w")
for k, direc in enumerate(source_list):
if not '.txt' in direc:
files = os.listdir(os.path.join(p_path, direc))
for i, file in enumerate(files):
if direc in source_list:
class_name = direc
file_name = os.path.join('data/Xray/source', direc, file)
write_source.write('%s %s\n' % (file_name, source_list.index(class_name)))
else:
continue
p_path = os.path.join('research/masaito/Xray/target')
dir_list = os.listdir(p_path)
dir_list.sort()
for k, direc in enumerate(target_list):
if not '.txt' in direc:
files = os.listdir(os.path.join(p_path, direc))
for i, file in enumerate(files):
file_name = os.path.join('data/Xray/target', direc, file)
if direc in source_list:
class_name = direc
write_target.write('%s %s\n' % (file_name, source_list.index(class_name)))
elif direc in target_list:
write_target.write('%s %s\n' % (file_name, len(source_list)))
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,626,985
|
suwan9/UDAML
|
refs/heads/master
|
/CMUML/utils/utils.py
|
from models.basenet import *
import torch
def get_model_mme(net, num_class=13, unit_size=2048, temp=0.05):
model_g = ResBase(net, unit_size=unit_size)
model_c = ResClassifier_MME(num_classes=num_class, input_size=unit_size, temp=temp)
return model_g, model_c
def save_model(model_g, model_c, save_path):
save_dic = {
'g_state_dict': model_g.state_dict(),
'c_state_dict': model_c.state_dict(),
}
torch.save(save_dic, save_path)
def load_model(model_g, model_c, load_path):
checkpoint = torch.load(load_path)
model_g.load_state_dict(checkpoint['g_state_dict'])
model_c.load_state_dict(checkpoint['c_state_dict'])
return model_g, model_c
|
{"/main.py": ["/model.py", "/eval.py"], "/CMUML/main.py": ["/eval.py"], "/UDAML/main.py": ["/eval.py"]}
|
27,699,034
|
cheshire3/clic
|
HEAD
|
/clic/dickens/concordance_new.py
|
import os
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.baseObjects import Session
import json
wd = os.getcwd()
## get metadata: Information about chapters, word counts etc. from each individual book
booklist_r = open(''.join(wd + '/clic/dickens/booklist'), 'r')
booklist = json.load(booklist_r)
class Concordancer_New(object):
    """Build concordance lines for a search term via a cheshire3 session.

    Each concordance line is a five-element list:
    [left-context words], [node words], [right-context words],
    [chapter metadata], [book metadata].
    """

    def __init__(self):
        # Attach to the db_dickens database and fetch the objects used below.
        self.session = Session()
        self.session.database = 'db_dickens'
        self.serv = SimpleServer(self.session,
                                 os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
                                 )
        self.db = self.serv.get_object(self.session, self.session.database)
        self.qf = self.db.get_object(self.session, 'defaultQueryFactory')
        self.resultSetStore = self.db.get_object(self.session, 'resultSetStore')
        self.idxStore = self.db.get_object(self.session, 'indexStore')
        #self.logger = self.db.get_object(self.session, 'concordanceLogger')

    ## main concordance method
    ## create a list of lists containing each three contexts left - node - right,
    ## and a list within those contexts containing each word. Add two separate
    ## lists containing metadata information.
    def create_concordance(self, terms, idxName, Materials, selectWords):
        """Search ``terms`` in index ``idxName`` restricted to ``Materials``.

        selectWords == "whole" searches the phrase as a single node; any
        other value searches each whitespace-separated word individually.
        Returns [number_of_lines, line, line, ...] (count prepended).
        """
        ##self.logger.log(10, 'CREATING CONCORDANCE FOR RS: {0} in {1} - {2}'.format(terms, idxName, Materials))
        session = self.session
        db = self.db
        qf = self.qf
        conc_lines = []  # return concordance lines in list
        wordWindow = 10  # wordWindow is set to 10 by default - on both sides of node
        books = []
        for Material in Materials:
            MatIdx = 'book-idx'
            if Material in ['dickens', 'ntc']:
                # whole sub-corpus rather than a single book
                MatIdx_Vol = 'subCorpus-idx'
                books.append('c3.{0} = "{1}"'.format(MatIdx_Vol, Material))
            else:
                books.append('c3.{0} = "{1}"'.format(MatIdx, Material))
        ## search whole phrase or individual words?
        if selectWords == "whole":
            nodeLength = len(terms.split(' '))
            terms = [terms]
        else:
            nodeLength = 1
            terms = terms.split(' ')
        ## define search term
        term_clauses = []
        for term in terms:
            term_clauses.append('c3.{0} = "{1}"'.format(idxName, term))
        ## conduct database search
        ## note: /proxInfo needed to search individual books
        query = qf.get_query(session, ' or '.join(books) + ' and/proxInfo ' + ' or '.join(term_clauses))
        rs = db.search(session, query)
        ## get total number of hits (not yet used in interface)
        total_count = 0
        if len(rs) > 0:
            for i in rs:
                total_count = total_count + len(i.proxInfo)
        ## search through each record (chapter) and identify location of search term(s)
        if len(rs) > 0:
            count = 0  ## count hits
            for i in rs:
                ## get xml record
                rec = i.fetch_record(session)
                tree = rec.get_dom(session).getroottree()
                for m in i.proxInfo:
                    count += 1
                    if count > 1000:  ## current search limit: 1000
                        break
                    else:
                        if idxName in ['chapter-idx']:
                            w = m[0][1]
                        elif idxName in ['quote-idx', 'non-quote-idx', 'longsus-idx', 'shortsus-idx']:
                            (e_q, w_q) = (m[0][0], m[0][1])
                            ## locate search term in xml
                            search_term = tree.xpath('//*[@eid="%d"]/following::w[%d+1]' % (e_q, w_q))
                            ## get xml of sentence
                            sentence_tree = tree.xpath('//*[@eid="%d"]/following::w[%d+1]/ancestor-or-self::s' % (e_q, w_q))
                            chapter_tree = tree.xpath('//*[@eid="%d"]/following::w[%d+1]/ancestor-or-self::div' % (e_q, w_q))
                            ## counts words preceding sentence
                            prec_s_tree = chapter_tree[0].xpath('//div//s[@sid="%s"]/preceding::s/descendant::w' % sentence_tree[0].get('sid'))
                            prec_s_wcount = len(prec_s_tree)
                            ## count words within sentence
                            count_s = 0
                            for word in chapter_tree[0].xpath('//div//s[@sid="%s"]/descendant::w' % sentence_tree[0].get('sid')):
                                if not word.get('o') == search_term[0].get('o'):
                                    count_s += 1
                                else:
                                    break
                            ## word number within chapter = words in preceding sentences + offset in current sentence
                            wcount = prec_s_wcount + count_s
                            w = wcount
                        ## Define leftOnset as w - 10, then get all w and n between that and node
                        wordWindow = int(wordWindow)
                        leftOnset = max(1, w - wordWindow + 1)  ## word position, not list position (word 1 = 0 position in list)
                        nodeOnset = w + 1
                        nodeOffset = w + nodeLength
                        try:
                            rightOnset = nodeOffset + 1
                        except:
                            rightOnset = None
                        ch_words = len(tree.xpath('//div/descendant::w'))  ## move to level for each record (chapter)?
                        rightOffset = min(rightOnset + wordWindow, rightOnset + (ch_words - rightOnset) + 1)
                        left_text = []
                        for l in range(leftOnset, nodeOnset):
                            try:
                                left_n_pr = tree.xpath('//div/descendant::w[%d]/preceding-sibling::n[1]' % l)[0].text
                            except:
                                left_n_pr = ''
                            left_w = tree.xpath('//div/descendant::w[%d]' % l)[0].text
                            try:
                                left_n_fo = tree.xpath('//div/descendant::w[%d]/following-sibling::n[1]' % l)[0].text
                            except:
                                left_n_fo = ''
                            left_text.append(''.join(left_n_pr + left_w + left_n_fo))
                        node_text = []
                        for n in range(nodeOnset, rightOnset):
                            try:
                                node_n_pr = tree.xpath('//div/descendant::w[%d]/preceding-sibling::n[1]' % n)[0].text
                            except:
                                node_n_pr = ''
                            node_w = tree.xpath('//div/descendant::w[%d]' % n)[0].text
                            try:
                                node_n_fo = tree.xpath('//div/descendant::w[%d]/following-sibling::n[1]' % n)[0].text
                            except:
                                ## BUG FIX: original had a bare `node_n_fo` here, which raised
                                ## NameError on the first miss (or silently reused the previous
                                ## word's suffix). Reset to '' like the left/right branches.
                                node_n_fo = ''
                            node_text.append(''.join(node_n_pr + node_w + node_n_fo))
                        right_text = []
                        for r in range(rightOnset, rightOffset):
                            try:
                                right_n_pr = tree.xpath('//div/descendant::w[%d]/preceding-sibling::n[1]' % r)[0].text
                            except:
                                right_n_pr = ''
                            right_w = tree.xpath('//div/descendant::w[%d]' % r)[0].text
                            try:
                                right_n_fo = tree.xpath('//div/descendant::w[%d]/following-sibling::n[1]' % r)[0].text
                            except:
                                right_n_fo = ''
                            right_text.append(''.join(right_n_pr + right_w + right_n_fo))
                        ### chapter-level metadata
                        book = tree.xpath('//div')[0].get('book')
                        chapter = tree.xpath('//div')[0].get('num')
                        para_chap = tree.xpath('//div//descendant::w[%d+1]/ancestor-or-self::p' % w)[0].get('pid')
                        sent_chap = tree.xpath('//div//descendant::w[%d+1]/ancestor-or-self::s' % w)[0].get('sid')
                        word_chap = w
                        ## count paragraph, sentence and word in whole book
                        count_para = 0
                        count_sent = 0
                        count_word = 0
                        booktitle = []
                        total_word = []
                        for b in booklist:
                            if b[0][0] == book:
                                booktitle.append(b[0][1])
                                total_word.append(b[1][0][2])
                                for j, c in enumerate(b[2]):
                                    # accumulate counts for chapters preceding this one
                                    # (while + immediate break acts as a guarded `if`)
                                    while j + 1 < int(chapter):
                                        count_para = count_para + int(c[0])
                                        count_sent = count_sent + int(c[1])
                                        count_word = count_word + int(c[2])
                                        j += 1
                                        break
                                    ## total word in chapter
                                    if j + 1 == int(chapter):
                                        chapWordCount = b[2][j][2]
                        book_title = booktitle[0]  ## get book title
                        total_word = total_word[0]
                        para_book = count_para + int(para_chap)
                        sent_book = count_sent + int(sent_chap)
                        word_book = count_word + int(word_chap)
                        conc_line = [left_text, node_text, right_text,
                                     [book, book_title, chapter, para_chap, sent_chap, str(word_chap), str(chapWordCount)],
                                     [str(para_book), str(sent_book), str(word_book), str(total_word)]]
                        conc_lines.append(conc_line)
        conc_lines.insert(0, len(conc_lines))
        #conc_lines.insert(0, total_count)
        return conc_lines
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,035
|
cheshire3/clic
|
HEAD
|
/clic/dickens/old_analysisFiles/collocate.py
|
import os
import re
try:
import cPickle as Pickle
except ImportError:
import Pickle
from operator import itemgetter
from cheshire3.server import SimpleServer
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from clic.dickens.concordancer import Concordancer
cheshirePath = os.path.join('HOME', '/home/cheshire')
class Collocate(object):
    """Build and serve collocate tables for a stored concordance.

    Legacy Python 2 code (uses ``basestring`` and list-returning ``map``).
    Collocate tables are pickled into ``collocateStore`` keyed by the
    concordance result-set id.
    """

    # Class-level defaults; the real values are bound per instance in __init__.
    db = None
    serv = None
    session = None
    concStore = None
    collStore = None
    idxStore = None
    logger = None
    sortList = 0
    wordNumber = 1

    def __init__(self, session, logger):
        # Attach to db_dickens and fetch the document stores used below.
        self.session = session
        session.database = 'db_dickens'
        serv = SimpleServer(session,
        os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
        )
        self.db = serv.get_object(session, session.database)
        self.concStore = self.db.get_object(session, 'concordanceStore')
        self.collStore = self.db.get_object(session, 'collocateStore')
        self.idxStore = self.db.get_object(session, 'indexStore')
        self.logger = logger

    def save_collocates(self, collocates, id):
        # Pickle the collocate mapping and persist it under `id`.
        string = Pickle.dumps(collocates)
        doc = StringDocument(string)
        doc.id = id
        self.collStore.store_document(self.session, doc)
        self.collStore.commit_storing(self.session)
        return id

    def load_collocates(self, id):
        # Fetch and unpickle a previously saved collocate mapping.
        string = self.collStore.fetch_document(self.session, id).get_raw(self.session)
        # NOTE(review): wordWindow is parsed from the id but never used here.
        wordWindow = id[id.rfind('_') + 1:]
        collocates = Pickle.loads(string)
        #self.logger.log(collocates)
        return collocates

    def create_collocateTable(self, id, window=5):
        """Count collocates in `window` positions left/right of the node and store the table."""
        def emptyList():
            # One zero slot per window position.
            # NOTE(review): relies on Python 2 map() returning a list.
            return map(lambda x: int(x), list('0' * window))
        try:
            # If the table already exists, skip the rebuild (EAFP).
            self.collStore.fetch_document(self.session, id)
        except:
            conc = Concordancer(self.session, self.logger)
            self.logger.log('Creating collocate table')
            (conc, totalOccs, win) = conc.load_concordance(id)
            # collocates maps word id -> [leftCounts, rightCounts]
            collocates = {}
            table = []
            for line in conc:
                left = line[0]
                right = line[2]
                # Walk the left context outwards from the node (hence the reverse).
                for pos,w in enumerate(left[-window:][::-1]):
                    wordID = w[0]
                    try:
                        collocates[wordID][0][pos] += 1
                    except:
                        collocates[wordID]=[emptyList(),emptyList()]
                        collocates[wordID][0][pos] += 1
                for pos,w in enumerate(right[0:window]):
                    wordID = w[0]
                    try:
                        collocates[wordID][1][pos] += 1
                    except:
                        collocates[wordID]=[emptyList(),emptyList()]
                        collocates[wordID][1][pos] += 1
            for i in collocates.items():
                left = list(i[1][0])
                left.reverse()
                right = list(i[1][1])
                # [total, leftTotal, rightTotal, perPositionLeft, perPositionRight]
                collocates[i[0]] = [sum(i[1][0]) + sum(i[1][1]), sum(i[1][0]), sum(i[1][1]), left, right]
            self.save_collocates(collocates,id)
        return '<rsid>%s</rsid>' % id

    def get_collocateTable(self, id, sort=1, offset=0, pageSize=None):
        """Return the collocate table sorted (descending) by 1-based column `sort`.

        NOTE(review): `offset` and `pageSize` are accepted but never used.
        """
        def flatten(x):
            # Recursively flatten nested sequences; `basestring` is Python 2 only.
            result = []
            for el in x:
                if hasattr(el, "__iter__") and not isinstance(el, basestring):
                    result.extend(flatten(el))
                else:
                    result.append(el)
            return result
        try:
            # Build the table on demand if it is not stored yet.
            self.collStore.fetch_document(self.session, id)
        except:
            self.create_collocateTable(id)
        collocates = self.load_collocates(id)
        idxName = '%s-idx' % id.split('|')[0]
        if idxName in ['quote-idx', 'longsus-idx', 'shortsus-idx']:
            idxName = 'chapter-idx'
        idx=self.db.get_object(self.session, idxName)
        # flatten collocate table structure
        colls=[]
        for i in collocates.items():
            colls.append(flatten(i))
        colls = sorted(colls, key=itemgetter(sort - 1), reverse=True)
        collocateTable = []
        for l in colls:
            # (term, total, leftTotal, rightTotal, leftPositions, rightPositions)
            collocateTable.append((idx.fetch_termById(self.session, l[0]), l[1], l[2], l[3], l[4:9], l[9:]))
        return collocateTable
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,036
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/index.py
|
"""CLiC Dickens Search Application"""
from __future__ import absolute_import
import sys
from mod_python_wsgi.wrap import ModPythonWSGIApp
from clic.deploy.utils import WSGIAppArgumentParser
from clic.dickens.web.dickensHandler import handler
def main(argv=None):
    """Start up a simple app server to serve the application.

    `argv` defaults to the process command line; the module-level
    `argparser` and `application` globals are read (never rebound), so a
    single `global` declaration suffices (the original declared
    `application` twice).
    """
    global argparser, application
    # Imported lazily so merely importing this module does not require paste.
    import paste.httpserver
    from paste.urlmap import URLMap
    from paste.urlparser import make_pkg_resources
    if argv is None:
        args = argparser.parse_args()
    else:
        args = argparser.parse_args(argv)
    # Serve static package resources at the root, with the WSGI app mounted at '/'.
    urlmap = URLMap(make_pkg_resources(None, 'clic', 'www/dickens'))
    urlmap['/'] = application
    paste.httpserver.serve(urlmap,
                           host=args.hostname,
                           port=args.port,
                           )
# WSGI application wrapping the legacy mod_python handler.
application = ModPythonWSGIApp(handler)
# Set up argument parser (first line of the module docstring is the description).
argparser = WSGIAppArgumentParser(
    conflict_handler='resolve',
    description=__doc__.splitlines()[0]
)
if __name__ == '__main__':
    sys.exit(main())
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,037
|
cheshire3/clic
|
HEAD
|
/setup.py
|
"""clic setup file."""
from __future__ import with_statement
import inspect
import os
import re
# Import Setuptools
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
from clic.setuptools.commands import develop, install
name_ = 'clic'
version_ = '0.1'
description_ = "CLiC Project"
# Inspect to find current path (directory containing this setup.py)
setuppath = inspect.getfile(inspect.currentframe())
setupdir = os.path.dirname(setuppath)
# Requirements: '-e <url>#egg=<name>' lines become a dependency link plus an
# install requirement; plain lines are requirements as-is.
dependency_links_ = []
install_requires_ = []
with open(os.path.join(setupdir, 'requirements.txt'), 'r') as fh:
    for line in fh:
        if line.startswith('-e '):
            # raw string: '\s' in a plain literal is an invalid escape
            # (DeprecationWarning/SyntaxWarning on modern Python)
            dependency_links_.append(re.sub(r'^-e\s+', '', line.strip()))
            install_requires_.append(line[line.rfind('#egg=') + 5:].strip())
        else:
            install_requires_.append(line.strip())
# Description (long description comes from the README)
with open(os.path.join(setupdir, 'README.rst'), 'r') as fh:
    long_description_ = fh.read()
setup(
    name = name_,
    version = version_,
    description = description_,
    long_description=long_description_,
    packages=['clic'],
    requires=['webob'],
    install_requires=install_requires_,
    # BUG FIX: keyword was misspelled "dependeny_links", so setuptools
    # silently ignored the links parsed from requirements.txt.
    dependency_links=dependency_links_,
    author = 'Catherine Smith',
    maintainer = 'John Harrison',
    maintainer_email = u'john.harrison@liv.ac.uk',
    license = "BSD",
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: Utilities",
        # duplicate "HTTP Servers" classifier removed
        "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
    ],
    cmdclass = {
        'develop': develop,
        'install': install
    },
)
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,038
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/old_interface/dickensGetDist.py
|
#!/home/cheshire/install/bin/python -i
import time,sys,os
# set sys paths
sys.path.insert(1,'/home/cheshire/cheshire3/code')
from cheshire3.baseObjects import Session
from cheshire3.server import SimpleServer
def getDist(word, indexName):
    """Return {occurrence-count: number-of-matching-records} for `word`.

    NOTE(review): `stem` and `parse` are not defined anywhere in this
    script, and `cql`/`indexName` are never used in the actual search --
    this looks like dead legacy code and would raise NameError if called.
    """
    dist = {}
    cql = 'c3.idx-text-' + stem + ' exact ' + word
    q = parse('')
    rs = db.search(session,q)
    hits = len(rs)
    if (hits>0):
        for r in rs:
            try:
                # EAFP bump: first sighting of a count raises and seeds the key.
                dist[r.occurences]+=1
            except:
                dist[r.occurences]=1
    return dist
def printDist(dist):
    """Print "count,records,percentage" per occurrence bucket (Python 2 print)."""
    hits = sum(dist.values())
    for i in dist:
        print "%s,%s,%0.2f" % (i, dist[i], float(dist[i])/float(hits) * 100.0)
def groupDist(dist):
    """Print buckets 1, 2, 3 and a combined "4+" bucket plus totals (Python 2 print)."""
    hits = sum(dist.values())
    occs=0
    # total occurrences = sum of count * records-with-that-count
    for v in dist:
        occs += int(v) * int(dist[v])
    for i in [1,2,3]:
        # KeyError here if a bucket 1-3 is absent -- assumed present by callers.
        print "%s\t%s\t%0.2f" % (i, dist[i], float(dist[i])/float(hits) * 100.0)
    fourPlus=0
    # NOTE(review): range() excludes max(dist.keys()) itself, so the largest
    # bucket is never added to fourPlus -- looks like an off-by-one.
    for i in range(4,max(dist.keys())):
        try:
            fourPlus += dist[i]
        except:
            continue
    print "4+\t%s\t%0.2f" % (fourPlus, float(fourPlus)/float(hits) * 100.0)
    print "\n%i occurrences in %i articles" % (occs,hits)
# Module-level cheshire3 wiring against the news database used by the
# functions above (globals: session, db, idxStore, recStore).
session = Session()
serv = SimpleServer(session, "../../configs/serverConfig.xml")
db = serv.get_object(session, 'db_news')
session.database = 'db_news'
idxStore = db.get_object(session, 'indexStore')
recStore = db.get_object(session, 'recordStore')
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,039
|
cheshire3/clic
|
HEAD
|
/clic/setuptools/__init__.py
|
# Public submodules of the clic.setuptools package.
__all__ = ['commands', 'exceptions']
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,040
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/old_interface/dickensWebConfig.py
|
#
# Script: webConfig.py
# Version: 0.01
# Description:
# HTML fragments used by Cheshire3 web-search interface
#
# Language: Python
# Author(s): JH - John Harrison <john.harrison@liv.ac.uk>
# Date: 09 August 2007
#
# Copyright: © University of Liverpool 2005-2007
#
# Version History:
# 0.01 - 09/08/2007 - JH - File created to support templateHandler.py
#
# NB:
# - If you are not experienced in editing HTML you are advised not to edit any of the HTML fragments
# - Modifying placeholder words (block caps enclosed in '%' e.g. %TITLE%) WILL SERIOUSLY affect the functionality of the system.
#
# Changes to original:
# You should make a note of any changes that you make to the originally distributed file here.
# This will make it easier to remember which fields need to be modified next time you update the software.
#
#
# TODO: this will require setting specific to your configuration
databaseName = 'dickens'
# Interface specific configurables
repository_name = "Cheshire3 %s Search Interface" % (databaseName.title())
# set Cheshire3 base install path
cheshirePath = '/home/cheshire'
# Path where HTML fragments (browse.html, email.html, resolve.html, search.html)
# and template.ssi are located
htmlPath = cheshirePath + '/cheshire3/www/%s/html' % databaseName
templatePath = htmlPath + '/template.ssi'
# TODO: XPath to data to display in search result - may be a string, or a list of strings in descending order of preference
titleXPath = 'head/headline'
# Result rows
# One table row per browsable index term; the %PLACEHOLDER% tokens are
# substituted by the request handler before the fragment is served.
browse_result_row = '''
<tr class="%ROWCLASS%">
<td>
<a href="SCRIPT?operation=search&fieldidx1=%IDX%&fieldrel1=%REL%&fieldcont1=%CGITERM%" title="Find matching records">%TERM%</a>
</td>
<td class="hitcount">%COUNT%</td>
</tr>'''
# One table row per search hit, linking to the record display page.
search_result_row = '''
<tr>
<td class="hit">
<table width="100%">
<tr>
<td colspan="4">
<a href="display.html?%RSID%&hitposition=%HITPOSITION%" title="Display record summary"><strong>%TITLE%</strong></a>
</td>
</tr>
<tr>
<td width="100">
</td>
<td width="100">
</td>
<td width="100">
</td>
<td class="relv">%RELV%</td>
</tr>
</table>
</td>
</tr>'''
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,041
|
cheshire3/clic
|
HEAD
|
/clic/dickens/old_analysisFiles/concordancer.py
|
import os
import re
from lxml import etree
try:
import cPickle as Pickle
except ImportError:
import Pickle
from operator import itemgetter
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
# NOTE(review): the second argument is absolute, so os.path.join discards
# 'HOME' and returns '/home/cheshire' unchanged.
cheshirePath = os.path.join('HOME', '/home/cheshire')
# Maximum number of concordance lines stored per pickled document slice.
maxSize = 5000;
class Concordancer(object):
    """Legacy (Python 2) concordance builder/storage for db_dickens.

    Concordance lines are pickled into ``concordanceStore`` in slices of at
    most ``maxSize`` lines; slice document ids are '<rsid>_<sliceNo>'.
    Sorting/filtering use Python 2 comparator-style ``list.sort(cmpfunc)``.
    """

    # Class-level defaults; real values are bound per instance in __init__.
    db = None
    serv = None
    session = None
    concStore = None
    idxStore = None
    logger = None
    wordNumber = 1
    sortList = 0
    wn = 1

    def __init__(self, session, logger):
        self.session = session
        session.database = 'db_dickens'
        serv = SimpleServer(session,
                            os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
                            )
        self.db = serv.get_object(session, session.database)
        self.resultSetStore = self.db.get_object(session, 'resultSetStore')
        self.concStore = self.db.get_object(session, 'concordanceStore')
        self.idxStore = self.db.get_object(session, 'indexStore')
        self.matches = []
        self.logger = logger

    def filter_concordance(self, id, matchList):
        """Move lines whose record contains every term id in `matchList` to the front."""
        # self.logger.log('concordance filtering')
        matchArray = matchList.split(' ')
        # self.logger.log(matchArray)
        idx = id.split('|')[0]
        # Window/quote-style indexes are resolved against the chapter index.
        if idx == 'window' or idx == 'quote' or idx == 'non-quote' or idx == 'longsus' or idx == 'shortsus':
            index = self.db.get_object(self.session, 'chapter-idx')
        else:
            index = self.db.get_object(self.session, '%s-idx' % idx)
        try:
            rs = self.resultSetStore.fetch_resultSet(self.session, id)
        except:
            self.logger.log('no resultSet')
            pass
        else:
            for r in rs:
                prox = r.proxInfo
                for m in prox:
                    vec = self.idxStore.fetch_proxVector(self.session, index, r, m[0][0])
                    ids = [v[1] for v in vec]
                    match = True
                    for i in matchArray:
                        if not int(i) in ids:
                            match = False
                            break
                    if match is True:
                        self.logger.log([r.id, m[0][0]])
                        self.matches.append([r.id, m[0][0]])
        # self.logger.log(self.matches)
        concordance = self.load_concordance(id)[0]
        # Python 2 comparator sort: matching lines float to the front.
        concordance.sort(self.filterFunc)
        id = self.save_concordance(concordance, id, 5)
        # self.logger.log('filtering complete - %s' % id)
        return id

    def filterFunc(self, x, y):
        # cmp-style comparator: lines present in self.matches sort first.
        if [x[3][1], x[4][0][0]] in self.matches and [y[3][1], y[4][0][0]] in self.matches:
            return 0
        elif [x[3][1], x[4][0][0]] in self.matches and not [y[3][1], y[4][0][0]] in self.matches:
            return -1
        elif not [x[3][1], x[4][0][0]] in self.matches and [y[3][1], y[4][0][0]] in self.matches:
            return 1
        else:
            return 0

    def sort_concordance(self, id, side='left', wn=1):
        """Sort the stored concordance by word `wn` of the left/node/right context."""
        self.logger.log('sorting concordance')
        self.wn = int(wn)
        if side == 'left':
            self.sortList = 0
            # Python 2 integer division: position counted from the right end.
            self.wordNumber = int(wn)/-1
        elif side == 'node':
            self.sortList = 1
            self.wordNumber = int(wn)-1
        else:
            self.sortList = 2
            self.wordNumber = int(wn)-1
        self.logger.log('side and number set')
        temp = self.load_concordance(id)
        self.logger.log('concordance loaded')
        concordance = temp[0]
        totalOccs = temp[1]
        wordWindow = temp[2]
        self.logger.log('sorting length = %d wordWindow %d' % (totalOccs, wordWindow))
        if self.wordNumber > wordWindow:
            self.wordNumber = wordWindow
        concordance.sort(self.sortFunc)
        id = self.save_concordance(concordance, id, wordWindow)
        self.logger.log('sorting complete - %s' % id)
        return id

    def sortFunc(self, x, y):
        # cmp-style comparator on term ids; lines too short to have the
        # requested word sort before lines that do.
        if x[self.sortList] and y[self.sortList]:
            if len(x[self.sortList]) >= self.wn and len(y[self.sortList]) >= self.wn:
                return cmp(x[self.sortList][self.wordNumber][0], y[self.sortList][self.wordNumber][0])
            elif len(x[self.sortList]) >= self.wn and not len(y[self.sortList]) >= self.wn:
                return 1
            elif not len(x[self.sortList]) >= self.wn and len(y[self.sortList]) >= self.wn:
                return -1
            else:
                return 0
        elif x[self.sortList] and not y[self.sortList]:
            return 1
        elif not x[self.sortList] and y[self.sortList]:
            return -1
        else:
            return 0

    def create_concordance(self, id):
        """Build concordance lines for result set `id` and store them.

        `id` encodes index|type|terms|...|wordWindow|...|corpus separated
        by '|'. Returns (line_count - 1, syntax_table_html) when the result
        set exists and is non-empty.
        """
        self.logger.log(id)  ###
        self.logger.log('CREATING CONCORDANCE FOR RS: {0}'.format(id))
        syntaxRe = re.compile('[\w]* |[\w]*$|[[(][ ]?[\w]*[ ]?[])][\s$]?|{[\w\s]+}[\s$]?')
        session = self.session
        idxStore = self.idxStore
        variableArray = id.split('|')
        idx = variableArray[0]  ## sentence, quote, etc.
        type = variableArray[1]  ## any etc.
        terms = variableArray[2].replace('_', ' ')
        corpus = variableArray[6][:variableArray[6].find('.')]
        if corpus == 'A':
            prefix = '-austen'
        else:
            prefix = ''
        slots = []
        if idx == 'window':  ## ?
            idx = 'chapter'
            type = 'window'
        ## RS: chapter index is accessed later on for individual idx type
        # elif idx in ['quote', 'non-quote', 'longsus', 'shortsus']:
        #     idx = 'chapter'
        syntax = False
        ## RS: TO-DO: LOOK INTO WHAT IS DONE HERE FOR TYPE==PHRASE
        if (type == 'phrase' and (terms.find('(') > -1 or terms.find('{') > -1 or terms.find('[') > -1)):
            syntax = True
            iter = syntaxRe.finditer(terms)
            counter = 0
            for i in iter:
                if i.group() != '':
                    termSet = i.group()
                    if termSet[0] == '[' or termSet[0] == '(' or termSet[0] == '{':
                        slots.append([counter, termSet[0], {}])
                    counter += 1
        wordWindow = int(variableArray[4])
        ## RS: index read for individual idx types below
        #index = self.db.get_object(session, '%s%s-idx' % (idx, prefix))
        try:
            rs = self.resultSetStore.fetch_resultSet(session, id)
        except:
            self.logger.log('NO RS EXISTS')
        else:
            if (len(rs) > 0):
                clines = []  ###
                # for each rsItem
                for k, i in enumerate(rs):
                    rec = i.fetch_record(session)  ### RS: get record
                    tree = rec.get_dom(session).getroottree()  ### RS: get xml
                    #self.logger.log('+++++++++++++++++++++++++')
                    temp = []
                    for m in i.proxInfo:
                        if idx in ['chapter']:
                            elems = [0]  ## RS: elems start at 0 for all but sentence and paragraph
                            (e, w) = (0, m[0][1])
                        elif idx in ['quote', 'non-quote', 'longsus', 'shortsus']:
                            elems = [0]
                            (e_q, w_q) = (m[0][0], m[0][1])
                            search_term = tree.xpath('//*[@eid="%d"]/following::w[%d+1]' % (e_q, w_q))
                            sentence = tree.xpath('//*[@eid="%d"]/following::w[%d+1]/ancestor-or-self::s' % (e_q, w_q))[0]
                            chapter = tree.xpath('//*[@eid="%d"]/following::w[%d+1]/ancestor-or-self::div' % (e_q, w_q))[0]
                            c_walker = chapter.getiterator()
                            count = 0
                            for c in c_walker:
                                if c.tag == 'w' and not c.text.lower() == terms.split(' ')[0]:
                                    count += 1
                                elif c.tag == 'w' and c.text.lower() == terms.split(' ')[0]:
                                    ## verify sentence match
                                    if not sentence == chapter.xpath('//div/descendant-or-self::w[%d+1]/ancestor-or-self::s' % count)[0]:
                                        count += 1  ##?
                                        continue
                                    elif sentence == chapter.xpath('//div/descendant-or-self::w[%d+1]/ancestor-or-self::s' % count)[0]:
                                        ## verify word match
                                        if not c.get('o') == search_term[0].get('o'):
                                            count += 1  ##?
                                            continue
                                        else:
                                            break
                            w = count
                            (e, w) = (0, w)
                        ## sentences etc.
                        else:
                            temp.append(m[0][0])
                            elems = set(temp)
                            (e, w) = (m[0][0], m[0][1])
                        ## nodeLength: len(m) for all?
                        nodeLength = len(m)
                        ## get indexes
                        if idx in ['quote', 'non-quote', 'longsus', 'shortsus']:
                            index = self.db.get_object(session, 'chapter-idx')
                        else:
                            index = self.db.get_object(session, '%s%s-idx' % (idx, prefix))
                        vecs = {}
                        for el in elems:
                            vecs[el] = idxStore.fetch_proxVector(session, index, i, e)
                            v = vecs[el]
                            before = [[x[1], x[0]] for x in v[max(0, w-wordWindow):w]]  ## words before node
                            node = [[x[1], x[0]] for x in v[w: min(w+nodeLength, len(v))]]
                            after = [[x[1], x[0]] for x in v[min(w+nodeLength, len(v)):min(w+nodeLength+wordWindow, len(v))]]
                            finalOffset = 0
                            try:
                                tid = v[w+nodeLength+wordWindow]
                                finalOffset = tid[2]
                            except:
                                finalOffset = None
                            ## test if node is at the end of string (i.e. node offset corresponds with right-hand offset)
                            lastNodeOffset = v[w+nodeLength-1][2]
                            rightHandOffset = v[min(w+nodeLength, len(v)-1)][2]
                            if rightHandOffset == lastNodeOffset:
                                rightHandOffset = None
                            leftOnset = [e, v[max(0, w-wordWindow)][2]]
                            leftOffset = [e, v[w][2]]
                            rightOffset = [e, rightHandOffset]
                            finalOffset = [e, finalOffset]
                            proxOffset = [leftOnset, leftOffset, rightOffset, finalOffset]
                            loc = [i.recordStore, i.id, idx, k]
                            conc = [before, node, after, loc, proxOffset]
                            clines.append(conc)
                #self.logger.log(slots)
                if syntax:  ### RS: Not in use
                    for s in slots:
                        d = s[2]
                        for c in clines:
                            try:
                                d[c[1][s[0]][0]] += 1
                            except:
                                d[c[1][s[0]][0]] = 1
                        string = []
                        klist = d.items()
                        # Python 2: dict.items() returns a list, sortable in place.
                        klist.sort(key=itemgetter(1), reverse=True)
                        for k in klist:
                            total = k[1]
                            word = index.fetch_termById(session, k[0])
                            string.append('<tr><td>%s</td><td>%s</td></tr>' % (word, total))
                        table = '<table class="frameTable">%s</table>' % ''.join(string)
                else:
                    table = ''
                #self.logger.log(clines)
                self.save_concordance(clines, id, wordWindow)
                return (len(clines)-1, table)  # add slots

    # [[words#, wordOffsets][words#, wordOffsets][words#, wordOffsets][recordStore, recId, index][[elem#, charOff],...x4]]
    def save_concordance(self, clines, id, wordWindow):
        """Pickle `clines` into the concordance store in slices of `maxSize`.

        A [totalLines, wordWindow] header is prepended to every slice.
        """
        global maxSize
        # self.logger.log('saving concordance - %d' % len(clines))
        if len(clines) > maxSize:
            i = 1
            for j in range(0, len(clines), maxSize):
                slice = clines[j:j+maxSize]
                slice.insert(0, [len(clines), wordWindow])
                string = Pickle.dumps(slice)
                doc = StringDocument(string)
                doc.id = '%s_%d' % (id, i)
                i += 1
                self.concStore.store_document(self.session, doc)
        else:
            clines.insert(0, [len(clines), wordWindow])
            string = Pickle.dumps(clines)
            doc = StringDocument(string)
            doc.id = '%s_1' % id
            self.concStore.store_document(self.session, doc)
        self.concStore.commit_storing(self.session)
        return id

    def load_concordance(self, id, offset=0, pageSize=None):
        """Load [concordance, totalOccs, wordWindow] for `id`, optionally paged."""
        global maxSize
        self.logger.log('loading concordance with id %s ' % id)
        wordWindow = None
        totalOccs = None
        if pageSize is None and offset == 0:
            # load every slice belonging to this id, in slice order
            list = []
            for c in self.concStore:
                if c.id[:c.id.rfind('_')] == id:
                    list.append(c.id)
            list.sort(lambda x, y: cmp(x[x.rfind('_')+1:], y[y.rfind('_')+1:]))
            concordance = []
            for i in list:
                string = self.concStore.fetch_document(self.session, i).get_raw(self.session)
                temp = Pickle.loads(string)
                for x, j in enumerate(temp):
                    if x == 0:
                        # header element: [totalOccs, wordWindow]
                        wordWindow = j[1]
                        totalOccs = j[0]
                    else:
                        concordance.append(j)
        else:
            slice = (offset/maxSize) + 1
            # only one slice needed
            if (offset + pageSize) - (maxSize * slice) < maxSize:
                string = self.concStore.fetch_document(self.session, '%s_%d' % (id, slice)).get_raw(self.session)
                clines = Pickle.loads(string)
                if wordWindow is None:
                    wordWindow = clines[0][1]
                if totalOccs is None:
                    # BUG FIX: original assigned wordWindow here, leaving
                    # totalOccs None (and clobbering the window just read).
                    totalOccs = clines[0][0]
                concordance = clines[(offset-((slice-1)*maxSize))+1:(offset-((slice-1)*maxSize)+pageSize)+1]
            else:
                startSlice = slice
                # page spans multiple slices: keep reading until enough lines
                conc = []
                while len(conc) < offset + pageSize:
                    string = self.concStore.fetch_document(self.session, '%s_%d' % (id, slice)).get_raw(self.session)
                    temp = Pickle.loads(string)
                    for x, j in enumerate(temp):
                        if x == 0:
                            wordWindow = j[1]
                            totalOccs = j[0]
                        else:
                            conc.append(j)
                    if len(conc) == totalOccs:
                        break
                    slice += 1
                concordance = conc[offset - ((startSlice - 1) * maxSize):offset - ((startSlice - 1) * maxSize) + pageSize]
        return [concordance, totalOccs, wordWindow]
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,042
|
cheshire3/clic
|
HEAD
|
/clic/dickens/create_bookcountsjson.py
|
## TO CREATE bookcounts.json
import os
import re
from lxml import etree
import json
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.baseObjects import Session
### read info from booklist: cumulative word count in chapters
# booklist = open('/home/aezros/workspace/testClic/staticFiles_test/booklist2')
# booklist = json.load(booklist)
# for b1 in booklist:
#
###
# Wire up a cheshire3 session against db_dickens.
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session,
                    os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
                    )
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
# Short book codes as indexed in the database.
list_books = ['BH', 'BR', 'DC', 'DS', 'ED', 'GE', 'HT', 'LD', 'MC', 'NN',
              'OCS', 'OMF', 'OT', 'PP', 'TTC',
              'AgnesG', 'Antoni', 'arma', 'cran', 'Deronda', 'dracula', 'emma', 'frank', 'jane', 'Jude',
              'LadyAud', 'mary', 'NorthS', 'persuasion', 'pride', 'sybil', 'Tess', 'basker', 'Pomp', 'mill',
              'dorian', 'Prof', 'native', 'alli', 'Jekyll', 'wwhite', 'vanity', 'VivianG', 'wh'
              ]
# Book code -> display title.
# BUG FIX: the Little Dorrit key was lowercase 'ld' while list_books uses
# 'LD', so the title lookup below always came up empty for that book.
titles = {'BH': 'Bleak House', 'BR': 'Barnaby Rudge', 'DC': 'David Copperfield', 'DS': 'Dombey and Son',
          'ED': 'The Mystery of Edwin Drood', 'GE': 'Great Expectations', 'HT': 'Hard Times', 'LD': 'Little Dorrit',
          'MC': 'Martin Chuzzlewit', 'NN': 'Nicholas Nickleby', 'OCS': 'The Old Curiosity Shop', 'OMF': 'Our Mutual Friend',
          'OT': 'Oliver Twist', 'PP': 'Pickwick Papers', 'TTC': 'A Tale of Two Cities',
          'AgnesG': 'Agnes Grey', 'Antoni': 'Antonina, or the Fall of Rome', 'arma': 'Armadale', 'cran': 'Cranford',
          'Deronda': 'Daniel Deronda', 'dracula': 'Dracula', 'emma': 'Emma', 'frank': 'Frankenstein', 'jane': 'Jane Eyre',
          'Jude': 'Jude the Obscure', 'LadyAud': 'Lady Audley\'s Secret', 'mary': 'Mary Barton', 'NorthS': 'North and South',
          'persuasion': 'Persuasion', 'pride': 'Pride and Prejudice', 'sybil': 'Sybil, or the two nations',
          'Tess': 'Tess of the D\'Urbervilles', 'basker': 'The Hound of the Baskervilles', 'Pomp': 'The Last Days of Pompeii',
          'mill': 'The Mill on the Floss', 'dorian': 'The Picture of Dorian Gray', 'Prof': 'The Professor',
          'native': 'The Return of the Native', 'alli': 'The Small House at Allington',
          'Jekyll': 'The Strange Case of Dr Jekyll and Mr Hide', 'wwhite': 'The Woman in White',
          'vanity': 'Vanity Fair', 'VivianG': 'Vivian Grey', 'wh': 'Wuthering Heights'
          }
list_all_books = []
#list_all_books.insert(0, 'dickens')
within_book = []
for b in list_books:
    # Word totals (all / quote / non-quote) from the index facets.
    query = qf.get_query(session, 'c3.book-idx = "%s"' % b)
    results = db.search(session, query)
    sent_idx = db.get_object(session, 'sentence-idx')
    quote_idx = db.get_object(session, 'quote-idx')
    nonquote_idx = db.get_object(session, 'non-quote-idx')
    sent_facets = sent_idx.facets(session, results)
    all_words = 0
    for x in sent_facets:
        all_words += x[1][2]
    quote_facets = quote_idx.facets(session, results)
    quote_words = 0
    for x in quote_facets:
        quote_words += x[1][2]
    nonquote_facets = nonquote_idx.facets(session, results)
    nonquote_words = 0
    for x in nonquote_facets:
        nonquote_words += x[1][2]
    ### Cumulative word count at the start of each chapter.
    query = qf.get_query(session, 'c3.book-idx = "{0}"'.format(b))
    results = db.search(session, query)
    wordTotal = 0
    wordCumulative = []
    for i, r in enumerate(results):
        rec = r.fetch_record(session)
        tree = rec.get_dom(session).getroottree()
        wordInChap = len(tree.xpath('//div/descendant::w'))
        wordStartChap = wordTotal
        wordTotal = wordStartChap + wordInChap
        wordCumulative.append(wordStartChap)
    ## find title (Python 2 iteritems; empty string if the code is unknown)
    book_title = ""
    for t in titles.iteritems():
        if b == t[0]:
            book_title = t[1]
            break
    within_book.append([b, book_title, [all_words, quote_words, (all_words - quote_words)], wordCumulative])
    #break
list_all_books.append(within_book)
#print json.dumps(list_all_books)
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,043
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/old_interface/dickensSearchHandler.py
|
import cgitb
import os
import re
import smtplib
import sys
import string
import time
import traceback
import urllib
# import mod_python stuffs
from mod_python import apache, Cookie
from mod_python.util import FieldStorage
# Cheshire3 database served by this handler.
databaseName = 'dickens'
# Home directory of the cheshire user; root for the clic/www paths below.
cheshirePath = os.environ.get('HOME', '/home/cheshire')
# list of subcorpora indexes
subcorpora = ['quote', 'non-quote', 'longsus', 'shortsus']
from xml.sax.saxutils import escape
from lxml import etree
# import Cheshire3/PyZ3950 stuff
import cheshire3.exceptions
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.record import LxmlRecord
from cheshire3.server import SimpleServer
from cheshire3.utils import flattenTexts
from cheshire3.web.www_utils import *
from clic.dickens.concordancer import *
from clic.dickens.collocate import *
class SearchHandler(object):
    """mod_python request handler for the CLIC Dickens search interface.

    Each public method services one value of the ``mode`` form field,
    dispatched from :meth:`handle`.
    """

    # Locations of the static HTML fragments and exported text files.
    htmlPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'html')
    txtStorePath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'txt')
    # Per-request logger, assigned in __init__.
    logger = None
    redirected = False

    def __init__(self, lgr):
        # Keep the request logger and (re)build the global Cheshire3
        # object architecture (session, db, stores, transformers).
        self.logger = lgr
        build_architecture()
    def send_html(self, data, req, code=200):
        """Write *data* to the request as text/html, UTF-8 encoding unicode.

        NOTE(review): content_length is computed before encoding, so it may
        undercount multi-byte output -- confirm whether mod_python corrects it.
        """
        req.content_type = 'text/html'
        req.content_length = len(data)
        if (type(data) == unicode):  # Python 2 text type
            data = data.encode('utf-8')
        req.write(data)
        req.flush()
    def send_xml(self, data, req, code=200):
        """Write *data* to the request as text/xml, UTF-8 encoding unicode.

        NOTE(review): content_length is computed before encoding, same
        caveat as send_html.
        """
        req.content_type = 'text/xml'
        req.content_length = len(data)
        if (type(data) == unicode):  # Python 2 text type
            data = data.encode('utf-8')
        req.write(data)
        req.flush()
    def send_txt(self, data, req, code=200):
        """Send *data* as an application/msword download (KWIC export)."""
        req.content_type = 'application/msword'
        req.content_length = len(data)
        # Explicit header flush before the body (unlike the html/xml senders).
        req.send_http_header()
        req.write(data)
        req.flush()
def sort(self, form):
id = form.get('id', None)
side = form.get('side', 'left')
wordNumber = form.get('wordNumber', 1)
concordancer = Concordancer(session, self.logger)
id = concordancer.sort_concordance(id, side, wordNumber)
return '<rsid>%s</rsid>' % id
def filter(self, form):
id = form.get('id', None)
matchList = form.get('matchlist', None)
concordancer = Concordancer(session, self.logger)
id = concordancer.filter_concordance(id, matchList)
return '<rsid>%s</rsid>' % id
    def search(self, req):
        """Handle mode=search.

        Three entry styles share this method:
        - ``id`` supplied: page an existing result set via kwicDisplay;
        - ``gid`` supplied: same, using the raw result-set id;
        - otherwise: build (or fetch the cached) result set for the submitted
          terms and return its id and hit counts as a <results> XML fragment.
        """
        global db, idxStore, resultSetStore
        self.logger.log('search called')
        start = time.time()
        form = FieldStorage(req)
        #self.logger.log(form) # RS: <mod_python.util.FieldStorage object at 0x2e35a50>
        type_ = form.get('type', None)
        #self.logger.log(form.get('type')) ## search mode: 'any' etc. (to print leave out None)
        terms = form.get('terms', None) ## search term
        book = form.get('book', 'all') ## book id
        csCheckbox = form.get('caseSensitive', None)
        caseSensitive = csCheckbox and "s" or "i" ## sets case sensitive to s (sensitive) or i (insensitive)
        id_ = form.get('id', None) ## search id (e.g. quote|any|superlative|0|10|i|D.all|)
        span = int(form.get('span', 0)) ## starts at 0?
        #wordWindow = int(form.get('windowsize', 10)) ## We want to increase this
        wordWindow = int(form.get('windowsize', 20)) ###
        gid = form.get('gid', None) ## c3.quote-idx any/proxinfo "superlative" and/proxinfo c3.book-idx = BH
        if id_:
            # remove the 'kwic_grid_' that comes from LiveGrid id
            self.logger.log('ID SUPPLIED DISPLAYING LINES')
            id_ = id_[10:]
            start = int(form.get('offset', 0)) ## ?
            howmany = int(form.get('page_size', 100))## ?
            return self.kwicDisplay(id_, start, howmany)
        elif (gid != None):
            start = int(form.get('offset', 0))
            howmany = int(form.get('page_size', 100))
            return self.kwicDisplay(gid, start, howmany)
        else:
            if (terms == None):
                self.logger.log('no terms')
            ## RS: return search id as context (e.g. 'quote'), search type (e.g. 'any'), search term (removing funny symbols),
            ## span (start at 0), windowsize (10 - why?), case sensitivity and book id.
            id_ = '%s|%s|%s|%d|%d|%s|%s|' % (form.get('context', None), type_, multiReplace(terms, {'"' : '*', ' ' : '_', '<' : '(', '>' : ')'}), span, wordWindow, caseSensitive, book)
            try:
                rs = resultSetStore.fetch_resultSet(session, id_) ## search query using cheshire method
            except cheshire3.exceptions.ObjectDoesNotExistException:
                # Not cached: build and run the query, then persist the result
                # set under the composite id so repeat searches are free.
                if type_ == 'CQL':
                    queryString = terms
                else:
                    (queryString, idx) = self.build_query(id_) ## ALTERNATIVE QUERY SEARCH METHOD?
                query = qf.get_query(session, queryString)
                (mins, secs) = divmod(time.time() - start, 60)
                self.logger.log('%s\nquery parsed: %s' % (queryString, secs)) ## print queryString (e.g. c3.quote-idx any/proxinfo "shocking") and time it takes
                rs = db.search(session, query)
                (mins, secs) = divmod(time.time() - start, 60)
                self.logger.log('db searched: %s' % secs)
                # Save ResultSet ## RS: Don't have to ask cheshire twice for same search term
                resultSetStore.begin_storing(session)
                rs.id = id_
                resultSetStore.store_resultSet(session, rs)
                resultSetStore.commit_storing(session)
            try:
                totalOccs = rs.totalOccs
            except:
                totalOccs = 'unavailable'
            if totalOccs == 0:
                totalOccs = 'unavailable'
            (mins, secs) = divmod(time.time() - start, 60)
            (hours, mins) = divmod(mins, 60)
            self.logger.log('search complete: %d:%d:%d' % (hours, mins, secs))
            output = '<results><rsid>%s</rsid><totalDocs>%i</totalDocs><totalOccs>%s</totalOccs></results>' % (id_, len(rs), str(totalOccs))
            return output
    def build_query(self, id):
        """Translate a pipe-delimited search id
        (context|type|terms|span|window|case|book|) into a CQL query string.

        Returns ``[queryString, idx]``.  NOTE(review): ``idx`` (and, for an
        unrecognised search type, ``queryString``) is only bound inside the
        if/elif chains below -- an unknown context or type would raise
        NameError at the return; confirm the callers guarantee valid values.
        """
        global syntaxRe
        # self.logger.log('building query')
        start = time.time()
        idArray = id.split('|')
        context = idArray[0]
        type = idArray[1]
        terms = idArray[2].replace('_', ' ')
        span = idArray[3]
        caseSensitive = idArray[5]
        book = idArray[6]
        # Book field looks like 'D.BH' or 'A.all'; corpus 'A' selects the
        # Austen variants of the indexes.
        corpus = book[:book.find('.')]
        book = book[book.find('.')+1:]
        if corpus == 'A':
            prefix = '-austen'
        else:
            prefix = ''
        # Map the search context onto the Cheshire3 index name.
        if (context == 'chapter'):
            idx = 'c3.chapter%s-idx' % prefix
        elif (context == 'paragraph'):
            idx = 'c3.paragraph%s-idx' % prefix
        elif (context == 'sentence'):
            idx = 'c3.sentence%s-idx' % prefix
        elif (context in subcorpora):
            idx = 'c3.%s%s-idx' % (context, prefix)
        elif (context == 'window'):
            idx = 'c3.chapter%s-idx' % prefix
        # check to see if case sensitivity has been specified
        if caseSensitive == 's':
            idx += '-case'
        if context == 'window':
            queryString = '%s window/proxinfo/distance<%s "%s"' % (idx, span, terms)
        elif (type == 'all'):
            # Plain term(s) with none of the {phrase}/[stem]/(pos) syntax.
            if (terms.find('{') == -1 and
                terms.find('[') == -1 and
                terms.find('(') == -1 and
                len(terms.split(' ')) == 1
                ):
                #queryString = '%s all/proxinfo "%s"' % (idx, terms)
                termArray = terms.split(' ')
                queryList = []
                for t in termArray:
                    queryList.append('%s all/proxinfo "%s"' % (idx, t))
                queryString = ' prox/distance>0/unit=word '.join(queryList)
            else:
                # Extended syntax: [stem], (pos tag), {phrase} or bare word.
                iter = syntaxRe.finditer(terms)
                queryList = []
                for i in iter:
                    if i.group() != '':
                        termSet = i.group()
                        if termSet[0] == '[' :
                            queryList.append('%s-stem any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(']')]))
                        elif termSet[0] == '(':
                            queryList.append('%s-pos any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(')')]))
                        elif termSet[0] == '{':
                            queryList.append('%s any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind('}')]))
                        else:
                            queryList.append('%s any/proxinfo "%s"' % (idx, termSet))
                queryString = ' prox/distance=0/unit=element '.join(queryList)
        else:
            if (type == 'phrase'):
                if (terms.find('{') == -1 and
                    terms.find('[') == -1 and
                    terms.find('(') == -1
                    ):
                    queryString = '%s =/proxinfo "%s"' % (idx, terms)
                else:
                    # Extended syntax, joined as an ordered adjacent phrase.
                    iter = syntaxRe.finditer(terms)
                    queryList = []
                    for i in iter:
                        if i.group() != '':
                            termSet = i.group()
                            if termSet[0] == '[' :
                                queryList.append('%s-stem any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(']')]))
                            elif termSet[0] == '(' :
                                queryList.append('%s-pos any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(')')]))
                            elif termSet[0] == '{' :
                                queryList.append('%s any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind('}')]))
                            else :
                                queryList.append('%s any/proxinfo "%s"' % (idx, termSet))
                    queryString = ' prox/distance=1/unit=word/ordered '.join(queryList)
            elif (type == 'any'):
                if terms.find('{') == -1 and terms.find('[') == -1 and terms.find('(') == -1 :
                    queryString = '%s any/proxinfo "%s"' % (idx, terms)
                else :
                    # Extended syntax, joined as a disjunction.
                    iter = syntaxRe.finditer(terms)
                    queryList = []
                    for i in iter:
                        if i.group() != '':
                            termSet = i.group()
                            if termSet[0] == '[' :
                                queryList.append('%s-stem any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(']')]))
                            elif termSet[0] == '(' :
                                queryList.append('%s-pos any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind(')')]))
                            elif termSet[0] == '{' :
                                queryList.append('%s any/proxinfo "%s"' % (idx, termSet[1:termSet.rfind('}')]))
                            else :
                                queryList.append('%s any/proxinfo "%s"' % (idx, termSet))
                    queryString = ' or '.join(queryList)
        # Restrict to one book, or to the novel/other groupings.
        if book != 'all':
            if book in ['novel', 'other']:
                queryString = '%s and/proxinfo c3.novel-idx = %s' % (queryString, book)
            else:
                queryString = '%s and/proxinfo c3.book-idx = %s' % (queryString, book)
        # TODO: this doesn't need to return idx any more
        return [queryString, idx]
    def kwicDisplay(self, id, start=0, nRecs=100):
        """Render a page of KWIC (keyword-in-context) lines for result set
        *id* as a LiveGrid <ajax-response> document.

        NOTE(review): this method appears to contain two interleaved versions
        of the same display loop (an index-based one and an enumerate-based
        one) plus a duplicated offset-collection loop -- likely a refactor
        leftover; confirm intended structure before changing anything.
        """
        self.logger.log('displaying kwiclines')
        result = []
        extraSpaceElems = ['s']
        # Characters stripped from words before they are passed to getCFP().
        replhash = {'.': '',
                    '"': '',
                    ',': '',
                    "'": '',
                    "&": ''
                    }
        count = start
        concordancer = Concordancer(session, self.logger)
        #temp = concordancer.load_concordance(id, start, nRecs)
        concordance, totalOccs, wordWindow = concordancer.load_concordance(id, start, nRecs)
        # concordance = temp[0]
        # totalOccs = temp[1]
        # wordWindow = temp[2]
        ## RS - NEW: Call concordance.sort_concordance() ?
        #this isn't needed now - change to just display whatever it gets concordance does sorting out what
        # NOTE(review): this first loop only computes per-row locals and then
        # discards them; the enumerate() loop below repeats the same work.
        for i in range(0, len(concordance)): ## len = 4
            count += 1;
            recStore = concordance[i][3][0] ## name of record store query is found in (recordStore)
            recid = concordance[i][3][1] ## which number of record query is found in
            context = concordance[i][3][2] ## chapter, sentence. Never quotes, non-quotes etc. as these are currently sub-corpora
            rsPage = concordance[i][3][3] ## query order? (if three matches - 0,1,2)
        # This isn't needed now - change to just display whatever it gets
        # Concordance does sorting out what
        for i, row in enumerate(concordance):
            count += 1;
            recStore = row[3][0]
            recid = row[3][1]
            context = row[3][2]
            rsPage = row[3][3]
            recordStore = db.get_object(session, recStore)
            rec = recordStore.fetch_record(session, recid) ## get record
            nodeIdxs = []
            wordOffsets = []
            # NOTE(review): concordance[i][4] and row[4] are the same list,
            # so the two loops below append every (node, offset) pair twice;
            # only indexes 0-3 are read later, which masks the duplication.
            for x in concordance[i][4]:
                nodeIdxs.append(x[0]) ## NOTE: returns 0 if in sub-corpora (e.g. node is chapter)
                wordOffsets.append(x[1]) ## identifies search word? SEEMS TO FIND THE WRONG ONE FOR SUB-CORPORA
            for x in row[4]:
                nodeIdxs.append(x[0])
                wordOffsets.append(x[1])
            #self.logger.log(nodeIdxs)
            #self.logger.log(wordOffsets)
            # Get the paragraph/sentence/article with eid that matches
            tree = rec.get_dom(session).getroottree()
            self.logger.log('++++++++++++++++++++++++++++++++++++++++++++++++++++++++ %s' % context)
            if context in ['chapter', 'quote', 'non-quote', 'longsus', 'shortsus'] :
                #if context in ['chapter']:
                node = tree.xpath('//div[@type="chapter"]')[0] ## gets the whole chapter in chapter and sub-corpora contexts
            elif context == 'HISC' : ## ?
                node = tree.xpath('//body/headline')[0]
            else:
                node = tree.xpath('//*[@eid=%s]' % nodeIdxs[0])[0] ## gets target sentence in sentence context
            #self.logger.log(etree.tostring(node[0]))
            # Flatten the node's <txt> descendants into one plain-text string.
            walker = node.getiterator()
            texts = []
            for c in walker:
                if c.tag == 'txt': ## includes chapter
                    if c.text:
                        texts.append(c.text)
                    if c.tail:
                        texts.append(c.tail)
                elif c.tag in extraSpaceElems:
                    texts.append(' ')
                else:
                    continue
            text = ''.join(texts).lstrip()
            #self.logger.log('text: %s' % text) ## LOGGING TEXT CONTENT (NB: whole chapter if subcorpus)
            # Walk each offset back over any immediately preceding punctuation.
            for j in range(0, len(wordOffsets)):
                space = False
                while not space and wordOffsets[j] > 0 :
                    if text[wordOffsets[j]-1] in string.punctuation :
                        wordOffsets[j] = wordOffsets[j]-1
                    else :
                        space = True
            #self.logger.log(text[wordOffsets[0]:wordOffsets[3]]) ### print whole concordance list (len = 4)
            # Left context: offsets[0]..offsets[1]; each word wrapped in a
            # clickable span for the corpus-frequency-profile popup.
            if wordOffsets[1] > wordOffsets[0]:
                left = text[wordOffsets[0]:wordOffsets[1]]
                newleft = []
                #left = left[::-1] ## RS: THIS WILL REVERSE THE WORD DIRECTION
                for w in left.split(' '):
                    if subcorpora != None:
                        #newleft.append("<span onclick=\"getCFP)'%s'(\">‮%s‬ </span>" % (multiReplace(w, replhash), w))
                        ## RS: IS THE BELOW ALL I NEED?
                        newleft.append("<span onclick=\"getCFP)('%s')\">%s</span>" % (multiReplace(w, replhash), w))
                    else:
                        newleft.append("<span>‮%s‬ </span>" % (w))
                left = ' '.join(newleft)
                #check this works for []
                # Swap bracket directions (RTL display); the doubled forms act
                # as temporaries so the two passes don't collide.
                left = multiReplace(left, {']' : ']]', ')' : '))', '}':'}}', '& ' : 'amp; '})
                left = multiReplace(left, {'[' : ']', '(' : ')', '{' : '}'})
                left = multiReplace(left, {']]' : '[', '))' : '(', '}}' : '{'})
            else:
                left = ''
            # Right context: offsets[2]..offsets[3]; keyword: offsets[1]..[2].
            if wordOffsets[2] > wordOffsets[1]:
                right = text[wordOffsets[2]:wordOffsets[3]]
                newright = []
                for w in right.split(' '):
                    if subcorpora != None:
                        newright.append("<span onclick=\"getCFP('%s')\">%s</span>" % (multiReplace(w, replhash), w))
                    else:
                        newright.append("<span>%s</span>" % (w))
                right = ' '.join(newright)
                key = text[wordOffsets[1]:wordOffsets[2]]
            else:
                right = ''
                key = text[wordOffsets[1]:]
            result.extend([
                '<tr>',
                '<td><a href="javascript:displayArticle(\'%s\', %d, %d, \'%s\')">%d</a></td>' % (id, rsPage, nodeIdxs[0], '_'.join([str(x[1]) for x in concordance[i][1]]), count),
                '<td>',
                left,
                '</td><td>',
                key,
                '</td><td>',
                right,
                '</td></tr>'
            ])
            # keyTagged = (left + '‬ </td><td> ' + key + ' </td><td> ' + right)
            # result.append('<tr><td><a href="/dickens/search?operation=search&mode=article&parent=%d&elem=%d&os1=%d&os2=%d" target="_article">%d</a></td><td> ‮ %s</td></tr>' % (recid, nodeIdxs[0], max(wordOffsets[1], -1), max(wordOffsets[2], -1), count, keyTagged))
        ### RS Sort KWIC here?
        resultString = '<ajax-response><response type="object" id="%s"><rows update_ui="true">%s</rows></response></ajax-response>' % (id, ' '.join(result))
        # NOTE(review): the substitution below replaces bare ampersands with
        # '&' itself (a no-op); the intent was presumably '&amp;' -- confirm
        # against the deployed version before fixing.
        regex = re.compile('&(?!\w+;)')
        resultString = re.sub(regex, '&', resultString)
        return resultString
def concordance(self, form):
id_ = form.get('id', None)
concordance = Concordancer(session, self.logger)
(lines, table) = concordance.create_concordance(id_)
if table == '':
return '<xml><lines>%s</lines></xml>' % lines
else :
return '<xml><lines>%s</lines>%s</xml>' % (lines, table)
def collocates(self, form):
id_ = form.get('id', None)
gid = form.get('gid', None)
start = int(form.get('offset', 0))
howmany = int(form.get('page_size', 50))
# sort params from LiveGrid come in the form
# 's1: ASC' or 's5: ASC' - with num = to column in table
sort = re.compile('^s\d+')
sortList = filter(lambda x: sort.match(x), form.keys())
sortby = int(len(sortList) > 0 and sortList[0][1:] or 3)
sortdir = form.get('sort_dir', 'desc')
if id_ is not None:
self.logger.log('IN COLLOCATES Function %s' % id_)
# remove the 'collocate_grid_' that comes from LiveGrid id
id_ = id_[15:]
coll = Collocate(session, self.logger)
table = coll.get_collocateTable(id_, sortby)
return self.collocatesDisplay(id_,
table,
start,
howmany,
sortby
)
elif gid is not None:
self.logger.log('IN COLLOCATES Function %s' % gid)
coll = Collocate(session, self.logger)
collocateId = coll.create_collocateTable(gid)
return collocateId
else:
return '<error>No concordance object with id: %s</error>' % id
def collocatesDisplay(self, id_, collocates,
start=0, numRows=10, sortby=1):
lines = []
for n in range(start, min(len(collocates), start + numRows)):
l = collocates[n]
left = "<td>%s</td>" % ('</td><td>'.join(map(lambda x: str(x), l[4])))
right = "<td>%s</td>" % ('</td><td>'.join(map(lambda x: str(x), l[5])))
lines.append('<tr><td>%i</td><td>%s</td><td>%i</td><td>%i</td><td>%i</td>%s%s</tr>' % (n+1,l[0],l[1],l[2],l[3],left,right))
resultString = '<ajax-response><response type="object" id="%s"><rows update_ui="true">%s</rows></response></ajax-response>' % (id_, ' '.join(lines))
return resultString
    def create_cfp(self, form):
        """Build the corpus-frequency-profile table for one term: its
        relative frequency (per 10,000 words) in every subcorpus index."""
        term = string.lower(form.get('term', None).value)
        q = qf.get_query(session, 'c3.sentence-idx any "%s"' % term)
        output = []
        for i in subcorpora :
            output.append('<tr>')
            output.append('<td>%s</td>' % i)
            idx = db.get_object(session, '%s-idx' % i)
            # Total number of word occurrences in this subcorpus.
            total = float(idx.fetch_metadata(session)['nOccs'])
            # Scan for an exact match of the term in the index.
            entry = idx.scan(session, q, 1, '=')
            if entry[0][0] == term:
                # Occurrences per 10,000 words, rounded to 2 d.p.
                perc = round(float(entry[0][1][2]*10000.00)/total, 2)
                output.append('<td>%0.2f</td>' % perc)
            else:
                output.append('<td>0</td>')
            output.append('</tr>')
        return '<table><th>subcorpus</th><th>total occs</th>%s</table>' % ''.join(output)
# def articleDisplay(self, req):
# form = FieldStorage(req)
# parent = form.get('parent', None)
# elem = form.get('elem','')
# os1 = form.get('os1','')
# os2 = form.get('os2','')
# highlight = etree.fromstring("<highlight><elem>%s</elem><os1>%s</os1><os2>%s</os2></highlight>" % (elem,os1,os2))
# rec = recordStore.fetch_record(session, parent)
# et = etree.fromstring(rec.get_xml(session))
# et.append(highlight)
# rec = LxmlRecord(et)
# doc = articleTransformer.process_record(session, rec)
# output = doc.get_raw(session)
# return '<html><head></head><body><p>%s</p></body></html>' % output
    def articleDisplay(self, form):
        """Render one chapter/article from a stored result set as HTML,
        marking the matched element (eid) and each matched word."""
        self.logger.log('ARTICLE DISPLAY REQUESTED')
        page = int(form.get('page', 1))-1
        id = form.get('id', None)
        elem = form.get('elem', '')
        words = form.get('words', '').split('_')
        # Context is the first field of the pipe-delimited search id.
        context = form.get('id', '').split('|')[0]
        rs = resultSetStore.fetch_resultSet(session, id)
        rec = rs[page].fetch_record(session)
        #rec = recordStore.fetch_record(session, parent)
        tree = rec.get_dom(session).getroottree()
        # Pick the XPath template used to locate matched <w> elements.
        if context in ['chapter',
                       'window',
                       'longsus',
                       'shortsus']:
            baseXPath = '//div[@type="chapter"]/descendant::w[WOFFSET]'
        elif context in ['quote',
                         'non-quote',]:
            # NOTE(review): EIDVALUE is never substituted on this branch, so
            # the xpath below would be invalid for quote contexts -- confirm
            # whether this path is reachable from the UI.
            baseXPath = '//*[@eid=EIDVALUE]/following::w[WOFFSET]'
        elif context == 'HISC':
            baseXPath = '/article/body/headline/descendant::w[WOFFSET]'
            el = tree.xpath('/article/body/headline')[0]
            el.set('highlight', 'true')
        else:
            baseXPath = '//*[@eid=%s]/descendant::w[WOFFSET]' % elem
            el = tree.xpath('//*[@eid=%s]' % elem)[0]
            el.set('highlight', 'true')
        # Mark every matched word; XPath positions are 1-based.
        for w in words:
            word = tree.xpath(baseXPath.replace('WOFFSET', str(int(w)+1)))[0]
            word.set('inv', 'node')
        return '<html><head></head><body><p>%s</p></body></html>' % articleTransformer.process_record(session, rec).get_raw(session)
def articleBrowse(self, form):
self.logger.log('ARTICLE BROWSE REQUESTED')
id = form.get('id', None)
context = id.split('|')[0]
type=id.split('|')[1]
if type == 'CQL':
type = 'any'
page = int(form.get('page', 1))-1
rs = resultSetStore.fetch_resultSet(session, id)
proxInfo = rs[page].proxInfo
rec = rs[page].fetch_record(session)
tree = rec.get_dom(session).getroottree()
if context in ['chapter',
'window',
'longsus',
'shortsus']:
baseXPath = '//div[@type="chapter"]/descendant::w[WOFFSET]'
elif context in ['quote',
'non-quote',]:
baseXPath = '//*[@eid=EIDVALUE]/following::w[WOFFSET]'
elif context == 'HISC':
baseXPath = '//headline/descendant::w[WOFFSET]'
else:
baseXPath = '//*[@eid=EIDVALUE]/descendant::w[WOFFSET]'
self.logger.log(rs[page].proxInfo)
for m in rs[page].proxInfo:
if (type == 'phrase' or
type == 'any' and
not context in ['window',
'quote',
'non-quote',
'longsus',
'shortsus']
):
for p in m:
xp = multiReplace(baseXPath,
{'EIDVALUE': p[0],
'WOFFSET': p[1] + 1
}
)
word = tree.xpath(xp)[0]
word.set('inv', 'node')
elif (type == 'all' or
context in ['window',
'quote',
'non-quote',
'longsus',
'shortsus']
):
xp = multiReplace(baseXPath,
{'EIDVALUE': m[0][0],
'WOFFSET': m[0][1] + 1
}
)
word = tree.xpath(xp)[0]
word.set('inv', 'node')
for i in range(1, len(m)):
xp = multiReplace(baseXPath,
{'EIDVALUE': m[i][0],
'WOFFSET': m[i][1] + 1
}
)
word = tree.xpath()[0]
word.set('inv', 'other')
doc = articleTransformer.process_record(session, rec)
return ('<html><head></head><body><p>{0}</p></body></html>'
''.format(doc.get_raw(session))
)
    def arm(self, form):
        """Build (or reuse) the association-rule-mining vector document for a
        result set and return its id.

        NOTE(review): the name ``arm`` inside the body resolves to the
        module-level ARMVectorPreParser global (set in build_architecture),
        not to this method; those get_object calls are currently commented
        out there, so this handler would raise NameError -- confirm.
        """
        self.logger.log('Build ARM')
        id = form.get('id', 'test')
        vecTxr.vectorIndex = db.get_object(session, '%s-idx' % id.split('|')[0])
        self.logger.log(vecTxr.vectorIndex)
        # Strip the match term itself unless this is a multi-word 'any' search.
        if id.split('|')[1] == 'any' and id.find('_') != -1 :
            vecTxr.stripMatch = 0
        else:
            vecTxr.stripMatch = 1
        try:
            # Reuse a previously built vector document if one exists.
            doc2 = vectorStore.fetch_document(session, id)
        except:
            # Otherwise run the query and build the vectors from its results.
            (qs, idx) = self.build_query(id)
            q = qf.get_query(session, qs)
            rs = db.search(session, q)
            for rsi in rs:
                adf.load(session, rsi, cache=0, format='vectorTransformer')
            for doc in adf:
                doc2 = arm.process_document(session, doc)
            doc2.id = id
            vectorStore.store_document(session, doc2)
            vectorStore.commit_storing(session)
        return '<rsid>%s</rsid>' % id
    def exportkwic(self, form, start=0):
        """Export the full concordance for a result set as tab-separated
        plain text (left context \\t keyword \\t right context), one line per
        hit, truncating contexts to 40 characters each side."""
        self.logger.log('exporting kwiclines')
        id = form.get('rsid', None)
        result = []
        extraSpaceElems = ['s']
        # Retained for parity with kwicDisplay; unused in the export path.
        replhash = { '.' : ''
                   , '"' : ''
                   , ',' : ''
                   , "'" : ''
                   , "&" : ''
                   }
        count = start;
        concordancer = Concordancer(session, self.logger)
        temp = concordancer.load_concordance(id)
        concordance = temp[0]
        totalOccs = temp[1]
        wordWindow = temp[2]
        # This isn't needed now - change to just display whatever it gets
        # concordance does sorting out what
        for i in range(0, len(concordance)):
            count += 1;
            recStore = concordance[i][3][0]
            recid = concordance[i][3][1]
            context = concordance[i][3][2]
            rsPage = concordance[i][3][3]
            recordStore = db.get_object(session, recStore)
            rec = recordStore.fetch_record(session, recid)
            nodeIdxs = []
            wordOffsets = []
            for x in concordance[i][4]:
                nodeIdxs.append(x[0])
                wordOffsets.append(x[1])
            # self.logger.log(nodeIdxs)
            # self.logger.log(wordOffsets)
            #get the paragraph/sentence/article with eid that matches
            tree = rec.get_dom(session).getroottree()
            # self.logger.log('++++++++++++++++++++++++++++++++++++++++++++++++++++++++ %s' % context)
            if context in ['chapter', 'quote', 'non-quote', 'longsus', 'shortsus'] :
                node = tree.xpath('//div[@type="chapter"]')[0]
            elif context == 'HISC' :
                node = tree.xpath('//body/headline')[0]
            else :
                node = tree.xpath('//*[@eid=%s]' % nodeIdxs[0])[0]
            # Flatten the node's <txt> descendants into one plain-text string.
            walker = node.getiterator()
            texts = []
            for c in walker:
                if c.tag == 'txt':
                    if c.text:
                        texts.append(c.text)
                    if c.tail:
                        texts.append(c.tail)
                elif c.tag in extraSpaceElems :
                    texts.append(' ')
                else:
                    continue
            text = ''.join(texts).lstrip()
            # Walk each offset back over immediately preceding punctuation.
            for j in range(0, len(wordOffsets)) :
                space = False
                while not space and wordOffsets[j] > 0 :
                    if text[wordOffsets[j]-1] in string.punctuation :
                        wordOffsets[j] = wordOffsets[j]-1
                    else :
                        space = True
            # Left context (offsets[0]..[1]), truncated to 40 characters.
            if wordOffsets[1] > wordOffsets[0]:
                left = text[wordOffsets[0]:wordOffsets[1]]
                left = left[-40:]
            else :
                left = ''
            # Right context (offsets[2]..[3]) and keyword (offsets[1]..[2]).
            if wordOffsets[2] > wordOffsets[1]:
                right = text[wordOffsets[2]:wordOffsets[3]]
                right = right[:40]
                key = text[wordOffsets[1]:wordOffsets[2]]
            else:
                right = ''
                key = text[wordOffsets[1]:]
            keyTagged = (left + '\t' + key + '\t' + right)
            # result.append('<tr><td><a href="/dickens/search?operation=search&mode=article&parent=%d&elem=%d&os1=%d&os2=%d" target="_article">%d</a></td><td> %s</td></tr>' % (recid, nodeIdxs[0], max(wordOffsets[1], -1), max(wordOffsets[2], -1), count, keyTagged))
            result.append(keyTagged)
            # keyTagged = (left + '‬ </td><td> ' + key + ' </td><td> ' + right)
            # result.append('<tr><td><a href="/dickens/search?operation=search&mode=article&parent=%d&elem=%d&os1=%d&os2=%d" target="_article">%d</a></td><td> ‮ %s</td></tr>' % (recid, nodeIdxs[0], max(wordOffsets[1], -1), max(wordOffsets[2], -1), count, keyTagged))
        resultString = '\n'.join(result)
        # regex = re.compile('&(?!\w+;)')
        # resultString = re.sub(regex, '&', resultString)
        # self.logger.log(resultString)
        return resultString
    def armTable(self, form):
        """Build (or reuse) the ARM vector document for a result set, run the
        frequent-itemset / rule preparsers over it, and return the rendered
        table with %%ID%% placeholders substituted.

        NOTE(review): the except and else branches duplicate the fimi2/rule/
        table-rendering steps verbatim -- a candidate for extraction once the
        commented-out preparser globals are restored in build_architecture.
        """
        # global vectorStore, arm, fimi2, rule
        id = form.get('id', 'test')
        rule.index = db.get_object(session, '%s-idx' % id.split('|')[0])
        try :
            # Reuse a previously built vector document if one exists.
            doc2 = vectorStore.fetch_document(session, id)
        except :
            # Cache miss: run the query, build and store the vector document.
            (qs, idx) = self.build_query(id)
            q = qf.get_query(session, qs)
            rs = db.search(session, q)
            # rs = resultSetStore.fetch_resultSet(session, id)
            for rsi in rs:
                adf.load(session, rsi, cache=0, format='vectorTransformer')
            for doc in adf:
                doc2 = arm.process_document(session, doc)
            self.logger.log('ARM process complete')
            doc2.id = id
            vectorStore.store_document(session, doc2)
            vectorStore.commit_storing(session)
            # Mine frequent itemsets and rules (best effort: failures ignored).
            try :
                doc2 = fimi2.process_document(session, doc2)
            except :
                pass
            try:
                doc2 = rule.process_document(session, doc2)
            except:
                pass
            (fis, rules) = doc2.get_raw(session)
            output = []
            count = 0
            for f in fis:
                output.append(f.toXml())
            output = '<fis>%s</fis>' % ' '.join(output)
            rec = LxmlRecord(etree.fromstring(output))
            doc = armTableTxr.process_record(session, rec)
        else:
            # Cache hit: same mining + rendering pipeline on the stored doc.
            try:
                doc2 = fimi2.process_document(session, doc2)
            except:
                pass
            try:
                doc2 = rule.process_document(session, doc2)
            except:
                pass
            (fis, rules) = doc2.get_raw(session)
            output = []
            count = 0
            for f in fis:
                output.append(f.toXml())
            output = '<fis>%s</fis>' % ' '.join(output)
            rec = LxmlRecord(etree.fromstring(output))
            doc = armTableTxr.process_record(session, rec)
        return '<rsid>%s</rsid>' % doc.get_raw(session).replace('%%ID%%', id)
def handle(self, req):
form = FieldStorage(req)
mode = form.get('mode', None)
if (mode == 'search'):
page = self.search(req)
self.send_xml(page,req)
elif (mode=='collocates'):
page = self.collocates(form)
self.send_xml(page,req)
elif (mode=='exportkwic'):
page = self.exportkwic(form)
self.send_txt(page, req)
return
elif (mode == 'concordance'):
page = self.concordance(form)
self.send_xml(page, req)
elif (mode=='arm'):
page = self.arm(form)
self.send_xml(page,req)
elif (mode=='armtable'):
page = self.armTable(form)
self.send_xml(page,req)
elif (mode=='article'):
page = self.articleDisplay(form)
self.send_html(page, req)
elif (mode=='browse'):
page = self.articleBrowse(form)
self.send_html(page, req)
elif (mode=='sort'):
page = self.sort(form)
self.send_xml(page, req)
elif (mode=='filter'):
page = self.filter(form)
self.send_xml(page, req)
elif (mode=='cfp'):
page = self.create_cfp(form)
self.send_xml(page, req)
else :
page = read_file('search.html')
self.send_html(page, req)
# send the display
def build_architecture(data=None):
    """(Re)create the global Cheshire3 session and database objects used by
    every request; called from SearchHandler.__init__.

    *data* is unused here.  NOTE(review): several names in the ``global``
    statement (adf, fimi2, rule, arm, vecTxr, vectorStore, armTableTxr) are
    never assigned because their get_object calls are commented out below;
    the arm/armTable handlers will raise NameError until they are restored.
    """
    global session, serv, db, qf, xmlp, recordStore, resultSetStore, idxStore, articleTransformer, kwicTransformer, proxExtractor, simpleExtractor, adf, fimi2, rule, arm, vecTxr, vectorStore, armTableTxr
    session = Session()
    session.environment = 'apache'
    session.user = None
    serv = SimpleServer(session,
                        os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
                        )
    session.database = 'db_' + databaseName
    db = serv.get_object(session, session.database)
    qf = db.get_object(session, 'defaultQueryFactory')
    xmlp = db.get_object(session, 'LxmlParser')
    recordStore = db.get_object(session, 'recordStore')
    resultSetStore = db.get_object(session, 'resultSetStore')
    simpleExtractor = db.get_object(session, 'SimpleExtractor')
    proxExtractor = db.get_object(session, 'ProxExtractor')
    articleTransformer = db.get_object(session, 'article-Txr')
    kwicTransformer = db.get_object(session, 'kwic-Txr')
    idxStore = db.get_object(session, 'indexStore')
    #adf = db.get_object(session, 'accDocFac')
    #fimi2 = db.get_object(session, 'MagicFimiPreParser')
    #rule = db.get_object(session, 'RulePreParser')
    #arm = db.get_object(session, 'ARMVectorPreParser')
    #vecTxr = db.get_object(session, 'Vector1Txr')
    #vectorStore = db.get_object(session, 'vectorStore')
    #armTableTxr = db.get_object(session, 'armTable-Txr')
# Some stuff to do on initialisation
#rebuild = True
#serv = None
#session = None
#db = None
#xmlp = None
#recordStore = None
#sentenceStore = None
#paragraphStore = None
#resultSetStore = None
#articleTransformer = None
#kwicTransformer = None
#
# Matches punctuation to strip from tokens (standalone symbols, or -.,'
# adjacent to whitespace).
punctuationRe = re.compile('([@+=;!?:*"{}()\[\]\~/\\|\#\&\^]|[-.,\'](?=\s+)|(?<=\s)[-.,\'])') # this busts when there are accented chars
# One token: optional leading whitespace plus a run of non-whitespace.
wordRe = re.compile('\s*\S+')
# Tokenise a search-term string into words, [stems], (pos tags) and {phrases}.
syntaxRe = re.compile('[\w]* |[\w]*$|[[(][ ]?[\w]*[ ]?[])][\s$]?|{[\w\s]+}[\s$]?')
#
#cheshirePath = '/home/cheshire/cheshire3'
#logPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'logs', 'searchHandler.log')
#htmlPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'html')
# Discover objects...
#def handler(req):
# global db, htmlPath, logPath, cheshirePath, xmlp, recordStore
# try:
# try:
# fp = recordStore.get_path(session, 'databasePath')
# assert (rebuild)
# assert (os.path.exists(fp) and time.time() - os.start(fp).st_mtime > 60*60)
# except :
# build_architecture()
#
# remote_host = req.get_remote_host(apache.REMOTE_NOLOOKUP) # get the remote host's IP for logging
# os.chdir(htmlPath) # cd to where html fragments are
# lgr = FileLogger(logPath, remote_host) # initialise logger object
# searchHandler = SearchHandler(lgr) # initialise handler - with logger for this request
# try:
# searchHandler.handle(req) # handle request
# finally:
# # clean-up
# try: lgr.flush() # flush all logged strings to disk
# except: pass
# del lgr, searchHandler # delete handler to ensure no state info is retained
# except:
# req.content_type = "text/html"
# cgitb.Hook(file = req).handle() # give error info
# else:
# return apache.OK
#- end handler()
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,044
|
cheshire3/clic
|
HEAD
|
/clic/dickens/keywords.py
|
import os
from math import log1p
import operator
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.baseObjects import Session
# Home directory of the cheshire user.
# FIX: os.path.join('HOME', '/home/cheshire') always evaluated to
# '/home/cheshire' because joining onto an absolute path discards earlier
# components; the sibling search handler reads the HOME environment
# variable, so do the same here for consistency.
cheshirePath = os.environ.get('HOME', '/home/cheshire')
class Keywords(object):
    """Generate keyword lists for a test corpus against a reference corpus.

    Keywords are ranked by log-likelihood (LL), following Paul Rayson's
    LL wizard (http://ucrel.lancs.ac.uk/llwizard.html).  Searching and
    counting are delegated to a Cheshire3 'db_dickens' database.
    """

    # Significance cut-offs for the log-likelihood statistic, keyed by
    # p-value.  Each entry is (cutoff, strict): strict entries compare with
    # 'LL > cutoff', the others with 'LL >= cutoff' (preserving the mixed
    # comparison operators of the original, heavily-duplicated threshold
    # ladder this table replaces).
    _LL_CUTOFFS = {
        0.000001: (23.93, False),
        0.0000001: (28.38, False),
        0.00000001: (32.85, False),
        0.000000001: (37.33, False),
        0.0000000001: (41.83, False),
        0.00000000001: (46.33, False),
        0.00001: (19.52, True),
        0.0001: (15.14, True),
        0.001: (10.83, True),
        0.01: (6.64, True),
        0.05: (3.85, True),
        0.1: (2.71, True),
    }

    def __init__(self):
        # Build the Cheshire3 session and fetch the objects used for querying.
        self.session = Session()
        self.session.database = 'db_dickens'
        self.serv = SimpleServer(
            self.session,
            os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
        )
        self.db = self.serv.get_object(self.session, self.session.database)
        self.qf = self.db.get_object(self.session, 'defaultQueryFactory')
        self.resultSetStore = self.db.get_object(self.session, 'resultSetStore')
        self.idxStore = self.db.get_object(self.session, 'indexStore')
        self.logger = self.db.get_object(self.session, 'keywordLogger')

    def _term_counts(self, idxName, materials):
        """Return {term: occurrence count} for `materials` under `idxName`.

        Material names 'dickens'/'ntc' select whole sub-corpora; any other
        name is treated as an individual book identifier.
        """
        session = self.session
        clauses = []
        for material in materials:
            if material in ['dickens', 'ntc']:
                matIdx = 'subCorpus-idx'
            else:
                matIdx = 'book-idx'
            clauses.append('c3.{0} = "{1}"'.format(matIdx, material))
        query = self.qf.get_query(session, ' or '.join(clauses))
        results = self.db.search(session, query)
        idx = self.db.get_object(session, idxName)
        facets = idx.facets(session, results)
        # Each facet is (term, tuple); position [1][2] is the occurrence count.
        return {facet[0]: facet[1][2] for facet in facets}

    def list_keywords(self, testIdxName, testMaterials, refIdxName, refMaterials, pValue):
        """Return up to 4999 keyword rows, sorted by descending log-likelihood.

        Each row is ['', term, testFreq, testProp%, refFreq, refProp%, LL, pValue].
        Only terms occurring more than once in the test corpus, and whose LL
        clears the cut-off for `pValue`, are returned.  An unrecognised
        p-value yields an empty list (matching the original behaviour).
        """
        test_dict = self._term_counts(testIdxName, testMaterials)
        ref_dict = self._term_counts(refIdxName, refMaterials)
        # Total token counts, used for the expected-value calculations.
        testLength = sum(test_dict.values())
        refLength = sum(ref_dict.values())
        cutoff = self._LL_CUTOFFS.get(float(pValue))
        kw_list = []
        for term, freqTest in test_dict.items():
            if freqTest <= 1:
                continue
            try:
                # Groups treated as mutually exclusive.  NOTE: when comparing
                # quotes with whole text the occurrences will overlap.
                freqRef = float(ref_dict[term])
            except KeyError:
                # Term absent from the reference corpus: substitute the
                # smallest positive float so the maths below stays finite.
                freqRef = 5.0e-324
            else:
                if freqRef <= 0:
                    freqRef = 5.0e-324
            # Expected occurrences under the null hypothesis (Rayson):
            # corpus size * combined frequency / combined corpus size.
            expectedRef = refLength * (freqTest + freqRef) / (testLength + refLength)
            expectedTest = testLength * (freqTest + freqRef) / (testLength + refLength)
            # NOTE(review): log1p(x) computes ln(1 + x), whereas Rayson's
            # formula calls for ln(x); kept as-is so results stay identical
            # to the original implementation -- confirm before "fixing".
            testPart = freqTest * log1p(freqTest / expectedTest)
            refPart = freqRef * log1p(freqRef / expectedRef)
            # Sign marks over- (+) vs under-representation (-) in the test
            # corpus, exactly as the original +2/-2 branches did.
            sign = 2 if testPart >= refPart else -2
            try:
                LL = '%.2f' % (sign * (testPart + refPart))
            except Exception:
                LL = 909090  # sentinel preserved from the original code
            # Displayed reference frequency: 0 when the term was absent.
            if freqRef == 5.0e-324:
                freqRef2 = 0
            else:
                freqRef2 = int('%.0f' % freqRef)
            # Proportions per 100 tokens, rounded via 2-decimal strings.
            propTest = (float('%.2f' % freqTest) / testLength) * 100
            propRef = (float('%.2f' % freqRef) / refLength) * 100
            if cutoff is not None:
                threshold, strict = cutoff
                ll = float(LL)
                if (ll > threshold) if strict else (ll >= threshold):
                    kw_list.append(['', term, str(freqTest), '%.2f' % propTest,
                                    str(freqRef2), '%.2f' % propRef, ll, pValue])
        # Sort by LL (column 6), descending.
        kw_list.sort(key=operator.itemgetter(6), reverse=True)
        return kw_list[0:4999]
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,045
|
cheshire3/clic
|
HEAD
|
/clic/__init__.py
|
"""Corpus Linguistics in Cheshire3 (CLiC)."""
__name__ = "clic"
__package__ = "clic"
__all__ = ['deploy', 'dickens', 'setuptools', 'stats']
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,046
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/old_interface/dickensHandler.py
|
import cgitb
import os
import re
import smtplib
import sys
import time
import traceback
import sys
import urllib
# Import mod_python stuffs
from mod_python import apache, Cookie
from mod_python.util import FieldStorage
from crypt import crypt
# import Cheshire3/PyZ3950 stuff
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
import cheshire3.exceptions
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
# C3 web search utils
from cheshire3.web.www_utils import *
# separate file containing display configs + some HMTL for table rows etc.
from clic.dickens.web.dickensWebConfig import *
from clic.dickens.web.dickensSearchHandler import SearchHandler
from clic.dickens.web.dickensBrowseHandler import BrowseHandler
# Module-level paths and Cheshire3 objects shared by every request.
# `databaseName` comes from the star import of dickensWebConfig above.
cheshirePath = os.environ.get('HOME', '/home/cheshire')
logPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'logs', 'searchHandler.log')
htmlPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'html')
session = Session()
session.environment = 'apache'
session.user = None
serv = SimpleServer(session, os.path.join(cheshire3Root, 'configs', 'serverConfig.xml'))
session.database = 'db_dickens'
db = serv.get_object(session, session.database)
# Store used by the (commented-out) basic-auth handler below.
authStore = db.get_object(session, 'authStore')
# Discover objects...
def handler(req):
    """mod_python entry point for the old CLiC interface.

    Serves static assets directly, otherwise dispatches to SearchHandler or
    BrowseHandler based on the 'operation' form field, falling back to the
    main interface page.  Returns an apache status code.
    """
    global db, htmlPath, logPath, cheshirePath, xmlp, recordStore
    form = FieldStorage(req)
    try:
        dir = req.uri[1:].rsplit('/')[1]
    except IndexError:
        return apache.HTTP_NOT_FOUND
    # FIX: the original created `remote_host` and a FileLogger here and then
    # created both again inside the else-branch below; the first pair was
    # never used (and the first logger never flushed), so it has been removed.
    if dir in ['css', 'js', 'img', 'images']:
        # Static assets are served straight from disk.
        req.sendfile(os.path.join(cheshirePath, 'clic', 'www' + req.uri))
        return apache.OK
    else:
        try:
            remote_host = req.get_remote_host(apache.REMOTE_NOLOOKUP)  # client IP for logging
            os.chdir(htmlPath)  # cd to where html fragments are
            lgr = FileLogger(logPath, remote_host)
            # Determine whether to use a sub-handler
            operation = form.get('operation', None)
            if operation == 'search':
                sub_handler = SearchHandler(lgr)
            elif operation == 'browse':
                sub_handler = BrowseHandler(lgr)
            else:
                # No recognised operation: serve the interface page.
                req.content_type = "text/html"
                page = read_file('dickensInterface.html')
                req.write(page)
                return apache.OK
            # Handle request
            try:
                sub_handler.handle(req)
            finally:
                # Clean-up: flush all logged strings to disk (best-effort).
                try:
                    lgr.flush()
                except Exception:
                    pass
                # Delete handler to ensure no state info is retained
                del lgr, sub_handler
        except Exception:
            # Render a traceback page instead of a bare 500.
            req.content_type = "text/html"
            cgitb.Hook(file=req).handle()  # give error info
            return apache.HTTP_INTERNAL_SERVER_ERROR
        else:
            return apache.OK
#def authenhandler(req):
# global session, authStore
# # build the architecture
# pw = req.get_basic_auth_pw()
# un = req.user
# try: session.user = authStore.fetch_object(session, un)
# except: return apache.HTTP_UNAUTHORIZED
# if (session.user and session.user.password == crypt(pw, pw[:2])):
# return apache.OK
# else:
# return apache.HTTP_UNAUTHORIZED
# #- end authenhandler()
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,047
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/flask/api.py
|
from __future__ import absolute_import ## help python find modules within clic package (see John H email 09.04.2014)
from flask import Flask
import json
# The Flask application serving the CLiC JSON API.
app = Flask(__name__,static_url_path='')
## Use beaker to save search (cache). See documentation on http://beaker.readthedocs.org/en/latest/caching.html
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
from clic.dickens.keywords import Keywords
from clic.dickens.clusters import Clusters
from clic.dickens.concordance_new import Concordancer_New
from flask import request
from flask import render_template
# File-backed beaker cache; note the @cache.cache decorators further down
# are currently commented out, so this cache is configured but unused.
cache_opts = {
    'cache.type': 'file',
    'cache.data_dir': '/tmp/cache/data',
    'cache.lock_dir': '/tmp/cache/lock'
}
cache = CacheManager(**parse_cache_config_options(cache_opts))
@app.route('/keywords/', methods=['GET'])
def keywords():
    """Return the keyword list for the requested corpora as JSON."""
    # Search specification arrives as URL query parameters.
    query_args = request.args
    result = fetchKeywords(query_args)  # {'keywords': [...]}
    return json.dumps(result)
## ajax route: not in use here
# @app.route('/ajax-keywords',methods=['GET'])
# def ajax_keyords():
# args = request.args
# #return json.dumps(fetchKeywords(args))
@app.route('/clusters/', methods=['GET'])
def clusters():
    """Return the cluster list for the requested corpora as JSON."""
    return json.dumps(fetchClusters(request.args))
@app.route('/concordances/',methods=['GET'])
def concordances():
    """Return concordance lines for the requested query as JSON."""
    return json.dumps(fetchConcordance(request.args))
#@cache.cache('keywords', expire=3600) ## expires after 3600 secs
def fetchKeywords(args):
    """Run a keyword search and wrap the result for JSON serialisation."""
    searcher = Keywords()
    params = processArgs(args, 'keywords')
    rows = searcher.list_keywords(params[0], params[1], params[2], params[3], params[4])
    return {'keywords': rows}
#@cache.cache('clusters', expire=3600)
def fetchClusters(args):
    """Run a cluster search and wrap the result for JSON serialisation."""
    searcher = Clusters()
    params = processArgs(args, 'clusters')
    return {'clusters': searcher.list_clusters(params[0], params[1])}
#@cache.cache('concordances', expire=3600)
def fetchConcordance(args):
    """Run a concordance search and wrap the result for JSON serialisation."""
    searcher = Concordancer_New()
    params = processArgs(args, 'concordances')
    lines = searcher.create_concordance(params[0], params[1], params[2], params[3])
    return {'concordances': lines}
def processArgs(args, method):
    """Translate request arguments into the positional list each searcher expects.

    `args` is a multi-dict (e.g. werkzeug MultiDict): plain indexing gives a
    single value, .getlist() gives all values for a repeated key.  `method`
    selects the layout: 'clusters' -> [idxName, collections];
    'keywords' -> [testIdx, testColls, refIdx, refColls, pValue];
    'concordances' -> [terms, idxName, collections, selectWords].
    """
    def _idx_name(mod_key, group):
        # 'chapter' means "no modifier": the index name is just the group.
        mod = str(args[mod_key])
        if mod == 'chapter':
            return "{0}".format(group)
        return "{0}-{1}".format(mod, group)

    methodArgs = []
    if method == 'clusters':
        group = str(args['testIdxGroup'])
        methodArgs.append(_idx_name("testIdxMod", group))
        methodArgs.append(args.getlist('testCollection'))
    if method == 'keywords':
        group = str(args['testIdxGroup'])
        methodArgs.append(_idx_name("testIdxMod", group))
        methodArgs.append(args.getlist('testCollection'))   # test corpus
        # NB: the reference index re-uses the *test* group, as the original did.
        methodArgs.append(_idx_name("refIdxMod", group))
        methodArgs.append(args.getlist('refCollection'))    # ref corpus
        methodArgs.append(str(args['pValue']))
    elif method == 'concordances':
        methodArgs.append(str(args['terms']))
        methodArgs.append(str(args["testIdxMod"]) + '-idx')
        methodArgs.append(args.getlist('testCollection'))
        methodArgs.append(str(args['selectWords']))
    return methodArgs
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,048
|
cheshire3/clic
|
HEAD
|
/clic/dickens/clusters.py
|
import os
import operator
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.baseObjects import Session
# Home directory of the cheshire user.
# BUG FIX: `os.path.join('HOME', '/home/cheshire')` always evaluated to
# '/home/cheshire' (an absolute second component discards the first), so the
# HOME environment variable was never consulted.  Sibling modules use
# os.environ.get, which is clearly what was intended.
cheshirePath = os.environ.get('HOME', '/home/cheshire')
class Clusters(object):
    """List word/cluster frequencies for a selection of corpus materials.

    Searching and counting are delegated to a Cheshire3 'db_dickens'
    database; results are sorted by raw frequency, descending.
    """

    def __init__(self):
        # Build the Cheshire3 session and fetch the objects used for querying.
        self.session = Session()
        self.session.database = 'db_dickens'
        self.serv = SimpleServer(
            self.session,
            os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
        )
        self.db = self.serv.get_object(self.session, self.session.database)
        self.qf = self.db.get_object(self.session, 'defaultQueryFactory')
        self.resultSetStore = self.db.get_object(self.session, 'resultSetStore')
        self.idxStore = self.db.get_object(self.session, 'indexStore')
        self.logger = self.db.get_object(self.session, 'clusterLogger')  ## added to dbs/dickens/config.xml

    def list_clusters(self, idxName, Materials):
        """Return [['', term, freq, prop], ...] rows for terms with freq >= 2.

        Material names 'dickens'/'ntc' select whole sub-corpora; any other
        name is treated as an individual book identifier.  At most 4999
        rows are returned.
        """
        session = self.session
        db = self.db
        clauses = []
        for material in Materials:
            if material in ['dickens', 'ntc']:
                matIdx = 'subCorpus-idx'
            else:
                matIdx = 'book-idx'
            clauses.append('c3.{0} = "{1}"'.format(matIdx, material))
        query = self.qf.get_query(session, ' or '.join(clauses))
        results = db.search(session, query)
        idx = db.get_object(session, idxName)
        facets = idx.facets(session, results)
        # FIX: renamed from `dict` (shadowed the builtin) and replaced the
        # Python-2-only iteritems() with items().
        term_counts = {facet[0]: facet[1][2] for facet in facets}
        total_terms = float(len(term_counts))
        cluster_list = []
        for term, freq in term_counts.items():
            if freq >= 2:
                # NOTE(review): the proportion divides by the number of
                # *distinct* terms, not the total occurrence count --
                # preserved as-is; confirm this is the intended statistic.
                prop = (float(freq) / total_terms) * 100
                cluster_list.append(['', term, freq, str(prop)[:5]])  ## add empty array node at beginning (see Pete email 23.04.14)
        cluster_list.sort(key=operator.itemgetter(2), reverse=True)
        if len(cluster_list) <= 5000:
            return cluster_list
        return cluster_list[0:4999]
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,049
|
cheshire3/clic
|
HEAD
|
/clic/dickens/__init__.py
|
# Public submodules of clic.dickens.
__all__ = ['collocate', 'concordancer', 'normalizer']
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,050
|
cheshire3/clic
|
HEAD
|
/clic/deploy/__init__.py
|
"""CLiC Deployment."""
__all__ = ['utils']
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,051
|
cheshire3/clic
|
HEAD
|
/clic/setuptools/commands.py
|
"""Setuptools command sub-classes."""
from __future__ import with_statement, absolute_import
import inspect
import os
import re
from os.path import abspath, dirname, exists, expanduser, join
from setuptools import Command
from setuptools.command import develop as _develop
from setuptools.command import install as _install
from cheshire3.exceptions import ConfigFileException
from cheshire3.internal import cheshire3Home, cheshire3Root
from cheshire3.server import SimpleServer
from cheshire3.session import Session
from .exceptions import DevelopException, InstallException
class clic_command(Command):
    """Base class for the custom CLiC setuptools commands."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def apply_config_templates(self):
        "Read config template(s), make subs, write config file(s)."

        def render_template(config_path):
            """Turn one '<path>.tmpl' template into the real '<path>' file."""
            with open(config_path + '.tmpl', 'r') as tmpl_fh:
                text = tmpl_fh.read()
            # Point '~/clic/...' element content at the actual distribution
            # location on this machine.
            text = re.sub('>~/clic/(.*?)</',
                          r'>{0}/\1</'.format(distropath),
                          text)
            with open(config_path, 'w') as out_fh:
                out_fh.write(text)

        # Dickens Database
        render_template(join(distropath, 'dbs', 'dickens', 'config.xml'))

    def run(self):
        raise NotImplementedError()
class develop(_develop.develop, clic_command):
    """Custom 'develop' command: installs in develop mode, then generates and
    registers the Dickens database configuration with the Cheshire3 server."""

    user_options = _develop.develop.user_options + clic_command.user_options

    def initialize_options(self):
        _develop.develop.initialize_options(self)
        clic_command.initialize_options(self)

    def finalize_options(self):
        _develop.develop.finalize_options(self)
        clic_command.finalize_options(self)

    def install_for_development(self):
        global distropath, server, session
        # Carry out normal procedure
        _develop.develop.install_for_development(self)
        # Use config templates to generate configs
        self.apply_config_templates()
        # Tell the server to register the config file
        try:
            server.register_databaseConfigFile(
                session,
                join(distropath, 'dbs', 'dickens', 'config.xml')
            )
        except ConfigFileException as e:
            if e.reason.startswith("Database with id 'db_dickens' is already "
                                   "registered."):
                # Existing install / development install
                raise DevelopException("Package is already installed. To "
                                       "install in 'develop' mode you must "
                                       "first run the 'uninstall' command.")
            # FIX: any other configuration error was previously swallowed
            # silently; re-raise so genuine failures surface.
            raise

    def uninstall_link(self):
        global server, session
        # Carry out normal procedure
        _develop.develop.uninstall_link(self)
        # Unregister the database by deleting the Cheshire3 database config
        # plugin file from whichever location it was installed to.
        serverDefaultPath = server.get_path(session,
                                            'defaultPath',
                                            cheshire3Root
                                            )
        userSpecificPath = join(expanduser('~'), '.cheshire3-server')
        pluginPath = join('configs', 'databases', 'db_dickens.xml')
        if exists(join(serverDefaultPath, pluginPath)):
            os.remove(join(serverDefaultPath, pluginPath))
        elif exists(os.path.join(userSpecificPath, pluginPath)):
            os.remove(os.path.join(userSpecificPath, pluginPath))
        else:
            server.log_error(session, "No database plugin file")
class install(_install.install, clic_command):
    """Custom 'install' command: installs, then generates and registers the
    Dickens database configuration with the Cheshire3 server."""

    def run(self):
        # Carry out normal procedure
        _install.install.run(self)
        # Use config templates to generate configs
        self.apply_config_templates()
        # Install Cheshire3 database config plugin:
        # tell the server to register the config file
        try:
            server.register_databaseConfigFile(
                session,
                join(distropath, 'dbs', 'dickens', 'config.xml')
            )
        except ConfigFileException as e:
            # BUG FIX: the original tested for "db_ead" (copy-paste from
            # another project) although this package registers 'db_dickens',
            # so the already-installed case was never detected and every
            # ConfigFileException was silently swallowed.
            if e.reason.startswith("Database with id 'db_dickens' is already "
                                   "registered."):
                # Existing install / development install
                raise InstallException("Package is already installed. To "
                                       "install you must first run the "
                                       "'uninstall' command.")
            # Any other configuration failure should surface, not vanish.
            raise
# Inspect to find current path
modpath = inspect.getfile(inspect.currentframe())
moddir = dirname(modpath)
# Distribution root: two directory levels above clic/setuptools/.
distropath = abspath(join(moddir, '..', '..'))
serverConfig = os.path.join(cheshire3Root,
                            'configs',
                            'serverConfig.xml'
                            )
# Module-level Cheshire3 session/server shared by the command classes above.
session = Session()
server = SimpleServer(session, serverConfig)
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,699,052
|
cheshire3/clic
|
HEAD
|
/clic/dickens/web/old_interface/dickensBrowseHandler.py
|
import cgitb
import os
import re
import smtplib
import sys
import time
import traceback
import urllib
# import mod_python stuffs
from mod_python import apache, Cookie
from mod_python.util import FieldStorage
from cheshire3.internal import cheshire3Root
databaseName = 'dickens'
cheshirePath = os.environ.get('HOME', '/home/cheshire')
# value to normalize frequency counts by
normalizationBase = 10000
# Feature toggles for compareIndexes(): `z` enables z-score filtering at
# significance `zstatSig`; `tfp` enables the term-frequency-profile links.
z = False
zstatSig = 3
tfp = False
# settings: indexes exposed in the browse UI, and the index whose token
# count is used as the corpus size for statistics.
browseIndexes = ['sentence-idx', 'quote-idx', 'non-quote-idx', 'shortsus-idx', 'longsus-idx', '3gram-idx', 'non-quote-3gram-idx', 'quote-3gram-idx', '4gram-idx', 'non-quote-4gram-idx', 'quote-4gram-idx', '5gram-idx', 'non-quote-5gram-idx', 'quote-5gram-idx', 'longsus-5gram-idx']
indexForStats = 'sentence-idx'
# import Cheshire3/PyZ3950 stuff
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
import cheshire3.exceptions
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
from clic.stats import zscore
# C3 web search utils
class BrowseHandler(object):
htmlPath = os.path.join(cheshirePath, 'clic', 'www', databaseName, 'html')
logger = None
redirected = False
def __init__(self, lgr):
self.logger = lgr
build_architecture()
def send_html(self, data, req, code=200):
req.content_type = 'text/html'
req.content_length = len(data)
if (type(data) == unicode):
data = data.encode('utf-8')
req.write(data)
req.flush()
def send_xml(self, data, req, code=200):
req.content_type = 'text/xml'
req.content_length = len(data)
if (type(data) == unicode):
data = data.encode('utf-8')
req.write(data)
req.flush()
def comma(self, d):
s = str(d)
if s.find('.') != -1:
a = s[:s.find('.')]
b = s[s.find('.'):]
else:
a = s
b = ''
l = []
while len(a) > 3:
l.insert(0, a[-3:])
a = a[0:-3]
if a:
l.insert(0, a)
return ','.join(l) + b
def create_TFP(self, form):
word = form.get('word', None)
indexName = form.get('index', None)
if indexName != 'sentence':
cql = 'c3.sentence-idx exact %s' % word
q = qf.get_query(session, cql)
rs_base = db.search(session,q)
cql = 'c3.%s-idx exact %s' % (indexName, word)
q = qf.get_query(session, cql)
rs = db.search(session, q)
subset = []
hits = len(rs)
if (hits > 0):
for r in rs:
subset.append(r)
hits_base = len(rs_base)
dist_base = {}
dist_pos = {}
dist_neg = {}
if (hits_base > 0):
for r in rs_base:
try:
dist_base[r.occurences] += 1
except:
dist_base[r.occurences] = 1
if r in subset:
try:
dist_pos[r.occurences] += 1
except:
dist_pos[r.occurences] = 1
else:
try:
dist_neg[r.occurences] += 1
except:
dist_neg[r.occurences] = 1
hits_base = sum(dist_base.values())
hits_pos = sum(dist_pos.values())
hits_neg = sum(dist_neg.values())
output = ['<table><tr><td>frequency</td><td>when in %s (%s)</td><td>when not in %s (%s)</td><td>all</td></tr>' % (indexName, '%', indexName, '%')]
for i in [1, 2, 3]:
output.append('<tr><td>%s</td><td>%0.2f</td><td>%0.2f</td><td>%0.2f</td></tr>' % (i, max(float(dist_pos[i])/float(hits_pos) * 100.0,0), max(float(dist_neg[i])/float(hits_neg) * 100.0,0), max(float(dist_base[i])/float(hits_base) * 100.0,0)))
fourPlus_base = 0
fourPlus_pos = 0
fourPlus_neg = 0
for i in range(4,max(dist_base.keys())):
try:
fourPlus_base += dist_base[i]
except:
continue
for i in range(4,max(dist_pos.keys())):
try:
fourPlus_pos += dist_pos[i]
except:
continue
for i in range(4,max(dist_neg.keys())):
try:
fourPlus_neg += dist_neg[i]
except:
continue
output.append('<tr><td>4+</td><td>%0.2f</td><td>%0.2f</td><td>%0.2f</td></tr>' % (max(float(fourPlus_pos)/float(hits_pos) * 100.0,0), max(float(fourPlus_neg)/float(hits_neg) * 100.0,0), max(float(fourPlus_base)/float(hits_base) * 100.0,0)))
output.append('</table>')
return ''.join(output)
else :
dist = {}
cql = 'c3.%s-idx exact %s' % (indexName, word)
q = qf.get_query(session, cql)
rs = db.search(session,q)
hits = len(rs)
if (hits>0):
for r in rs:
try:
dist[r.occurences]+=1
except:
dist[r.occurences]=1
hits = sum(dist.values())
output = ['<table><tr><td>frequency</td><td>total articles</td><td>%</td></tr>']
for i in [1,2,3]:
try :
output.append('<tr><td>%s</td><td>%s</td><td>%0.2f</td></tr>' % (i, dist[i], float(dist[i])/float(hits) * 100.0))
except KeyError :
output.append('<tr><td>%s</td><td>0</td><td>0</td></tr>' % i)
fourPlus=0
for i in range(4,max(dist.keys())):
try:
fourPlus += dist[i]
except:
continue
try :
output.append('<tr><td>4+</td><td>%s</td><td>%0.2f</td></tr>' % (fourPlus, float(fourPlus)/float(hits) * 100.0))
except KeyError:
output.append('<tr><td>4+</td><td>0</td><td>0</td></tr>')
output.append('</table>')
return ''.join(output)
#print "\n%i occurrences in %i articles" % (occs,hits)
# TODO: firstTotal and total need to be generated from the equivalent non-gram index nOccs
def compareIndexes(self, req):
self.logger.log('comparing indexes')
start = time.time()
form = FieldStorage(req)
id = form.get('id','data_grid')
offset = str(form.get('offset', 0))
if offset.find('.') != -1:
startNum = int(offset[:offset.find('.')])
adjustValue = int(offset[offset.find('.')+1:])
else :
startNum = int(offset)
adjustValue = 0
howMany = int(form.get('page_size', 100))
indexStrings = form.get('index', None)
baseIdx = db.get_object(session, indexForStats)
corpusSize = baseIdx.fetch_metadata(session)['nOccs']
indexList = []
addTfp = False
# list means we are comparing indexes otherwise its just one
# we get the actual index object from the string and store them in indexList
if (indexStrings.__class__ == list):
if (indexStrings[0].find('gram') == -1):
addTfp = True
for i in range(0, len(indexStrings)):
if indexStrings[i].find('gram') == -1:
compareIndex = db.get_object(session, '%s' % indexStrings[i])
else:
if indexStrings[i].replace('-idx', '').find('-') == -1:
compareIndex = db.get_object(session, 'sentence-idx')
else:
compareIndex = db.get_object(session, '%s-idx' % indexStrings[i][:indexStrings[i].replace('-idx', '').rfind('-')])
indexList.append((db.get_object(session, '%s' % indexStrings[i]), compareIndex))
else :
if (indexStrings.find('gram') == -1):
addTfp = True
compareIndex = db.get_object(session, '%s' % indexStrings)
else:
if indexStrings.replace('-idx', '').find('-') == -1:
compareIndex = db.get_object(session, 'sentence-idx')
else:
compareIndex = db.get_object(session, '%s-idx' % indexStrings[:indexStrings.replace('-idx', '').rfind('-')])
indexList.append((db.get_object(session, '%s' % indexStrings), compareIndex))
# indexList.append(db.get_object(session, '%s' % indexStrings))
#
output = []
firstIndex = indexList[0][0]
firstTotal = indexList[0][1].fetch_metadata(session)['nOccs']
q = qf.get_query(session, 'idx-foo any "bar"')
appending = True
if startNum < 0 :
appending = False
startNum = startNum/-1
idxLength = firstIndex.fetch_metadata(session)['nTerms']
completed = False
cycles = 0
firstStart = startNum
while len(output) < howMany and completed == False:
if appending:
startNum = int(firstStart+(howMany*cycles))
else:
startNum = int(startNum-(howMany*cycles))
cycles += 1
if appending and idxLength-(startNum) <= howMany:
completed = True
if appending:
termList = firstIndex.fetch_termFrequencies(session, 'occ', startNum, min(howMany, idxLength-(startNum)), '>')
else:
termList = firstIndex.fetch_termFrequencies(session, 'occ', startNum, min(howMany, startNum), '<')
for i, t in enumerate(termList):
cells = []
word = firstIndex.fetch_termById(session, t[1])
q.term.value = word
percentage = round((float(t[2]) / float(firstTotal) * normalizationBase), 2)
firstIndexName = indexList[0][0].id[:indexList[0][0].id.find('-idx')]
if appending:
cells.append('<td>%d</td>' % (i + 1 + startNum))
else:
cells.append('<td>%d</td>' % (startNum + 1 - i))
# This try/except/else deals with whether we are viewing one
# index or more than one
try:
indexList[1]
except:
# A single index
if addTfp == True and tfp == True:
cells.append('<td><a href="javascript:searchFor(\'%s\', \'%s\')">%s</a></td><td><a href="javascript:tfpFor(\'%s\', \'%s\')">tfp</a></td><td>%s</td>' % (word, firstIndexName, word, word, firstIndexName, percentage))
else :
cells.append('<td><a href="javascript:searchFor(\'%s\', \'%s\')">%s</a></td><td>%s</td>' % (word, firstIndexName, word, percentage))
cells.append('<td>%s</td>' % t[2])
# more than one index
else:
if addTfp == True and tfp == True:
cells.append('<td><a href="javascript:searchFor(\'%s\', \'%s\')">%s</a></td><td><a href="javascript:tfpFor(\'%s\', \'%s\')">tfp</a></td><td>%s</td>' % (word, firstIndexName, word, word, firstIndexName, percentage))
else :
cells.append('<td><a href="javascript:searchFor(\'%s\', \'%s\')">%s</a></td><td>%s</td>' % (word, firstIndexName, word, percentage))
othersTotal = 0
othersHits = 0
self.logger.log(cells)
for j in range(1, len(indexList)):
total = indexList[j][1].fetch_metadata(session)['nOccs']
othersTotal += total
occs = indexList[j][0].scan(session, q, 1)
if (occs[0][0] == word):
othersHits += occs[0][1][2]
#add each cell
normalisedOccs = round((float(occs[0][1][2]) / float(total) * normalizationBase), 2)
cells.append('<td>%s</td>' % normalisedOccs)
else :
cells.append('<td>0</td>')
if z :
zstat = zscore(othersHits, t[2], othersTotal, indexList[0][1].fetch_metadata(session)['nOccs'])
if zstat >= zstatSig:
cells.append('<td>%s</td>' % zstat)
else :
continue
output.append('<tr>%s</tr>' % ''.join(cells))
if not appending:
output.reverse()
# output = output[adjustValue:]
(mins, secs) = divmod(time.time()-start, 60)
self.logger.log('scanning complete: %s' % secs)
return '<ajax-response><response type="object" id="%s_updater"><rows update_ui="true">%s</rows></response></ajax-response>' % (id, ''.join(output))
def sortFunc (self, x, y):
    # Python 2 comparator used by list.sort(cmp): orders two option strings
    # by the first number embedded in each (via getNum), using builtin cmp().
    return cmp(self.getNum(x),self.getNum(y))
def getNum(self, str):
    """Return the first run of digits in ``str`` as an int, or 0 if none.

    Used as a sort key so index ids like ``3gram-idx`` order numerically.
    """
    try:
        return int(re.findall(r'\d+', str)[0])
    except IndexError:
        # Only an empty findall() result can fail above; the original bare
        # except would also have hidden unrelated bugs (e.g. a None argument).
        return 0
def getIndexList(self, req):
    # Build an XML <option> list of the browsable indexes in the index store,
    # displayed without their trailing "-idx" suffix.
    indexStore = db.get_object(session, 'indexStore')
    output = []
    for i in indexStore :
        if i.id in browseIndexes:
            # The class attribute carries the embedded number for the UI.
            output.append('<option class="%s" value="%s">%s</option>' % (self.getNum(i.id), i.id, i.id[:-4]))
    # Lexicographic pre-sort, then a stable numeric sort (Python 2 cmp-style
    # sort), so entries with equal numbers keep alphabetical order.
    output.sort()
    output.sort(self.sortFunc)
    return '<xml>%s</xml>' % ''.join(output)
def getStatsTable(self, req):
    """Build an XML table of total word counts for each sub-corpus index."""
    sub_corpora = ['sentence-idx', 'quote-idx', 'non-quote-idx', 'shortsus-idx', 'longsus-idx']
    rows = ['<tr><th>Sub-Corpus</th><th>Total Word Count</th></tr>']
    for idx_name in sub_corpora:
        occ_count = db.get_object(session, idx_name).fetch_metadata(session)['nOccs']
        # Strip the trailing "-idx" suffix for display; comma-format the count.
        rows.append('<tr><td>%s</td><td class="number">%s</td></tr>' % (idx_name[:idx_name.rfind('-')], self.comma(occ_count)))
    return '<xml>%s</xml>' % ''.join(rows)
def handle(self, req):
    """Route an incoming request to a view based on its ``mode`` form field."""
    form = FieldStorage(req)
    mode = form.get('mode', None)
    if mode == 'compare':
        self.send_xml(self.compareIndexes(req), req)
    elif mode == 'index':
        self.send_xml(self.getIndexList(req), req)
    elif mode == 'statstable':
        self.send_xml(self.getStatsTable(req), req)
    elif mode == 'tfp':
        # TFP pages are built from the parsed form rather than the raw request.
        self.send_xml(self.create_TFP(form), req)
def build_architecture(data=None):
    """Initialise the Cheshire3 session and cache frequently used objects.

    Populates module-level globals so request handlers can reuse one server,
    database and transformer set per process.  ``data`` is unused here;
    presumably it exists to satisfy a callback signature — TODO confirm.
    """
    global session, serv, db, qf, xmlp, recordStore, sentenceStore, paragraphStore, resultSetStore, articleTransformer, kwicTransformer
    session = Session()
    session.environment = 'apache'
    session.user = None
    # Server configuration lives alongside the Cheshire3 installation root.
    serv = SimpleServer(session,
        os.path.join(cheshire3Root, 'configs', 'serverConfig.xml')
    )
    session.database = 'db_' + databaseName
    db = serv.get_object(session, session.database)
    qf = db.get_object(session, 'defaultQueryFactory')
    xmlp = db.get_object(session, 'LxmlParser')
    recordStore = db.get_object(session, 'recordStore')
    articleTransformer = db.get_object(session, 'article-Txr')
    kwicTransformer = db.get_object(session, 'kwic-Txr')
|
{"/clic/dickens/old_analysisFiles/collocate.py": ["/clic/dickens/concordancer.py"], "/setup.py": ["/clic/setuptools/commands.py"], "/clic/dickens/web/old_interface/dickensSearchHandler.py": ["/clic/dickens/concordancer.py"], "/clic/dickens/web/flask/api.py": ["/clic/dickens/keywords.py", "/clic/dickens/clusters.py", "/clic/dickens/concordance_new.py"]}
|
27,738,205
|
jarp/farkle-cli
|
refs/heads/master
|
/app.py
|
import os
import sys

# Clear the terminal before showing the banner.  Fix: the original hard-coded
# the POSIX "clear" command, which fails on Windows; "cls" is the equivalent.
os.system("cls" if os.name == "nt" else "clear")
print("\n\n\n\n")
print('FARKLE!!!')
print("##############################################################\n")
print("Farkle as a CLI. What fun!\nCoded by Sir Captain Dr. Professor\n")
print("##############################################################\n\n")
|
{"/app.py": ["/models/game.py", "/models/roll.py", "/models/dice.py", "/models/cup.py"], "/models/roll.py": ["/models/dice.py", "/models/cup.py"]}
|
27,750,691
|
xiaoxiongzzz/yolov4-tiny-tf2-Detailed
|
refs/heads/main
|
/utils/utils.py
|
from functools import reduce
from PIL import Image
def compose(*funcs):
    """Chain *funcs* left-to-right: compose(f, g)(x) == g(f(x)).

    Raises ValueError when called with no functions, since an empty
    composition has no identity defined here.
    """
    if not funcs:
        raise ValueError('Composition of empty sequence not supported.')

    def fold(inner, outer):
        # Feed the already-composed prefix's result into the next function.
        return lambda *args, **kwargs: outer(inner(*args, **kwargs))

    return reduce(fold, funcs)
# Resize while keeping the aspect ratio, then pad with grey bars so the
# result exactly matches `size` without distorting the source image.
def letterbox_image(image, size):
    """Fit `image` into `size` (w, h) letterbox-style on a grey canvas."""
    target_w, target_h = size
    src_w, src_h = image.size
    # The smaller axis ratio guarantees the scaled image fits entirely inside.
    ratio = min(target_w / src_w, target_h / src_h)
    scaled_w = int(src_w * ratio)
    scaled_h = int(src_h * ratio)
    # BICUBIC keeps the rescaled image sharp (ANTIALIAS is a faster option).
    scaled = image.resize((scaled_w, scaled_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))  # (128,128,128) = grey
    # Centre the scaled image; the remaining border stays grey.
    canvas.paste(scaled, ((target_w - scaled_w) // 2, (target_h - scaled_h) // 2))
    return canvas
|
{"/nets/CSPdarknet53_tiny.py": ["/utils/utils.py"]}
|
27,750,692
|
xiaoxiongzzz/yolov4-tiny-tf2-Detailed
|
refs/heads/main
|
/train.py
|
# 复写代码第三步,最难啃的训练阶段,如何通过数据集训练出自己的权重一直是难点,
# 所以这一块需要慢慢写,慢慢读 我会从main函数开始,模拟debug的单步过程进行复写,
|
{"/nets/CSPdarknet53_tiny.py": ["/utils/utils.py"]}
|
27,784,238
|
JonaBenja/greta_thunberg_sentiment
|
refs/heads/master
|
/extract_statistics.py
|
# extract_statistics.py
import pandas as pd
import stanza
import nltk
from collections import defaultdict, Counter
from datetime import datetime
import pickle
from utils import calculate_ngram_frequencies
# Read in the Dutch data
# NOTE(review): stanza.Pipeline loads full NLP models at import time even
# though the processed articles are later read back from pickles — confirm
# both pipelines are still needed here.
nl_nlp = stanza.Pipeline('nl')
nl_content = pd.read_csv("dutch/nl_greta.tsv", sep="\t", header = 0, keep_default_na=False, encoding = 'utf-8', error_bad_lines=False)
# Per-language statistics accumulator, keyed as [section][metric].
nl_statistics = defaultdict(dict)
# Read in the Italian data
it_nlp = stanza.Pipeline('it')
it_content = pd.read_csv("italian/it_greta.tsv", sep="\t", header = 0, keep_default_na=False, encoding = 'utf-8', error_bad_lines=False)
it_statistics = defaultdict(dict)
"""
METADATA
"""
authors = Counter(nl_content['Author'])
nl_statistics['metadata']['n_authors'] = len(authors)
freq_authors = sorted(authors.items(), key=lambda item: item[1], reverse=True)
authors = Counter(it_content['Author'])
it_statistics['metadata']['n_authors'] = len(authors)
publishers = Counter(nl_content['Publisher'])
max_publishers = publishers.most_common(3)
nl_statistics['metadata']['n_publishers'] = len(publishers)
nl_statistics['metadata']['max_publishers'] = max_publishers
publishers = Counter(it_content['Publisher'])
max_publishers = publishers.most_common(4)
it_statistics['metadata']['n_publishers'] = len(publishers)
it_statistics['metadata']['max_publishers'] = max_publishers
# Time span
dates = list(set(nl_content['Publication Date']))
dates.remove('')
dates.sort(key = lambda date: datetime.strptime(date, '%Y-%m-%d'))
timespan = (dates[0], dates[-1])
nl_statistics['metadata']['timespan'] = timespan
dates = list(set(it_content['Publication Date']))
dates.remove('')
dates.sort(key = lambda date: datetime.strptime(date, '%Y-%m-%d'))
timespan = (dates[0], dates[-1])
it_statistics['metadata']['timespan'] = timespan
"""
CONTENT
number of types and tokens, most frequent content words, average sentence length (min, max, mean)...
types
number of tokens
type-token ratio
most common POS per lemma
most common token excl stopwords
most common bigrams excl stopwords
"""
title_lengths = []
for title in nl_content["Title"]:
title_lengths.append(len(title))
nl_statistics['content']['mean_title_length'] = sum(title_lengths)/len(title_lengths)
title_lengths = []
for title in it_content["Title"]:
title_lengths.append(len(title))
it_statistics['content']['mean_title_length'] = sum(title_lengths)/len(title_lengths)
"""
#This was the code that was used to create the processed Stanza files.
nl_texts = []
for article in nl_content['Text']:
if article:
processed_text = nl_nlp(article)
nl_texts.append(processed_text)
pickle.dump(nl_texts, open('processed_articles/nl_articles_stanza', "wb"))
it_texts = []
for article in it_content['Text']:
if article:
processed_text = it_nlp(article)
it_texts.append(processed_text)
pickle.dump(it_texts, open('processed_articles/it_articles_stanza', "wb"))
"""
nl_nlp_output = pickle.load(open('processed_articles/nl_articles_stanza',"rb"))
it_nlp_output = pickle.load(open('processed_articles/it_articles_stanza',"rb"))
def _token_statistics(nlp_output):
    """Count token and (lemma, POS) frequencies over parsed documents.

    Returns (token_frequencies, token_pos_frequencies) as Counters.
    """
    token_pos_frequencies = Counter()
    token_frequencies = Counter()
    for data in nlp_output:
        for sentence in data.sentences:
            token_pos_frequencies.update((word.lemma, word.pos) for word in sentence.words)
            token_frequencies.update(word.text for word in sentence.words)
    return token_frequencies, token_pos_frequencies

# Same statistics for both corpora; previously this logic was duplicated inline.
token_frequencies, token_pos_frequencies = _token_statistics(nl_nlp_output)
nl_statistics['content']['freq_tokens'] = token_frequencies.most_common(50)
nl_statistics['content']['freq_token_pos'] = token_pos_frequencies.most_common(50)
token_frequencies, token_pos_frequencies = _token_statistics(it_nlp_output)
it_statistics['content']['freq_tokens'] = token_frequencies.most_common(50)
it_statistics['content']['freq_token_pos'] = token_pos_frequencies.most_common(50)
# Most frequent bigrams per corpus.
ngram_frequencies = calculate_ngram_frequencies(2, nl_nlp_output)
nl_statistics['content']['freq_n-gram'] = ngram_frequencies.most_common(20)
ngram_frequencies = calculate_ngram_frequencies(2, it_nlp_output)
it_statistics['content']['freq_n-gram'] = ngram_frequencies.most_common(20)
# Stop-word lists for filtering the frequency tables.
# NOTE(review): loaded but not used anywhere in this script yet — presumably
# intended for the "excl stopwords" statistics listed above; confirm before
# removing.
nl_stopwords = nltk.corpus.stopwords.words('dutch')
it_stopwords = nltk.corpus.stopwords.words('italian')
# mean sentence length
# n types
# n tokens
#print(nl_statistics)
#print(it_statistics)
|
{"/code/extract_statistics.py": ["/utils.py"], "/extract_statistics.py": ["/utils.py"]}
|
27,784,239
|
JonaBenja/greta_thunberg_sentiment
|
refs/heads/master
|
/utils.py
|
# utils
from collections import Counter
def calculate_ngram_frequencies(n, nlp_output):
    """Count space-joined n-grams over the tokens of every sentence.

    n: n-gram size (e.g. 2 for bigrams).
    nlp_output: iterable of parsed documents exposing ``.sentences`` whose
        sentences expose ``.tokens`` with a ``.text`` attribute (Stanza-style).
    Returns a Counter mapping "tok1 tok2 ..." -> frequency.
    """
    counts = Counter()
    for document in nlp_output:
        for sentence in document.sentences:
            words = [token.text for token in sentence.tokens]
            # Slide a window of width n; zip stops when any offset runs out,
            # which matches the original range(len(tokens) - n + 1) bound.
            counts.update(" ".join(gram) for gram in zip(*(words[i:] for i in range(n))))
    return counts
|
{"/code/extract_statistics.py": ["/utils.py"], "/extract_statistics.py": ["/utils.py"]}
|
27,784,240
|
JonaBenja/greta_thunberg_sentiment
|
refs/heads/master
|
/get_all_documents.py
|
# get_all_documents.py
|
{"/code/extract_statistics.py": ["/utils.py"], "/extract_statistics.py": ["/utils.py"]}
|
27,795,237
|
farida-el-kafrawy/mini_project
|
refs/heads/main
|
/mini_project_week6.py
|
from mini_project_week5 import view_couriers, view_products
import csv
import mysql.connector
from datetime import date
from rich.console import Console
rich = Console()
# Module-wide MySQL connection/cursor shared by every function below.
# NOTE(review): credentials are hard-coded for the local dev database;
# consider loading them from configuration.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="password",
    database="miniproject"
)
mycursor = mydb.cursor()
# One-off table creation, kept commented out after the first run.
# mycursor.execute("CREATE TABLE orders (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255), address VARCHAR(255), phone VARCHAR(20), courier INT, status VARCHAR(50), items VARCHAR(255))")
def first_orders():
    """Seed the orders table with a small set of example rows."""
    seed_rows = [
        ("Margie","SW19 4QT, Merton", "023471835",2,"Delivered", "3,6,5"),
        ("Margaret","Windermere Close Essex C4F 3QS","0783274234",4,"Delivered", "9,8,1,2"),
        ("Milly","Waterway Singapore","03257365326",2,"With courier", "3,4"),
        ("Michelle","Fiji Road","083275235",4,"Delivered", "5,5"),
    ]
    insert_sql = "INSERT INTO orders (name,address,phone,courier,status,items) VALUES (%s, %s, %s, %s, %s, %s)"
    mycursor.executemany(insert_sql, seed_rows)
    mydb.commit()
# first_orders()
def add_order():
    """Interactively collect order details and insert one new row."""
    name = input("Enter customer name")
    address = input("Enter customer address")
    phone = input("Enter customer phone number")
    view_products()
    # Items are stored as a comma-separated string of product ids.
    items = str(input("Enter product ids separated by a comma: "))
    view_couriers()
    courier = input("Which courier is delivering this order. Select number please.")
    insert_sql = "INSERT INTO orders (name,address,phone,courier,status,items) VALUES (%s, %s, %s, %s, %s, %s)"
    rows = [
        (name, address, phone, courier, "Preparing", items),
    ]
    mycursor.executemany(insert_sql, rows)
    mydb.commit()
    print("Order added")
def view_orders():
    """Print every order as "id. name, address, phone, courier, status, items"."""
    mycursor.execute("SELECT * FROM orders")
    print("Orders:")
    for row in mycursor.fetchall():
        order_id, name, address, phone, courier, status, items = row
        print(f"{order_id}. {name}, {address}, {phone}, {courier}, {status}, {items} ")
def update_order():
    """Interactively update one field of an existing order.

    Fixes two bugs in the original:
    * the phone branch validated ``update_property`` (an int) instead of the
      entered phone number, so empty input was never caught;
    * the courier branch wrote to the ``address`` column instead of ``courier``.
    """

    def _set_field(column, value, order_id):
        # Column names cannot be parameterised; they come from the fixed
        # internal set below, never from user input.
        sql = "UPDATE orders SET " + column + " = %s WHERE id = %s;"
        mycursor.execute(sql, (value, order_id))
        mydb.commit()

    view_orders()
    update_choice = int(input("Which order do you want to update? Choose number"))
    update_property = int(input("""
What do you want to update?
1. Customer Name
2. Customer Address
3. Customer Phone Number
4. Courier
5. Order status
"""))
    if update_property == 1:
        update_name = input("Enter new name")
        if update_name == '':
            print("Try again")
            update_order()
        else:
            _set_field("name", update_name, update_choice)
    elif update_property == 2:
        update_address = input("Enter new address")
        if update_address == '':
            print("Try again")
            update_order()
        else:
            _set_field("address", update_address, update_choice)
    elif update_property == 3:
        update_phone = input("Enter new phone number")
        # BUG FIX: validate the entered phone, not the menu selection int.
        if update_phone == '':
            print("Try again")
            update_order()
        else:
            _set_field("phone", update_phone, update_choice)
    elif update_property == 4:
        view_couriers()
        update_courier = input("Which courier is delivering this order. Select number please.")
        if update_courier == '':
            print("Try again")
            update_order()
        else:
            # BUG FIX: update the courier column (the original wrote address).
            _set_field("courier", update_courier, update_choice)
    elif update_property == 5:
        update_status = int(input("""
Enter new status
1. Preparing
2. With courier
3. Delivered
"""))
        status_by_choice = {1: 'Preparing', 2: 'With courier', 3: 'Delivered'}
        if update_status in status_by_choice:
            _set_field("status", status_by_choice[update_status], update_choice)
        # Matches the original: printed even when the status number was unknown.
        print("Order delivery status updated")
    else:
        rich.print("""[#808080]Invalid input.
Try again.[/]""")
        update_order()
def delete_order():
    """Show all orders, then delete the one the user selects by id."""
    view_orders()
    order_id = int(input("Which order do you wish to delete? (Select number)"))
    mycursor.execute("DELETE FROM orders WHERE id = %s", (order_id,))
    mydb.commit()
    print(f"Product with id {order_id} removed from list")
def orders_export_csv():
    """Dump the orders table to a date-stamped CSV file.

    Fix: open the file with ``newline=''`` as the csv module requires;
    without it every row is followed by a blank line on Windows.
    """
    mycursor.execute('select * from orders')
    rows = mycursor.fetchall()

    def get_filename_datetime():
        # Date-stamped file name, e.g. "orders-2021-01-01.csv".
        return "orders-" + str(date.today()) + ".csv"

    name = get_filename_datetime()
    # NOTE(review): hard-coded user path; consider making this configurable.
    path = "C:/Users/farid/Documents/Data Engineering/Mini_project/" + name
    with open(path, mode='w', encoding='utf-8', newline='') as f:
        csv.writer(f, dialect='excel').writerows(rows)
def check_order_status():
    """List all orders sorted by delivery status (descending)."""
    mycursor.execute("SELECT * FROM orders ORDER BY status DESC")
    print("Orders:")
    for row in mycursor.fetchall():
        order_id, name, address, phone, courier, status, items = row
        print(f"{order_id}. {name}, {address}, {phone}, {courier}, {status}, {items} ")
def check_courier():
    """List all orders sorted by courier id, labelling the courier column."""
    mycursor.execute("SELECT * FROM orders ORDER BY courier")
    print("Orders:")
    for row in mycursor.fetchall():
        order_id, name, address, phone, courier, status, items = row
        print(f"{order_id}. {name}, {address}, {phone}, COURIER: {courier}, {status}, {items} ")
|
{"/mini_project_week3.py": ["/mini_project_week5.py"], "/final_menu.py": ["/final_section_menus.py", "/mini_project_week5.py", "/mini_project_week6.py"], "/mini_project_menu.py": ["/section_menus.py"], "/section_menus.py": ["/mini_project_week4.py", "/mini_project_week3.py"], "/mini_project_week6.py": ["/mini_project_week5.py"], "/final_section_menus.py": ["/mini_project_week5.py", "/mini_project_week6.py"]}
|
27,795,238
|
farida-el-kafrawy/mini_project
|
refs/heads/main
|
/mini_project_week5.py
|
import csv
import mysql.connector
from rich.console import Console
rich = Console()
# One-off setup kept for reference: the block below created the
# "miniproject" database the first time the script ran.
# mydb = mysql.connector.connect(
# host="localhost",
# user="root",
# password="password"
# )
# mycursor = mydb.cursor()
# mycursor.execute("CREATE DATABASE miniproject")
# Module-wide connection/cursor shared by every function in this module.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="password",
    database="miniproject"
)
mycursor = mydb.cursor()
# One-off table creation, kept commented out after the first run.
# mycursor.execute("CREATE TABLE products (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255), price DECIMAL(4,2))")
# mycursor.execute("CREATE TABLE couriers (id INT AUTO_INCREMENT PRIMARY KEY, name VARCHAR(255), phone VARCHAR(20))")
def first_products():
    """Seed the products table with a few starter items."""
    seed_rows = [
        ("Coke Zero",0.8),
        ("Sprite",0.9),
        ("Fanta Lemon",0.8),
        ("Margharita Pizza",9.0),
    ]
    mycursor.executemany("INSERT INTO products (name,price) VALUES (%s, %s)", seed_rows)
    mydb.commit()
# first_products()
def first_couriers():
    """Seed the couriers table with a few starter rows."""
    seed_rows = [
        ("Mark", "0327592385"),
        ("Smith", "032895723"),
        ("Kale", "0325713523"),
    ]
    mycursor.executemany("INSERT INTO couriers (name,phone) VALUES (%s, %s)", seed_rows)
    mydb.commit()
# first_couriers()
def add_product_db():
    """Prompt for a product name and price; re-prompt until both are non-empty."""
    name = input("Enter product name")
    price = input("Enter product price")
    if not name or not price:
        rich.print("""
[#808080]Invalid input.
Try again.[/]
""")
        add_product_db()
    else:
        mycursor.executemany("INSERT INTO products (name,price) VALUES (%s, %s)", [(name, price)])
        mydb.commit()
        print(f"{name} added")
def add_courier_db():
    """Prompt for a courier name and phone; re-prompt until both are non-empty."""
    name = input("Enter courier name")
    phone = input("Enter courier phone number")
    if not name or not phone:
        rich.print("""
[#808080]Invalid input.
Try again.[/]
""")
        add_courier_db()
    else:
        mycursor.executemany("INSERT INTO couriers (name,phone) VALUES (%s, %s)", [(name, phone)])
        mydb.commit()
        print(f"{name} added")
def view_products():
    """Print "id. name" for every product."""
    mycursor.execute("SELECT id, name FROM products")
    print("ID. Product Name:")
    for product_id, name in mycursor.fetchall():
        print(f"{product_id}. {name}")
def view_couriers():
    """Print "id. name" for every courier."""
    mycursor.execute("SELECT id, name FROM couriers")
    print("ID. Courier Name:")
    for courier_id, name in mycursor.fetchall():
        print(f"{courier_id}. {name}")
def delete_product_db():
    """Show products, then delete the one whose id the user enters."""
    view_products()
    product_id = int(input("Enter product number to delete"))
    mycursor.execute("DELETE FROM products WHERE id = %s", (product_id,))
    mydb.commit()
    print(f"Product with id {product_id} removed from list")
def delete_courier_db():
    """Show couriers, then delete the one whose id the user enters."""
    view_couriers()
    courier_id = int(input("Enter courier number to delete"))
    mycursor.execute("DELETE FROM couriers WHERE id = %s", (courier_id,))
    mydb.commit()
    print(f"Courier with id {courier_id} removed from list")
def update_courier_db():
    """Interactively update a courier's name and/or phone number.

    Empty input restarts the whole flow by recursion; valid input is written
    and committed immediately.
    """
    view_couriers()
    user_index_selection = int(input("""Which item do you wish to update?
Enter number."""))
    name_or_phone = int(input("""
Enter 1 to update name
Enter 2 to update phone number
Enter 3 to update both
"""))
    if name_or_phone == 1:
        user_new_name = input("What is the new name?")
        if user_new_name == '':
            print("Try again")
            update_courier_db()
        else:
            sql = '''UPDATE couriers
SET name = %s
WHERE id = %s;'''
            val = (user_new_name, user_index_selection)
            mycursor.execute(sql, val)
            mydb.commit()
            print("Courier updated")
    elif name_or_phone == 2:
        user_new_phone = input("What is the new phone number?")
        if user_new_phone == '':
            print("Try again")
            update_courier_db()
        else:
            sql = '''UPDATE couriers
SET phone = %s
WHERE id = %s;'''
            val = (user_new_phone, user_index_selection)
            mycursor.execute(sql, val)
            mydb.commit()
            print("Courier updated")
    elif name_or_phone ==3:
        user_new_name = input("What is the new name?")
        if user_new_name == '':
            print("Try again")
            update_courier_db()
        else:
            # Name change committed first, then the phone is prompted for;
            # an empty phone restarts the whole flow (name stays changed).
            sql1 = '''UPDATE couriers
SET name = %s
WHERE id = %s;'''
            val1 = (user_new_name, user_index_selection)
            mycursor.execute(sql1, val1)
            mydb.commit()
            user_new_phone = input("What is the new phone number?")
            if user_new_phone == '':
                print("Try again")
                update_courier_db()
            else:
                sql2 = '''UPDATE couriers
SET phone = %s
WHERE id = %s;'''
                val2 = (user_new_phone, user_index_selection)
                mycursor.execute(sql2, val2)
                mydb.commit()
                print("Courier updated")
    else:
        rich.print("""[#808080]Invalid input.
Try again.[/]""")
        update_courier_db()
def update_product_db():
    """Interactively update a product's name and/or price.

    Improvements over the original: empty input is rejected and re-prompted
    (matching update_courier_db), the price variable is no longer misnamed
    ``user_new_phone``, and the name change in the "both" branch is committed
    before the price prompt (the courier version already did this).
    """
    view_products()
    user_index_selection = int(input("""Which item do you wish to update?
Enter number."""))
    name_or_price = int(input("""
Enter 1 to update name
Enter 2 to update price
Enter 3 to update both
"""))
    if name_or_price == 1:
        user_new_name = input("What is the new name?")
        if user_new_name == '':
            print("Try again")
            update_product_db()
        else:
            sql = '''UPDATE products
SET name = %s
WHERE id = %s;'''
            mycursor.execute(sql, (user_new_name, user_index_selection))
            mydb.commit()
            print("Product updated")
    elif name_or_price == 2:
        user_new_price = input("What is the new price?")
        if user_new_price == '':
            print("Try again")
            update_product_db()
        else:
            sql = '''UPDATE products
SET price = %s
WHERE id = %s;'''
            mycursor.execute(sql, (user_new_price, user_index_selection))
            mydb.commit()
            print("Product updated")
    elif name_or_price == 3:
        user_new_name = input("What is the new name?")
        if user_new_name == '':
            print("Try again")
            update_product_db()
        else:
            sql1 = '''UPDATE products
SET name = %s
WHERE id = %s;'''
            mycursor.execute(sql1, (user_new_name, user_index_selection))
            mydb.commit()
            user_new_price = input("What is the new price?")
            if user_new_price == '':
                print("Try again")
                update_product_db()
            else:
                sql2 = '''UPDATE products
SET price = %s
WHERE id = %s;'''
                mycursor.execute(sql2, (user_new_price, user_index_selection))
                mydb.commit()
                print("Product updated")
    else:
        rich.print("""[#808080]Invalid input.
Try again.[/]""")
        update_product_db()
def product_export_csv():
    """Export the products table to products.csv in the working directory.

    Fix: open with ``newline=''`` as the csv module requires; without it
    every row gains a spurious blank line on Windows.
    """
    mycursor.execute('select * from products')
    rows = mycursor.fetchall()
    with open('products.csv', mode='w', encoding='utf-8', newline='') as f:
        csv.writer(f, dialect='excel').writerows(rows)
def courier_export_csv():
    """Export the couriers table to couriers.csv in the working directory.

    Fix: open with ``newline=''`` as the csv module requires; without it
    every row gains a spurious blank line on Windows.
    """
    mycursor.execute('select * from couriers')
    rows = mycursor.fetchall()
    with open('couriers.csv', mode='w', encoding='utf-8', newline='') as f:
        csv.writer(f, dialect='excel').writerows(rows)
|
{"/mini_project_week3.py": ["/mini_project_week5.py"], "/final_menu.py": ["/final_section_menus.py", "/mini_project_week5.py", "/mini_project_week6.py"], "/mini_project_menu.py": ["/section_menus.py"], "/section_menus.py": ["/mini_project_week4.py", "/mini_project_week3.py"], "/mini_project_week6.py": ["/mini_project_week5.py"], "/final_section_menus.py": ["/mini_project_week5.py", "/mini_project_week6.py"]}
|
27,795,239
|
farida-el-kafrawy/mini_project
|
refs/heads/main
|
/final_section_menus.py
|
from mini_project_week5 import add_courier_db, add_product_db, update_product_db, update_courier_db, delete_courier_db, delete_product_db, view_couriers, view_products
from mini_project_week6 import check_order_status, check_courier, view_orders, add_order, update_order, delete_order, orders_export_csv
import json
def courier_menu():
    """Dispatch one courier-menu action from the user's numeric choice."""
    actions = {1: view_couriers, 2: add_courier_db, 3: update_courier_db, 4: delete_courier_db}
    choice = int(input("What would you like to do?"))
    if choice == 0:
        print("go back")
    elif choice in actions:
        actions[choice]()
    else:
        print("Try again")
        courier_menu()
def products_menu():
    """Dispatch one products-menu action from the user's numeric choice.

    Fix: invalid input now re-enters products_menu; the original recursed
    into courier_menu (copy-paste slip).
    """
    menu_input = int(input("What would you like to do?"))
    if menu_input == 0:
        print("go back")
    elif menu_input == 1:
        view_products()
    elif menu_input == 2:
        add_product_db()
    elif menu_input == 3:
        update_product_db()
    elif menu_input == 4:
        delete_product_db()
    else:
        print("Try again")
        products_menu()
def orders_menu():
    """Dispatch one orders-menu action from the user's numeric choice.

    Fix: invalid input now re-enters orders_menu; the original recursed
    into courier_menu (copy-paste slip).
    """
    menu_input = int(input("What would you like to do?"))
    if menu_input == 0:
        print("go back")
    elif menu_input == 1:
        view_orders()
    elif menu_input == 2:
        add_order()
    elif menu_input == 3:
        update_order()
    elif menu_input == 4:
        delete_order()
    elif menu_input == 5:
        check_order_status()
    elif menu_input == 6:
        check_courier()
    else:
        print("Try again")
        orders_menu()
|
{"/mini_project_week3.py": ["/mini_project_week5.py"], "/final_menu.py": ["/final_section_menus.py", "/mini_project_week5.py", "/mini_project_week6.py"], "/mini_project_menu.py": ["/section_menus.py"], "/section_menus.py": ["/mini_project_week4.py", "/mini_project_week3.py"], "/mini_project_week6.py": ["/mini_project_week5.py"], "/final_section_menus.py": ["/mini_project_week5.py", "/mini_project_week6.py"]}
|
27,795,240
|
farida-el-kafrawy/mini_project
|
refs/heads/main
|
/final_menu.py
|
# main menu
from mini_project_week5 import add_courier_db, add_product_db, update_product_db, update_courier_db, delete_courier_db, delete_product_db, view_couriers, view_products, courier_export_csv, product_export_csv
from mini_project_week6 import check_order_status, check_courier, view_orders, add_order, update_order, delete_order, orders_export_csv
from rich.console import Console
rich = Console()
def back_or_no():
    """Ask whether to return to the main menu; exits the program on "No".

    Loops until the answer parses as an int; every path either recurses into
    main_menu, exits, or re-prompts.
    """
    while True:
        try:
            rich.print("\n [#228B22] Would you like to go back to the main menu? [/]")
            go_back = int(input("""
Press 1 for Yes
Press 2 for No"""))
            if go_back == 1:
                main_menu()
            elif go_back ==2:
                print("""
▐█▀▄─ ▀▄─▄▀ █▀▀──█
─▐█▀▀▄ ──█── █▀▀──▀
─▐█▄▄▀ ──▀── ▀▀▀──▄
""")
                exit()
            else:
                rich.print("""[#808080]
Option was not found.
Try again. [/]""")
        except ValueError:
            # Non-numeric input lands here; same retry message as above.
            rich.print("""[#808080]
Option was not found.
Try again. [/]""")
def main_menu():
    """Top-level menu: route the user's letter choice to a sub-menu.

    Fix: the letter checks are now an if/elif chain.  The original used
    independent ``if`` statements, so any valid choice other than "A"/"a"
    also fell into the trailing ``else`` and printed "Option ... was not
    found" before recursing into the menu again.
    """
    rich.print("""[bold][#228B22]
Main Menu
Central Perk Cafe [/][/]""")
    rich.print("""[#808080]
) (
( ) )
) ( ( [/]
[green] _______[/][#808080])[/][green]_
.-'---------|
( C|/\/\/\/\/|
'-./\/\/\/\/|
'_________'
'-------' [/]""")
    main_selection = rich.input("""
[#228B22] A. Home [/]
[#A7DBD8] B. Products [/]
[#3CAEA3] C. Couriers [/]
[#F6D55C] D. Orders [/]
[#ED553B] E. Save and Exit [/]
Please make a selection A, B, C or D or E:
""")
    if main_selection == "B" or main_selection == "b":
        products_menu()
        back_or_no()
    elif main_selection == "C" or main_selection == "c":
        courier_menu()
        back_or_no()
    elif main_selection == "D" or main_selection == "d":
        orders_menu()
        back_or_no()
    elif main_selection == "E" or main_selection == "e":
        save_and_exit()
    elif main_selection == "A" or main_selection == "a":
        rich.print("[#228B22] Back to Home [/]")
        main_menu()
    else:
        rich.print(f"""[#808080]
Option '{main_selection}' was not found.
Try again.
Back to main menu. [/]""")
        main_menu()
def courier_menu():
    """Courier sub-menu: print the options once, then loop reading choices."""
    rich.print("""[#3CAEA3]
Press 0 to return to main menu
Press 1 to view courier list
Press 2 to add new courier
Press 3 to update existing courier
Press 4 to delete courier
[/] """)
    while True:
        try:
            menu_input = int(input("What would you like to do?"))
            if menu_input ==0:
                main_menu()
            elif menu_input == 1:
                view_couriers()
                back_or_no()
            elif menu_input == 2:
                add_courier_db()
                back_or_no()
            elif menu_input == 3:
                update_courier_db()
                back_or_no()
            elif menu_input == 4:
                delete_courier_db()
                back_or_no()
            else:
                # Unknown number: re-print the option list by recursing.
                rich.print("""[#808080]
Option was not found.
Try again. [/]""")
                courier_menu()
        except ValueError:
            # Non-numeric input: same retry path as an unknown number.
            rich.print("""[#808080]
Option was not found.
Try again. [/]""")
            courier_menu()
def products_menu():
    """Products sub-menu: print the options once, then loop reading choices."""
    rich.print("""[#A7DBD8]
Press 0 to return to main menu
Press 1 to view product list
Press 2 to add new product
Press 3 to update existing product
Press 4 to delete product
[/] """)
    while True:
        try:
            menu_input = int(input("What would you like to do?"))
            if menu_input ==0:
                main_menu()
            elif menu_input == 1:
                view_products()
                back_or_no()
            elif menu_input == 2:
                add_product_db()
                back_or_no()
            elif menu_input == 3:
                update_product_db()
                back_or_no()
            elif menu_input == 4:
                delete_product_db()
                back_or_no()
            else:
                # Unknown number: re-print the option list by recursing.
                rich.print("""[#808080]
Option was not found.
Try again. [/]""")
                products_menu()
        except ValueError:
            # Non-numeric input: same retry path as an unknown number.
            rich.print("""[#808080]
Option was not found.
Try again. [/]""")
            products_menu()
def orders_menu():
    """Orders sub-menu: print the options once, then loop reading choices."""
    rich.print("""[#F6D55C]
Press 0 to return to main menu
Press 1 to view orders
Press 2 to add order
Press 3 to update order
Press 4 to delete order
Press 5 to check order status
Press 6 to check which courier has an order
[/] """)
    while True:
        try:
            menu_input = int(input("What would you like to do?"))
            if menu_input ==0:
                main_menu()
            elif menu_input == 1:
                view_orders()
                back_or_no()
            elif menu_input == 2:
                add_order()
                back_or_no()
            elif menu_input ==3:
                update_order()
                back_or_no()
            elif menu_input ==4:
                delete_order()
                back_or_no()
            elif menu_input == 5:
                check_order_status()
                back_or_no()
            elif menu_input == 6:
                check_courier()
                back_or_no()
            else:
                # Unknown number: re-print the option list by recursing.
                rich.print("""[#808080]
Option was not found.
Try again. [/]""")
                orders_menu()
        except ValueError:
            # Non-numeric input: same retry path as an unknown number.
            rich.print("""[#808080]
Option was not found.
Try again. [/]""")
            orders_menu()
def save_and_exit():
    """Optionally export all tables to CSV, then terminate the program."""
    while True:
        try:
            export = int(rich.input("[#ED553B] Would you like to export orders, products and couriers as CSV, write 1 for yes or 2 for no [/]"))
            if export == 1:
                product_export_csv()
                courier_export_csv()
                orders_export_csv()
                print("Everything saved to current folder")
                print("""
▐█▀▄─ ▀▄─▄▀ █▀▀──█
─▐█▀▀▄ ──█── █▀▀──▀
─▐█▄▄▀ ──▀── ▀▀▀──▄
""")
                exit()
            elif export == 2:
                print("""
▐█▀▄─ ▀▄─▄▀ █▀▀──█
─▐█▀▀▄ ──█── █▀▀──▀
─▐█▄▄▀ ──▀── ▀▀▀──▄
""")
                exit()
            else:
                rich.print("""[#808080]Option was not found.
Try again. [/]""")
                save_and_exit()
        except ValueError:
            # Non-numeric input lands here; restart the prompt.
            rich.print("""[#808080]Option was not found.
Try again. [/]""")
            save_and_exit()
|
{"/mini_project_week3.py": ["/mini_project_week5.py"], "/final_menu.py": ["/final_section_menus.py", "/mini_project_week5.py", "/mini_project_week6.py"], "/mini_project_menu.py": ["/section_menus.py"], "/section_menus.py": ["/mini_project_week4.py", "/mini_project_week3.py"], "/mini_project_week6.py": ["/mini_project_week5.py"], "/final_section_menus.py": ["/mini_project_week5.py", "/mini_project_week6.py"]}
|
27,816,936
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/forTest/func13_test.py
|
"""
python外部数据源文件处理
Yaml:
是一种可读性高,用来表达数序列化的格式,常常作为配置文件使用
Json:
是一个轻量级的数据交换语言,该语言以易于让人阅读的文字为基础,用来传输由属性值或者需理性的值组成的数据对象
Excel:
有直观的界面、出色的计算功能和图标工具是一款电子制表软件
"""
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
wb = Workbook()
dest_filename = 'empty_book.xlsx'
ws1 = wb.active
ws1.title = "range names"
for row in range(1, 40):
ws1.append(range(600))
ws2 = wb.create_sheet(title="Pi")
ws2['F5'] = 3.14
ws3 = wb.create_sheet(title="Data")
for row in range(10, 20):
for col in range(27, 54):
_ = ws3.cell(column=col, row=row, value="{0}".format(get_column_letter(col)))
print(ws3['AA10'].value)
ws4 = wb.create_sheet(title="my_sheet")
for i in range(1,31):
ws4.cell(column=1,row=i).value="test"
wb.save(filename = dest_filename)
#读数据
import yaml
# from openpyxl import load_workbook
# wb = load_workbook(filename = 'empty_book.xlsx')
# sheet_ranges = wb['range names']
# print(sheet_ranges['D18'].value)
#
# for i in range(1,31):
# print(sheet_ranges.cell(column=1, row=i).value)
#json的读写
"""
Yaml的读写 推荐
PyYAML
yaml.load yaml格式转成其他格式
yaml.dump 其他格式转yaml格式
"""
#loading YAML
# print(yaml.load("""
# - Hesperiidae
# - Papilionidae
# - Apatelodidae
# - Epiplemidae
# """, Loader=yaml.FullLoader))
print(yaml.load(open("../data/yaml_test.yml"), Loader=yaml.FullLoader))
#dumping YAML
print(yaml.dump([['Hesperiidae', 'Papilionidae', 'Apatelodidae', 'Epiplemidae', {'a': 1}]]))
with open("yaml1_test.yml","w") as f:
yaml.dump(data={"a": [1,2]}, stream=f)
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,937
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/pytest_base/testcases/parameterize_test.py
|
import pytest
import yaml
class TestData:
    """Demonstrates equivalent argument-name forms of pytest.mark.parametrize.

    Fixes: the original defined all four methods under one name (so pytest
    only collected the last), and a stray trailing backslash joined a
    ``print`` with the next decorator into an invalid ``@`` expression.
    """

    @pytest.mark.parametrize("a,b", [(3, 4), (20, 4), (3, 9)])
    def test_data_str_names(self, a, b):
        # Argument names given as a single comma-separated string.
        print(a + b)

    @pytest.mark.parametrize(("a", "b"), [(3, 4), (20, 4), (3, 9)])
    def test_data_tuple_names(self, a, b):
        # Argument names given as a tuple.
        print(a + b)

    @pytest.mark.parametrize(["a", "b"], [(3, 4), (20, 4), (3, 9)])
    def test_data_list_names(self, a, b):
        # Argument names given as a list.
        print(a + b)

    # Raw string avoids accidental escape sequences in the Windows path.
    @pytest.mark.parametrize(("a", "b"), yaml.safe_load(open(r"D:\pycharm\pythonproject\pytest_base\data\data.yaml")))
    def test_data_from_yaml(self, a, b):
        # Parameter rows loaded from an external YAML data file.
        print(a + b)
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,938
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/interface_test_combat/common/config.py
|
import configparser
import os
#封装该文件是为了获取配置文件config.ini中得值
class ConfigIni:
    """Thin wrapper around :mod:`configparser` for reading and updating an
    INI configuration file.

    BASE_PATH: absolute path of the project root (the parent of this
        file's directory).
    config_file_path: default location of ``config.ini`` at the project
        root; a different path may be passed to ``__init__``.

    Fix vs. original: removed the two debug ``print`` calls that executed
    in the class body, i.e. on every import of this module.
    """

    # Project root: two dirname() hops up from this file's real path.
    BASE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    # Default config file, resolved relative to the project root.
    config_file_path = os.path.join(BASE_PATH, "config.ini")

    def __init__(self, file_path=config_file_path):
        """Create a parser bound to *file_path* and read it.

        :param file_path: absolute path of the INI file; defaults to the
            project-root ``config.ini``.
        """
        # Remember the path so set_value() writes back to the same file.
        self.config_file_path = file_path
        self.cf = configparser.ConfigParser()
        # A missing file is not an error: read() just leaves the parser empty.
        self.cf.read(file_path)

    def get_key(self, section, option):
        """Return the value stored under ``[section] option``.

        :param section: section name in the INI file.
        :param option: option name within that section.
        :return: the value as a string.
        :raises configparser.NoSectionError: if *section* is absent.
        :raises configparser.NoOptionError: if *option* is absent.
        """
        return self.cf.get(section, option)

    def set_value(self, section, option, value):
        """Set ``[section] option`` to *value* and persist to disk.

        :param section: section name in the INI file.
        :param option: option name within that section.
        :param value: new value (must be a string for configparser).
        :return: None
        """
        # Update the in-memory parser first ...
        self.cf.set(section, option, value)
        # ... then rewrite the file, otherwise the change exists only in memory.
        with open(self.config_file_path, "w+") as f:
            self.cf.write(f)
# Module-level singleton: other modules (e.g. common/mysql.py) import ``cf``
# directly so the config file is parsed once per process. Do not rename.
cf = ConfigIni()
if __name__ == "__main__":
    # Ad-hoc smoke test; requires a config.ini containing section "test1".
    print(cf.get_key("test1","name4"))
    # NOTE(review): set_value returns None, so this second line prints "None".
    print(cf.set_value("test1","name1","wang1"))
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,939
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/forTest/func11_test.py
|
"""
python多线程
进程:
执行中的程序;
拥有独立的地址空间、内存、数据栈等
操作系统管理
派生(fork/spawn)新进程
进程间通信(IPC)方式共享信息
线程:
同进程下执行,共享相同的上下文
线程间的信息共享和通信更加容易
多线程并发执行 并发即轮询执行而不是同一时刻同时进行
需要同步原语?
python与线程
python是一个解释器
解释器主循环
主循环在哄只有一个控制线程在执行
使用全局解释器锁(GIL)---同步原语的一个技术
GIL保证一个线程
设置GIL
切换进一个线程去运行
执行下面的操作之一
两种线程管理
_thread:提供了基本的线程和锁
threading:提供了更高级别、功能更全面的线程管理
支持同步机制
支持守护线程
"""
import _thread
import logging
from time import sleep, ctime
"""
_thread模块
thread模块的函数:
start_new_thread(function,args,kwargs=None):派生一个新的线程,使用给定的args和可选的kyargs来执行function
allocate_lock():分配LockType锁对象
exit() 给线程退出指令
LockType锁对象的方法
acquire(wait = None) 尝试获取锁对象
locked() 如果获取了锁对象,则返回True 否则返回false
release() 释放锁
"""
#日志输出
# logging.basicConfig(level=logging.INFO)
# def loop0():
# logging.info("start loop0 at" + ctime())
# sleep(4)
# logging.info("end loop0 at" + ctime())
#
# def loop1():
# logging.info("start loop1 at" + ctime())
# sleep(2)
# logging.info("end loop0 at" + ctime())
#
# def main():
# logging.info("start all at" + ctime)
# loop0()
# loop1()
# logging.info("end all at" + ctime)
#
# if __name__ == '__main__':
# main()
#使用线程优化
# logging.basicConfig(level=logging.INFO)
# def loop0():
# logging.info("start loop0 at" + ctime())
# sleep(4)
# logging.info("end loop0 at" + ctime())
#
# def loop1():
# logging.info("start loop1 at" + ctime())
# sleep(2)
# logging.info("end loop0 at" + ctime())
#
# def main():
# logging.info("start all at" + ctime)
# _thread.start_new_thread(loop0())
# _thread.start_new_thread(loop1()) #主线程退出后所有子线程均被杀掉
# sleep(6)
# logging.info("end all at" + ctime)
#
# if __name__ == '__main__':
# main()
#监视子进程是否完成不完成就无限等待---锁
logging.basicConfig(level=logging.INFO)


def loop0():
    """Worker demo: log the start time, sleep 4 seconds, log the end time."""
    begin_msg, end_msg = "start loop0 at", "end loop0 at"
    logging.info(begin_msg + ctime())
    sleep(4)
    logging.info(end_msg + ctime())
def loop1():
    """Worker demo: log the start time, sleep 2 seconds, log the end time.

    Fix vs. original: the closing log message said "end loop0 at" — a
    copy-paste error from loop0 — and now correctly reports loop1.
    """
    logging.info("start loop1 at" + ctime())
    sleep(2)
    logging.info("end loop1 at" + ctime())
def main():
    """Spawn loop0 and loop1 in worker threads and wait for both to finish.

    Fixes vs. original:
    - ``"start all at" + ctime`` concatenated a str with the builtin
      function object (TypeError); ``ctime()`` must be called;
    - ``_thread.start_new_thread(loop0())`` executed loop0 in the *main*
      thread and passed its ``None`` return value; ``start_new_thread``
      needs the function object plus an args tuple.
    """
    logging.info("start all at" + ctime())
    _thread.start_new_thread(loop0, ())
    _thread.start_new_thread(loop1, ())
    # Crude join: _thread has no join primitive and all daemon threads die
    # with the main thread, so sleep longer than the slowest worker (4s).
    # threading.Thread with .join() would be the robust alternative.
    sleep(6)
    logging.info("end all at" + ctime())
if __name__ == '__main__':
    # Demo entry point: worker log lines interleave; loop1 ends before loop0.
    main()
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,940
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/forTest/whileTest.py
|
# while/else: the else clause runs once the loop condition becomes false
# (i.e. the loop was not left via break).
a = 1
while a == 1:
    print("a==1")
    a = a + 1
else:
    print("a != 1")
print(a)

# Compact form: a single-statement loop body may share the while line.
a = 1
while a == 1: a = a + 1
else:
    print("a != 1")
print(a)

# break abandons the whole loop: prints 1..4 then stops.
for i in range(1, 10):
    if i == 5:
        break
    print(i)

# continue skips only the current iteration: prints 1..4 and 6..9.
for i in range(1, 10):
    if i == 5:
        continue
    print(i)
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,941
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/forTest/for_range.py
|
# for i in range(2,5):
# print(i)
def fib(n):
    """Return the first *n* Fibonacci numbers as a list.

    fib(1) -> [0], fib(2) -> [0, 1], fib(5) -> [0, 1, 1, 2, 3].
    Returns [] for n <= 0 (the original fell through and returned None).

    Fixes vs. original:
    - ``fibs = fibs.append(...)`` rebound ``fibs`` to None (list.append
      returns None), crashing on the second loop iteration;
    - the loop ran ``range(3, n)``, producing one element too few: with
      two seed values, n - 2 further appends are needed;
    - removed stray debug prints.

    :param n: how many Fibonacci numbers to produce.
    :return: list of the first n Fibonacci numbers.
    """
    if n <= 0:
        return []
    if n == 1:
        return [0]
    fibs = [0, 1]
    for _ in range(2, n):
        fibs.append(fibs[-2] + fibs[-1])
    return fibs
# Module-level smoke test of fib; runs on import.
print(fib(3))
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,942
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/forTest/func7_test.py
|
"""
Json格式转化
如何使用json?
import json
常用的几种方法:
json.dumps(python_obj):可以将传入的json(数据类型)转换成一个字符串
json。loads(json_string):把字符串转换成json(数据类型)
json.dump():把json(数据类型)转换成字符串并存储在文件中
json.load(file_stream):把文件打开把里边的字符串抓换成json(数据类型)
"""
import json
data = {
"name":["herry","niackname"],
"age": 20,
"gender":"female"
}
data_1 = json.dumps(data)
print(type(data))
print(type(data_1))
print(data)
data_2 = json.loads(data_1)
print(type(data_2))
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,943
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/pytest_base/src/pip_info.py
|
"""
pip依赖管理与虚拟环境
pip托管了很多的第三方库 地址:www.pypi.org
如何创建虚拟的环境?
python -m venv 虚拟环境名字
"""
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,944
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/distribution/api/becom_master.py
|
import requests
class BecomeMaster:
    """API wrapper for the distribution "become master" endpoint."""

    @staticmethod
    def become_master(cookie):
        """POST the become-master request with the given session cookie.

        :param cookie: raw cookie header value identifying the session.
        :return: the decoded JSON response body (dict).
        """
        endpoint = "https://distribution.quickcan.cn/distribution/master/become"
        request_headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'cookie': cookie,
            'x-device': 'A:dcada7cee7c89a61'
        }
        body = {}
        response = requests.request("POST", endpoint, headers=request_headers, data=body)
        print(response.text)
        return response.json()
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,945
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/interface_test_combat/common/mysql.py
|
#封装mysql类
from sqlalchemy.dialects.mysql import pymysql
from interface_test_combat.common.config import cf
from interface_test_combat.common.get_log import log
class Mysql:
    """Small helper around a pymysql connection for select/insert/delete.

    Flow: connect -> cursor -> execute SQL -> commit on change (rollback
    on failure) -> close.  Connection settings come from the ``[mysql]``
    section of config.ini via the shared ``cf`` singleton.

    Fixes vs. original:
    - every method called ``cur.excute`` (typo), which raised
      AttributeError on any query — now ``cur.execute``;
    - commit/rollback were issued as ``cur.excute("commit")`` — now uses
      the proper connection API ``conn.commit()`` / ``conn.rollback()``;
    - select's error log wrongly said "insert", delete's said "selete".
    """

    def __init__(self):
        """Read the [mysql] settings from config.ini and open a connection."""
        host = cf.get_key("mysql", "host")
        # Values from the INI file are strings; the port must be an int.
        port = int(cf.get_key("mysql", "port"))
        user = cf.get_key("mysql", "user")
        password = cf.get_key("mysql", "password")
        charset = cf.get_key("mysql", "charset")
        database = cf.get_key("mysql", "database")
        try:
            self.conn = pymysql.connect(host=host, port=port, user=user,
                                        password=password, charset=charset,
                                        database=database)
        except Exception as e:
            # NOTE(review): on failure self.conn is never set, so later
            # method calls raise AttributeError — consider re-raising here.
            log.error(f"无法登陆数据库,错误原因:{e}")

    def select(self, query):
        """Run a SELECT and return all rows, or None if it failed.

        :param query: SELECT statement to execute.
        :return: tuple of rows from cursor.fetchall(), or None on error.
        """
        log.info(f"select语句为{query}")
        try:
            cur = self.conn.cursor()
            cur.execute(query)
            # fetchall drains every remaining row of the result set.
            select_data = cur.fetchall()
            log.info("数据查询成功")
            return select_data
        except Exception as e:
            log.error(f"select 语句错误,原因是{e}")

    def insert(self, query):
        """Run an INSERT and commit; roll back on failure.

        :param query: INSERT statement to execute.
        :return: None
        """
        log.info(f"insert语句为:{query}")
        try:
            cur = self.conn.cursor()
            cur.execute(query)
            # Persist the change; without commit the insert is invisible
            # to other connections and lost on close.
            self.conn.commit()
            log.info("数据插入成功")
        except Exception as e:
            log.error(f"insert 语句错误,原因是{e}")
            # Undo the partial change on failure.
            self.conn.rollback()

    def delete(self, query):
        """Run a DELETE and commit; roll back on failure.

        :param query: DELETE statement to execute.
        :return: None
        """
        log.info(f"delete语句为:{query}")
        try:
            cur = self.conn.cursor()
            cur.execute(query)
            self.conn.commit()
            log.info("数据删除成功")
        except Exception as e:
            log.error(f"delete语句失败,原因:{e}")
            self.conn.rollback()

    def close(self):
        """Close the underlying connection."""
        self.conn.close()
# Overall flow: connect -> open a cursor -> run CRUD SQL -> execute against
# the server -> commit when data changed -> close cursor -> close connection.
# Module-level singleton so other modules can share one connection.
sql = Mysql()
if __name__ == "__main__":
    # Ad-hoc scratch code: opens a second connection and leaves unused locals.
    a = Mysql()
    organizer = "abc"
    cal_id = "abc"
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,946
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/pytest_base/testcases/test_search.py
|
"""
序号 断言方法 断言描述
1 assertEqual(arg1, arg2, msg=None) 验证arg1=arg2,不等则fail
2 assertNotEqual(arg1, arg2, msg=None) 验证arg1 != arg2, 相等则fail
3 assertTrue(expr, msg=None) 验证expr是true,如果为false,则fail
4 assertFalse(expr,msg=None) 验证expr是false,如果为true,则fail
5 assertIs(arg1, arg2, msg=None) 验证arg1、arg2是同一个对象,不是则fail
6 assertIsNot(arg1, arg2, msg=None) 验证arg1、arg2不是同一个对象,是则fail
7 assertIsNone(expr, msg=None) 验证expr是None,不是则fail
8 assertIsNotNone(expr, msg=None) 验证expr不是None,是则fail
9 assertIn(arg1, arg2, msg=None) 验证arg1是arg2的子串,不是则fail
10 assertNotIn(arg1, arg2, msg=None) 验证arg1不是arg2的子串,是则fail
11 assertIsInstance(obj, cls, msg=None) 验证obj是cls的实例,不是则fail
12 assertNotIsInstance(obj, cls, msg=None) 验证obj不是cls的实例,是则fail
"""
"""
testsuite 测试套件
创建测试套件:
suite=unittest.TestSuite()
添加测试用例:
suite.addTest(simple_test('test_add'))
"""
#被测函数 search1
import unittest
class Search:
    """Trivial system-under-test used by the demo test cases below."""

    @staticmethod
    def search_fun():
        """Pretend to perform a search; always reports success."""
        print("search_fun")
        return True
class TestSearch(unittest.TestCase):
    """Shares one Search instance across all tests via setUpClass/tearDownClass
    (class-level fixtures run once, unlike per-test setUp/tearDown)."""

    @classmethod
    def setUpClass(cls) -> None:
        # Runs once before any test in this class.
        print("setup class")
        cls.search = Search()

    def test_search1(self) -> None:
        print("testsearch1")
        assert self.search.search_fun() is True

    def test_search2(self) -> None:
        print("testsearch2")
        assert self.search.search_fun() is True

    def test_search3(self) -> None:
        print("testsearch3")
        assert self.search.search_fun() is True

    @classmethod
    def tearDownClass(cls) -> None:
        # Runs once after the last test in this class.
        print("teardown class")
class TestSearch1(unittest.TestCase):
def test_search1(self):
print("test_search1_class1")
def setUp(self) -> None:
print("set up")
self.search = Search()
def test_equal(self):
print("断言相等")
self.assertEqual(1, 1,"判断 1 == 1") #疑问:为甚需要用self来引用?
self.assertFalse(1 == 3, "不相等")
def test_notequal(self):
print("断言不等")
self.assertNotEqual(1,2,"判断 1 != 2")
self.assertTrue(1==2, "不相等")
def tearDown(self) -> None:
print("tear down")
class TestSearch2(unittest.TestCase):
    """Minimal extra test class, used below to demo suite composition."""

    def test_search_a(self):
        print("testsearch_a")
if __name__ == '__main__':
    # Option 1: run every test case in this file.
    #unittest.main()
    # Option 2: run hand-picked test cases by adding them to a suite.
    # Create a test suite --> TestSuite
    suite = unittest.TestSuite()
    suite.addTest(TestSearch("test_search1"))
    suite.addTest(TestSearch("test_search3"))
    #suite.addTests()
    unittest.TextTestRunner().run(suite)
    # Option 3: run whole test classes in bulk via unittest.TestLoader().
    suite1 = unittest.TestLoader().loadTestsFromTestCase(TestSearch1)
    suite2 = unittest.TestLoader().loadTestsFromTestCase(TestSearch2)
    suite = unittest.TestSuite([suite1,suite2])
    unittest.TextTestRunner(verbosity=2).run(suite)
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,947
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/interface_test_combat/restful.py
|
"""
restful框架
url:每个URI 代表一种资源
交互:客户端和服务器之间,传递这种资源的某种表现层
动作:客户端通过4个http动词/动作(get post delete put)。对服务器端资源进行操作,实现表现层状态转化
表现层含义:资源的呈现形式
资源得含义:网络上的一个实体、文本、图片
状态转化含义:访问网站代表客户端和服务器得互动,post新建资源
实战1作业:
1、利用requests得get 和post实现/封装通讯录得增删改查
"""
import requests
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
27,816,948
|
wangjinjin123/python_test_forme
|
refs/heads/main
|
/pytest_combat/testcases/conftest.py
|
#文件名统一,放置大家通用的公共模块
#conftest.py文件名是不能变得
#conftest.py与运行得用例要放在同一个package下边
#不需要import导入conftest.py,pytest用例会自动查找
#所有同目录测试文件运行前都会执行conftest.py文件
#全局得配置和前期工作都可以写在这里
"""
场景一:想要创建自己的fixture方法: 遵循就近原则:同文件>同目录>其他目录(也必须是在同一个包下边)
场景二:fixture带参数传递
"""
import pytest
import os
import yaml
from pytest_combat.api.calculator import Calculator
# Shared pytest fixtures: pytest discovers these by exact name — do not rename.
@pytest.fixture(scope="session")
def connectDB():
    """Session-scoped DB fixture: setup runs before ``yield``, teardown after."""
    print("链接数据库的操作")
    yield
    print("断开数据库链接")
@pytest.fixture(scope="class")
def get_calc():
print("获取计算器实例")
calc = Calculator()
return calc
@pytest.fixture(scope="module")
def all_start():
print("开始计算")
yield
print("结束计算")
# Locate data.yaml next to this file.  Fix vs. original: the path was built
# as ``os.path.dirname(__file__) + "\data.yaml"`` — a literal backslash that
# only works on Windows and contains the invalid escape sequence "\d";
# os.path.join is portable.
yaml_file_path = os.path.join(os.path.dirname(__file__), "data.yaml")
print(yaml_file_path)
with open(yaml_file_path) as f:
    data = yaml.safe_load(f)
print(data)
# Slice the flat YAML lists into per-operation test data and matching ids
# (positions: 0-4 add, 5-7 div, 8-10 sub, 11+ mul).
add_datas = data["datas"][:5]
div_datas = data["datas"][5:8]
sub_datas = data["datas"][8:11]
mul_datas = data["datas"][11:]
ids_add = data["myids"][0:5]
ids_div = data["myids"][5:8]
ids_sub = data["myids"][8:11]
ids_mul = data["myids"][11:]
print(sub_datas)
print(mul_datas)
# Parametrized fixtures: the values sliced from data.yaml are fed in via
# ``params``; pytest re-runs each dependent test once per parameter, and the
# current value is exposed through ``request.param``.
@pytest.fixture(params=add_datas,ids=ids_add)
def get_datas_add(request):
    # Current addition-test data set.
    data_1 = request.param
    print(f"request.param得测试数据是:{data_1}")
    yield data_1  # hand the parameter to the test

@pytest.fixture(params=div_datas,ids=ids_div)
def get_datas_div(request):
    # Current division-test data set.
    data_2 = request.param
    print(f"request.param得测试数据是:{data_2}")
    yield data_2  # hand the parameter to the test

@pytest.fixture(params=sub_datas,ids=ids_sub)
def get_datas_sub(request):
    # Current subtraction-test data set.
    data_3 = request.param
    print(f"request.param得测试数据是:{data_3}")
    yield data_3  # hand the parameter to the test

@pytest.fixture(params=mul_datas,ids=ids_mul)
def get_datas_mul(request):
    # Current multiplication-test data set.
    data_4 = request.param
    print(f"request.param得测试数据是:{data_4}")
    yield data_4  # hand the parameter to the test
|
{"/interface_test_combat/common/mysql.py": ["/interface_test_combat/common/config.py", "/interface_test_combat/common/get_log.py"], "/interface_test_combat/testcases/test_address_1.py": ["/interface_test_combat/api/address_model.py"], "/demo1.py": ["/baidu.py"], "/interface_test_combat/path_data.py": ["/interface_test_combat/api/address_model.py"], "/pytest_combat/testcases/test_calcu.py": ["/pytest_combat/testcases/test_param.py"], "/distribution/testcases/test_becommaster_success.py": ["/distribution/api/becom_master.py", "/distribution/api/login_test.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.