size
int64 0
304k
| ext
stringclasses 1
value | lang
stringclasses 1
value | branch
stringclasses 1
value | content
stringlengths 0
304k
| avg_line_length
float64 0
238
| max_line_length
int64 0
304k
|
|---|---|---|---|---|---|---|
566
|
py
|
PYTHON
|
15.0
|
# Copyright (C) 2019-Today: GRAP (<http://www.grap.coop/>)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
class BaseModuleUpdate(models.TransientModel):
    """Extend the module-update wizard with an analysis toggle.

    The flag is forwarded through the context so downstream code run by
    ``update_module`` can decide whether to analyse installed modules.
    """

    _inherit = "base.module.update"

    analyse_installed_modules = fields.Boolean(default=True)

    def update_module(self):
        """Run the standard update with the analysis flag in the context."""
        contextualized = self.with_context(
            analyse_installed_modules=self.analyse_installed_modules
        )
        return super(BaseModuleUpdate, contextualized).update_module()
| 33.294118
| 566
|
965
|
py
|
PYTHON
|
15.0
|
# Copyright (C) 2019-Today: GRAP (<http://www.grap.coop/>)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields, models
from odoo.tools.safe_eval import safe_eval
class IrModuleType(models.Model):
    """Rules that classify modules into a module type via a domain."""

    _name = "ir.module.type.rule"
    _description = "Modules Types Rules"
    _order = "sequence"

    sequence = fields.Integer()
    # Odoo domain (stored as text) restricting which modules the rule matches.
    module_domain = fields.Char(required=True, default="[]")
    module_type_id = fields.Many2one(
        string="Module type", comodel_name="ir.module.type", required=True
    )

    def _get_module_type_id_from_module(self, module):
        """Return the type id of the first rule (by ``_order``) matching ``module``.

        :param module: an ``ir.module.module`` record.
        :return: id of the matching ``ir.module.type``, or False if no rule matches.
        """
        IrModuleModule = self.env["ir.module.module"]
        for rule in self:
            # module_domain is trusted configuration; safe_eval still sandboxes it.
            domain = safe_eval(rule.module_domain)
            domain.append(("id", "=", module.id))
            # limit=1: we only need existence, not the full result set.
            if IrModuleModule.search(domain, limit=1):
                return rule.module_type_id.id
        return False
| 33.275862
| 965
|
1,292
|
py
|
PYTHON
|
15.0
|
# Copyright (C) 2019-Today: GRAP (<http://www.grap.coop/>)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class IrModuleAuthor(models.Model):
    """Authors of installed modules, unique by name."""

    _name = "ir.module.author"
    _description = "Modules Authors"

    name = fields.Char(required=True)
    installed_module_ids = fields.Many2many(
        string="Modules",
        comodel_name="ir.module.module",
        relation="ir_module_module_author_rel",
    )
    installed_module_qty = fields.Integer(
        string="Installed Modules Quantity",
        compute="_compute_installed_module_qty",
        store=True,
    )

    _sql_constraints = [
        (
            "name_uniq",
            "unique(name)",
            "The name of the modules author should be unique per database!",
        ),
    ]

    @api.depends("installed_module_ids")
    def _compute_installed_module_qty(self):
        """Store the number of modules linked to each author."""
        for author in self:
            author.installed_module_qty = len(author.installed_module_ids)

    @api.model
    def _get_or_create(self, name):
        """Return the author named ``name``, creating it if it does not exist.

        Names are unique (SQL constraint), so ``limit=1`` is safe and avoids
        fetching more rows than needed.
        """
        author = self.search([("name", "=", name)], limit=1)
        return author or self.create({"name": name})
| 28.711111
| 1,292
|
283
|
py
|
PYTHON
|
15.0
|
# Copyright 2016-2020 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import SUPERUSER_ID, api
def post_init_hook(cr, pool):
    """Ensure the Let's Encrypt account key exists right after install."""
    environment = api.Environment(cr, SUPERUSER_ID, {})
    environment["letsencrypt"]._get_key("account.key")
| 35.375
| 283
|
770
|
py
|
PYTHON
|
15.0
|
# 2016-2021 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
# Odoo manifest (data only) for the Let's Encrypt module.
{
    "name": "Let's Encrypt",
    "version": "15.0.1.0.3",
    # Implicit string concatenation: authors form one comma-separated string.
    "author": "Therp BV," "Tecnativa," "Acysos S.L," "Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/server-tools",
    "license": "AGPL-3",
    "category": "Tools",
    "summary": "Request SSL certificates from letsencrypt.org",
    "depends": ["base_setup"],
    "data": [
        "data/ir_config_parameter.xml",
        "data/ir_cron.xml",
        "views/res_config_settings.xml",
    ],
    "demo": ["demo/ir_cron.xml"],
    "post_init_hook": "post_init_hook",
    "installable": True,
    # Version pin: the module targets the pre-2.0 acme client API.
    "external_dependencies": {"python": ["acme<2.0.0", "dnspython", "josepy"]},
}
| 36.666667
| 770
|
12,830
|
py
|
PYTHON
|
15.0
|
# Copyright 2018-2022 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import os
import shutil
from datetime import datetime, timedelta
from os import path
import dns.resolver
import mock
from odoo.exceptions import UserError, ValidationError
from odoo.tests import SingleTransactionCase
from ..models.letsencrypt import _get_challenge_dir, _get_data_dir
CERT_DIR = path.join(path.dirname(__file__), "certs")
def _poll(order, deadline):
order_resource = mock.Mock(["fullchain_pem"])
order_resource.fullchain_pem = "chain"
return order_resource
class TestLetsencrypt(SingleTransactionCase):
    """End-to-end tests for the letsencrypt abstract model.

    Network interaction with the ACME server is mocked out; certificates
    used as fixtures are generated locally by ``install_certificate``.
    """

    def setUp(self):
        # Configure a DNS-capable setup: wildcard altname plus a shell script
        # whose side effect (a marker file in /tmp) proves it was executed.
        super().setUp()
        self.env["ir.config_parameter"].set_param(
            "web.base.url", "http://www.example.ltd"
        )
        self.env["res.config.settings"].create(
            {
                "letsencrypt_dns_provider": "shell",
                "letsencrypt_dns_shell_script": "touch /tmp/.letsencrypt_test",
                "letsencrypt_altnames": "www.example.ltd,*.example.ltd",
                "letsencrypt_reload_command": "echo reloaded",
            }
        ).set_values()

    def test_config_settings(self):
        """Settings round-trip through config parameters and validate scripts."""
        setting_vals = self.env["res.config.settings"].default_get([])
        self.assertEqual(setting_vals["letsencrypt_dns_provider"], "shell")
        self.assertEqual(
            setting_vals["letsencrypt_dns_shell_script"],
            "touch /tmp/.letsencrypt_test",
        )
        self.assertEqual(
            setting_vals["letsencrypt_altnames"], "www.example.ltd,*.example.ltd"
        )
        self.assertEqual(setting_vals["letsencrypt_reload_command"], "echo reloaded")
        self.assertTrue(setting_vals["letsencrypt_needs_dns_provider"])
        self.assertFalse(setting_vals["letsencrypt_prefer_dns"])
        # A script consisting only of comments must be rejected.
        with self.assertRaises(ValidationError):
            self.env["res.config.settings"].create(
                {"letsencrypt_dns_shell_script": "# Empty script"}
            ).set_values()

    @mock.patch("acme.client.ClientV2.answer_challenge")
    @mock.patch("acme.client.ClientV2.poll_and_finalize", side_effect=_poll)
    def test_http_challenge(self, poll, _answer_challenge):
        """Without a wildcard altname the HTTP challenge path is taken."""
        letsencrypt = self.env["letsencrypt"]
        self.env["res.config.settings"].create(
            {"letsencrypt_altnames": ""}
        ).set_values()
        letsencrypt._cron()
        poll.assert_called()
        # The HTTP token file was written; the DNS shell script never ran.
        self.assertTrue(os.listdir(_get_challenge_dir()))
        self.assertFalse(path.isfile("/tmp/.letsencrypt_test"))
        self.assertTrue(path.isfile(path.join(_get_data_dir(), "www.example.ltd.crt")))

    # pylint: disable=unused-argument
    @mock.patch("odoo.addons.letsencrypt.models.letsencrypt.DNSUpdate")
    @mock.patch("dns.resolver.query")
    @mock.patch("time.sleep")
    @mock.patch("acme.client.ClientV2.answer_challenge")
    @mock.patch("acme.client.ClientV2.poll_and_finalize", side_effect=_poll)
    def test_dns_challenge(self, poll, answer_challenge, sleep, query, dnsupd):
        """DNS challenge: the cron retries until the right TXT record appears."""
        record = None

        def register_update(challenge, domain, token):
            # Capture the token so the fake resolver can return it later.
            nonlocal record
            record = mock.Mock()
            record.to_text.return_value = '"%s"' % token
            ret = mock.Mock()
            ret.challenge = challenge
            ret.domain = domain
            ret.token = token
            return ret

        dnsupd.side_effect = register_update
        ncalls = 0

        def query_effect(domain, rectype):
            # Simulate DNS propagation: missing, then wrong, then correct.
            nonlocal ncalls
            self.assertEqual(domain, "_acme-challenge.example.ltd.")
            self.assertEqual(rectype, "TXT")
            ncalls += 1
            if ncalls == 1:
                raise dns.resolver.NXDOMAIN
            elif ncalls == 2:
                wrong_record = mock.Mock()
                wrong_record.to_text.return_value = '"not right"'
                return [wrong_record]
            else:
                return [record]

        query.side_effect = query_effect
        # An almost-expired certificate forces a renewal run.
        self.install_certificate(days_left=10)
        self.env["letsencrypt"]._cron()
        poll.assert_called()
        self.assertEqual(ncalls, 3)
        self.assertTrue(path.isfile("/tmp/.letsencrypt_test"))
        self.assertTrue(path.isfile(path.join(_get_data_dir(), "www.example.ltd.crt")))

    def test_dns_challenge_error_on_missing_provider(self):
        """A wildcard altname without a DNS provider is a user error."""
        self.env["res.config.settings"].create(
            {
                "letsencrypt_altnames": "*.example.ltd",
                "letsencrypt_dns_provider": False,
            }
        ).set_values()
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._cron()

    def test_prefer_dns_setting(self):
        """prefer_dns makes a non-wildcard domain use the DNS flow too."""
        self.env["res.config.settings"].create(
            {"letsencrypt_altnames": "example.ltd", "letsencrypt_prefer_dns": True}
        ).set_values()
        # pylint: disable=no-value-for-parameter
        self.test_dns_challenge()

    def test_cascading(self):
        """Wildcards absorb their one-level subdomains; nothing else."""
        cascade = self.env["letsencrypt"]._cascade_domains
        self.assertEqual(
            cascade(
                [
                    "www.example.ltd",
                    "*.example.ltd",
                    "example.ltd",
                    "example.ltd",
                    "notexample.ltd",
                    "multi.sub.example.ltd",
                    "www2.example.ltd",
                    "unrelated.com",
                ]
            ),
            [
                "*.example.ltd",
                "example.ltd",
                "multi.sub.example.ltd",
                "notexample.ltd",
                "unrelated.com",
            ],
        )
        self.assertEqual(cascade([]), [])
        self.assertEqual(cascade(["*.example.ltd"]), ["*.example.ltd"])
        self.assertEqual(cascade(["www.example.ltd"]), ["www.example.ltd"])
        self.assertEqual(
            cascade(["www.example.ltd", "example.ltd"]),
            ["example.ltd", "www.example.ltd"],
        )
        # Wildcards are only allowed as the leading label.
        with self.assertRaises(UserError):
            cascade(["www.*.example.ltd"])
        with self.assertRaises(UserError):
            cascade(["*.*.example.ltd"])

    def test_altnames_parsing(self):
        """Altnames split on commas, spaces, semicolons and newlines."""
        config = self.env["ir.config_parameter"]
        letsencrypt = self.env["letsencrypt"]
        self.assertEqual(
            letsencrypt._get_altnames(), ["www.example.ltd", "*.example.ltd"]
        )
        # Empty parameter falls back to the web.base.url hostname.
        config.set_param("letsencrypt.altnames", "")
        self.assertEqual(letsencrypt._get_altnames(), ["www.example.ltd"])
        config.set_param("letsencrypt.altnames", "foobar.example.ltd")
        self.assertEqual(letsencrypt._get_altnames(), ["foobar.example.ltd"])
        config.set_param("letsencrypt.altnames", "example.ltd,example.org,example.net")
        self.assertEqual(
            letsencrypt._get_altnames(),
            ["example.ltd", "example.org", "example.net"],
        )
        config.set_param(
            "letsencrypt.altnames", "example.ltd, example.org\nexample.net"
        )
        self.assertEqual(
            letsencrypt._get_altnames(),
            ["example.ltd", "example.org", "example.net"],
        )

    def test_key_generation_and_retrieval(self):
        """Keys are created once per name and returned verbatim afterwards."""
        key_a1 = self.env["letsencrypt"]._get_key("a.key")
        key_a2 = self.env["letsencrypt"]._get_key("a.key")
        key_b = self.env["letsencrypt"]._get_key("b.key")
        self.assertIsInstance(key_a1, bytes)
        self.assertIsInstance(key_a2, bytes)
        self.assertIsInstance(key_b, bytes)
        self.assertTrue(path.isfile(path.join(_get_data_dir(), "a.key")))
        self.assertEqual(key_a1, key_a2)
        self.assertNotEqual(key_a1, key_b)

    @mock.patch("os.remove", side_effect=os.remove)
    @mock.patch(
        "odoo.addons.letsencrypt.models.letsencrypt.Letsencrypt._generate_key",
        side_effect=lambda: None,
    )
    def test_interrupted_key_writing(self, generate_key, remove):
        """A failed write must not leave a half-written key file behind."""
        # _generate_key returns None, so writing raises TypeError mid-way.
        with self.assertRaises(TypeError):
            self.env["letsencrypt"]._get_key("a.key")
        self.assertFalse(path.isfile(path.join(_get_data_dir(), "a.key")))
        remove.assert_called()
        generate_key.assert_called()

    def test_domain_validation(self):
        """IP addresses and local-only names are rejected."""
        self.env["letsencrypt"]._validate_domain("example.ltd")
        self.env["letsencrypt"]._validate_domain("www.example.ltd")
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._validate_domain("1.1.1.1")
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._validate_domain("192.168.1.1")
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._validate_domain("localhost.localdomain")
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._validate_domain("testdomain")
        with self.assertRaises(UserError):
            self.env["letsencrypt"]._validate_domain("::1")

    def test_young_certificate(self):
        """A certificate valid for 60 more days needs no renewal."""
        self.install_certificate(60)
        self.assertFalse(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd", "*.example.ltd"],
            )
        )

    def test_old_certificate(self):
        """A certificate with 20 days left triggers renewal."""
        self.install_certificate(20)
        self.assertTrue(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd", "*.example.ltd"],
            )
        )

    def test_expired_certificate(self):
        """An already-expired certificate triggers renewal."""
        self.install_certificate(-10)
        self.assertTrue(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd", "*.example.ltd"],
            )
        )

    def test_missing_certificate(self):
        """No certificate file at all triggers issuance."""
        self.assertTrue(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd", "*.example.ltd"],
            )
        )

    def test_new_altnames(self):
        """Requesting names absent from the certificate forces renewal."""
        self.install_certificate(60, "www.example.ltd", ())
        self.assertTrue(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd", "*.example.ltd"],
            )
        )
        self.assertFalse(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd"],
            )
        )

    def test_legacy_certificate_without_altnames(self):
        """Certificates lacking a SAN extension are still accepted."""
        self.install_certificate(60, use_altnames=False)
        self.assertFalse(
            self.env["letsencrypt"]._should_run(
                path.join(_get_data_dir(), "www.example.ltd.crt"),
                ["www.example.ltd"],
            )
        )

    def install_certificate(
        self,
        days_left,
        common_name="www.example.ltd",
        altnames=("*.example.ltd",),
        use_altnames=True,
    ):
        """Write a self-signed certificate fixture into the data dir.

        :param days_left: validity remaining relative to now (may be negative).
        :param common_name: CN and file basename of the certificate.
        :param altnames: extra DNS names for the SAN extension.
        :param use_altnames: when False, omit the SAN extension entirely.
        """
        # Imported locally so module import does not require cryptography.
        from cryptography import x509
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes, serialization
        from cryptography.hazmat.primitives.asymmetric import rsa

        not_after = datetime.now() + timedelta(days=days_left)
        not_before = not_after - timedelta(days=90)
        key = rsa.generate_private_key(
            public_exponent=65537, key_size=2048, backend=default_backend()
        )
        cert_builder = (
            x509.CertificateBuilder()
            .subject_name(
                x509.Name([x509.NameAttribute(x509.NameOID.COMMON_NAME, common_name)])
            )
            .issuer_name(
                x509.Name([x509.NameAttribute(x509.NameOID.COMMON_NAME, "myca.biz")])
            )
            .not_valid_before(not_before)
            .not_valid_after(not_after)
            .serial_number(x509.random_serial_number())
            .public_key(key.public_key())
        )
        if use_altnames:
            cert_builder = cert_builder.add_extension(
                x509.SubjectAlternativeName(
                    [x509.DNSName(common_name)]
                    + [x509.DNSName(name) for name in altnames]
                ),
                critical=False,
            )
        cert = cert_builder.sign(key, hashes.SHA256(), default_backend())
        cert_file = path.join(_get_data_dir(), "%s.crt" % common_name)
        with open(cert_file, "wb") as file_:
            file_.write(cert.public_bytes(serialization.Encoding.PEM))

    def tearDown(self):
        # Remove generated keys/certs and the shell-script marker file.
        super().tearDown()
        shutil.rmtree(_get_data_dir(), ignore_errors=True)
        if path.isfile("/tmp/.letsencrypt_test"):
            os.remove("/tmp/.letsencrypt_test")
| 36.762178
| 12,830
|
825
|
py
|
PYTHON
|
15.0
|
# Copyright 2020 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import os
import shutil
from odoo.tests import HttpCase
from ..models.letsencrypt import _get_challenge_dir
class TestHTTP(HttpCase):
    """HTTP-level tests for serving ACME challenge files."""

    def test_query_existing(self):
        """A challenge file on disk is served back verbatim."""
        challenge_path = os.path.join(_get_challenge_dir(), "foobar")
        with open(challenge_path, "w") as handle:
            handle.write("content")
        response = self.url_open("/.well-known/acme-challenge/foobar")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.text, "content")

    def test_query_missing(self):
        """A challenge that was never written yields a 404."""
        response = self.url_open("/.well-known/acme-challenge/foobar")
        self.assertEqual(response.status_code, 404)

    def tearDown(self):
        super().tearDown()
        # Drop any challenge files created by the tests.
        shutil.rmtree(_get_challenge_dir(), ignore_errors=True)
| 31.730769
| 825
|
17,974
|
py
|
PYTHON
|
15.0
|
# Copyright 2016-2022 Therp BV <https://therp.nl>.
# Copyright 2016 Antonio Espinosa <antonio.espinosa@tecnativa.com>.
# Copyright 2018 Ignacio Ibeas <ignacio@acysos.com>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
"""Fully automatic retrieval of Letsencrypt certificates."""
# pylint: disable=no-self-use,consider-using-f-string
import base64
import collections
import logging
import os
import re
import subprocess
import time
import urllib.parse
from datetime import datetime, timedelta
import requests
from odoo import _, api, models
from odoo.exceptions import UserError
from odoo.tools import config
_logger = logging.getLogger(__name__)
try:
import acme.challenges
import acme.client
import acme.crypto_util
import acme.errors
import acme.messages
import dns.resolver
import josepy
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
except ImportError as e:
_logger.debug(e)
WILDCARD = "*."  # as defined in the spec
# RSA key size in bits for keys generated by this module.
DEFAULT_KEY_LENGTH = 4096
TYPE_CHALLENGE_HTTP = "http-01"
TYPE_CHALLENGE_DNS = "dns-01"
# ACME v2 directory endpoints; staging is used in test/testing mode.
V2_STAGING_DIRECTORY_URL = "https://acme-staging-v02.api.letsencrypt.org/directory"
V2_DIRECTORY_URL = "https://acme-v02.api.letsencrypt.org/directory"
# Hostnames that can never be publicly validated by Let's Encrypt.
LOCAL_DOMAINS = {
    "localhost",
    "localhost.localdomain",
    "localhost6",
    "localhost6.localdomain6",
    "ip6-localhost",
    "ip6-loopback",
}
# A DNS TXT update queued while waiting for record propagation.
DNSUpdate = collections.namedtuple("DNSUpdate", ("challenge", "domain", "token"))
def _get_data_dir():
    """Return the letsencrypt data directory, creating it if needed.

    Uses ``exist_ok=True`` instead of a check-then-create pair, which was
    racy when two workers initialized the directory concurrently.
    """
    dir_ = os.path.join(config.options.get("data_dir"), "letsencrypt")
    os.makedirs(dir_, exist_ok=True)
    return dir_
def _get_challenge_dir():
    """Return the ACME HTTP challenge directory, creating it if needed.

    ``exist_ok=True`` avoids the check-then-create race of the previous
    ``isdir``/``makedirs`` pair.
    """
    dir_ = os.path.join(_get_data_dir(), "acme-challenge")
    os.makedirs(dir_, exist_ok=True)
    return dir_
class Letsencrypt(models.AbstractModel):
    """Fully automatic retrieval of Letsencrypt certificates."""

    _name = "letsencrypt"
    _description = "Abstract model providing functions for letsencrypt"

    @api.model
    def _cron(self):
        """Define cronjob to renew certificate when needed."""
        domains = self._get_altnames()
        # The first configured altname names the certificate/key files.
        main_domain = domains[0]
        domains = self._cascade_domains(domains)
        for dom in domains:
            self._validate_domain(dom)
        cert_file = os.path.join(_get_data_dir(), "%s.crt" % main_domain)
        if not self._should_run(cert_file, domains):
            return
        account_key = josepy.JWKRSA.load(self._get_key("account.key"))
        client = self._create_client(account_key)
        self._register_client(client, account_key)
        authzr = self._get_authorization_resource(client, main_domain, domains)
        self._handle_challenges(client, account_key, authzr)
        order_resource = self._letsencrypt_validation(client, authzr)
        self._save_and_reload(cert_file, order_resource)

    def _get_altnames(self):
        """Get the configured altnames as a list of strings."""
        parameter = self.env["ir.config_parameter"]
        altnames = parameter.get_param("letsencrypt.altnames")
        if not altnames:
            # Fall back to the instance's own hostname.
            base_url = parameter.get_param("web.base.url", "http://localhost")
            return [urllib.parse.urlparse(base_url).hostname]
        # Accept commas, newlines, spaces and semicolons as separators.
        return re.split("(?:,|\n| |;)+", altnames)

    def _cascade_domains(self, domains):
        """Remove domains that are obsoleted by wildcard domains in the list.

        Requesting www.example.com is unnecessary if *.example.com is also
        requested. example.com isn't obsoleted however, and neither is
        sub.domain.example.com.

        :raises UserError: if a wildcard appears anywhere but the start.
        :return: the deduplicated domains, sorted.
        """
        to_remove = set()
        for domain in domains:
            if WILDCARD in domain[1:]:
                raise UserError(
                    _("A wildcard is only allowed at the start of a domain")
                )
            if domain.startswith(WILDCARD):
                postfix = domain[1:]  # e.g. ".example.com"
                # This makes it O(n²) but n <= 100 so it's ok
                for other in domains:
                    if other.startswith(WILDCARD):
                        continue
                    if other.endswith(postfix):
                        prefix = other[: -len(postfix)]  # e.g. "www"
                        # Only one-level subdomains are covered by a wildcard.
                        if "." not in prefix:
                            to_remove.add(other)
        return sorted(set(domains) - to_remove)

    def _validate_domain(self, domain):
        """Validate that a domain is publicly accessible.

        :raises UserError: for IP addresses and local-only hostnames.
        """
        # An all-digits-and-dots name or anything with ':' is an IP address.
        if ":" in domain or all(char.isdigit() or char == "." for char in domain):
            raise UserError(
                _("Domain %s: Let's Encrypt doesn't support IP addresses!") % domain
            )
        if domain in LOCAL_DOMAINS or "." not in domain:
            raise UserError(
                _("Domain %s: Let's encrypt doesn't work with local domains!") % domain
            )

    def _should_run(self, cert_file, domains):
        """Inspect the existing certificate to see if action is necessary.

        Renew when the certificate is missing, expires within 30 days,
        or does not cover every requested domain.
        """
        if not os.path.isfile(cert_file):
            _logger.info("No existing certificate found, creating a new one")
            return True
        with open(cert_file, "rb") as file_:
            cert = x509.load_pem_x509_certificate(file_.read(), default_backend())
        expiry = cert.not_valid_after
        remaining = expiry - datetime.now()
        if remaining < timedelta():
            _logger.warning(
                "Certificate expired on %s, which was %d days ago!",
                expiry,
                -remaining.days,
            )
            _logger.info("Renewing certificate now.")
            return True
        if remaining < timedelta(days=30):
            _logger.info(
                "Certificate expires on %s, which is in %d days, renewing it",
                expiry,
                remaining.days,
            )
            return True
        # Should be a single name, but this is how the API works
        names = {
            entry.value
            for entry in cert.subject.get_attributes_for_oid(
                x509.oid.NameOID.COMMON_NAME
            )
        }
        try:
            names.update(
                cert.extensions.get_extension_for_oid(
                    x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME
                ).value.get_values_for_type(x509.DNSName)
            )
        except x509.extensions.ExtensionNotFound:
            # Legacy certificates may lack a SAN extension entirely.
            _logger.exception(_("Error updating name"))
        domains = set(domains)
        missing = domains - names
        if missing:
            _logger.info(
                "Found new domains %s, requesting new certificate",
                ", ".join(missing),
            )
            return True
        _logger.info(
            "Certificate expires on %s, which is in %d days, no action needed",
            expiry,
            remaining.days,
        )
        return False

    def _get_key(self, key_name):
        """Get a key for a filename, generating if if it doesn't exist."""
        key_file = os.path.join(_get_data_dir(), key_name)
        if not os.path.isfile(key_file):
            _logger.info("Generating new key %s", key_name)
            key_bytes = self._generate_key()
            try:
                with open(key_file, "wb") as file_:
                    # Restrict permissions before any key material is written.
                    os.fchmod(file_.fileno(), 0o600)
                    file_.write(key_bytes)
            except BaseException:
                # An incomplete file would block generation of a new one
                if os.path.isfile(key_file):
                    os.remove(key_file)
                raise
        else:
            _logger.info("Getting existing key %s", key_name)
            with open(key_file, "rb") as file_:
                key_bytes = file_.read()
        return key_bytes

    def _generate_key(self):
        """Generate an entirely new key."""
        return rsa.generate_private_key(
            public_exponent=65537,
            key_size=DEFAULT_KEY_LENGTH,
            backend=default_backend(),
        ).private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )

    def _create_client(self, account_key):
        # Tests and explicit testing mode hit the staging directory, which
        # has laxer rate limits but issues untrusted certificates.
        param = self.env["ir.config_parameter"]
        testing_mode = param.get_param("letsencrypt.testing_mode") == "True"
        if config["test_enable"] or testing_mode:
            directory_url = V2_STAGING_DIRECTORY_URL
        else:
            directory_url = V2_DIRECTORY_URL
        directory_json = requests.get(directory_url).json()
        net = acme.client.ClientNetwork(account_key)
        return acme.client.ClientV2(directory_json, net)

    def _register_client(self, client, account_key):
        """Register this Letsencrypt client."""
        new_reg = acme.messages.NewRegistration(
            key=account_key.public_key(), terms_of_service_agreed=True
        )
        try:
            client.new_account(new_reg)
            _logger.info("Successfully registered.")
        except acme.errors.ConflictError as err:
            # The account already exists; re-query it by its URI.
            reg = acme.messages.Registration(key=account_key.public_key())
            reg_res = acme.messages.RegistrationResource(body=reg, uri=err.location)
            client.query_registration(reg_res)
            _logger.info("Reusing existing account.")

    def _get_authorization_resource(self, client, main_domain, domains):
        """Get acme authorization_resource."""
        domain_key = self._get_key("%s.key" % main_domain)
        _logger.info("Making CSR for the following domains: %s", domains)
        csr = acme.crypto_util.make_csr(private_key_pem=domain_key, domains=domains)
        return client.new_order(csr)

    def _handle_challenges(self, client, account_key, authzr):
        """Handle challenges from the Letsencrypt provider.

        For each requested domain name we receive a list of challenges.
        We only have to do one from each list.
        HTTP challenges are the easiest, so do one of those if possible.
        We can do DNS challenges too. There are other types that we don't
        support.
        """
        pending_responses = []
        prefer_dns = (
            self.env["ir.config_parameter"].get_param("letsencrypt.prefer_dns")
            == "True"
        )
        for authorizations in authzr.authorizations:
            http_challenges = [
                challenge
                for challenge in authorizations.body.challenges
                if challenge.chall.typ == TYPE_CHALLENGE_HTTP
            ]
            other_challenges = [
                challenge
                for challenge in authorizations.body.challenges
                if challenge.chall.typ != TYPE_CHALLENGE_HTTP
            ]
            if prefer_dns:
                ordered_challenges = other_challenges + http_challenges
            else:
                ordered_challenges = http_challenges + other_challenges
            for challenge in ordered_challenges:
                if challenge.chall.typ == TYPE_CHALLENGE_HTTP:
                    self._respond_challenge_http(challenge, account_key)
                    client.answer_challenge(challenge, acme.challenges.HTTP01Response())
                    break
                if challenge.chall.typ == TYPE_CHALLENGE_DNS:
                    domain = authorizations.body.identifier.value
                    token = challenge.validation(account_key)
                    self._respond_challenge_dns(domain, token)
                    # We delay this because we wait for each domain.
                    # That takes less time if they've all already been changed.
                    pending_responses.append(
                        DNSUpdate(challenge=challenge, domain=domain, token=token)
                    )
                    break
            else:
                # for/else: no supported challenge type was found.
                raise UserError(_("Could not respond to letsencrypt challenges."))
        if pending_responses:
            for update in pending_responses:
                self._wait_for_record(update.domain, update.token)
            # 1 minute was not always enough during testing, even once records
            # were visible locally
            _logger.info("All TXT records found, waiting 5 minutes more to make sure.")
            time.sleep(300)
            for update in pending_responses:
                client.answer_challenge(update.challenge, acme.challenges.DNSResponse())

    def _respond_challenge_http(self, challenge, account_key):
        """
        Respond to the HTTP challenge by writing the file to serve.
        """
        token = self._base64_encode(challenge.token)
        challenge_file = os.path.join(_get_challenge_dir(), token)
        with open(challenge_file, "w", encoding="utf-8") as file_:
            file_.write(challenge.validation(account_key))

    def _respond_challenge_dns(self, domain, token):
        """
        Respond to the DNS challenge by creating the DNS record
        on the provider.

        :raises UserError: when no DNS provider is configured.
        """
        provider = self.env["ir.config_parameter"].get_param("letsencrypt.dns_provider")
        if not provider:
            raise UserError(
                _("No DNS provider set, can't request wildcard certificate")
            )
        # Dispatch to a provider-specific method, e.g. ..._dns_shell.
        dns_function = getattr(self, "_respond_challenge_dns_" + provider)
        dns_function(domain.replace("*.", ""), token)

    def _respond_challenge_dns_shell(self, domain, token):
        """Respond to a DNS challenge using an arbitrary shell command."""
        script_str = self.env["ir.config_parameter"].get_param(
            "letsencrypt.dns_shell_script"
        )
        if script_str:
            # Pass domain and token to the script via environment variables.
            env = os.environ.copy()
            env.update(
                LETSENCRYPT_DNS_DOMAIN=domain,
                LETSENCRYPT_DNS_CHALLENGE=token,
            )
            self._call_cmdline(script_str, env=env)
        else:
            raise UserError(_("No shell command configured for updating DNS records"))

    def _base64_encode(self, data):
        """Encode data as a URL-safe base64 string without padding.

        This should be the encoding that Let's Encrypt uses for all base64. See
        https://github.com/ietf-wg-acme/acme/issues/64#issuecomment-168852757
        and https://golang.org/pkg/encoding/base64/#RawURLEncoding
        """
        return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")

    def _wait_for_record(self, domain, token):
        """Wait until a TXT record for a domain is visible.

        Polls once per minute, giving up (without raising) after 30 tries.
        """
        if not domain.endswith("."):
            # Fully qualify domain name, or it may try unsuitable names too
            domain += "."
        attempt = 0
        while True:
            attempt += 1
            try:
                for record in dns.resolver.query("_acme-challenge." + domain, "TXT"):
                    # to_text() wraps the value in quotes; strip them.
                    value = record.to_text()[1:-1]
                    if value == token:
                        return
                    _logger.debug("Found %r instead of %r", value, token)
            except dns.resolver.NXDOMAIN:
                _logger.debug("Record for %r does not exist yet", domain)
            if attempt < 30:
                _logger.info("Waiting for DNS update.")
                time.sleep(60)
            else:
                _logger.warning(
                    "Could not find new record after 30 minutes! "
                    "Giving up and hoping for the best."
                )
                return

    def _letsencrypt_validation(self, client, authzr):
        """Do the validation of the certificates."""
        ir_config_parameter = self.env["ir.config_parameter"]
        # let them know we are done and they should check
        backoff = int(ir_config_parameter.get_param("letsencrypt.backoff", 3))
        deadline = datetime.now() + timedelta(minutes=backoff)
        try:
            order_resource = client.poll_and_finalize(authzr, deadline)
        except acme.errors.ValidationError as error:
            # Log every per-challenge error before re-raising.
            _logger.error("Let's Encrypt validation failed!")
            for authz in error.failed_authzrs:
                for challenge in authz.body.challenges:
                    _logger.error(str(challenge.error))
            raise
        return order_resource

    def _save_and_reload(self, cert_file, order_resource):
        """Save certfile and reload nginx or other webserver."""
        ir_config_parameter = self.env["ir.config_parameter"]
        with open(cert_file, "w", encoding="utf-8") as crt:
            crt.write(order_resource.fullchain_pem)
        _logger.info("SUCCESS: Certificate saved: %s", cert_file)
        reload_cmd = ir_config_parameter.get_param("letsencrypt.reload_command", "")
        if reload_cmd.strip():
            self._call_cmdline(reload_cmd)
        else:
            _logger.warning("No reload command defined.")

    def _call_cmdline(self, cmdline, env=None):
        """Call a shell command.

        :raises UserError: when the command exits with a non-zero status.
        """
        # NOTE: shell=True runs admin-configured commands; the command string
        # comes from trusted configuration, not end users.
        with subprocess.Popen(
            cmdline,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            shell=True,
        ) as process:
            stdout, stderr = process.communicate()
        stdout = stdout.strip()
        stderr = stderr.strip()
        if process.returncode:
            if stdout:
                _logger.warning(stdout)
            if stderr:
                _logger.warning(stderr)
            raise UserError(
                _("Error calling %(cmdline)s: %(returncode)d")
                % {
                    "cmdline": cmdline,
                    "returncode": process.returncode,
                }
            )
        if stdout:
            _logger.info(stdout)
        if stderr:
            _logger.info(stderr)
| 39.851441
| 17,973
|
3,683
|
py
|
PYTHON
|
15.0
|
# Copyright 2018-2022 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
"""Configuration of Letsencrypt."""
from odoo import _, api, exceptions, fields, models
# Default content of the DNS update script field; the '#' lines below are
# part of the runtime string shown to the user, not Python comments.
DNS_SCRIPT_DEFAULT = """# Write your script here
# It should create a TXT record of $LETSENCRYPT_DNS_CHALLENGE
# on _acme-challenge.$LETSENCRYPT_DNS_DOMAIN
"""
class ResConfigSettings(models.TransientModel):
    """Configuration of Letsencrypt."""

    _inherit = "res.config.settings"

    letsencrypt_altnames = fields.Char(
        string="Domain names",
        default="",
        help=(
            "Domains to use for the certificate. " "Separate with commas or newlines."
        ),
        config_parameter="letsencrypt.altnames",
    )
    letsencrypt_dns_provider = fields.Selection(
        selection=[("shell", "Shell script")],
        string="DNS provider",
        help=(
            "For wildcard certificates we need to add a TXT record on your "
            'DNS. If you set this to "Shell script" you can enter a shell '
            "script. Other options can be added by installing additional "
            "modules."
        ),
        config_parameter="letsencrypt.dns_provider",
    )
    letsencrypt_dns_shell_script = fields.Char(
        string="DNS update script",
        help=(
            "Write a shell script that will update your DNS TXT records. "
            "You can use the $LETSENCRYPT_DNS_CHALLENGE and "
            "$LETSENCRYPT_DNS_DOMAIN variables."
        ),
        default=DNS_SCRIPT_DEFAULT,
        config_parameter="letsencrypt.dns_shell_script",
    )
    letsencrypt_needs_dns_provider = fields.Boolean()
    letsencrypt_reload_command = fields.Char(
        string="Server reload command",
        help="Fill this with the command to restart your web server.",
        config_parameter="letsencrypt.reload_command",
    )
    letsencrypt_testing_mode = fields.Boolean(
        string="Use testing server",
        help=(
            "Use the Let's Encrypt staging server, which has higher rate "
            "limits but doesn't create valid certificates."
        ),
        config_parameter="letsencrypt.testing_mode",
    )
    letsencrypt_prefer_dns = fields.Boolean(
        string="Prefer DNS validation",
        help=(
            "Validate through DNS even when HTTP validation is possible. "
            "Use this if your Odoo instance isn't publicly accessible."
        ),
        config_parameter="letsencrypt.prefer_dns",
    )

    @api.onchange("letsencrypt_altnames", "letsencrypt_prefer_dns")
    def letsencrypt_check_dns_required(self):
        """Check whether DNS validation is required for Letsencrypt."""
        altnames = self.letsencrypt_altnames or ""
        self.letsencrypt_needs_dns_provider = (
            "*." in altnames or self.letsencrypt_prefer_dns
        )

    @api.model
    def default_get(self, fields_list):
        """Compute the DNS-provider-needed flag alongside the defaults.

        Uses ``.get`` with fallbacks: the keys are absent when the caller did
        not request these fields, and the stored altnames value may be False,
        which would previously raise KeyError/TypeError here.
        """
        res = super().default_get(fields_list)
        altnames = res.get("letsencrypt_altnames") or ""
        res["letsencrypt_needs_dns_provider"] = (
            "*." in altnames or res.get("letsencrypt_prefer_dns", False)
        )
        return res

    def set_values(self):
        """Set Letsencrypt values on settings object.

        :raises ValidationError: when the shell provider is selected but the
            DNS update script is empty or contains only comments.
        """
        result = super().set_values()
        self.letsencrypt_check_dns_required()
        if self.letsencrypt_dns_provider == "shell":
            # Guard against an unset (False) script before splitting.
            script = self.letsencrypt_dns_shell_script or ""
            lines = [line.strip() for line in script.split("\n")]
            if all(line == "" or line.startswith("#") for line in lines):
                raise exceptions.ValidationError(
                    _("You didn't write a DNS update script!")
                )
        return result
| 37.581633
| 3,683
|
1,091
|
py
|
PYTHON
|
15.0
|
# Copyright 2016,2022 Therp BV <https://therp.nl>.
# Copyright 2016 Antonio Espinosa <antonio.espinosa@tecnativa.com>.
# Copyright 2018 Ignacio Ibeas <ignacio@acysos.com>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
# pylint: disable=too-few-public-methods,no-self-use
"""This controller handles the acme challenge call from Letsencrypt."""
import logging
import os
from odoo import _, http
from odoo.http import request
from ..models.letsencrypt import _get_challenge_dir
_logger = logging.getLogger(__name__)
class Letsencrypt(http.Controller):
    """Serve ACME HTTP-01 challenge tokens to the Let's Encrypt servers."""

    @http.route("/.well-known/acme-challenge/<filename>", auth="none")
    def acme_challenge(self, filename):
        """Return the content of the requested challenge token file.

        Responds with 404 when the token file is missing or unreadable.
        """
        token_path = os.path.join(_get_challenge_dir(), filename)
        try:
            with open(token_path, encoding="utf-8") as token_file:
                challenge = token_file.read()
        except IOError:
            _logger.exception(_("Error opening file %s"), token_path)
            return request.not_found()
        return challenge
| 36.366667
| 1,091
|
1,347
|
py
|
PYTHON
|
15.0
|
# Copyright 2017 Simone Orsi
# Copyright 2018 Creu Blanca
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
# Odoo addon manifest: replaces the bundled FontAwesome 4 assets with 5.15.4.
{
    "name": "Base Fontawesome",
    "summary": """Up to date Fontawesome resources.""",
    "version": "15.0.5.15.4",
    "license": "LGPL-3",
    "website": "https://github.com/OCA/server-tools",
    "author": "Camptocamp,Creu Blanca,Odoo Community Association (OCA)",
    "depends": ["web"],
    "assets": {
        # Swap core FA4 CSS for this module's build, then add FA5 + v4 shims.
        "web.assets_common": [
            (
                "replace",
                "web/static/lib/fontawesome/css/font-awesome.css",
                "base_fontawesome/static/src/css/fontawesome.css",
            ),
            "base_fontawesome/static/lib/fontawesome-5.15.4/css/all.css",
            "base_fontawesome/static/lib/fontawesome-5.15.4/css/v4-shims.css",
            "base_fontawesome/static/src/js/form_renderer.js",
            "base_fontawesome/static/src/js/list_renderer.js",
        ],
        # Same substitution for the report (PDF) asset bundle.
        "web.report_assets_common": [
            (
                "replace",
                "web/static/lib/fontawesome/css/font-awesome.css",
                "base_fontawesome/static/src/css/fontawesome.css",
            ),
            "base_fontawesome/static/lib/fontawesome-5.15.4/css/all.css",
            "base_fontawesome/static/lib/fontawesome-5.15.4/css/v4-shims.css",
        ],
    },
}
| 38.485714
| 1,347
|
6,483
|
py
|
PYTHON
|
15.0
|
# © 2016 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
from odoo import SUPERUSER_ID, api
_logger = logging.getLogger(__name__)
def pre_init_hook_for_submodules(cr, model, field):
    """Moves images from single to multi mode.

    Feel free to use this as a ``pre_init_hook`` for submodules.

    :param str model:
        Model name, like ``product.template``.
    :param str field:
        Binary field that had the images in that model, like ``image``.
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    with cr.savepoint():
        table = env[model]._table
        column_exists = table_has_column(cr, table, field)
        # fields.Binary(), extract the binary content directly from the table
        if column_exists:
            # Row id doubles as owner_id; storage mode "db" keeps the blob
            # in base_multi_image_image.file_db_store.
            extract_query = """
                SELECT id, '%(model)s', '%(model)s,' || id, 'db', %(field)s
                FROM %(table)s
                WHERE %(field)s IS NOT NULL
                """ % {
                "table": table,
                "field": field,
                "model": model,
            }
            image_field = "file_db_store"
        # fields.Binary(attachment=True), get the ir_attachment record ID
        else:
            # Storage mode "filestore": only the attachment id is copied.
            extract_query = """
                SELECT
                    res_id,
                    res_model,
                    CONCAT_WS(',', res_model, res_id),
                    'filestore',
                    id
                FROM ir_attachment
                WHERE res_field='%(field)s' AND res_model='%(model)s'
                """ % {
                "model": model,
                "field": field,
            }
            image_field = "attachment_id"
        # Values come from trusted module parameters, not user input, hence
        # the deliberate string interpolation below.
        cr.execute(  # pylint: disable=sql-injection
            """
            INSERT INTO base_multi_image_image (
                owner_id,
                owner_model,
                owner_ref_id,
                storage,
                %s
            )
            %s
            """
            % (image_field, extract_query)
        )
def uninstall_hook_for_submodules(
    cr, registry, model, field=None, field_medium=None, field_small=None
):
    """Moves images from multi to single mode and remove multi-images for a
    given model.

    :param odoo.sql_db.Cursor cr:
        Database cursor.
    :param odoo.modules.registry.RegistryManager registry:
        Database registry, using v7 api.
    :param str model:
        Model technical name, like "res.partner". All multi-images for that
        model will be deleted.
    :param str field:
        Binary field that had the images in that model, like ``image``.
    :param str field_medium:
        Binary field that had the medium-sized images in that model,
        like ``image_medium``.
    :param str field_small:
        Binary field that had the small-sized images in that model,
        like ``image_small``.
    """
    env = api.Environment(cr, SUPERUSER_ID, {})
    with cr.savepoint():
        Image = env["base_multi_image.image"]
        images = Image.search([("owner_model", "=", model)], order="sequence, id")
        if images and (field or field_medium or field_small):
            # Keep only the first image per owner (lowest sequence wins).
            main_images = {}
            for image in images:
                if image.owner_id not in main_images:
                    main_images[image.owner_id] = image
            main_images = main_images.values()
            Model = env[model]
            Field = field and Model._fields[field]
            FieldMedium = field_medium and Model._fields[field_medium]
            FieldSmall = field_small and Model._fields[field_small]
            # fields.Binary(), save the binary content directly to the table
            if (
                (field and not Field.attachment)
                or (field_medium and not FieldMedium.attachment)
                or (field_small and not FieldSmall.attachment)
            ):
                save_directly_to_table(
                    cr,
                    Model,
                    (field, field_medium, field_small),
                    (Field, FieldMedium, FieldSmall),
                    main_images,
                )
            # fields.Binary(attachment=True), save the ir_attachment record ID
            if (
                (field and Field.attachment)
                or (field_medium and FieldMedium.attachment)
                or (field_small and FieldSmall.attachment)
            ):
                # Write through the field objects so attachments are created.
                for main_image in main_images:
                    owner = Model.browse(main_image.owner_id)
                    if field and Field.attachment:
                        Field.write(owner, main_image.image_main)
                    if field_medium and FieldMedium.attachment:
                        FieldMedium.write(owner, main_image.image_medium)
                    if field_small and FieldSmall.attachment:
                        FieldSmall.write(owner, main_image.image_small)
        images.unlink()
def save_directly_to_table(cr, Model, fields, Fields, main_images):
    """Write each owner's main image back into plain binary table columns.

    :param cr: database cursor.
    :param Model: model whose table receives the image columns.
    :param tuple fields: ``(field, field_medium, field_small)`` column
        names; each entry may be falsy when that size is not migrated.
    :param tuple Fields: matching ``odoo.fields.Field`` objects (or falsy).
    :param main_images: iterable of main ``base_multi_image.image`` records,
        one per owner.
    """
    field, field_medium, field_small = fields
    Field, FieldMedium, FieldSmall = Fields
    # Build the SET clause only for non-attachment (in-table) columns.
    # NOTE: the original rebound the *fields* parameter here, shadowing the
    # tuple it had just unpacked; use a distinct name instead.
    assignments = []
    if field and not Field.attachment:
        assignments.append(field + " = " + "%(image)s")
    if field_medium and not FieldMedium.attachment:
        assignments.append(field_medium + " = " + "%(image_medium)s")
    if field_small and not FieldSmall.attachment:
        assignments.append(field_small + " = " + "%(image_small)s")
    # %%(id)s survives the %-formatting as %(id)s for cr.execute's params.
    query = """
        UPDATE %(table)s
        SET %(fields)s
        WHERE id = %%(id)s
        """ % {
        "table": Model._table,
        "fields": ", ".join(assignments),
    }
    for main_image in main_images:
        params = {"id": main_image.owner_id}
        if field and not Field.attachment:
            params["image"] = main_image.image_main
        if field_medium and not FieldMedium.attachment:
            params["image_medium"] = main_image.image_medium
        if field_small and not FieldSmall.attachment:
            params["image_small"] = main_image.image_small
        cr.execute(query, params)  # pylint: disable=sql-injection
def table_has_column(cr, table, field):
    """Tell whether ``table`` has a column named ``field``."""
    sql = """
        SELECT %(field)s
        FROM information_schema.columns
        WHERE table_name=%(table)s and column_name=%(field)s;
    """
    params = {"table": table, "field": field}
    cr.execute(sql, params)
    rows = cr.fetchall()
    return len(rows) > 0
| 36.206704
| 6,481
|
842
|
py
|
PYTHON
|
15.0
|
# © 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# © 2016 Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo addon manifest: multiple images per record for arbitrary models.
{
    "name": "Multiple images base",
    "summary": "Allow multiple images for database objects",
    "version": "15.0.1.0.0",
    "author": "Tecnativa, "
    "Antiun Ingeniería, S.L., Sodexis, "
    "LasLabs, "
    "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/server-tools",
    "category": "Tools",
    "depends": ["base"],
    "installable": True,
    "data": [
        "security/ir.model.access.csv",
        "views/image_view.xml",
    ],
    "images": [
        "images/form.png",
        "images/kanban.png",
    ],
}
| 29.892857
| 837
|
3,243
|
py
|
PYTHON
|
15.0
|
# © 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models, tools
class Owner(models.AbstractModel):
    """Mixin granting multi-image support to any inheriting model.

    Exposes ``image_ids`` plus three computed ``image_main*`` fields that
    mirror the first image, as a compatibility layer for models that
    previously stored a single image per record.
    """

    _name = "base_multi_image.owner"
    # NOTE(review): description says "Wizard" although this is an abstract
    # mixin, not a wizard — string kept unchanged on purpose.
    _description = """ Wizard for base multi image """

    # All images attached to the owning record, filtered by owner model.
    image_ids = fields.One2many(
        comodel_name="base_multi_image.image",
        inverse_name="owner_id",
        string="Images",
        domain=lambda self: [("owner_model", "=", self._name)],
        copy=True,
    )
    # Non-stored mirrors of the first image; writing them edits/creates
    # the first base_multi_image.image record.
    image_main = fields.Binary(
        string="Main image",
        store=False,
        compute="_compute_multi_image",
        inverse="_inverse_multi_image_main",
    )
    image_main_medium = fields.Binary(
        string="Medium image",
        compute="_compute_multi_image",
        inverse="_inverse_multi_image_main_medium",
        store=False,
    )
    image_main_small = fields.Binary(
        string="Small image",
        compute="_compute_multi_image",
        inverse="_inverse_multi_image_main_small",
        store=False,
    )

    @api.depends("image_ids")
    def _compute_multi_image(self):
        """Get the main image for this object.

        This is provided as a compatibility layer for submodels that already
        had one image per record.
        """
        for s in self:
            # [:1] yields an empty recordset (not an error) when no images.
            first = s.image_ids[:1]
            s.image_main = first.image_main
            s.image_main_medium = first.image_medium
            s.image_main_small = first.image_small

    def _set_multi_image(self, image=False, name=False):
        """Save or delete the main image for this record.

        This is provided as a compatibility layer for submodels that already
        had one image per record.
        """
        # Values to save
        values = {
            "storage": "db",
            "file_db_store": tools.image_process(image, size=(1024, 1024)),
            "owner_model": self._name,
        }
        if name:
            values["name"] = name
        for s in self:
            if image:
                values["owner_id"] = s.id
                # Editing
                if s.image_ids:
                    s.image_ids[0].write(values)
                # Adding
                else:
                    values.setdefault("name", name or _("Main image"))
                    s.image_ids = [(0, 0, values)]
            # Deleting
            elif s.image_ids:
                s.image_ids[0].unlink()

    def _inverse_multi_image_main(self):
        self._set_multi_image(self.image_main)

    def _inverse_multi_image_main_medium(self):
        self._set_multi_image(self.image_main_medium)

    def _inverse_multi_image_main_small(self):
        self._set_multi_image(self.image_main_small)

    def unlink(self):
        """Mimic `ondelete="cascade"` for multi images.

        Will be skipped if ``env.context['bypass_image_removal']`` == True
        """
        images = self.mapped("image_ids")
        result = super().unlink()
        if result and not self.env.context.get("bypass_image_removal"):
            images.unlink()
        return result
| 32.727273
| 3,240
|
7,296
|
py
|
PYTHON
|
15.0
|
# © 2014 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# © 2015 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import base64
import logging
import os
from urllib.error import ContentTooShortError
from urllib.request import urlretrieve
from odoo import _, api, exceptions, fields, models, tools
_logger = logging.getLogger(__name__)
class Image(models.Model):
    """Image attachable to any model through a split (model, id) reference.

    The binary content can live in four places, selected by ``storage``:
    the database (``file_db_store``), an attachment (``attachment_id``),
    an OS file (``path``) or a remote URL (``url``).
    """

    _name = "base_multi_image.image"
    # TODO: when migrating to 15.0 use image.mixin
    _order = "sequence, owner_model, owner_id, id"
    _description = """ image model for multiple image functionality """
    # NOTE(review): _() at class-definition time translates the message at
    # import, not per user; kept as-is.
    _sql_constraints = [
        (
            "uniq_name_owner",
            "UNIQUE(owner_id, owner_model, name)",
            _("A document can have only one image with the same name."),
        ),
    ]

    # This Integer is really a split Many2one
    owner_id = fields.Integer("Owner", required=True)
    owner_model = fields.Char(required=True)
    # Recombined reference, computed from the two columns above.
    owner_ref_id = fields.Reference(
        selection="_selection_owner_ref_id",
        string="Referenced Owner",
        compute="_compute_owner_ref_id",
        store=True,
    )
    storage = fields.Selection(
        [
            ("url", "URL"),
            ("file", "OS file"),
            ("db", "Database"),
            ("filestore", "Filestore"),
        ],
        required=True,
        default="filestore",
    )
    name = fields.Char("Image title", translate=True)
    filename = fields.Char()
    extension = fields.Char("File extension", readonly=True)
    attachment_id = fields.Many2one(
        "ir.attachment", string="Attachment", domain="[('index_content', '=', 'image')]"
    )
    file_db_store = fields.Binary("Image stored in database")
    path = fields.Char("Image path", help="Image path")
    url = fields.Char("Image remote URL")
    image_main = fields.Image("Full-sized image", compute="_compute_image")
    image_medium = fields.Image(
        "Medium-sized image",
        related="image_main",
        max_width=128,
        max_height=128,
        help="Medium-sized image. It is automatically resized as a "
        "128 x 128 px image, with aspect ratio preserved, only when the "
        "image exceeds one of those sizes. Use this field in form views "
        "or kanban views.",
    )
    image_small = fields.Image(
        "Small-sized image",
        related="image_main",
        max_width=64,
        max_height=64,
        help="Small-sized image. It is automatically resized as a 64 x 64 px "
        "image, with aspect ratio preserved. Use this field anywhere a "
        "small image is required.",
    )
    comments = fields.Text(translate=True)
    sequence = fields.Integer(default=10)
    show_technical = fields.Boolean(compute="_compute_show_technical")

    @api.model
    @tools.ormcache("self")
    def _selection_owner_ref_id(self):
        """Allow any model; after all, this field is readonly."""
        return [(r.model, r.name) for r in self.env["ir.model"].search([])]

    @api.depends("owner_model", "owner_id")
    def _compute_owner_ref_id(self):
        """Get a reference field based on the split model and id fields."""
        for s in self:
            # When owner_model is falsy the reference is left untouched.
            if s.owner_model:
                s.owner_ref_id = "{0.owner_model},{0.owner_id}".format(s)

    @api.depends("storage", "path", "file_db_store", "url")
    def _compute_image(self):
        """Get image data from the right storage type."""
        # Dispatches to _get_image_from_<storage> (url/file/db/filestore).
        for s in self:
            s.image_main = getattr(s, "_get_image_from_%s" % s.storage)()

    @api.depends("owner_id", "owner_model")
    def _compute_show_technical(self):
        """Know if you need to show the technical fields."""
        self.show_technical = all(
            "default_owner_%s" % f not in self.env.context for f in ("id", "model")
        )

    def _get_image_from_filestore(self):
        return self.attachment_id.datas

    def _get_image_from_db(self):
        return self.file_db_store

    def _get_image_from_file(self):
        """Read and base64-encode the image from the local filesystem."""
        if self.path and os.path.exists(self.path):
            try:
                with open(self.path, "rb") as f:
                    return base64.b64encode(f.read())
            except Exception as e:
                _logger.error(
                    "Can not open the image %s, error : %s", self.path, e, exc_info=True
                )
        else:
            _logger.error("The image %s doesn't exist ", self.path)
        return False

    def _get_image_from_url(self):
        return self._get_image_from_url_cached(self.url)

    @api.model
    @tools.ormcache("url")
    def _get_image_from_url_cached(self, url):
        """Allow to download an image and cache it by its URL."""
        if url:
            try:
                (filename, header) = urlretrieve(url)
                with open(filename, "rb") as f:
                    return base64.b64encode(f.read())
            except ContentTooShortError:
                _logger.error("URL %s cannot be fetched", url, exc_info=True)
        return False

    @api.model
    def _make_name_pretty(self, name):
        """Turn a file name like "my_image" into "My image"."""
        return name.replace("_", " ").capitalize()

    @api.onchange("url")
    def _onchange_url(self):
        if self.url:
            filename = self.url.split("/")[-1]
            self.name, self.extension = os.path.splitext(filename)
            self.name = self._make_name_pretty(self.name)

    @api.onchange("path")
    def _onchange_path(self):
        if self.path:
            self.name, self.extension = os.path.splitext(os.path.basename(self.path))
            self.name = self._make_name_pretty(self.name)

    @api.onchange("filename")
    def _onchange_filename(self):
        if self.filename:
            self.name, self.extension = os.path.splitext(self.filename)
            self.name = self._make_name_pretty(self.name)

    @api.onchange("attachment_id")
    def _onchange_attachmend_id(self):
        if self.attachment_id:
            self.name = self.attachment_id.res_name

    # One constraint per storage mode: the matching source field is required.
    @api.constrains("storage", "url")
    def _check_url(self):
        for record in self:
            if record.storage == "url" and not record.url:
                raise exceptions.ValidationError(
                    _("You must provide an URL for the image.")
                )

    @api.constrains("storage", "path")
    def _check_path(self):
        for record in self:
            if record.storage == "file" and not record.path:
                raise exceptions.ValidationError(
                    _("You must provide a file path for the image.")
                )

    @api.constrains("storage", "file_db_store")
    def _check_store(self):
        for record in self:
            if record.storage == "db" and not record.file_db_store:
                raise exceptions.ValidationError(
                    _("You must provide an attached file for the image.")
                )

    @api.constrains("storage", "attachment_id")
    def _check_attachment_id(self):
        for record in self:
            if record.storage == "filestore" and not record.attachment_id:
                raise exceptions.ValidationError(
                    _("You must provide an attachment for the image.")
                )
| 35.926108
| 7,293
|
526
|
py
|
PYTHON
|
15.0
|
# Copyright 2020 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl)
# Odoo addon manifest: abstract time-window model with weekday support.
{
    "name": "Base Time Window",
    "summary": "Base model to handle time windows",
    "version": "15.0.1.0.0",
    "category": "Technical Settings",
    "author": "ACSONE SA/NV, Camptocamp, Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/server-tools",
    "depends": ["base"],
    "data": ["data/time_weekday.xml", "security/ir.model.access.xml"],
    "installable": True,
}
| 37.571429
| 526
|
5,528
|
py
|
PYTHON
|
15.0
|
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import math
from datetime import time
from psycopg2.extensions import AsIs
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools.misc import format_time
class TimeWindowMixin(models.AbstractModel):
    """Abstract weekly time window (start/end float hours + weekdays).

    Concrete models must set ``_time_window_overlap_check_field`` to the
    m2o field whose records the overlap check is scoped to.
    """

    _name = "time.window.mixin"
    _description = "Time Window"
    _order = "time_window_start"
    # TODO patch api.constrains with field here?
    _time_window_overlap_check_field = False

    # Hours expressed as floats, e.g. 13.5 == 13:30.
    time_window_start = fields.Float("From", required=True)
    time_window_end = fields.Float("To", required=True)
    time_window_weekday_ids = fields.Many2many(
        comodel_name="time.weekday", required=True
    )

    @api.constrains("time_window_start", "time_window_end", "time_window_weekday_ids")
    def check_window_no_overlaps(self):
        """Validate ordering, weekday presence and pairwise non-overlap.

        :raises ValidationError: when end <= start is violated (start must be
            strictly lower), when no weekday is set, or when another window
            of the same parent record overlaps on a shared weekday.
        """
        weekdays_field = self._fields["time_window_weekday_ids"]
        for record in self:
            if record.time_window_start > record.time_window_end:
                raise ValidationError(
                    _("%(end_time)s must be > %(start_time)s")
                    % (
                        {
                            "end_time": self.float_to_time_repr(record.time_window_end),
                            "start_time": self.float_to_time_repr(
                                record.time_window_start
                            ),
                        }
                    )
                )
            if not record.time_window_weekday_ids:
                raise ValidationError(_("At least one time.weekday is required"))
            # here we use a plain SQL query to benefit of the numrange
            # function available in PostgresSQL
            # (http://www.postgresql.org/docs/current/static/rangetypes.html)
            SQL = """
                SELECT
                    id
                FROM
                    %(table)s w
                    join %(relation)s as d
                    on d.%(relation_window_fkey)s = w.id
                WHERE
                    NUMRANGE(w.time_window_start::numeric,
                        w.time_window_end::numeric) &&
                        NUMRANGE(%(start)s::numeric, %(end)s::numeric)
                    AND w.id != %(window_id)s
                    AND d.%(relation_week_day_fkey)s in %(weekday_ids)s
                    AND w.%(check_field)s = %(check_field_id)s;"""
            # AsIs injects identifiers (table/column names); plain values are
            # passed as regular query parameters.
            self.env.cr.execute(
                SQL,
                dict(
                    table=AsIs(self._table),
                    relation=AsIs(weekdays_field.relation),
                    relation_window_fkey=AsIs(weekdays_field.column1),
                    relation_week_day_fkey=AsIs(weekdays_field.column2),
                    start=record.time_window_start,
                    end=record.time_window_end,
                    window_id=record.id,
                    weekday_ids=tuple(record.time_window_weekday_ids.ids),
                    check_field=AsIs(self._time_window_overlap_check_field),
                    check_field_id=record[self._time_window_overlap_check_field].id,
                ),
            )
            res = self.env.cr.fetchall()
            if res:
                other = self.browse(res[0][0])
                raise ValidationError(
                    _("%(record_name)s overlaps %(other_name)s")
                    % (
                        {
                            "record_name": record.display_name,
                            "other_name": other.display_name,
                        }
                    )
                )

    @api.depends("time_window_start", "time_window_end", "time_window_weekday_ids")
    def _compute_display_name(self):
        """Render e.g. "Monday, Tuesday: From 08:00 to 12:00"."""
        for record in self:
            record.display_name = _("{days}: From {start} to {end}").format(
                days=", ".join(record.time_window_weekday_ids.mapped("display_name")),
                start=format_time(self.env, record.get_time_window_start_time()),
                end=format_time(self.env, record.get_time_window_end_time()),
            )

    @api.constrains("time_window_start", "time_window_end")
    def _check_window_under_twenty_four_hours(self):
        """Reject hour components above 23 on either bound."""
        error_msg = _("Hour should be between 00 and 23")
        for record in self:
            if record.time_window_start:
                hour, minute = self._get_hour_min_from_value(record.time_window_start)
                if hour > 23:
                    raise ValidationError(error_msg)
            if record.time_window_end:
                hour, minute = self._get_hour_min_from_value(record.time_window_end)
                if hour > 23:
                    raise ValidationError(error_msg)

    @api.model
    def _get_hour_min_from_value(self, value):
        """Split a float hour value into (hour, minute) ints.

        Minute rounding may carry into the next hour (e.g. 9.999 -> 10:00).
        """
        hour = math.floor(value)
        minute = round((value % 1) * 60)
        if minute == 60:
            minute = 0
            hour += 1
        return hour, minute

    @api.model
    def float_to_time_repr(self, value):
        """Format a float hour as an "HH:MM" string."""
        pattern = "%02d:%02d"
        hour, minute = self._get_hour_min_from_value(value)
        return pattern % (hour, minute)

    @api.model
    def float_to_time(self, value):
        """Convert a float hour into a datetime.time object."""
        hour, minute = self._get_hour_min_from_value(value)
        return time(hour=hour, minute=minute)

    def get_time_window_start_time(self):
        return self.float_to_time(self.time_window_start)

    def get_time_window_end_time(self):
        return self.float_to_time(self.time_window_end)
| 40.350365
| 5,528
|
1,739
|
py
|
PYTHON
|
15.0
|
# Copyright 2020 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models, tools
class TimeWeekday(models.Model):
    """Weekday reference records ("0" = Monday .. "6" = Sunday)."""

    _name = "time.weekday"
    _description = "Time Week Day"

    name = fields.Selection(
        selection=[
            ("0", "Monday"),
            ("1", "Tuesday"),
            ("2", "Wednesday"),
            ("3", "Thursday"),
            ("4", "Friday"),
            ("5", "Saturday"),
            ("6", "Sunday"),
        ],
        required=True,
    )

    _sql_constraints = [("name_uniq", "UNIQUE(name)", _("Name must be unique"))]

    @api.depends("name")
    def _compute_display_name(self):
        """
        WORKAROUND since Odoo doesn't handle properly records where name is
        a selection
        """
        labels = dict(self._fields["name"]._description_selection(self.env))
        for rec in self:
            rec.display_name = labels[rec.name]

    def name_get(self):
        """
        WORKAROUND since Odoo doesn't handle properly records where name is
        a selection
        """
        result = []
        for rec in self:
            result.append((rec.id, rec.display_name))
        return result

    @api.model
    @tools.ormcache("name")
    def _get_id_by_name(self, name):
        """Cached lookup of a weekday id by its selection key."""
        record = self.search([("name", "=", name)], limit=1)
        return record.id

    # Every mutation invalidates the ormcache used by _get_id_by_name.
    @api.model
    def create(self, vals):
        record = super().create(vals)
        self._get_id_by_name.clear_cache(self)
        return record

    def write(self, vals):
        ok = super().write(vals)
        self._get_id_by_name.clear_cache(self)
        return ok

    def unlink(self):
        ok = super().unlink()
        self._get_id_by_name.clear_cache(self)
        return ok
| 28.048387
| 1,739
|
1,077
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest: diff analysis between two Odoo instances' modules.
{
    "name": "Upgrade Analysis",
    "summary": "Performs a difference analysis between modules"
    " installed on two different Odoo instances",
    "version": "15.0.3.0.0",
    "category": "Migration",
    "author": "Therp BV, Opener B.V., GRAP, Odoo Community Association (OCA)",
    "maintainers": ["StefanRijnhart", "legalsylvain"],
    "website": "https://github.com/OCA/server-tools",
    "data": [
        "security/ir.model.access.csv",
        "views/menu.xml",
        "views/view_upgrade_comparison_config.xml",
        "views/view_upgrade_analysis.xml",
        "views/view_upgrade_record.xml",
        "wizards/view_upgrade_generate_record_wizard.xml",
        "wizards/view_upgrade_install_wizard.xml",
    ],
    "installable": True,
    "depends": ["base"],
    "external_dependencies": {
        "python": ["mako", "dataclasses", "odoorpc", "openupgradelib"],
    },
    "license": "AGPL-3",
}
| 38.464286
| 1,077
|
8,297
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from openupgradelib.openupgrade_tools import table_exists
from odoo import models
_logger = logging.getLogger(__name__)
def get_record_id(cr, module, model, field, mode):
    """
    OpenUpgrade: get or create the id from the record table matching
    the key parameter values.

    :param cr: database cursor.
    :param module: module name the field belongs to.
    :param model: model technical name.
    :param field: field name.
    :param mode: "create" or "modify".
    :return: id of the (possibly freshly inserted) ``upgrade_record`` row.
    """
    cr.execute(
        "SELECT id FROM upgrade_record "
        "WHERE module = %s AND model = %s AND "
        "field = %s AND mode = %s AND type = %s",
        (module, model, field, mode, "field"),
    )
    record = cr.fetchone()
    if record:
        return record[0]
    # Not found: insert and fetch the new id in a single round trip via
    # RETURNING (PostgreSQL) instead of re-running the SELECT, which cost
    # an extra query and could race with a concurrent insert.
    cr.execute(
        "INSERT INTO upgrade_record "
        "(create_date, module, model, field, mode, type) "
        "VALUES (NOW() AT TIME ZONE 'UTC', %s, %s, %s, %s, %s) "
        "RETURNING id",
        (module, model, field, mode, "field"),
    )
    return cr.fetchone()[0]
def compare_registries(cr, module, registry, local_registry):
    """
    OpenUpgrade: Compare the local registry with the global registry,
    log any differences and merge the local registry with
    the global one.
    """
    # The logging tables only exist when upgrade_analysis is installed.
    if not table_exists(cr, "upgrade_record"):
        return
    for model, flds in local_registry.items():
        registry.setdefault(model, {})
        for field, attributes in flds.items():
            old_field = registry[model].setdefault(field, {})
            # "create" when the field is new to the global registry,
            # "modify" otherwise.
            mode = old_field and "modify" or "create"
            record_id = False
            for key, value in attributes.items():
                if key not in old_field or old_field[key] != value:
                    # Lazily create the upgrade_record row on first diff.
                    if not record_id:
                        record_id = get_record_id(cr, module, model, field, mode)
                    cr.execute(
                        "SELECT id FROM upgrade_attribute "
                        "WHERE name = %s AND value = %s AND "
                        "record_id = %s",
                        (key, value, record_id),
                    )
                    if not cr.fetchone():
                        cr.execute(
                            "INSERT INTO upgrade_attribute "
                            "(create_date, name, value, record_id) "
                            "VALUES (NOW() AT TIME ZONE 'UTC', %s, %s, %s)",
                            (key, value, record_id),
                        )
                    # Merge the new value into the global registry.
                    old_field[key] = value
def hasdefault(field):
    """Return a representation of the field's default method.

    Only meaningful for a regular read/write field; such a field reports
    "default" when it has a `default` method, else "compute" when it has a
    `compute` method, else "".

    Note that Odoo fields accept a literal value as a `default` attribute;
    this value is wrapped in a lambda expression in odoo/fields.py:
    https://github.com/odoo/odoo/blob/7eeba9d/odoo/fields.py#L484-L487
    """
    regular_field = (
        not field.readonly  # It's not a proper computed field
        and not field.inverse  # It's not a field that delegates their data
        and not isrelated(field)  # It's not an (unstored) related field.
    )
    if not regular_field:
        return ""
    if field.default:
        return "default"
    if field.compute:
        return "compute"
    return ""
def isfunction(field):
    """Return "function" for a genuine computed field, else ""."""
    computed = bool(field.compute) and (bool(field.readonly) or bool(field.inverse))
    plain = not field.related and not field.company_dependent
    return "function" if computed and plain else ""
def isproperty(field):
    """Return "property" when the field is company-dependent, else ""."""
    return "property" if field.company_dependent else ""
def isrelated(field):
    """Return "related" when the field delegates to another field, else ""."""
    return "related" if field.related else ""
def _get_relation(field):
if field.type in ("many2many", "many2one", "one2many"):
return field.comodel_name
elif field.type == "many2one_reference":
return field.model_field
else:
return ""
def log_model(model, local_registry):
    """
    OpenUpgrade: Store the characteristics of the BaseModel and its fields
    in the local registry, so that we can compare changes with the
    main registry
    """
    if not model._name:
        return
    # Monetary fields are recorded as plain floats for comparison purposes.
    typemap = {"monetary": "float"}
    # persistent models only
    if isinstance(model, models.TransientModel):
        return
    model_registry = local_registry.setdefault(model._name, {})
    # Model-level attributes are stored under pseudo-field keys.
    if model._inherits:
        model_registry["_inherits"] = {"_inherits": str(model._inherits)}
    model_registry["_order"] = {"_order": model._order}
    for fieldname, field in model._fields.items():
        properties = {
            "type": typemap.get(field.type, field.type),
            "isfunction": isfunction(field),
            "isproperty": isproperty(field),
            "isrelated": isrelated(field),
            "relation": _get_relation(field),
            "table": field.relation if field.type == "many2many" else "",
            "required": field.required and "required" or "",
            "stored": field.store and "stored" or "",
            "selection_keys": "",
            "hasdefault": hasdefault(field),
        }
        if field.type == "selection":
            # Static selections record their sorted keys; callable
            # selections can only be flagged as dynamic.
            if isinstance(field.selection, (tuple, list)):
                properties["selection_keys"] = str(
                    sorted(x[0] for x in field.selection)
                )
            else:
                properties["selection_keys"] = "function"
        elif field.type == "binary":
            properties["attachment"] = str(getattr(field, "attachment", False))
        # Only truthy properties are persisted, keeping the registry sparse.
        for key, value in properties.items():
            if value:
                model_registry.setdefault(fieldname, {})[key] = value
def log_xml_id(cr, module, xml_id):
    """
    Log xml_ids at load time in the records table.

    Called from:
        - tools/convert.py:xml_import._test_xml_id()
        - odoo/models.py:BaseModel._convert_records()
        - odoo/addons/base/models/ir_model.py:IrModelConstraint._reflect_model()

    # Gotchas
    - The module needs to be loaded with 'init', or the calling method
      won't be called. This can be brought about by installing the
      module or updating the 'state' field of the module to 'to install'
      or call the server with '--init <module>' and the database argument.
    - Do you get the right results immediately when installing the module?
      No, sorry. This method retrieves the model from the ir_model_table, but
      when the xml id is encountered for the first time, this method is called
      before the item is present in this table. Therefore, you will not
      get any meaningful results until the *second* time that you 'init'
      the module.
    - The good news is that the upgrade_analysis module that comes
      with this distribution allows you to deal with all of this with
      one click on the menu item Settings -> Customizations ->
      Database Structure -> OpenUpgrade -> Generate Records
    - You cannot reinitialize the modules in your production database
      and expect to keep working on it happily ever after. Do not perform
      this routine on your production database.

    :param module: The module that contains the xml_id
    :param xml_id: the xml_id, with or without 'module.' prefix
    """
    if not table_exists(cr, "upgrade_record"):
        return
    # Normalize to a fully qualified "module.name" external id.
    if "." not in xml_id:
        xml_id = "{}.{}".format(module, xml_id)
    cr.execute(
        "SELECT model FROM ir_model_data " "WHERE module = %s AND name = %s",
        xml_id.split("."),
    )
    record = cr.fetchone()
    if not record:
        _logger.warning("Cannot find xml_id %s", xml_id)
        return
    else:
        # Insert once per (module, model, xml_id) tuple.
        cr.execute(
            "SELECT id FROM upgrade_record "
            "WHERE module=%s AND model=%s AND name=%s AND type=%s",
            (module, record[0], xml_id, "xmlid"),
        )
        if not cr.fetchone():
            cr.execute(
                "INSERT INTO upgrade_record "
                "(create_date, module, model, name, type) "
                "values(NOW() AT TIME ZONE 'UTC', %s, %s, %s, %s)",
                (module, record[0], xml_id, "xmlid"),
            )
| 35.306383
| 8,297
|
362
|
py
|
PYTHON
|
15.0
|
# Modules excluded from the upgrade analysis, by exact name.
BLACKLIST_MODULES = []
# the hw_* modules are not affected by a migration as they don't
# contain any ORM functionality, but they do start up threads that
# delay the process and spit out annoying log messages continuously.
# We also don't want to analyze tests modules
BLACKLIST_MODULES_STARTS_WITH = ["hw_", "test_"]
BLACKLIST_MODULES_ENDS_WITH = ["_test"]
| 36.2
| 362
|
20,132
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2015-2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# flake8: noqa: C901
#####################################################################
# library providing a function to analyse two progressive database
# layouts from the OpenUpgrade server.
#####################################################################
import collections
import copy
try:
    from odoo.addons.openupgrade_scripts import apriori
except ImportError:
    # Fall back to an empty apriori mapping when openupgrade_scripts is not
    # available, so the analysis still runs (without rename/merge knowledge).
    from dataclasses import dataclass, field as dc_field

    @dataclass
    class NullApriori:
        # Attributes mirror those exposed by the real apriori module.
        renamed_modules: dict = dc_field(default_factory=dict)
        merged_modules: dict = dc_field(default_factory=dict)
        renamed_models: dict = dc_field(default_factory=dict)
        merged_models: dict = dc_field(default_factory=dict)

    apriori = NullApriori()
def module_map(module):
    """Map an old module name through the rename, then merge, tables."""
    renamed = apriori.renamed_modules
    if module in renamed:
        return renamed[module]
    return apriori.merged_modules.get(module, module)
def model_rename_map(model):
    """Map an old model name through the rename table only."""
    mapping = apriori.renamed_models
    return mapping.get(model, model)
def model_map(model):
    """Map an old model name through the rename, then merge, tables."""
    if model in apriori.renamed_models:
        return apriori.renamed_models[model]
    return apriori.merged_models.get(model, model)
def inv_model_map(model):
    """Map a new model name back to its pre-rename name."""
    inverse = {new: old for old, new in apriori.renamed_models.items()}
    return inverse.get(model, model)
# Audit-trail columns present on every model; never meaningful in a diff.
IGNORE_FIELDS = [
    "create_date",
    "create_uid",
    "id",
    "write_date",
    "write_uid",
]
def compare_records(dict_old, dict_new, fields):
    """
    Check equivalence of two OpenUpgrade field representations
    with respect to the keys in the 'fields' arguments.
    Take apriori knowledge into account for mapped modules or
    model names.
    Return True or False.
    """
    for field in fields:
        if field == "module":
            # Old module name must map onto the new one via apriori tables.
            if module_map(dict_old["module"]) != dict_new["module"]:
                return False
        elif field == "model":
            if model_rename_map(dict_old["model"]) != dict_new["model"]:
                return False
        elif field == "other_prefix":
            # Both records must have a prefix different from their module.
            if (
                dict_old["module"] != dict_old["prefix"]
                or dict_new["module"] != dict_new["prefix"]
            ):
                return False
            if dict_old["model"] == "ir.ui.view":
                # basically, to avoid the assets_backend case
                return False
        elif dict_old[field] != dict_new[field]:
            return False
    return True
def search(item, item_list, fields, get_all=None):
    """
    Find a match of a dictionary in a list of similar dictionaries
    with respect to the keys in the 'fields' arguments.
    Return the item if found or None.

    :param item: the record (a dict) to look up
    :param item_list: candidate records to search through
    :param fields: keys that must be equivalent (see compare_records)
    :param get_all: when truthy, return the list of every matching
        candidate whose 'module' differs from its 'prefix', instead of
        the first match
    """
    all_found = []
    for other in item_list:
        if not compare_records(item, other, fields):
            continue
        if not get_all:
            return other
        if other["module"] != other["prefix"]:
            all_found.append(other)
    if get_all:
        return all_found
    # search for renamed fields
    # NOTE(review): the guard below is always true -- `not item["field"]`
    # covers every falsy value (including None) and `item["field"] is not
    # None` covers every other value -- so the renamed-field lookup that
    # follows is unreachable dead code. Confirm the intended condition
    # (possibly `not item["field"] or item["isproperty"]`) before changing.
    if "field" in fields:
        for other in item_list:
            if not item["field"] or item["field"] is not None or item["isproperty"]:
                continue
            if compare_records(dict(item, field=other["field"]), other, fields):
                return other
    return None
def fieldprint(old, new, field, text, reprs):
    """Append one formatted analysis line for a field change to *reprs*.

    :param old: old-side field representation (dict)
    :param new: new-side field representation (dict), or "" for DEL/NEW lines
    :param field: name of the changed attribute, or "" when *text* is given
    :param text: ready-made message; when falsy, a generic
        "X is now '...' ('...')" message is built from *field*
    :param reprs: defaultdict(list) keyed by module name, mutated in place
    """
    field_desc = str(old["field"])
    if old["field"] not in ("_inherits", "_order"):
        field_desc += f" ({old['type']})"
    location = f"{old['module']:<12} / {old['model']:<24} / {field_desc:<30}"
    if not text:
        text = f"{field} is now '{new[field]}' ('{old[field]}')"
    if field in ("column1", "column2"):
        text += f" [{old['table']}]"
    if field == "relation":
        text += " [nothing to do]"
    reprs[module_map(old["module"])].append(f"{location}: {text}")
    if field == "module":
        # A module change is also reported under the receiving module.
        text = f"previously in module {old[field]}"
        location = f"{new['module']:<12} / {old['model']:<24} / {field_desc:<30}"
        reprs[module_map(new["module"])].append(f"{location}: {text}")
def report_generic(new, old, attrs, reprs):
    """Report differences between two matched field representations.

    For each attribute name in *attrs*, compare *old* and *new* and emit
    a line through fieldprint() when the value changed. Boolean flags get
    a human-readable toggle message; other attributes get the generic
    message built by fieldprint().
    """
    # attribute -> (message when turned on, message when turned off)
    toggle_messages = {
        "stored": ("is now stored", "not stored anymore"),
        "isfunction": ("now a function", "not a function anymore"),
        "isproperty": ("now a property", "not a property anymore"),
        "isrelated": ("now related", "not related anymore"),
    }
    for attr in attrs:
        if attr == "required":
            # Only the transition towards required is reported
            if old[attr] != new[attr] and new[attr]:
                fieldprint(old, new, "", "now required", reprs)
        elif attr in toggle_messages:
            if old[attr] != new[attr]:
                enabled, disabled = toggle_messages[attr]
                fieldprint(
                    old, new, "", enabled if new[attr] else disabled, reprs
                )
        elif attr == "table":
            if old[attr] != new[attr]:
                fieldprint(old, new, attr, "", reprs)
            if old[attr] and new[attr]:
                # The relation table exists on both sides: check columns too
                for column_key in ("column1", "column2"):
                    if old[column_key] != new[column_key]:
                        fieldprint(old, new, column_key, "", reprs)
        elif old[attr] != new[attr]:
            fieldprint(old, new, attr, "", reprs)
def compare_sets(old_records, new_records):
    """
    Compare a set of OpenUpgrade field representations.
    Try to match the equivalent fields in both sets.
    Return a textual representation of changes in a dictionary with
    module names as keys. Special case is the 'general' key
    which contains overall remarks and matching statistics.
    """
    reprs = collections.defaultdict(list)

    def clean_records(records):
        # Drop the bookkeeping columns present on every model
        result = []
        for record in records:
            if record["field"] not in IGNORE_FIELDS:
                result.append(record)
        return result

    old_records = clean_records(old_records)
    new_records = clean_records(new_records)
    # Remember the original size for the matching statistics below
    origlen = len(old_records)
    new_models = {column["model"] for column in new_records}
    old_models = {column["model"] for column in old_records}
    matched_direct = 0
    matched_other_module = 0
    matched_other_type = 0
    in_obsolete_models = 0
    # Models absent from the new set (also after applying apriori renames)
    obsolete_models = []
    for model in old_models:
        if model not in new_models:
            if model_map(model) not in new_models:
                obsolete_models.append(model)
    non_obsolete_old_records = []
    for column in copy.copy(old_records):
        if column["model"] in obsolete_models:
            in_obsolete_models += 1
        else:
            non_obsolete_old_records.append(column)

    def match(match_fields, report_fields, warn=False):
        # Consume matching old/new record pairs (removing them from the
        # shared lists) and report the differences listed in
        # report_fields; return the number of matched pairs.
        count = 0
        for column in copy.copy(non_obsolete_old_records):
            found = search(column, new_records, match_fields)
            if found:
                if warn:
                    pass
                    # print "Tentatively"
                report_generic(found, column, report_fields, reprs)
                old_records.remove(column)
                non_obsolete_old_records.remove(column)
                new_records.remove(found)
                count += 1
        return count

    matched_direct = match(
        ["module", "mode", "model", "field"],
        [
            "relation",
            "type",
            "selection_keys",
            "_inherits",
            "stored",
            "isfunction",
            "isrelated",
            "required",
            "table",
            "_order",
        ],
    )
    # other module, same type and operation
    matched_other_module = match(
        ["mode", "model", "field", "type"],
        [
            "module",
            "relation",
            "selection_keys",
            "_inherits",
            "stored",
            "isfunction",
            "isrelated",
            "required",
            "table",
            "_order",
        ],
    )
    # other module, same operation, other type
    # NOTE(review): these match fields are identical to the direct match
    # above, so this pass cannot find anything the first pass did not
    # already consume -- the counter is effectively always 0. Possibly
    # "type" was meant to be part of the direct match fields instead;
    # confirm against upstream before changing.
    matched_other_type = match(
        ["module", "mode", "model", "field"],
        [
            "relation",
            "type",
            "selection_keys",
            "_inherits",
            "stored",
            "isfunction",
            "isrelated",
            "required",
            "table",
            "_order",
        ],
    )
    # Info that is displayed for deleted fields
    printkeys_old = [
        "relation",
        "required",
        "selection_keys",
        "_inherits",
        "mode",
        "attachment",
    ]
    # Info that is displayed for new fields
    printkeys_new = printkeys_old + [
        "hasdefault",
    ]
    # Remaining old records are deletions
    for column in old_records:
        if column["field"] == "_order":
            continue
        # we do not care about removed non stored function fields
        if not column["stored"] and (column["isfunction"] or column["isrelated"]):
            continue
        if column["mode"] == "create":
            column["mode"] = ""
        # Show "key: value" for informative values, bare key when the
        # value equals the key itself (e.g. boolean flags)
        extra_message = ", ".join(
            [
                k + ": " + str(column[k]) if k != str(column[k]) else k
                for k in printkeys_old
                if column[k]
            ]
        )
        if extra_message:
            extra_message = " " + extra_message
        fieldprint(column, "", "", "DEL" + extra_message, reprs)
    # Remaining new records are additions
    for column in new_records:
        if column["field"] == "_order":
            continue
        # we do not care about newly added non stored function fields
        if not column["stored"] and (column["isfunction"] or column["isrelated"]):
            continue
        if column["mode"] == "create":
            column["mode"] = ""
        printkeys = printkeys_new.copy()
        if column["isfunction"] or column["isrelated"]:
            printkeys.extend(["isfunction", "isrelated", "stored"])
        extra_message = ", ".join(
            [
                k + ": " + str(column[k]) if k != str(column[k]) else k
                for k in printkeys
                if column[k]
            ]
        )
        if extra_message:
            extra_message = " " + extra_message
        fieldprint(column, "", "", "NEW" + extra_message, reprs)
    # Matching statistics, collected under the special 'general' key
    for line in [
        "# %d fields matched," % (origlen - len(old_records)),
        "# Direct match: %d" % matched_direct,
        "# Found in other module: %d" % matched_other_module,
        "# Found with different type: %d" % matched_other_type,
        "# In obsolete models: %d" % in_obsolete_models,
        "# Not matched: %d" % len(old_records),
        "# New columns: %d" % len(new_records),
    ]:
        reprs["general"].append(line)
    return reprs
def compare_xml_sets(old_records, new_records):
    """Compare two sets of XML id representations.

    Classify records as deleted, new, moved or renamed, detect dropped
    domains, changed definitions and noupdate switches, and return the
    resulting lines in a dictionary keyed by module name.
    """
    reprs = collections.defaultdict(list)

    def match_updates(match_fields):
        # Prune records that share model+name with another record and
        # whose module differs from their xml id prefix (see search()
        # with get_all=True), so updates of existing records are not
        # reported as changes.
        # NOTE(review): old_updated/new_updated are never populated, so
        # the return value is always an empty list -- apparently
        # vestigial code; confirm against upstream.
        old_updated, new_updated = {}, {}
        for column in copy.copy(old_records):
            found_all = search(column, old_records, match_fields, True)
            for found in found_all:
                old_records.remove(found)
        for column in copy.copy(new_records):
            found_all = search(column, new_records, match_fields, True)
            for found in found_all:
                new_records.remove(found)
        matched_records = list(old_updated.values()) + list(new_updated.values())
        matched_records = [y for x in matched_records for y in x]
        return matched_records

    def match(match_fields, match_type="direct"):
        # Pair up old/new records on match_fields, annotate the pair with
        # what changed, and consume both from the shared lists. For
        # non-direct types both sides are returned; for direct matches
        # only records with a reportable change are returned.
        matched_records = []
        for column in copy.copy(old_records):
            found = search(column, new_records, match_fields)
            if found:
                old_records.remove(column)
                new_records.remove(found)
                if match_type != "direct":
                    column["old"] = True
                    found["new"] = True
                    column[match_type] = found["module"]
                    found[match_type] = column["module"]
                # True only when a non-trivial domain was dropped
                found["domain"] = (
                    column["domain"] != found["domain"]
                    and column["domain"] != "[]"
                    and found["domain"] is False
                )
                column["domain"] = False
                # Human-readable definition diff, or a falsy value
                found["definition"] = (
                    column["definition"]
                    and column["definition"] != found["definition"]
                    and "is now '{}' ('{}')".format(
                        found["definition"], column["definition"]
                    )
                )
                column["definition"] = False
                column["noupdate_switched"] = False
                found["noupdate_switched"] = column["noupdate"] != found["noupdate"]
                if match_type != "direct":
                    matched_records.append(column)
                    matched_records.append(found)
                elif (
                    match_type == "direct" and (found["domain"] or found["definition"])
                ) or found["noupdate_switched"]:
                    matched_records.append(found)
        return matched_records

    # direct match
    modified_records = match(["module", "model", "name"])
    # updated records (will be excluded)
    match_updates(["model", "name"])
    # other module, same full xmlid
    moved_records = match(["model", "name"], "moved")
    # other module, same suffix, other prefix
    renamed_records = match(["model", "suffix", "other_prefix"], "renamed")
    # Tag the leftovers as plain deletions/additions
    for record in old_records:
        record["old"] = True
        record["domain"] = False
        record["definition"] = False
        record["noupdate_switched"] = False
    for record in new_records:
        record["new"] = True
        record["domain"] = False
        record["definition"] = False
        record["noupdate_switched"] = False
    # Sort by model, deletions before the rest, then by name
    sorted_records = sorted(
        old_records + new_records + moved_records + renamed_records + modified_records,
        key=lambda k: (k["model"], "old" in k, k["name"]),
    )
    for entry in sorted_records:
        content = ""
        if "old" in entry:
            content = "DEL %(model)s: %(name)s" % entry
            if "moved" in entry:
                content += " [moved to %(moved)s module]" % entry
            elif "renamed" in entry:
                content += " [renamed to %(renamed)s module]" % entry
        elif "new" in entry:
            content = "NEW %(model)s: %(name)s" % entry
            if "moved" in entry:
                content += " [moved from %(moved)s module]" % entry
            elif "renamed" in entry:
                content += " [renamed from %(renamed)s module]" % entry
        if "old" not in entry and "new" not in entry:
            content = "%(model)s: %(name)s" % entry
        if entry["domain"]:
            content += " (deleted domain)"
        if entry["definition"]:
            content += " (changed definition: %(definition)s)" % entry
        if entry["noupdate"]:
            content += " (noupdate)"
        if entry["noupdate_switched"]:
            content += " (noupdate switched)"
        reprs[module_map(entry["module"])].append(content)
    return reprs
def compare_model_sets(old_records, new_records):
    """
    Compare a set of OpenUpgrade model representations.

    Identify obsolete, renamed and moved models between the two sets,
    using the apriori tables for known renames. Return a textual
    representation of the changes in a dictionary with module names as
    keys; the 'general' key collects an overall summary.
    """
    reprs = collections.defaultdict(list)

    # model name -> owning module, per side
    new_models = {column["model"]: column["module"] for column in new_records}
    old_models = {column["model"]: column["module"] for column in old_records}

    obsolete_models = []
    for column in copy.copy(old_records):
        model = column["model"]
        if model in old_models:
            if model not in new_models:
                if model_map(model) not in new_models:
                    # Gone entirely: absent from the new set even after
                    # applying the apriori renames
                    obsolete_models.append(model)
                    text = "obsolete model %s" % model
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[module_map(column["module"])].append(text)
                    reprs["general"].append(
                        "obsolete model %s [module %s]"
                        % (model, module_map(column["module"]))
                    )
                else:
                    # Known rename; mention the target module when the
                    # renamed model also changed module
                    moved_module = ""
                    if module_map(column["module"]) != new_models[model_map(model)]:
                        moved_module = " in module %s" % new_models[model_map(model)]
                    text = "obsolete model {} (renamed to {}{})".format(
                        model,
                        model_map(model),
                        moved_module,
                    )
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[module_map(column["module"])].append(text)
                    reprs["general"].append(
                        "obsolete model %s (renamed to %s) [module %s]"
                        % (model, model_map(model), module_map(column["module"]))
                    )
            else:
                # Same model name on both sides: report when it changed
                # module. The receiving module's "(moved from ...)" line
                # is produced by the new_records loop below, so only the
                # "(moved to ...)" line is emitted here. (A dead
                # "moved from" string that was built and discarded here
                # has been removed.)
                if module_map(column["module"]) != new_models[model]:
                    text = "model {} (moved to {})".format(model, new_models[model])
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[module_map(column["module"])].append(text)
    for column in copy.copy(new_records):
        model = column["model"]
        if model in new_models:
            if model not in old_models:
                if inv_model_map(model) not in old_models:
                    # Genuinely new model
                    text = "new model %s" % model
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[column["module"]].append(text)
                    reprs["general"].append(
                        "new model {} [module {}]".format(model, column["module"])
                    )
                else:
                    # Counterpart of a known rename; mention the source
                    # module when it also changed
                    moved_module = ""
                    if column["module"] != module_map(old_models[inv_model_map(model)]):
                        moved_module = (
                            " in module %s" % old_models[inv_model_map(model)]
                        )
                    text = "new model {} (renamed from {}{})".format(
                        model,
                        inv_model_map(model),
                        moved_module,
                    )
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[column["module"]].append(text)
                    reprs["general"].append(
                        "new model %s (renamed from %s) [module %s]"
                        % (model, inv_model_map(model), column["module"])
                    )
            else:
                # Same model name on both sides: receiving side of a move
                if column["module"] != module_map(old_models[model]):
                    text = "model {} (moved from {})".format(model, old_models[model])
                    if column["model_type"]:
                        text += " [%s]" % column["model_type"]
                    reprs[column["module"]].append(text)
    return reprs
| 36.804388
| 20,132
|
1,843
|
py
|
PYTHON
|
15.0
|
import logging
_logger = logging.getLogger(__name__)
class OdooPatch(object):
    """Context manager that applies a collection of monkeypatches.

    Subclasses register a patch by defining a `target` member (the object
    or module to patch) and a list `method_names` of attribute names on
    the target, then redefining those methods under the same names on the
    subclass. While the context is active, every registered method is
    swapped in; the replaced callable is stashed on the new method as
    `_original_method` and restored on exit.

    Example:

    ```
    from odoo import api
    from odoo.addons.some_module.models.my_model import MyModel

    class MyModelPatch(OdooPatch):
        target = MyModel
        method_names = ['do_something']

        @api.model
        def do_something(self):
            res = MyModelPatch.do_something._original_method()
            ...
            return res
    ```

    Usage:

    ```
    with OdooPatch():
        do_something()
    ```
    """

    def __enter__(self):
        # Install every registered patch, keeping a handle on the
        # original callable for restoration in __exit__.
        for patch_cls in OdooPatch.__subclasses__():
            target = patch_cls.target
            for name in patch_cls.method_names:
                replacement = getattr(patch_cls, name)
                replacement._original_method = getattr(target, name)
                setattr(target, name, replacement)

    def __exit__(self, exc_type, exc_value, tb):
        # Undo the patches; warn when a patched method lost its stashed
        # original (e.g. it was replaced again by third-party code).
        for patch_cls in OdooPatch.__subclasses__():
            target = patch_cls.target
            for name in patch_cls.method_names:
                current = getattr(target, name)
                if hasattr(current, "_original_method"):
                    setattr(target, name, current._original_method)
                else:
                    _logger.warning(
                        "_original_method not found on method %s of class %s",
                        name,
                        target,
                    )
| 30.213115
| 1,843
|
739
|
py
|
PYTHON
|
15.0
|
from odoo import api, models
from ... import upgrade_log
from ..odoo_patch import OdooPatch
class BaseModelPatch(OdooPatch):
    """Patch BaseModel._convert_records to log the xml ids of imported data."""

    target = models.BaseModel
    method_names = ["_convert_records"]

    @api.model
    def _convert_records(self, records, log=lambda a: None):
        """Log data ids that are imported with `load`"""
        # The name of the module being loaded is expected in the context
        # (set by Odoo's data-loading machinery)
        current_module = self.env.context["module"]
        for res in BaseModelPatch._convert_records._original_method(
            self, records, log=log
        ):
            _id, xid, _record, _info = res
            if xid:
                # Fully qualify bare xml ids with the current module name
                xid = xid if "." in xid else "{}.{}".format(current_module, xid)
                upgrade_log.log_xml_id(self.env.cr, current_module, xid)
            # Pass every record through unchanged (generator delegation)
            yield res
| 32.130435
| 739
|
397
|
py
|
PYTHON
|
15.0
|
from odoo.tools.convert import xml_import
from .... import upgrade_log
from ...odoo_patch import OdooPatch
class XMLImportPatch(OdooPatch):
    """Patch xml_import._test_xml_id to log every xml id encountered."""

    target = xml_import
    method_names = ["_test_xml_id"]

    def _test_xml_id(self, xml_id):
        # Delegate to the original validation, then record the xml id
        # for the upgrade analysis
        res = XMLImportPatch._test_xml_id._original_method(self, xml_id)
        upgrade_log.log_xml_id(self.env.cr, self.module, xml_id)
        return res
| 28.357143
| 397
|
1,482
|
py
|
PYTHON
|
15.0
|
from odoo.addons.base.models import ir_model
from ...... import upgrade_log
from .....odoo_patch import OdooPatch
class IrModelConstraintPatch(OdooPatch):
    """Patch IrModelConstraint._reflect_model to log constraint xml ids."""

    target = ir_model.IrModelConstraint
    method_names = ["_reflect_model"]

    def _reflect_model(self, model):
        """Reflect the _sql_constraints of the given model."""
        # NOTE(review): this does not delegate to _original_method; the
        # body appears to mirror the core implementation with the logging
        # loop appended at the end -- confirm it stays in sync with the
        # core version it replaces.

        def cons_text(txt):
            # Normalize a constraint definition for comparison/storage
            return txt.lower().replace(", ", ",").replace(" (", "(")

        # map each constraint on the name of the module where it is defined
        constraint_module = {
            constraint[0]: cls._module
            for cls in reversed(type(model).mro())
            if not getattr(cls, "pool", None)
            for constraint in getattr(cls, "_local_sql_constraints", ())
        }
        data_list = []
        for (key, definition, message) in model._sql_constraints:
            conname = "%s_%s" % (model._table, key)
            module = constraint_module.get(key)
            record = self._reflect_constraint(
                model, conname, "u", cons_text(definition), module, message
            )
            if record:
                xml_id = "%s.constraint_%s" % (module, conname)
                data_list.append(dict(xml_id=xml_id, record=record))
        self.env["ir.model.data"]._update_xmlids(data_list)
        # Record every reflected constraint xml id for the analysis
        for data in data_list:
            xml_id = data.get("xml_id")
            module = xml_id.split(".")[0]
            upgrade_log.log_xml_id(self.env.cr, module, xml_id)
| 37.05
| 1,482
|
1,032
|
py
|
PYTHON
|
15.0
|
import logging
from threading import current_thread
from odoo import SUPERUSER_ID, api
from odoo.modules.registry import Registry
from .... import upgrade_log
from ...odoo_patch import OdooPatch
_logger = logging.getLogger(__name__)
class RegistryPatch(OdooPatch):
    """Patch Registry.init_models to record the model layout per module."""

    target = Registry
    method_names = ["init_models"]

    def init_models(self, cr, model_names, context, install=True):
        # The module being initialized is passed in the context by Odoo
        module_name = context["module"]
        _logger.debug("Logging models of module %s", module_name)
        # Thread-local accumulated registry; assumes `_upgrade_registry`
        # was attached to the current thread by the analysis entry point
        # before module loading started -- confirm against the caller
        upg_registry = current_thread()._upgrade_registry
        local_registry = {}
        env = api.Environment(cr, SUPERUSER_ID, {})
        for model in env.values():
            # Only ORM-managed models (_auto) are logged
            if not model._auto:
                continue
            upgrade_log.log_model(model, local_registry)
        upgrade_log.compare_registries(
            cr, context["module"], upg_registry, local_registry
        )
        # Chain to the original implementation
        return RegistryPatch.init_models._original_method(
            self, cr, model_names, context, install=install
        )
| 31.272727
| 1,032
|
1,787
|
py
|
PYTHON
|
15.0
|
from odoo.tests import common, tagged
@tagged("post_install", "-at_install")
class TestUpgradeAnalysis(common.TransactionCase):
    """Tests for the module-selection wizard of the upgrade analysis."""

    def setUp(self):
        super().setUp()
        self.IrModuleModule = self.env["ir.module.module"]
        # Representative modules: one Odoo core, one from this OCA repo
        self.product_module = self.IrModuleModule.search([("name", "=", "product")])
        self.sale_module = self.IrModuleModule.search([("name", "=", "sale")])
        self.upgrade_analysis = self.IrModuleModule.search(
            [("name", "=", "upgrade_analysis")]
        )

    def test_upgrade_install_wizard(self):
        """Exercise the select/unselect helpers of upgrade.install.wizard."""
        InstallWizard = self.env["upgrade.install.wizard"]
        wizard = InstallWizard.create({})
        wizard.select_odoo_modules()
        self.assertTrue(
            self.product_module.id in wizard.module_ids.ids,
            "Select Odoo module should select 'product' module",
        )
        wizard.select_oca_modules()
        self.assertTrue(
            self.upgrade_analysis.id in wizard.module_ids.ids,
            "Select OCA module should select 'upgrade_analysis' module",
        )
        wizard.select_other_modules()
        self.assertFalse(
            self.product_module.id in wizard.module_ids.ids,
            "Select Other module should not select 'product' module",
        )
        wizard.unselect_modules()
        self.assertEqual(
            wizard.module_ids.ids, [], "Unselect module should clear the selection"
        )

    # For the time being, tests doesn't call install_modules() function
    # because installing module in a test context will execute the test
    # of the installed modules, raising finally an error:
    # TypeError: Many2many fields ir.actions.server.partner_ids and
    # ir.actions.server.partner_ids use the same table and columns
| 1,787
|
1,031
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import os
from odoo import fields, models
from odoo.modules import get_module_path
class IrModuleModule(models.Model):
    """Extend modules with flags telling where they come from."""

    _inherit = "ir.module.module"

    # True when the module lives in an "addons" directory (Odoo core)
    is_odoo_module = fields.Boolean(
        compute="_compute_is_odoo_module",
    )
    # True when the module's website points at an OCA repository
    is_oca_module = fields.Boolean(compute="_compute_is_oca_module")

    def _compute_is_oca_module(self):
        """Flag modules hosted in an OCA repository, based on the website."""
        for module in self:
            # `website` is False when unset; guard against `"/OCA/" in False`
            # which would raise a TypeError
            module.is_oca_module = "/OCA/" in (module.website or "")

    def _compute_is_odoo_module(self):
        """Flag modules located in a directory named 'addons' (Odoo core)."""
        for module in self:
            module_path = get_module_path(module.name)
            if not module_path:
                # Module is not present on disk
                module.is_odoo_module = False
                continue
            absolute_repo_path = os.path.split(module_path)[0]
            _, relative_repo_path = os.path.split(absolute_repo_path)
            module.is_odoo_module = relative_repo_path == "addons"
| 32.21875
| 1,031
|
3,098
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from urllib.error import URLError
import odoorpc
from odoo import api, fields, models
from odoo.exceptions import UserError
from odoo.tools.translate import _
class UpgradeComparisonConfig(models.Model):
    """Connection settings for the remote (previous-version) Odoo instance."""

    _name = "upgrade.comparison.config"
    _description = "Upgrade Comparison Configuration"

    name = fields.Char()
    server = fields.Char(required=True, default="localhost")
    port = fields.Integer(required=True, default=8069)
    database = fields.Char(required=True)
    username = fields.Char(required=True, default="admin")
    password = fields.Char(required=True, default="admin")
    # Remote server version, stored as a side effect of get_connection()
    version = fields.Char()
    analysis_ids = fields.One2many(
        string="Analyses", comodel_name="upgrade.analysis", inverse_name="config_id"
    )
    analysis_qty = fields.Integer(compute="_compute_analysis_qty")

    @api.depends("analysis_ids")
    def _compute_analysis_qty(self):
        # Number of analyses run with this configuration (UI counter)
        for config in self:
            config.analysis_qty = len(config.analysis_ids)

    def get_connection(self):
        """Return a logged-in odoorpc connection to the remote server.

        Also stores the remote server version on the record. Raises a
        UserError when the server cannot be reached.
        """
        self.ensure_one()
        try:
            remote = odoorpc.ODOO(self.server, port=self.port)
        except URLError as exc:
            raise UserError(
                _("Could not connect the Odoo server at %(server)s:%(port)s")
                % {"server": self.server, "port": self.port}
            ) from exc
        remote.login(self.database, self.username, self.password)
        self.version = remote.version
        return remote

    def test_connection(self):
        """Try the connection and show a notification with the result."""
        self.ensure_one()
        try:
            connection = self.get_connection()
            user_model = connection.env["res.users"]
            ids = user_model.search([("login", "=", "admin")])
            user_info = user_model.read([ids[0]], ["name"])[0]
        except Exception as e:
            # Surface any failure (connection, login, read) to the user
            raise UserError(_("Connection failed.\n\nDETAIL: %s") % e) from e
        return {
            "type": "ir.actions.client",
            "tag": "display_notification",
            "params": {
                "type": "info",
                "message": _(
                    "You are correctly connected to the server %(server)s"
                    " (version %(version)s) with the user %(user_name)s"
                )
                % dict(
                    server=self.server,
                    version=self.version,
                    user_name=user_info["name"],
                ),
            },
        }

    def new_analysis(self):
        """Create a new analysis for this config and open its form view."""
        self.ensure_one()
        analysis = self.env["upgrade.analysis"].create({"config_id": self.id})
        return {
            "name": analysis._description,
            "view_mode": "form",
            "res_model": analysis._name,
            "type": "ir.actions.act_window",
            # "target": "new",
            "res_id": analysis.id,
            # "nodestroy": True,
        }

    def action_show_analysis(self):
        # NOTE(review): stub -- returns an empty action; presumably meant
        # to open the related analyses. Confirm intended behaviour.
        self.ensure_one()
        return {}
| 32.270833
| 3,098
|
6,006
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016-2020 Opener B.V. <https://opener.am>
# Copyright 2019 ForgeFlow <https://forgeflow.com>
# Copyright 2020 GRAP <https://grap.coop>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import ast
import logging
import os
from odoo import api, fields, models
from odoo.exceptions import ValidationError
from odoo.modules.module import MANIFEST_NAMES, get_module_path
from odoo.tools.translate import _
_logger = logging.getLogger(__name__)
class UpgradeRecord(models.Model):
    """One analysed item of the local database: a field, xml id or model."""

    _name = "upgrade.record"
    _description = "Upgrade Record"

    # Fully qualified identifier (e.g. "module.xmlid" for xml id records)
    name = fields.Char(readonly=True)
    module = fields.Char(readonly=True)
    model = fields.Char(readonly=True)
    field = fields.Char(readonly=True)
    mode = fields.Selection(
        [("create", "Create"), ("modify", "Modify")],
        help="Set to Create if a field is newly created "
        "in this module. If this module modifies an attribute of an "
        "existing field, set to Modify.",
        readonly=True,
    )
    type = fields.Selection(
        [("field", "Field"), ("xmlid", "XML ID"), ("model", "Model")],
        readonly=True,
    )
    # Arbitrary key/value attributes attached to field records
    attribute_ids = fields.One2many(
        comodel_name="upgrade.attribute", inverse_name="record_id", readonly=True
    )
    noupdate = fields.Boolean(readonly=True)
    domain = fields.Char(readonly=True)
    definition = fields.Char(readonly=True)
    prefix = fields.Char(compute="_compute_prefix_and_suffix")
    suffix = fields.Char(compute="_compute_prefix_and_suffix")
    model_original_module = fields.Char(compute="_compute_model_original_module")
    model_type = fields.Char(compute="_compute_model_type")

    @api.depends("name")
    def _compute_prefix_and_suffix(self):
        # Split "module.identifier" into its two parts.
        # NOTE(review): assumes every name contains a '.'; a bare name
        # would raise ValueError on unpacking -- confirm names are always
        # fully qualified when records are created.
        for rec in self:
            rec.prefix, rec.suffix = rec.name.split(".", 1)

    @api.depends("model", "type")
    def _compute_model_original_module(self):
        # Module that originally defined the model (only meaningful for
        # records of type 'model')
        for rec in self:
            if rec.type == "model":
                rec.model_original_module = self.env[rec.model]._original_module
            else:
                rec.model_original_module = ""

    @api.depends("model", "type")
    def _compute_model_type(self):
        # Classify the model: "transient", "abstract", "sql_view" or ""
        # (regular model), based on its _auto/_transient/_abstract flags
        for rec in self:
            if rec.type == "model":
                model = self.env[rec.model]
                if model._auto and model._transient:
                    rec.model_type = "transient"
                elif model._auto:
                    rec.model_type = ""
                elif not model._auto and model._abstract:
                    rec.model_type = "abstract"
                else:
                    rec.model_type = "sql_view"
            else:
                rec.model_type = ""

    @api.model
    def field_dump(self):
        """Return a list of dicts representing all field-type records.

        Every dict carries the full key set below (missing attributes are
        False) plus, for many2many tables, the relation column names.
        """
        keys = [
            "attachment",
            "module",
            "mode",
            "model",
            "field",
            "type",
            "isfunction",
            "isproperty",
            "isrelated",
            "relation",
            "required",
            "stored",
            "selection_keys",
            "hasdefault",
            "table",
            "_inherits",
            "_order",
        ]

        template = {x: False for x in keys}
        data = []
        for record in self.search([("type", "=", "field")]):
            repre = template.copy()
            repre.update(
                {
                    "module": record.module,
                    "model": record.model,
                    "field": record.field,
                    "mode": record.mode,
                }
            )
            # Overlay the stored key/value attributes
            repre.update({x.name: x.value for x in record.attribute_ids})
            if repre["table"]:
                # Resolve the m2m column names from the live field object
                repre.update(
                    {
                        "column1": self.env[repre["model"]]
                        ._fields[repre["field"]]
                        .column1,
                        "column2": self.env[repre["model"]]
                        ._fields[repre["field"]]
                        .column2,
                    }
                )
            data.append(repre)
        return data

    @api.model
    def list_modules(self):
        """Return the set of covered modules"""
        self.env.cr.execute(
            """SELECT DISTINCT(module) FROM upgrade_record
            ORDER BY module"""
        )
        return [module for module, in self.env.cr.fetchall()]

    @staticmethod
    def _read_manifest(addon_dir):
        """Parse and return the manifest dict found in *addon_dir*.

        Raises ValidationError when no readable manifest file exists.
        """
        for manifest_name in MANIFEST_NAMES:
            if os.access(os.path.join(addon_dir, manifest_name), os.R_OK):
                with open(os.path.join(addon_dir, manifest_name), "r") as f:
                    manifest_string = f.read()
                    return ast.literal_eval(manifest_string)
        raise ValidationError(
            _("No manifest found in %(addon_dir)s") % {"addon_dir": addon_dir}
        )

    @api.model
    def get_xml_records(self, module):
        """Return all XML records from the given module"""
        addon_dir = get_module_path(module)
        manifest = self._read_manifest(addon_dir)
        # The order of the keys are important.
        # Load files in the same order as in
        # module/loading.py:load_module_graph
        files = []
        for key in ["init_xml", "update_xml", "data"]:
            if not manifest.get(key):
                continue
            for xml_file in manifest[key]:
                if not xml_file.lower().endswith(".xml"):
                    continue
                parts = xml_file.split("/")
                try:
                    with open(os.path.join(addon_dir, *parts), "r") as xml_handle:
                        files.append(xml_handle.read())
                except UnicodeDecodeError:
                    # Skip files that are not valid text in the default
                    # encoding, but keep processing the rest
                    _logger.warning(
                        "Encoding error: Unable to read %s",
                        os.path.join(addon_dir, *parts),
                    )
                    continue
        return files
| 32.464865
| 6,006
|
536
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class UpgradeAttribute(models.Model):
    """Key/value attribute attached to an upgrade.record of type field."""

    _name = "upgrade.attribute"
    _description = "Upgrade Attribute"

    # Attribute key (e.g. "relation", "required", "table")
    name = fields.Char(readonly=True)
    # Attribute value, stored as text
    value = fields.Char(readonly=True)
    record_id = fields.Many2one(
        comodel_name="upgrade.record",
        index=True,
        ondelete="CASCADE",
        readonly=True,
    )
| 25.52381
| 536
|
22,546
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016-2020 Opener B.V. <https://opener.am>
# Copyright 2019 ForgeFlow <https://forgeflow.com>
# Copyright 2020 GRAP <https://grap.coop>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# flake8: noqa: C901
import logging
import os
from copy import deepcopy
from lxml import etree
from mako.template import Template
from odoo import fields, models, release
from odoo.exceptions import ValidationError
from odoo.modules import get_module_path
from odoo.tools import config
from odoo.tools.convert import nodeattr2bool
from odoo.tools.translate import _
try:
from odoo.addons.openupgrade_scripts.apriori import merged_modules, renamed_modules
except ImportError:
renamed_modules = {}
merged_modules = {}
from .. import compare
_logger = logging.getLogger(__name__)
_IGNORE_MODULES = ["openupgrade_records", "upgrade_analysis"]
class UpgradeAnalysis(models.Model):
_name = "upgrade.analysis"
_description = "Upgrade Analyses"
analysis_date = fields.Datetime(readonly=True)
state = fields.Selection(
[("draft", "draft"), ("done", "Done")], readonly=True, default="draft"
)
config_id = fields.Many2one(
string="Comparison Config",
comodel_name="upgrade.comparison.config",
readonly=True,
required=True,
)
log = fields.Text(readonly=True)
upgrade_path = fields.Char(
compute="_compute_upgrade_path",
readonly=True,
help=(
"The base file path to save the analyse files of Odoo modules. "
"Taken from Odoo's --upgrade-path command line option or the "
"'scripts' subdirectory in the openupgrade_scripts addon."
),
)
write_files = fields.Boolean(
help="Write analysis files to the module directories", default=True
)
    def _compute_upgrade_path(self):
        """Return the --upgrade-path configuration option or the `scripts`
        directory in `openupgrade_scripts` if available
        """
        res = config.get("upgrade_path", False)
        if not res:
            # Fall back to the scripts directory shipped with the
            # openupgrade_scripts addon, when that addon is installed
            module_path = get_module_path("openupgrade_scripts", display_warning=False)
            if module_path:
                res = os.path.join(module_path, "scripts")
        self.upgrade_path = res
    def _get_remote_model(self, connection, model):
        """Return the remote proxy model used to dump records.

        Remote servers older than 14.0 expose 'openupgrade.record'
        (from the openupgrade_records module); later versions expose
        'upgrade.record'. Returns False for any other *model* argument.
        """
        self.ensure_one()
        if model == "record":
            if float(self.config_id.version) < 14.0:
                return connection.env["openupgrade.record"]
            else:
                return connection.env["upgrade.record"]
        return False
def _write_file(
self, module_name, version, content, filename="upgrade_analysis.txt"
):
module = self.env["ir.module.module"].search([("name", "=", module_name)])[0]
if module.is_odoo_module:
if not self.upgrade_path:
return (
"ERROR: no upgrade_path set when writing analysis of %s\n"
% module_name
)
full_path = os.path.join(self.upgrade_path, module_name, version)
else:
full_path = os.path.join(
get_module_path(module_name), "migrations", version
)
if not os.path.exists(full_path):
try:
os.makedirs(full_path)
except os.error:
return "ERROR: could not create migrations directory %s:\n" % (
full_path
)
logfile = os.path.join(full_path, filename)
try:
f = open(logfile, "w")
except Exception:
return "ERROR: could not open file %s for writing:\n" % logfile
_logger.debug("Writing analysis to %s", logfile)
f.write(content)
f.close()
return None
    def analyze(self):
        """
        Retrieve both sets of database representations,
        perform the comparison and register the resulting
        change set.

        Field, XML-id and model representations are fetched from the
        remote (old version) server and from the local database, compared
        per module, and the results are written to per-module analysis
        files and/or the wizard's log field.
        """
        self.ensure_one()
        self.write(
            {
                "analysis_date": fields.Datetime.now(),
            }
        )
        connection = self.config_id.get_connection()
        RemoteRecord = self._get_remote_model(connection, "record")
        LocalRecord = self.env["upgrade.record"]
        # Retrieve field representations and compare
        remote_records = RemoteRecord.field_dump()
        local_records = LocalRecord.field_dump()
        res = compare.compare_sets(remote_records, local_records)
        # Retrieve xml id representations and compare
        flds = [
            "module",
            "model",
            "name",
            "noupdate",
            "prefix",
            "suffix",
            "domain",
            "definition",
        ]
        local_xml_records = [
            {field: record[field] for field in flds}
            for record in LocalRecord.search([("type", "=", "xmlid")])
        ]
        remote_xml_record_ids = RemoteRecord.search([("type", "=", "xmlid")])
        remote_xml_records = [
            {field: record[field] for field in flds}
            for record in RemoteRecord.read(remote_xml_record_ids, flds)
        ]
        res_xml = compare.compare_xml_sets(remote_xml_records, local_xml_records)
        # Retrieve model representations and compare
        flds = [
            "module",
            "model",
            "name",
            "model_original_module",
            "model_type",
        ]
        local_model_records = [
            {field: record[field] for field in flds}
            for record in LocalRecord.search([("type", "=", "model")])
        ]
        remote_model_record_ids = RemoteRecord.search([("type", "=", "model")])
        remote_model_records = [
            {field: record[field] for field in flds}
            for record in RemoteRecord.read(remote_model_record_ids, flds)
        ]
        res_model = compare.compare_model_sets(
            remote_model_records, local_model_records
        )
        # Every module mentioned by any of the six record sets
        affected_modules = sorted(
            {
                record["module"]
                for record in remote_records
                + local_records
                + remote_xml_records
                + local_xml_records
                + remote_model_records
                + local_model_records
            }
        )
        if "base" in affected_modules:
            # NOTE(review): this try body is empty, so the ImportError
            # branch below is unreachable dead code — presumably a leftover
            # from a removed "import openupgrade_scripts" check; confirm.
            try:
                pass
            except ImportError:
                _logger.error(
                    "You are using upgrade_analysis on core modules without "
                    " having openupgrade_scripts module available."
                    " The analysis process will not work properly,"
                    " if you are generating analysis for the odoo modules"
                    " in an openupgrade context."
                )
        # reorder and output the result
        keys = ["general"] + affected_modules
        modules = {
            module["name"]: module
            for module in self.env["ir.module.module"].search(
                [("state", "=", "installed")]
            )
        }
        general_log = ""
        no_changes_modules = []
        for ignore_module in _IGNORE_MODULES:
            if ignore_module in keys:
                keys.remove(ignore_module)
        for key in keys:
            contents = "---Models in module '%s'---\n" % key
            if key in res_model:
                contents += "\n".join([str(line) for line in res_model[key]])
                if res_model[key]:
                    contents += "\n"
            contents += "---Fields in module '%s'---\n" % key
            if key in res:
                contents += "\n".join([str(line) for line in sorted(res[key])])
                if res[key]:
                    contents += "\n"
            contents += "---XML records in module '%s'---\n" % key
            if key in res_xml:
                contents += "\n".join([str(line) for line in res_xml[key]])
                if res_xml[key]:
                    contents += "\n"
            if key not in res and key not in res_xml and key not in res_model:
                contents += "---nothing has changed in this module--\n"
                no_changes_modules.append(key)
            if key == "general":
                general_log += contents
                continue
            if compare.module_map(key) not in modules:
                general_log += (
                    "ERROR: module not in list of installed modules:\n" + contents
                )
                continue
            if key not in modules:
                # no need to log in full log the merged/renamed modules
                continue
            if self.write_files:
                # On success contents only goes to the file; on failure
                # both the error and the contents go to the general log.
                error = self._write_file(key, modules[key].installed_version, contents)
                if error:
                    general_log += error
                    general_log += contents
            else:
                general_log += contents
        # Store the full log
        if self.write_files and "base" in modules:
            self._write_file(
                "base",
                modules["base"].installed_version,
                general_log,
                "upgrade_general_log.txt",
            )
        # Best effort: failures in the follow-up generators are logged in
        # the general log instead of aborting the analysis.
        try:
            self.generate_noupdate_changes()
        except Exception as e:
            _logger.exception("Error generating noupdate changes: %s" % e)
            general_log += "ERROR: error when generating noupdate changes: %s\n" % e
        try:
            self.generate_module_coverage_file(no_changes_modules)
        except Exception as e:
            _logger.exception("Error generating module coverage file: %s" % e)
            general_log += "ERROR: error when generating module coverage file: %s\n" % e
        self.write(
            {
                "state": "done",
                "log": general_log,
            }
        )
        return True
@staticmethod
def _get_node_dict(element):
res = {}
if element is None:
return res
for child in element:
if "name" in child.attrib:
key = "./{}[@name='{}']".format(child.tag, child.attrib["name"])
res[key] = child
return res
@staticmethod
def _get_node_value(element):
if "eval" in element.attrib.keys():
return element.attrib["eval"]
if "ref" in element.attrib.keys():
return element.attrib["ref"]
if not len(element):
return element.text
return etree.tostring(element)
    def _get_xml_diff(
        self, remote_update, remote_noupdate, local_update, local_noupdate
    ):
        """Build an XML document describing changed noupdate records.

        For every local noupdate record, compare it against its remote
        (old version) counterpart and emit only the child fields whose
        value changed, was added, or was emptied. Returns the serialized
        document, or "" when nothing changed.

        :param remote_update/remote_noupdate: {xml_id: element} from the
            remote database
        :param local_update/local_noupdate: {xml_id: element} from the
            local database
        """
        odoo = etree.Element("odoo")
        for xml_id in sorted(local_noupdate.keys()):
            local_record = local_noupdate[xml_id]
            remote_record = None
            if xml_id in remote_update and xml_id not in remote_noupdate:
                remote_record = remote_update[xml_id]
            elif xml_id in remote_noupdate:
                remote_record = remote_noupdate[xml_id]
            if "." in xml_id:
                module_xmlid = xml_id.split(".", 1)[0]
            else:
                module_xmlid = ""
            if remote_record is None and not module_xmlid:
                continue
            if local_record.tag == "template":
                # Templates are compared wholesale on their serialization
                old_tmpl = etree.tostring(remote_record, encoding="utf-8")
                new_tmpl = etree.tostring(local_record, encoding="utf-8")
                if old_tmpl != new_tmpl:
                    odoo.append(local_record)
                continue
            element = etree.Element(
                "record", id=xml_id, model=local_record.attrib["model"]
            )
            # Add forcecreate attribute if exists
            if local_record.attrib.get("forcecreate"):
                element.attrib["forcecreate"] = local_record.attrib["forcecreate"]
            record_remote_dict = self._get_node_dict(remote_record)
            record_local_dict = self._get_node_dict(local_record)
            for key in sorted(record_remote_dict.keys()):
                if not local_record.xpath(key):
                    # The element is no longer present.
                    # Does the field still exist?
                    if record_remote_dict[key].tag == "field":
                        field_name = remote_record.xpath(key)[0].attrib.get("name")
                        if (
                            field_name
                            not in self.env[local_record.attrib["model"]]._fields.keys()
                        ):
                            continue
                    # Overwrite an existing value with an empty one.
                    attribs = deepcopy(record_remote_dict[key]).attrib
                    for attr in ["eval", "ref"]:
                        if attr in attribs:
                            del attribs[attr]
                    element.append(etree.Element(record_remote_dict[key].tag, attribs))
                else:
                    # Present on both sides: emit only if the value changed
                    oldrepr = self._get_node_value(record_remote_dict[key])
                    newrepr = self._get_node_value(record_local_dict[key])
                    if oldrepr != newrepr:
                        element.append(deepcopy(record_local_dict[key]))
            for key in sorted(record_local_dict.keys()):
                # New children that did not exist remotely
                if remote_record is None or not remote_record.xpath(key):
                    element.append(deepcopy(record_local_dict[key]))
            if len(element):
                odoo.append(element)
        if not len(odoo):
            return ""
        return etree.tostring(
            etree.ElementTree(odoo),
            pretty_print=True,
            xml_declaration=True,
            encoding="utf-8",
        ).decode("utf-8")
@staticmethod
def _update_node(target, source):
for element in source:
if "name" in element.attrib:
query = "./{}[@name='{}']".format(element.tag, element.attrib["name"])
else:
# query = "./{}".format(element.tag)
continue
for existing in target.xpath(query):
target.remove(existing)
target.append(element)
@classmethod
def _process_data_node(
self, data_node, records_update, records_noupdate, module_name
):
noupdate = nodeattr2bool(data_node, "noupdate", False)
for record in data_node.xpath("./record") + data_node.xpath("./template"):
self._process_record_node(
record, noupdate, records_update, records_noupdate, module_name
)
    @classmethod
    def _process_record_node(
        self, record, noupdate, records_update, records_noupdate, module_name
    ):
        """Register a single <record>/<template> element in the update or
        noupdate dictionary, merging with an earlier occurrence of the
        same xml_id if there is one.

        :param record: the XML element
        :param noupdate: noupdate flag of the enclosing data node
        :param records_update/records_noupdate: {xml_id: element} targets
        :param module_name: used to strip the own-module prefix from ids
        """
        xml_id = record.get("id")
        if not xml_id:
            return
        # Normalize "module.name" to "name" for the record's own module
        if "." in xml_id and xml_id.startswith(module_name + "."):
            xml_id = xml_id[len(module_name) + 1 :]
        for records in records_noupdate, records_update:
            # records can occur multiple times in the same module
            # with different noupdate settings
            if xml_id in records:
                # merge records (overwriting an existing element
                # with the same tag). The order processing the
                # various directives from the manifest is
                # important here
                self._update_node(records[xml_id], record)
                break
        else:
            # First occurrence: file under the node's noupdate flag
            target_dict = records_noupdate if noupdate else records_update
            target_dict[xml_id] = record
@classmethod
def _parse_files(self, xml_files, module_name):
records_update = {}
records_noupdate = {}
parser = etree.XMLParser(
remove_blank_text=True,
strip_cdata=False,
)
for xml_file in xml_files:
try:
# This is for a final correct pretty print
# Ref.: https://stackoverflow.com/a/7904066
# Also don't strip CDATA tags as needed for HTML content
root_node = etree.fromstring(xml_file.encode("utf-8"), parser=parser)
except etree.XMLSyntaxError:
continue
# Support xml files with root Element either odoo or openerp
# Condition: each xml file should have only one root element
# {<odoo>, <openerp> or —rarely— <data>};
root_node_noupdate = nodeattr2bool(root_node, "noupdate", False)
if root_node.tag not in ("openerp", "odoo", "data"):
raise ValidationError(
_("Unexpected root Element: %(root)s in file: %(file)s")
% {"root": root_node.getroot(), "file": xml_file}
)
for node in root_node:
if node.tag == "data":
self._process_data_node(
node, records_update, records_noupdate, module_name
)
elif node.tag == "record":
self._process_record_node(
node,
root_node_noupdate,
records_update,
records_noupdate,
module_name,
)
return records_update, records_noupdate
    def generate_noupdate_changes(self):
        """Communicate with the remote server to fetch all xml data records
        per module, and generate a diff in XML format that can be imported
        from the module's migration script using openupgrade.load_data()
        """
        self.ensure_one()
        connection = self.config_id.get_connection()
        remote_record_obj = self._get_remote_model(connection, "record")
        local_record_obj = self.env["upgrade.record"]
        local_modules = local_record_obj.list_modules()
        all_remote_modules = remote_record_obj.list_modules()
        for local_module in local_modules:
            remote_files = []
            remote_modules = []
            remote_update, remote_noupdate = {}, {}
            # Collect every remote module that maps onto this local module,
            # taking apriori renames and merges into account
            for remote_module in all_remote_modules:
                if local_module == renamed_modules.get(
                    remote_module, merged_modules.get(remote_module, remote_module)
                ):
                    remote_files.extend(
                        remote_record_obj.get_xml_records(remote_module)
                    )
                    remote_modules.append(remote_module)
                    # NOTE(review): the cumulative remote_files list is
                    # re-parsed for every matching remote module; the
                    # dict.update keeps the result correct but earlier
                    # files are parsed repeatedly — confirm intended.
                    add_remote_update, add_remote_noupdate = self._parse_files(
                        remote_files, remote_module
                    )
                    remote_update.update(add_remote_update)
                    remote_noupdate.update(add_remote_noupdate)
            if not remote_modules:
                continue
            local_files = local_record_obj.get_xml_records(local_module)
            local_update, local_noupdate = self._parse_files(local_files, local_module)
            diff = self._get_xml_diff(
                remote_update, remote_noupdate, local_update, local_noupdate
            )
            if diff:
                module = self.env["ir.module.module"].search(
                    [("name", "=", local_module)]
                )
                self._write_file(
                    local_module,
                    module.installed_version,
                    diff,
                    filename="noupdate_changes.xml",
                )
        return True
def generate_module_coverage_file(self, no_changes_modules):
self.ensure_one()
module_coverage_file_folder = config.get("module_coverage_file_folder", False)
if not module_coverage_file_folder:
return
file_template = Template(
filename=os.path.join(
get_module_path("upgrade_analysis"),
"static",
"src",
"module_coverage_template.rst.mako",
)
)
module_domain = [
("state", "=", "installed"),
("name", "not in", ["upgrade_analysis", "openupgrade_records"]),
]
connection = self.config_id.get_connection()
all_local_modules = (
self.env["ir.module.module"].search(module_domain).mapped("name")
)
all_remote_modules = (
connection.env["ir.module.module"]
.browse(connection.env["ir.module.module"].search(module_domain))
.mapped("name")
)
start_version = connection.version
end_version = release.major_version
all_modules = sorted(list(set(all_remote_modules + all_local_modules)))
module_descriptions = {}
for module in all_modules:
status = ""
if module in all_local_modules and module in all_remote_modules:
module_description = " %s" % module
elif module in all_local_modules:
module_description = " |new| %s" % module
else:
module_description = " |del| %s" % module
if module in compare.apriori.merged_modules:
status = "Merged into %s. " % compare.apriori.merged_modules[module]
elif module in compare.apriori.renamed_modules:
status = "Renamed to %s. " % compare.apriori.renamed_modules[module]
elif module in compare.apriori.renamed_modules.values():
status = (
"Renamed from %s. "
% [
x
for x in compare.apriori.renamed_modules
if compare.apriori.renamed_modules[x] == module
][0]
)
elif module in no_changes_modules:
status += "No DB layout changes. "
module_descriptions[module_description.ljust(49, " ")] = status.ljust(
49, " "
)
rendered_text = file_template.render(
start_version=start_version,
end_version=end_version,
module_descriptions=module_descriptions,
)
file_name = "modules{}-{}.rst".format(
start_version.replace(".", ""),
end_version.replace(".", ""),
)
file_path = os.path.join(module_coverage_file_folder, file_name)
f = open(file_path, "w+")
f.write(rendered_text)
f.close()
return True
| 37.822148
| 22,542
|
4,527
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from threading import current_thread
from odoo import _, fields, models
from odoo.exceptions import UserError
from odoo.modules.registry import Registry
from ..odoo_patch.odoo_patch import OdooPatch
class GenerateWizard(models.TransientModel):
    """Wizard that reinstalls all modules under monkeypatched loading code
    in order to populate the upgrade_record table with the database
    representation of every installed module."""

    _name = "upgrade.generate.record.wizard"
    _description = "Upgrade Generate Record Wizard"

    # draft -> done once generate() has completed
    state = fields.Selection([("draft", "Draft"), ("done", "Done")], default="draft")

    def generate(self):
        """Reinitialize all installed modules.
        Equivalent of running the server with '-d <database> --init all'
        The goal of this is to fill the records table.
        TODO: update module list and versions, then update all modules?"""
        # Truncate the records table
        self.env.cr.execute("TRUNCATE upgrade_attribute, upgrade_record;")
        # Check of all the modules are correctly installed
        modules = self.env["ir.module.module"].search(
            [("state", "in", ["to install", "to upgrade"])]
        )
        if modules:
            raise UserError(
                _("Cannot seem to install or upgrade modules %s")
                % (", ".join([module.name for module in modules]))
            )
        # Now reinitialize all installed modules
        self.env["ir.module.module"].search([("state", "=", "installed")]).write(
            {"state": "to install"}
        )
        self.env.cr.commit()  # pylint: disable=invalid-commit
        # Patch the registry on the thread
        thread = current_thread()
        thread._upgrade_registry = {}
        # Regenerate the registry with monkeypatches that log the records
        with OdooPatch():
            Registry.new(self.env.cr.dbname, update_module=True)
        # Free the registry
        delattr(thread, "_upgrade_registry")
        # Set domain property
        self.env.cr.execute(
            """ UPDATE upgrade_record our
            SET domain = iaw.domain
            FROM ir_model_data imd
            JOIN ir_act_window iaw ON imd.res_id = iaw.id
            WHERE our.type = 'xmlid'
            AND imd.model = 'ir.actions.act_window'
            AND our.model = imd.model
            AND our.name = imd.module || '.' || imd.name
            """
        )
        # Invalidate the cache so the SQL-level writes become visible
        self.env.cache.invalidate(
            [
                (self.env["upgrade.record"]._fields["domain"], None),
            ]
        )
        # Set constraint definition (whitespace collapsed for comparison)
        self.env.cr.execute(
            """ UPDATE upgrade_record our
            SET definition = btrim(replace(replace(replace(replace(
                imc.definition, chr(9), ' '), chr(10), ' '), '  ', ' '), '  ', ' '))
            FROM ir_model_data imd
            JOIN ir_model_constraint imc ON imd.res_id = imc.id
            WHERE our.type = 'xmlid'
            AND imd.model = 'ir.model.constraint'
            AND our.model = imd.model
            AND our.name = imd.module || '.' || imd.name"""
        )
        self.env.cache.invalidate(
            [
                (self.env["upgrade.record"]._fields["definition"], None),
            ]
        )
        # Set noupdate property from ir_model_data
        self.env.cr.execute(
            """ UPDATE upgrade_record our
            SET noupdate = imd.noupdate
            FROM ir_model_data imd
            WHERE our.type = 'xmlid'
            AND our.model = imd.model
            AND our.name = imd.module || '.' || imd.name
            """
        )
        self.env.cache.invalidate(
            [
                (self.env["upgrade.record"]._fields["noupdate"], None),
            ]
        )
        # Log model records
        self.env.cr.execute(
            """INSERT INTO upgrade_record
            (create_date, module, name, model, type)
            SELECT NOW() AT TIME ZONE 'UTC',
                imd2.module, imd2.module || '.' || imd.name AS name,
                im.model, 'model' AS type
            FROM (
                SELECT min(id) as id, name, res_id
                FROM ir_model_data
                WHERE name LIKE 'model_%' AND model = 'ir.model'
                GROUP BY name, res_id
            ) imd
            JOIN ir_model_data imd2 ON imd2.id = imd.id
            JOIN ir_model im ON imd.res_id = im.id
            ORDER BY imd.name, imd.id""",
        )
        return self.write({"state": "done"})
| 35.645669
| 4,527
|
4,040
|
py
|
PYTHON
|
15.0
|
# Copyright 2011-2015 Therp BV <https://therp.nl>
# Copyright 2016 Opener B.V. <https://opener.am>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.modules.registry import Registry
from odoo.osv.expression import AND
from ..blacklist import (
BLACKLIST_MODULES,
BLACKLIST_MODULES_ENDS_WITH,
BLACKLIST_MODULES_STARTS_WITH,
)
class UpgradeInstallWizard(models.TransientModel):
    """Wizard to pick a set of installable modules and install them all."""

    _name = "upgrade.install.wizard"
    _description = "Upgrade Install Wizard"

    # draft -> done once install_modules() has run
    state = fields.Selection(
        [("draft", "Draft"), ("done", "Done")], readonly=True, default="draft"
    )
    module_ids = fields.Many2many(
        comodel_name="ir.module.module",
        domain=lambda x: x._module_ids_domain(),
    )
    module_qty = fields.Integer(
        string="Modules Quantity", compute="_compute_module_qty"
    )

    @api.model
    def _module_ids_domain(self, extra_domain=None):
        """Return a domain matching all selectable modules.

        Excludes uninstallable/unknown modules and everything matched by
        the blacklist (exact names, prefixes and suffixes).

        :param extra_domain: optional extra domain ANDed with the base one
        """
        domain = [
            "&",
            ("state", "not in", ["uninstallable", "unknown"]),
            ("name", "not in", BLACKLIST_MODULES),
        ]
        if extra_domain:
            domain = AND([domain, extra_domain])
        modules = self.env["ir.module.module"].search(domain)
        # Prefix/suffix blacklists cannot be expressed in the domain
        for start_pattern in BLACKLIST_MODULES_STARTS_WITH:
            modules = modules.filtered(
                lambda x, start_pattern=start_pattern: not x.name.startswith(
                    start_pattern
                )
            )
        for end_pattern in BLACKLIST_MODULES_ENDS_WITH:
            modules = modules.filtered(
                lambda x, end_pattern=end_pattern: not x.name.endswith(end_pattern)
            )
        return [("id", "in", modules.ids)]

    @api.depends("module_ids")
    def _compute_module_qty(self):
        for wizard in self:
            wizard.module_qty = len(wizard.module_ids)

    def _select_modules(self, extra_domain=None, module_filter=None):
        """Shared implementation of the select_* buttons: search the
        selectable modules, optionally filter them, and store the result.

        :param extra_domain: optional extra search domain
        :param module_filter: optional predicate passed to filtered()
        """
        self.ensure_one()
        modules = self.env["ir.module.module"].search(
            self._module_ids_domain(extra_domain=extra_domain)
        )
        if module_filter is not None:
            modules = modules.filtered(module_filter)
        self.module_ids = modules
        return self.return_same_form_view()

    def select_odoo_modules(self, extra_domain=None):
        """Select only Odoo core modules."""
        return self._select_modules(extra_domain, lambda x: x.is_odoo_module)

    def select_oca_modules(self, extra_domain=None):
        """Select only OCA modules."""
        return self._select_modules(extra_domain, lambda x: x.is_oca_module)

    def select_other_modules(self, extra_domain=None):
        """Select modules that are neither Odoo core nor OCA."""
        return self._select_modules(
            extra_domain, lambda x: not (x.is_oca_module or x.is_odoo_module)
        )

    def select_installable_modules(self, extra_domain=None):
        """Select every installable module."""
        return self._select_modules(extra_domain)

    def unselect_modules(self):
        """Clear the module selection."""
        self.ensure_one()
        self.module_ids = False
        return self.return_same_form_view()

    def install_modules(self):
        """Set all selected modules and actually install them."""
        self.ensure_one()
        self.module_ids.write({"state": "to install"})
        self.env.cr.commit()  # pylint: disable=invalid-commit
        Registry.new(self.env.cr.dbname, update_module=True)
        self.write({"state": "done"})
        return self.return_same_form_view()

    def return_same_form_view(self):
        """Action that re-opens this wizard in the same form view."""
        return {
            "type": "ir.actions.act_window",
            "res_model": "upgrade.install.wizard",
            "view_mode": "form",
            "res_id": self.id,
            "views": [(False, "form")],
            "target": "new",
        }
| 34.529915
| 4,040
|
465
|
py
|
PYTHON
|
15.0
|
# Copyright (c) 2018 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    "name": "Remote Base",
    "version": "15.0.1.0.0",
    "category": "Generic Modules/Base",
    "author": "Creu Blanca, Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/server-tools",
    "license": "AGPL-3",
    "depends": ["web"],
    # Access rights for res.remote plus its tree/form views
    "data": ["security/ir.model.access.csv", "views/res_remote_views.xml"],
    "installable": True,
}
| 33.214286
| 465
|
2,046
|
py
|
PYTHON
|
15.0
|
# Copyright 2018 Creu Blanca
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from mock import patch
from odoo import http
from odoo.tests import tagged
from odoo.tests.common import HttpCase
@tagged("post_install", "-at_install")
# Skip CSRF validation on tests
@patch(http.__name__ + ".WebRequest.validate_csrf", return_value=True)
class TestRemote(HttpCase):
    """Authenticating over XML-RPC must create a res.remote record for
    the client address, for both successful and failed logins."""

    def setUp(self):
        super().setUp()
        # Complex password to avoid conflicts with `password_security`
        self.good_password = "Admin$%02584"
        self.data_demo = {
            "login": "demo",
            "password": "Demo%&/(908409**",
        }
        self.remote_addr = "127.0.0.1"
        with self.cursor() as cr:
            env = self.env(cr)
            # Make sure involved users have good passwords
            env.user.password = self.good_password
            env["res.users"].search(
                [("login", "=", self.data_demo["login"])]
            ).password = self.data_demo["password"]
            # Fix: use the separate cursor's env (not self.env) so this
            # cleanup is committed and visible to the XML-RPC layer, like
            # the password writes above; self.env runs in the test
            # transaction that is rolled back.
            remote = env["res.remote"].search([("ip", "=", self.remote_addr)])
            if remote:
                remote.unlink()

    def test_xmlrpc_login_ok(self, *args):
        """Test Login"""
        data1 = self.data_demo
        self.assertTrue(
            self.xmlrpc_common.authenticate(
                self.env.cr.dbname, data1["login"], data1["password"], {}
            )
        )
        with self.cursor() as cr:
            env = self.env(cr)
            self.assertTrue(env["res.remote"].search([("ip", "=", self.remote_addr)]))

    def test_xmlrpc_login_failure(self, *args):
        """Test Login Failure"""
        data1 = self.data_demo
        data1["password"] = "Failure!"
        self.assertFalse(
            self.xmlrpc_common.authenticate(
                self.env.cr.dbname, data1["login"], data1["password"], {}
            )
        )
        with self.cursor() as cr:
            env = self.env(cr)
            self.assertTrue(env["res.remote"].search([("ip", "=", self.remote_addr)]))
| 34.677966
| 2,046
|
1,349
|
py
|
PYTHON
|
15.0
|
# Copyright 2018 Creu Blanca
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
import socket
from odoo import api, fields, models
class ResRemote(models.Model):
    """Known client hosts, resolved from the request's remote address."""

    _name = "res.remote"
    _description = "Remotes"

    name = fields.Char(required=True, string="Hostname", index=True, readonly=True)
    ip = fields.Char(required=True)
    in_network = fields.Boolean(
        required=True, help="Shows if the remote can be found through the socket"
    )

    _sql_constraints = [("name_unique", "unique(name)", "Hostname must be unique")]

    @api.model
    def _create_vals(self, addr, hostname):
        """Creation values for a remote; falls back to the bare address
        when no hostname could be resolved."""
        return {
            "name": hostname or addr,
            "ip": addr,
            "in_network": bool(hostname),
        }

    @api.model
    def _get_remote(self, addr):
        """Find or create the res.remote record for *addr*.

        :param addr: IP address of the client
        :return: a res.remote record (created on first sight)
        """
        try:
            # aliases/ips from gethostbyaddr are unused
            hostname, _aliases, _ips = socket.gethostbyaddr(addr)
        except socket.herror:
            # Lazy %-args: the message is only formatted if emitted
            logging.warning("Remote with ip %s could not be found", addr)
            hostname = False
        remote = self.search([("name", "=ilike", hostname or addr)])
        if not remote:
            remote = self.create(self._create_vals(addr, hostname))
        if remote.ip != addr:
            # IPs can change through time, but hostname should not change
            remote.write({"ip": addr})
        return remote
| 32.119048
| 1,349
|
1,791
|
py
|
PYTHON
|
15.0
|
# Copyright 2018 Creu Blanca
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from threading import current_thread
from odoo import SUPERUSER_ID, api, models
from odoo.exceptions import AccessDenied
from odoo.service import wsgi_server
from odoo.tools import config
class ResUsers(models.Model):
    _inherit = "res.users"

    def _register_hook(self):
        """🐒-patch XML-RPC controller to know remote address."""
        super()._register_hook()
        original_fn = wsgi_server.application_unproxied

        def _patch(environ, start_response):
            # Store the WSGI environ on the handling thread so that the
            # `remote` property (base model) can read REMOTE_ADDR later
            # in this request.
            current_thread().environ = environ
            return original_fn(environ, start_response)

        wsgi_server.application_unproxied = _patch

    @classmethod
    def _auth_check_remote(cls, login, method):
        """Force a method to raise an AccessDenied on falsey return.

        Resolving `remote` also creates the res.remote record for the
        current request address as a side effect, in a separate cursor.
        """
        # NOTE(review): `login` is unused in this body — presumably kept
        # for signature symmetry with the wrapped auth methods; confirm.
        with cls.pool.cursor() as cr:
            env = api.Environment(cr, SUPERUSER_ID, {})
            remote = env["res.users"].remote
            if not config["test_enable"]:
                # Outside tests a remote must have been resolved
                remote.ensure_one()
            result = method()
            if not result:
                # Force exception to record auth failure
                raise AccessDenied()
            return result

    # Override all auth-related core methods
    @classmethod
    def _login(cls, db, login, password, user_agent_env):
        return cls._auth_check_remote(
            login,
            lambda: super(ResUsers, cls)._login(db, login, password, user_agent_env),
        )

    @classmethod
    def authenticate(cls, db, login, password, user_agent_env):
        return cls._auth_check_remote(
            login,
            lambda: super(ResUsers, cls).authenticate(
                db, login, password, user_agent_env
            ),
        )
| 32.509091
| 1,788
|
479
|
py
|
PYTHON
|
15.0
|
# Copyright 2018 Creu Blanca
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from threading import current_thread
from odoo import models
class Base(models.AbstractModel):
    _inherit = "base"

    @property
    def remote(self):
        """res.remote record for the current request's client address, or
        an empty res.remote recordset when no WSGI environ is known on
        the handling thread."""
        thread = current_thread()
        try:
            addr = thread.environ["REMOTE_ADDR"]
        except (KeyError, AttributeError):
            return self.env["res.remote"]
        return self.env["res.remote"]._get_remote(addr)
| 26.611111
| 479
|
942
|
py
|
PYTHON
|
15.0
|
# Copyright 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright 2015 Agile Business Group <http://www.agilebg.com>
# Copyright 2016 Grupo ESOC Ingenieria de Servicios, S.L.U. - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
    "name": "Database Auto-Backup",
    "summary": "Backups database",
    "version": "15.0.1.0.1",
    # Adjacent string literals are concatenated into one author string
    "author": "Yenthe Van Ginneken, "
    "Agile Business Group, "
    "Grupo ESOC Ingenieria de Servicios, "
    "LasLabs, "
    "AdaptiveCity, "
    "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/server-tools",
    "category": "Tools",
    "depends": ["mail"],
    "data": [
        "data/ir_cron.xml",
        "data/mail_message_subtype.xml",
        "security/ir.model.access.csv",
        "view/db_backup_view.xml",
    ],
    "installable": True,
    # NOTE(review): external_dependencies entries are normally importable
    # module names; "cryptography==2.6.1" looks like a pip requirement
    # specifier instead — confirm this is intentional.
    "external_dependencies": {"python": ["pysftp", "cryptography==2.6.1"]},
}
| 33.642857
| 942
|
9,509
|
py
|
PYTHON
|
15.0
|
# Copyright 2015 Agile Business Group <http://www.agilebg.com>
# Copyright 2015 Alessio Gerace <alesiso.gerace@agilebg.com>
# Copyright 2016 Grupo ESOC Ingenieria de Servicios, S.L.U. - Jairo Llopis
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
import os
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import PropertyMock, patch
from odoo import tools
from odoo.exceptions import UserError
from odoo.tests import common
_logger = logging.getLogger(__name__)
# pysftp is an optional dependency; tests degrade gracefully without it
try:
    import pysftp
except ImportError:  # pragma: no cover
    _logger.debug("Cannot import pysftp")

# Dotted paths used to build mock.patch targets for the model under test
model = "odoo.addons.auto_backup.models.db_backup"
class_name = "%s.DbBackup" % model
class TestConnectionException(pysftp.ConnectionException):
    """pysftp.ConnectionException pre-filled with dummy arguments so the
    tests can raise it without supplying host/port."""

    def __init__(self):
        super().__init__("test", "test")
class TestDbBackup(common.TransactionCase):
def setUp(self):
super(TestDbBackup, self).setUp()
self.Model = self.env["db.backup"]
@contextmanager
def mock_assets(self):
"""It provides mocked core assets"""
self.path_join_val = "/this/is/a/path"
with patch("%s.db" % model) as db:
with patch("%s.os" % model) as os:
with patch("%s.shutil" % model) as shutil:
os.path.join.return_value = self.path_join_val
yield {
"db": db,
"os": os,
"shutil": shutil,
}
@contextmanager
def patch_filtered_sftp(self, record):
"""It patches filtered record and provides a mock"""
with patch("%s.filtered" % class_name) as filtered:
filtered.side_effect = [], [record]
with patch("%s.backup_log" % class_name):
with patch("%s.sftp_connection" % class_name):
yield filtered
def new_record(self, method="sftp"):
vals = {"name": "Têst backup", "method": method, "days_to_keep": 1}
if method == "sftp":
vals.update(
{
"sftp_host": "test_host",
"sftp_port": "222",
"sftp_user": "tuser",
"sftp_password": "password",
"folder": "/folder/",
}
)
self.vals = vals
return self.Model.create(vals)
def test_compute_name_sftp(self):
"""It should create proper SFTP URI"""
rec_id = self.new_record()
self.assertEqual(
"sftp://%(user)s@%(host)s:%(port)s%(folder)s"
% {
"user": self.vals["sftp_user"],
"host": self.vals["sftp_host"],
"port": self.vals["sftp_port"],
"folder": self.vals["folder"],
},
rec_id.name,
)
def test_check_folder(self):
"""It should not allow recursive backups"""
rec_id = self.new_record("local")
with self.assertRaises(UserError):
rec_id.write(
{
"folder": "%s/another/path"
% tools.config.filestore(self.env.cr.dbname),
}
)
@patch("%s._" % model)
def test_action_sftp_test_connection_success(self, _):
"""It should raise connection succeeded warning"""
with patch("%s.sftp_connection" % class_name, new_callable=PropertyMock):
rec_id = self.new_record()
with self.assertRaises(UserError):
rec_id.action_sftp_test_connection()
_.assert_called_once_with("Connection Test Succeeded!")
@patch("%s._" % model)
def test_action_sftp_test_connection_fail(self, _):
"""It should raise connection fail warning"""
with patch(
"%s.sftp_connection" % class_name, new_callable=PropertyMock
) as conn:
rec_id = self.new_record()
conn().side_effect = TestConnectionException
with self.assertRaises(UserError):
rec_id.action_sftp_test_connection()
_.assert_called_once_with("Connection Test Failed!")
def test_action_backup_local(self):
"""It should backup local database"""
rec_id = self.new_record("local")
filename = rec_id.filename(datetime.now())
rec_id.action_backup()
generated_backup = [f for f in os.listdir(rec_id.folder) if f >= filename]
self.assertEqual(1, len(generated_backup))
def test_action_backup_local_cleanup(self):
"""Backup local database and cleanup old databases"""
rec_id = self.new_record("local")
old_date = datetime.now() - timedelta(days=3)
filename = rec_id.filename(old_date)
with patch("%s.datetime" % model) as mock_date:
mock_date.now.return_value = old_date
rec_id.action_backup()
generated_backup = [f for f in os.listdir(rec_id.folder) if f >= filename]
self.assertEqual(2, len(generated_backup))
filename = rec_id.filename(datetime.now())
rec_id.action_backup()
generated_backup = [f for f in os.listdir(rec_id.folder) if f >= filename]
self.assertEqual(1, len(generated_backup))
def test_action_backup_sftp_mkdirs(self):
"""It should create remote dirs"""
rec_id = self.new_record()
with self.mock_assets():
with self.patch_filtered_sftp(rec_id):
with patch("%s.cleanup" % class_name, new_callable=PropertyMock):
conn = rec_id.sftp_connection().__enter__()
rec_id.action_backup()
conn.makedirs.assert_called_once_with(rec_id.folder)
def test_action_backup_sftp_mkdirs_conn_exception(self):
"""It should guard from ConnectionException on remote.mkdirs"""
rec_id = self.new_record()
with self.mock_assets():
with self.patch_filtered_sftp(rec_id):
with patch("%s.cleanup" % class_name, new_callable=PropertyMock):
conn = rec_id.sftp_connection().__enter__()
conn.makedirs.side_effect = TestConnectionException
rec_id.action_backup()
# No error was raised, test pass
self.assertTrue(True)
def test_action_backup_sftp_remote_open(self):
"""It should open remote file w/ proper args"""
rec_id = self.new_record()
with self.mock_assets() as assets:
with self.patch_filtered_sftp(rec_id):
with patch("%s.cleanup" % class_name, new_callable=PropertyMock):
conn = rec_id.sftp_connection().__enter__()
rec_id.action_backup()
conn.open.assert_called_once_with(assets["os"].path.join(), "wb")
def test_action_backup_all_search(self):
"""It should search all records"""
rec_id = self.new_record()
with patch("%s.search" % class_name, new_callable=PropertyMock):
rec_id.action_backup_all()
rec_id.search.assert_called_once_with([])
def test_action_backup_all_return(self):
"""It should return result of backup operation"""
rec_id = self.new_record()
with patch("%s.search" % class_name, new_callable=PropertyMock):
res = rec_id.action_backup_all()
self.assertEqual(rec_id.search().action_backup(), res)
@patch("%s.pysftp" % model)
def test_sftp_connection_init_passwd(self, pysftp):
"""It should initiate SFTP connection w/ proper args and pass"""
rec_id = self.new_record()
rec_id.sftp_connection()
pysftp.Connection.assert_called_once_with(
host=rec_id.sftp_host,
username=rec_id.sftp_user,
port=rec_id.sftp_port,
password=rec_id.sftp_password,
)
@patch("%s.pysftp" % model)
def test_sftp_connection_init_key(self, pysftp):
"""It should initiate SFTP connection w/ proper args and key"""
rec_id = self.new_record()
rec_id.write({"sftp_private_key": "pkey", "sftp_password": "pkeypass"})
rec_id.sftp_connection()
pysftp.Connection.assert_called_once_with(
host=rec_id.sftp_host,
username=rec_id.sftp_user,
port=rec_id.sftp_port,
private_key=rec_id.sftp_private_key,
private_key_pass=rec_id.sftp_password,
)
@patch("%s.pysftp" % model)
def test_sftp_connection_return(self, pysftp):
"""It should return new sftp connection"""
rec_id = self.new_record()
res = rec_id.sftp_connection()
self.assertEqual(
pysftp.Connection(),
res,
)
def test_filename_default(self):
    """The default extension must yield a ``.dump.zip`` file name."""
    generated = self.Model.filename(datetime.now())
    self.assertTrue(generated.endswith(".dump.zip"))
def test_filename_zip(self):
    """An explicit ``zip`` extension must yield a ``.dump.zip`` file name."""
    generated = self.Model.filename(datetime.now(), ext="zip")
    self.assertTrue(generated.endswith(".dump.zip"))
def test_filename_dump(self):
    """A ``dump`` extension must yield a plain ``.dump`` file name."""
    generated = self.Model.filename(datetime.now(), ext="dump")
    self.assertTrue(generated.endswith(".dump"))
| 38.967213
| 9,508
|
11,278
|
py
|
PYTHON
|
15.0
|
# Copyright 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright 2015 Agile Business Group <http://www.agilebg.com>
# Copyright 2016 Grupo ESOC Ingenieria de Servicios, S.L.U. - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import logging
import os
import shutil
import traceback
from contextlib import contextmanager
from datetime import datetime, timedelta
from glob import iglob
from odoo import _, api, exceptions, fields, models, tools
from odoo.exceptions import UserError
from odoo.service import db
_logger = logging.getLogger(__name__)
try:
import pysftp
except ImportError: # pragma: no cover
_logger.debug("Cannot import pysftp")
class DbBackup(models.Model):
    """Configuration of an automated database backup job.

    Each record describes one destination (local folder or remote SFTP
    server), the dump format, and an optional retention window
    (``days_to_keep``).  Results are logged to the record's chatter.
    """

    _description = "Database Backup"
    _name = "db.backup"
    _inherit = "mail.thread"

    _sql_constraints = [
        ("name_unique", "UNIQUE(name)", "Cannot duplicate a configuration."),
        (
            "days_to_keep_positive",
            "CHECK(days_to_keep >= 0)",
            "I cannot remove backups from the future. Ask Doc for that.",
        ),
    ]

    name = fields.Char(
        compute="_compute_name",
        store=True,
        help="Summary of this backup process",
    )
    folder = fields.Char(
        default=lambda self: self._default_folder(),
        help="Absolute path for storing the backups",
        required=True,
    )
    days_to_keep = fields.Integer(
        required=True,
        default=0,
        help="Backups older than this will be deleted automatically. "
        "Set 0 to disable autodeletion.",
    )
    method = fields.Selection(
        [("local", "Local disk"), ("sftp", "Remote SFTP server")],
        default="local",
        help="Choose the storage method for this backup.",
    )
    sftp_host = fields.Char(
        "SFTP Server",
        help=(
            "The host name or IP address from your remote"
            " server. For example 192.168.0.1"
        ),
    )
    sftp_port = fields.Integer(
        "SFTP Port",
        default=22,
        help="The port on the FTP server that accepts SSH/SFTP calls.",
    )
    sftp_user = fields.Char(
        "Username in the SFTP Server",
        help=(
            "The username where the SFTP connection "
            "should be made with. This is the user on the external server."
        ),
    )
    sftp_password = fields.Char(
        "SFTP Password",
        help="The password for the SFTP connection. If you specify a private "
        "key file, then this is the password to decrypt it.",
    )
    sftp_private_key = fields.Char(
        "Private key location",
        help="Path to the private key file. Only the Odoo user should have "
        "read permissions for that file.",
    )
    backup_format = fields.Selection(
        [
            ("zip", "zip (includes filestore)"),
            ("dump", "pg_dump custom format (without filestore)"),
        ],
        default="zip",
        help="Choose the format for this backup.",
    )

    @api.model
    def _default_folder(self):
        """Default to ``backups`` folder inside current server datadir."""
        return os.path.join(tools.config["data_dir"], "backups", self.env.cr.dbname)

    @api.depends("folder", "method", "sftp_host", "sftp_port", "sftp_user")
    def _compute_name(self):
        """Get the right summary for this job."""
        for rec in self:
            if rec.method == "local":
                rec.name = "%s @ localhost" % rec.folder
            elif rec.method == "sftp":
                rec.name = "sftp://%s@%s:%d%s" % (
                    rec.sftp_user,
                    rec.sftp_host,
                    rec.sftp_port,
                    rec.folder,
                )

    @api.constrains("folder", "method")
    def _check_folder(self):
        """Do not use the filestore or you will backup your backups."""
        for record in self:
            if record.method == "local" and record.folder.startswith(
                tools.config.filestore(self.env.cr.dbname)
            ):
                raise exceptions.ValidationError(
                    _(
                        "Do not save backups on your filestore, or you will "
                        "backup your backups too!"
                    )
                )

    def action_sftp_test_connection(self):
        """Check if the SFTP settings are correct.

        :raises UserError: always — with a success message if the
            connection could be opened, a failure message otherwise.
        """
        try:
            # Just open and close the connection
            with self.sftp_connection():
                raise UserError(_("Connection Test Succeeded!"))
        except (
            pysftp.CredentialException,
            pysftp.ConnectionException,
            pysftp.SSHException,
        ) as exc:
            _logger.info("Connection Test Failed!", exc_info=True)
            raise UserError(_("Connection Test Failed!")) from exc

    def action_backup(self):
        """Run selected backups.

        Local destinations are processed first; the first dump produced is
        cached on disk and re-used for the remaining local destinations.
        SFTP destinations each get a freshly generated dump streamed to the
        remote server.  Retention cleanup runs only for destinations whose
        backup succeeded.
        """
        backup = None
        successful = self.browse()

        # Start with local storage
        for rec in self.filtered(lambda r: r.method == "local"):
            filename = self.filename(datetime.now(), ext=rec.backup_format)
            with rec.backup_log():
                # Directory must exist
                try:
                    os.makedirs(rec.folder)
                except OSError as exc:
                    # Folder already exists (or is not creatable); keep going
                    # and let the open() below surface a real failure.
                    _logger.exception("Action backup - OSError: %s" % exc)

                with open(os.path.join(rec.folder, filename), "wb") as destiny:
                    # Copy the cached backup
                    if backup:
                        # FIX: the dump is binary data; it must be read in
                        # binary mode, otherwise text decoding fails/corrupts.
                        with open(backup, "rb") as cached:
                            shutil.copyfileobj(cached, destiny)
                    # Generate new backup
                    else:
                        db.dump_db(
                            self.env.cr.dbname, destiny, backup_format=rec.backup_format
                        )
                        backup = backup or destiny.name
                successful |= rec

        # Ensure a local backup exists if we are going to write it remotely
        sftp = self.filtered(lambda r: r.method == "sftp")
        if sftp:
            for rec in sftp:
                filename = self.filename(datetime.now(), ext=rec.backup_format)
                with rec.backup_log():
                    cached = db.dump_db(
                        self.env.cr.dbname, None, backup_format=rec.backup_format
                    )

                    with cached:
                        with rec.sftp_connection() as remote:
                            # Directory must exist
                            try:
                                remote.makedirs(rec.folder)
                            except pysftp.ConnectionException as exc:
                                # Folder probably exists already; best effort
                                _logger.exception(
                                    "pysftp ConnectionException: %s" % exc
                                )
                            # Copy cached backup to remote server
                            with remote.open(
                                os.path.join(rec.folder, filename), "wb"
                            ) as destiny:
                                shutil.copyfileobj(cached, destiny)
                        successful |= rec

        # Remove old files for successful backups
        successful.cleanup()

    @api.model
    def action_backup_all(self):
        """Run all scheduled backups."""
        return self.search([]).action_backup()

    @contextmanager
    def backup_log(self):
        """Log a backup result to the server log and the record's chatter."""
        try:
            _logger.info("Starting database backup: %s", self.name)
            yield
        except Exception:
            _logger.exception("Database backup failed: %s", self.name)
            escaped_tb = tools.html_escape(traceback.format_exc())
            self.message_post(  # pylint: disable=translation-required
                body="<p>%s</p><pre>%s</pre>"
                % (_("Database backup failed."), escaped_tb),
                subtype_id=self.env.ref("auto_backup.mail_message_subtype_failure").id,
            )
        else:
            _logger.info("Database backup succeeded: %s", self.name)
            self.message_post(body=_("Database backup succeeded."))

    def cleanup(self):
        """Clean up old backups.

        Removes, per record, every backup file whose timestamped name sorts
        before the cutoff computed from ``days_to_keep``.  Records with
        ``days_to_keep == 0`` are skipped entirely.
        """
        now = datetime.now()
        for rec in self.filtered("days_to_keep"):
            with rec.cleanup_log():
                bu_format = rec.backup_format
                file_extension = bu_format == "zip" and "dump.zip" or bu_format
                # Timestamped names sort lexicographically == chronologically
                oldest = self.filename(now - timedelta(days=rec.days_to_keep), bu_format)

                if rec.method == "local":
                    for name in iglob(os.path.join(rec.folder, "*.%s" % file_extension)):
                        if os.path.basename(name) < oldest:
                            os.unlink(name)

                elif rec.method == "sftp":
                    with rec.sftp_connection() as remote:
                        for name in remote.listdir(rec.folder):
                            if (
                                name.endswith(".%s" % file_extension)
                                and os.path.basename(name) < oldest
                            ):
                                remote.unlink("{}/{}".format(rec.folder, name))

    @contextmanager
    def cleanup_log(self):
        """Log a possible cleanup failure."""
        self.ensure_one()
        try:
            _logger.info(
                "Starting cleanup process after database backup: %s", self.name
            )
            yield
        except Exception:
            # FIX: the original call was missing the %s argument
            _logger.exception("Cleanup of old database backups failed: %s", self.name)
            escaped_tb = tools.html_escape(traceback.format_exc())
            self.message_post(  # pylint: disable=translation-required
                body="<p>%s</p><pre>%s</pre>"
                % (_("Cleanup of old database backups failed."), escaped_tb),
                # FIX: use the same failure subtype xmlid as backup_log();
                # "auto_backup.failure" does not match the id used elsewhere.
                subtype_id=self.env.ref("auto_backup.mail_message_subtype_failure").id,
            )
        else:
            _logger.info("Cleanup of old database backups succeeded: %s", self.name)

    @staticmethod
    def filename(when, ext="zip"):
        """Generate a file name for a backup.

        :param datetime.datetime when:
            Use this datetime instead of :meth:`datetime.datetime.now`.
        :param str ext: Extension of the file. Default: dump.zip
        """
        return "{:%Y_%m_%d_%H_%M_%S}.{ext}".format(
            when, ext="dump.zip" if ext == "zip" else ext
        )

    def sftp_connection(self):
        """Return a new SFTP connection with found parameters."""
        self.ensure_one()
        params = {
            "host": self.sftp_host,
            "username": self.sftp_user,
            "port": self.sftp_port,
        }
        # FIX: pass the mapping as the logging argument so the %(...)s
        # placeholders are actually interpolated (extra= does not do that).
        _logger.debug(
            "Trying to connect to sftp://%(username)s@%(host)s:%(port)d", params
        )
        if self.sftp_private_key:
            params["private_key"] = self.sftp_private_key
            if self.sftp_password:
                # With a key configured, the password decrypts the key
                params["private_key_pass"] = self.sftp_password
        else:
            params["password"] = self.sftp_password

        return pysftp.Connection(**params)
| 36.977049
| 11,278
|
688
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2.extensions import ISQLQuote
class IdentifierAdapter(ISQLQuote):
    """Adapt a SQL identifier (table/column/index name) for psycopg2.

    Strips every character that is not alphanumeric, ``_`` or ``$`` and,
    unless ``quote=False``, wraps the result in double quotes.
    """

    def __init__(self, identifier, quote=True):
        self.quote = quote
        self.identifier = identifier

    def __conform__(self, protocol):
        # psycopg2 asks the object for its ISQLQuote adapter
        if protocol == ISQLQuote:
            return self

    def getquoted(self):
        # Keep only characters that may legally appear in an identifier
        cleaned = "".join(
            ch for ch in self.identifier if ch.isalnum() or ch in ("_", "$")
        )
        template = '"%s"' if self.quote else "%s"
        return template % cleaned
| 31.272727
| 688
|
850
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp SA <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Database cleanup",
"version": "15.0.1.0.2",
"author": "Therp BV,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/server-tools",
"depends": ["base"],
"license": "AGPL-3",
"category": "Tools",
"data": [
"views/purge_wizard.xml",
"views/purge_menus.xml",
"views/purge_modules.xml",
"views/purge_models.xml",
"views/purge_columns.xml",
"views/purge_tables.xml",
"views/purge_data.xml",
"views/create_indexes.xml",
"views/purge_properties.xml",
"views/menu.xml",
"security/ir.model.access.csv",
],
"installable": True,
}
| 32.692308
| 850
|
1,679
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineProperty(Common):
def setUp(self):
super().setUp()
with environment() as env:
# Create one property for tests
self.partner_name_field_id = env["ir.model.fields"].search(
[("name", "=", "name"), ("model_id.model", "=", "res.partner")], limit=1
)
def test_property_to_not_removed(self):
with environment() as env:
self.property = env["ir.property"].create(
{
"fields_id": self.partner_name_field_id.id,
"type": "char",
"value_text": "My default partner name",
"res_id": False,
}
)
wizard = env["cleanup.purge.wizard.property"].create({})
wizard.purge_all()
self.assertTrue(self.property.exists())
def test_property_no_value(self):
with environment() as env:
self.property = env["ir.property"].create(
{
"fields_id": self.partner_name_field_id.id,
"type": "char",
"value_text": False,
"res_id": False,
}
)
wizard = env["cleanup.purge.wizard.property"].create({})
wizard.purge_all()
self.assertFalse(self.property.exists())
| 36.5
| 1,679
|
1,411
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineColumn(Common):
def setUp(self):
super().setUp()
with environment() as env:
# create a nonexistent model
self.model_name = "x_database.cleanup.test.model"
self.model_values = {
"name": "Database cleanup test model",
"model": self.model_name,
}
self.model = env["ir.model"].create(self.model_values)
env.cr.execute(
"insert into ir_attachment (name, res_model, res_id, type) values "
"('test attachment', %s, 42, 'binary')",
[self.model_name],
)
env.registry.models.pop(self.model_name)
def test_empty_model(self):
with environment() as env:
wizard = env["cleanup.purge.wizard.model"].create({})
wizard.purge_all()
# must be removed by the wizard
self.assertFalse(
env["ir.model"].search(
[
("model", "=", self.model_name),
]
)
)
| 35.275
| 1,411
|
942
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCreateIndexesLine(Common):
def setUp(self):
super().setUp()
with environment() as env:
# delete some index and check if our module recreated it
env.cr.execute("drop index res_partner_name_index")
def test_deleted_index(self):
with environment() as env:
wizard = env["cleanup.create_indexes.wizard"].create({})
wizard.purge_all()
env.cr.execute(
"select indexname from pg_indexes where "
"indexname='res_partner_name_index' and tablename='res_partner' "
)
self.assertEqual(env.cr.rowcount, 1)
| 36.230769
| 942
|
1,135
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineData(Common):
def setUp(self):
super().setUp()
with environment() as env:
# create a data entry pointing nowhere
env.cr.execute("select max(id) + 1 from res_users")
env["ir.model.data"].create(
{
"module": "database_cleanup",
"name": "test_no_data_entry",
"model": "res.users",
"res_id": env.cr.fetchone()[0],
}
)
def test_pointing_nowhere(self):
with environment() as env:
wizard = env["cleanup.purge.wizard.data"].create({})
wizard.purge_all()
# must be removed by the wizard
with self.assertRaises(ValueError):
env.ref("database_cleanup.test_no_data_entry")
| 35.46875
| 1,135
|
1,709
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2 import ProgrammingError
from odoo.tests.common import tagged
from odoo.tools import mute_logger
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineColumn(Common):
def setUp(self):
super().setUp()
with environment() as env:
# create an orphaned column
env.cr.execute(
"alter table res_partner add column database_cleanup_test int"
)
def test_empty_column(self):
with environment() as env:
# We need use a model that is not blocked (Avoid use res.users)
partner_model = env["ir.model"].search(
[("model", "=", "res.partner")], limit=1
)
wizard = env["cleanup.purge.wizard.column"].create(
{
"purge_line_ids": [
(
0,
0,
{
"model_id": partner_model.id,
"name": "database_cleanup_test",
},
)
]
}
)
wizard.purge_all()
# must be removed by the wizard
with self.assertRaises(ProgrammingError):
with env.registry.cursor() as cr:
with mute_logger("odoo.sql_db"):
cr.execute("select database_cleanup_test from res_partner")
| 36.361702
| 1,709
|
773
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from contextlib import contextmanager
import odoo
from odoo.tests import common
from odoo.tests.common import BaseCase, tagged
ADMIN_USER_ID = common.ADMIN_USER_ID
@contextmanager
def environment():
    """Yield an Environment bound to a fresh cursor on the test database.

    The cursor is committed and closed when the context block exits.
    """
    db_registry = odoo.registry(common.get_db_name())
    with db_registry.cursor() as new_cr:
        yield odoo.api.Environment(new_cr, ADMIN_USER_ID, {})
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class Common(BaseCase):
def setUp(self):
super().setUp()
| 28.62963
| 773
|
1,003
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineModule(Common):
def setUp(self):
super().setUp()
with environment() as env:
# create a nonexistent module
self.module = env["ir.module.module"].create(
{
"name": "database_cleanup_test",
"state": "to upgrade",
}
)
def test_remove_to_upgrade_module(self):
with environment() as env:
wizard = env["cleanup.purge.wizard.module"].create({})
module_names = wizard.purge_line_ids.filtered(
lambda x: not x.purged
).mapped("name")
self.assertTrue("database_cleanup_test" in module_names)
| 34.586207
| 1,003
|
915
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2 import ProgrammingError
from odoo.tests.common import tagged
from odoo.tools import mute_logger
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineTable(Common):
def test_empty_table(self):
with environment() as env:
# create an orphaned table
env.cr.execute("create table database_cleanup_test (test int)")
wizard = env["cleanup.purge.wizard.table"].create({})
wizard.purge_all()
with self.assertRaises(ProgrammingError):
with env.registry.cursor() as cr:
with mute_logger("odoo.sql_db"):
cr.execute("select * from database_cleanup_test")
| 39.782609
| 915
|
1,249
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests.common import tagged
from .common import Common, environment
# Use post_install to get all models loaded more info: odoo/odoo#13458
@tagged("post_install", "-at_install")
class TestCleanupPurgeLineMenu(Common):
def setUp(self):
super().setUp()
with environment() as env:
# create a new empty menu
self.menu = env["ir.ui.menu"].create({"name": "database_cleanup_test"})
def test_empty_menu(self):
with environment() as env:
wizard = env["cleanup.purge.wizard.menu"].create(
{
"purge_line_ids": [
(
0,
0,
{
"menu_id": self.menu.id,
},
)
]
}
)
wizard.purge_all()
self.assertFalse(
env["ir.ui.menu"].search(
[
("name", "=", "database_cleanup_test"),
]
)
)
| 31.225
| 1,249
|
4,540
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from ..identifier_adapter import IdentifierAdapter
class CleanupPurgeLineTable(models.TransientModel):
    """One orphaned database table proposed for dropping."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.table"
    _description = "Cleanup Purge Line Table"

    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.table", "Purge Wizard", readonly=True
    )

    def purge(self):
        """
        Unlink tables upon manual confirmation.

        Foreign-key constraints from other to-be-dropped tables are removed
        first so the DROP TABLE statements cannot fail on dependency order.
        """
        if self:
            objs = self
        else:
            # called without records (e.g. from a list action): use the
            # ids provided in the context
            objs = self.env["cleanup.purge.line.table"].browse(
                self._context.get("active_ids")
            )
        tables = objs.mapped("name")
        for line in objs:
            if line.purged:
                continue
            # Retrieve constraints on the tables to be dropped
            # This query is referenced in numerous places
            # on the Internet but credits probably go to Tom Lane
            # in this post http://www.postgresql.org/\
            # message-id/22895.1226088573@sss.pgh.pa.us
            # Only using the constraint name and the source table,
            # but I'm leaving the rest in for easier debugging
            self.env.cr.execute(
                """
                SELECT conname, confrelid::regclass, af.attname AS fcol,
                    conrelid::regclass, a.attname AS col
                FROM pg_attribute af, pg_attribute a,
                    (SELECT conname, conrelid, confrelid,conkey[i] AS conkey,
                        confkey[i] AS confkey
                     FROM (select conname, conrelid, confrelid, conkey,
                         confkey, generate_series(1,array_upper(conkey,1)) AS i
                         FROM pg_constraint WHERE contype = 'f') ss) ss2
                WHERE af.attnum = confkey AND af.attrelid = confrelid AND
                    a.attnum = conkey AND a.attrelid = conrelid
                    AND confrelid::regclass = '%s'::regclass;
                """,
                # quote=False: the adapter output lands inside the
                # '...'::regclass literal above
                (IdentifierAdapter(line.name, quote=False),),
            )
            for constraint in self.env.cr.fetchall():
                # Only drop FKs coming from tables that are also being purged
                if constraint[3] in tables:
                    self.logger.info(
                        "Dropping constraint %s on table %s (to be dropped)",
                        constraint[0],
                        constraint[3],
                    )
                    self.env.cr.execute(
                        "ALTER TABLE %s DROP CONSTRAINT %s",
                        (
                            IdentifierAdapter(constraint[3]),
                            IdentifierAdapter(constraint[0]),
                        ),
                    )

            self.logger.info("Dropping table %s", line.name)
            self.env.cr.execute("DROP TABLE %s", (IdentifierAdapter(line.name),))
            line.write({"purged": True})
        return True
class CleanupPurgeWizardTable(models.TransientModel):
    """Wizard listing database tables not backed by any known model."""

    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.table"
    _description = "Purge tables"

    @api.model
    def find(self):
        """
        Search for tables that cannot be instantiated.
        Ignore views for now.
        """
        # Start out with known tables: one per registry model, plus the
        # relation tables of stored many2many fields.
        known_tables = []
        for model in self.env["ir.model"].search([]):
            if model.model not in self.env:
                continue
            model_pool = self.env[model.model]
            known_tables.append(model_pool._table)
            known_tables += [
                column.relation
                for column in model_pool._fields.values()
                if column.type == "many2many"
                # only stored m2m fields have a real relation table
                and (column.compute is None or column.store)
                and column.relation
            ]

        # Any remaining base table in the public schema is orphaned
        self.env.cr.execute(
            """
            SELECT table_name FROM information_schema.tables
            WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
            AND table_name NOT IN %s""",
            (tuple(known_tables),),
        )
        res = [(0, 0, {"name": row[0]}) for row in self.env.cr.fetchall()]
        if not res:
            raise UserError(_("No orphaned tables found"))
        return res

    purge_line_ids = fields.One2many(
        "cleanup.purge.line.table", "wizard_id", "Tables to purge"
    )
| 37.833333
| 4,540
|
2,760
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
import logging
from odoo import _, api, fields, models
from odoo.exceptions import AccessDenied
class CleanupPurgeLine(models.AbstractModel):
    """Abstract base class for the purge wizard lines"""

    _name = "cleanup.purge.line"
    _order = "name"
    _description = "Purge Column Abstract Wizard"

    # human-readable identifier of the thing to purge
    name = fields.Char(readonly=True)
    # set to True once this line has been processed
    purged = fields.Boolean(readonly=True)
    wizard_id = fields.Many2one("cleanup.purge.wizard")

    logger = logging.getLogger("odoo.addons.database_cleanup")

    def purge(self):
        # concrete subclasses implement the actual removal
        raise NotImplementedError

    @api.model
    def create(self, values):
        # make sure the user trying this is actually supposed to do it
        if self.env.ref("base.group_erp_manager") not in self.env.user.groups_id:
            raise AccessDenied
        return super().create(values)
class PurgeWizard(models.AbstractModel):
    """Abstract base class for the purge wizards"""

    _name = "cleanup.purge.wizard"
    _description = "Purge stuff"

    @api.model
    def default_get(self, fields_list):
        # pre-populate the wizard lines with whatever find() detects
        res = super().default_get(fields_list)
        if "purge_line_ids" in fields_list:
            res["purge_line_ids"] = self.find()
        return res

    def find(self):
        # concrete subclasses return [(0, 0, vals)] commands for the lines
        raise NotImplementedError

    def purge_all(self):
        """Purge every line of this wizard."""
        self.mapped("purge_line_ids").purge()
        return True

    @api.model
    def get_wizard_action(self):
        """Return a window action opening a freshly-created wizard."""
        wizard = self.create({})
        return {
            "type": "ir.actions.act_window",
            "name": wizard.display_name,
            "views": [(False, "form")],
            "res_model": self._name,
            "res_id": wizard.id,
            "flags": {
                "action_buttons": False,
                "sidebar": False,
            },
        }

    def select_lines(self):
        """Open the wizard's lines in a list so the user can pick some."""
        return {
            "type": "ir.actions.act_window",
            "name": _("Select lines to purge"),
            "views": [(False, "tree"), (False, "form")],
            "res_model": self._fields["purge_line_ids"].comodel_name,
            "domain": [("wizard_id", "in", self.ids)],
        }

    def name_get(self):
        # all wizard records display their model description as name
        return [(this.id, self._description) for this in self]

    @api.model
    def create(self, values):
        # make sure the user trying this is actually supposed to do it
        if self.env.ref("base.group_erp_manager") not in self.env.user.groups_id:
            raise AccessDenied
        return super().create(values)

    purge_line_ids = fields.One2many("cleanup.purge.line", "wizard_id")
| 30.666667
| 2,760
|
4,321
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
import logging
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
_logger = logging.getLogger(__name__)
class IrModel(models.Model):
    """Guard core ir.model routines against models missing from the registry."""

    _inherit = "ir.model"

    def _drop_table(self):
        """this function crashes for undefined models"""
        # only keep records whose model still exists in the registry
        self = self.filtered(lambda x: x.model in self.env)
        return super()._drop_table()

    @api.depends()
    def _inherited_models(self):
        """this function crashes for undefined models"""
        self = self.filtered(lambda x: x.model in self.env)
        return super()._inherited_models()
class IrModelFields(models.Model):
    """Guard ir.model.fields updates against models missing from the registry."""

    _inherit = "ir.model.fields"

    def _prepare_update(self):
        """this function crashes for undefined models"""
        # only keep fields whose model still exists in the registry
        self = self.filtered(lambda x: x.model in self.env)
        return super()._prepare_update()
class CleanupPurgeLineModel(models.TransientModel):
    """One orphaned ir.model record proposed for removal."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.model"
    _description = "Cleanup Purge Line Model"

    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.model", "Purge Wizard", readonly=True
    )

    def purge(self):
        """
        Unlink models upon manual confirmation.
        """
        # These flags let us unlink base records as if a module were
        # being uninstalled.
        context_flags = {
            MODULE_UNINSTALL_FLAG: True,
            "purge": True,
        }
        if self:
            objs = self
        else:
            # called without records: use the ids from the context
            objs = self.env["cleanup.purge.line.model"].browse(
                self._context.get("active_ids")
            )
        for line in objs:
            self.env.cr.execute(
                "SELECT id, model from ir_model WHERE model = %s", (line.name,)
            )
            row = self.env.cr.fetchone()
            if not row:
                continue
            self.logger.info("Purging model %s", row[1])
            # Detach attachments pointing at the model before removing it
            attachments = self.env["ir.attachment"].search(
                [("res_model", "=", line.name)]
            )
            if attachments:
                self.env.cr.execute(
                    "UPDATE ir_attachment SET res_model = NULL " "WHERE id in %s",
                    (tuple(attachments.ids),),
                )
            self.env["ir.model.constraint"].search(
                [
                    ("model", "=", line.name),
                ]
            ).unlink()
            # Drop relational fields in other models that point at this one
            relations = (
                self.env["ir.model.fields"]
                .search(
                    [
                        ("relation", "=", row[1]),
                    ]
                )
                .with_context(**context_flags)
            )
            for relation in relations:
                try:
                    # Fails if the model on the target side
                    # cannot be instantiated
                    relation.unlink()
                except KeyError as exc:
                    self.logger.exception("Unlink models - KeyError: %s" % exc)
                except AttributeError as exc:
                    self.logger.exception("Unlink models - AttributeError: %s" % exc)
            self.env["ir.model.relation"].search(
                [("model", "=", line.name)]
            ).with_context(**context_flags).unlink()
            self.env["ir.model"].browse([row[0]]).with_context(**context_flags).unlink()
            line.write({"purged": True})
        return True
class CleanupPurgeWizardModel(models.TransientModel):
    """Wizard listing ir.model rows without a matching registry model."""

    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.model"
    _description = "Purge models"

    @api.model
    def find(self):
        """
        Search for models that cannot be instantiated.
        """
        res = []
        self.env.cr.execute("SELECT model from ir_model")
        for (model,) in self.env.cr.fetchall():
            # a model absent from self.env is orphaned
            if model not in self.env:
                res.append((0, 0, {"name": model}))
        if not res:
            raise UserError(_("No orphaned models found"))
        return res

    purge_line_ids = fields.One2many(
        "cleanup.purge.line.model", "wizard_id", "Models to purge"
    )
| 32.984733
| 4,321
|
2,946
|
py
|
PYTHON
|
15.0
|
# Copyright 2017 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import fields, models
from ..identifier_adapter import IdentifierAdapter
class CreateIndexesLine(models.TransientModel):
    """One missing column index to (re)create."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.create_indexes.line"
    _description = "Cleanup Create Indexes line"

    # reuse of the 'purged' flag: here it means "index was created"
    purged = fields.Boolean("Created")
    wizard_id = fields.Many2one("cleanup.create_indexes.wizard")
    field_id = fields.Many2one("ir.model.fields", required=True)

    def purge(self):
        """Create the missing index for every field on these lines."""
        for field in self.mapped("field_id"):
            model = self.env[field.model]
            # follow Odoo's naming convention: <table>_<column>_index
            name = "{}_{}_index".format(model._table, field.name)
            self.env.cr.execute(
                "create index %s ON %s (%s)",
                (
                    IdentifierAdapter(name, quote=False),
                    IdentifierAdapter(model._table),
                    IdentifierAdapter(field.name),
                ),
            )
            # refresh planner statistics so the new index is used
            self.env.cr.execute("analyze %s", (IdentifierAdapter(model._table),))
        self.write(
            {
                "purged": True,
            }
        )
class CreateIndexesWizard(models.TransientModel):
    """Wizard listing indexed fields whose database index is missing."""

    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.create_indexes.wizard"
    _description = "Create indexes"

    purge_line_ids = fields.One2many(
        "cleanup.create_indexes.line",
        "wizard_id",
    )

    def find(self):
        """Return line commands for every index=True field lacking an index."""
        res = list()
        for field in self.env["ir.model.fields"].search(
            [
                ("index", "=", True),
            ]
        ):
            # skip fields of models no longer present in the registry
            if field.model not in self.env.registry:
                continue
            model = self.env[field.model]
            name = "{}_{}_index".format(model._table, field.name)
            # does the conventionally-named index already exist?
            self.env.cr.execute(
                "select indexname from pg_indexes "
                "where indexname=%s and tablename=%s",
                (name, model._table),
            )
            if self.env.cr.rowcount:
                continue
            # does the column actually exist in the table?
            self.env.cr.execute(
                "select a.attname "
                "from pg_attribute a "
                "join pg_class c on a.attrelid=c.oid "
                "join pg_tables t on t.tablename=c.relname "
                "where attname=%s and c.relname=%s",
                (
                    field.name,
                    model._table,
                ),
            )
            if not self.env.cr.rowcount:
                continue
            res.append(
                (
                    0,
                    0,
                    {
                        "name": "{}.{}".format(field.model, field.name),
                        "field_id": field.id,
                    },
                )
            )
        return res
| 32.021739
| 2,946
|
2,284
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class CleanupPurgeLineMenu(models.TransientModel):
    """One dangling menu entry proposed for removal."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.menu"
    _description = "Cleanup Purge Line Menu"

    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.menu", "Purge Wizard", readonly=True
    )
    menu_id = fields.Many2one("ir.ui.menu", "Menu entry")

    def purge(self):
        """Unlink menu entries upon manual confirmation."""
        if self:
            objs = self
        else:
            # called without records: use the ids from the context
            objs = self.env["cleanup.purge.line.menu"].browse(
                self._context.get("active_ids")
            )
        to_unlink = objs.filtered(lambda x: not x.purged and x.menu_id)
        self.logger.info("Purging menu entries: %s", to_unlink.mapped("name"))
        to_unlink.mapped("menu_id").unlink()
        return to_unlink.write({"purged": True})
class CleanupPurgeWizardMenu(models.TransientModel):
    """Wizard listing menus whose window action targets an unknown model."""

    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.menu"
    _description = "Purge menus"

    @api.model
    def find(self):
        """
        Search for models that cannot be instantiated.
        """
        res = []
        # include inactive menus as well
        for menu in (
            self.env["ir.ui.menu"]
            .with_context(active_test=False)
            .search([("action", "!=", False)])
        ):
            # only window actions carry a res_model worth checking
            if menu.action.type != "ir.actions.act_window":
                continue
            if menu.action.res_model and menu.action.res_model not in self.env:
                res.append(
                    (
                        0,
                        0,
                        {
                            "name": menu.complete_name,
                            "menu_id": menu.id,
                        },
                    )
                )
        if not res:
            raise UserError(_("No dangling menu entries found"))
        return res

    purge_line_ids = fields.One2many(
        "cleanup.purge.line.menu", "wizard_id", "Menus to purge"
    )
| 33.588235
| 2,284
|
3,120
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from ..identifier_adapter import IdentifierAdapter
class CleanupPurgeLineData(models.TransientModel):
    """One orphaned ``ir.model.data`` row, listed by the data purge wizard."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.data"
    _description = "Cleanup Purge Line Data"

    data_id = fields.Many2one("ir.model.data", "Data entry")
    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.data", "Purge Wizard", readonly=True
    )

    def purge(self):
        """Unlink data entries upon manual confirmation."""
        # Called either on a recordset or, from the list view, with the
        # selection passed through the context.
        records = self or self.env["cleanup.purge.line.data"].browse(
            self._context.get("active_ids")
        )
        pending = records.filtered(lambda line: not line.purged and line.data_id)
        self.logger.info("Purging data entries: %s", pending.mapped("name"))
        pending.mapped("data_id").unlink()
        return pending.write({"purged": True})
class CleanupPurgeWizardData(models.TransientModel):
    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.data"
    _description = "Purge data"

    @api.model
    def find(self):
        """Collect all rows from ir_model_data that refer
        to a nonexisting model, or to a nonexisting
        row in the model's table.

        Returns one2many create tuples ``(0, 0, vals)`` for the wizard's
        purge lines; raises :class:`UserError` when nothing is found.
        """
        res = []
        data_ids = []
        unknown_models = []
        self.env.cr.execute("""SELECT DISTINCT(model) FROM ir_model_data""")
        for (model,) in self.env.cr.fetchall():
            if not model:
                continue
            if model not in self.env:
                # Model gone from the registry: every xmlid on it is orphaned.
                unknown_models.append(model)
                continue
            # Known model: find xmlids whose res_id no longer exists in the
            # model's table. The table name cannot be a bound SQL parameter,
            # hence IdentifierAdapter for safe identifier interpolation.
            self.env.cr.execute(
                """
                SELECT id FROM ir_model_data
                WHERE model = %s
                AND res_id IS NOT NULL
                AND NOT EXISTS (
                SELECT id FROM %s WHERE id=ir_model_data.res_id)
                """,
                (model, IdentifierAdapter(self.env[model]._table)),
            )
            data_ids.extend(data_row for data_row, in self.env.cr.fetchall())
        # Add all xmlids that point at models missing from the registry.
        data_ids += (
            self.env["ir.model.data"]
            .search(
                [
                    ("model", "in", unknown_models),
                ]
            )
            .ids
        )
        for data in self.env["ir.model.data"].browse(data_ids):
            res.append(
                (
                    0,
                    0,
                    {
                        "data_id": data.id,
                        "name": "%s.%s, object of type %s"
                        % (data.module, data.name, data.model),
                    },
                )
            )
        if not res:
            raise UserError(_("No orphaned data entries found"))
        return res

    # Lines proposed by find() and confirmed by the user.
    purge_line_ids = fields.One2many(
        "cleanup.purge.line.data", "wizard_id", "Data to purge"
    )
| 33.913043
| 3,120
|
4,929
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from ..identifier_adapter import IdentifierAdapter
class CleanupPurgeLineColumn(models.TransientModel):
    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.column"
    _description = "Cleanup Purge Line Column"

    model_id = fields.Many2one("ir.model", "Model", required=True, ondelete="CASCADE")
    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.column", "Purge Wizard", readonly=True
    )

    def purge(self):
        """
        Unlink columns upon manual confirmation.

        Each confirmed line drops one database column (``line.name``) from
        the table of ``line.model_id``. The DDL is issued directly on the
        cursor; IdentifierAdapter quotes the table/column identifiers since
        they cannot be bound as SQL parameters.
        """
        # Called either on a recordset or, from the list view, with the
        # selection passed through the context.
        if self:
            objs = self
        else:
            objs = self.env["cleanup.purge.line.column"].browse(
                self._context.get("active_ids")
            )
        for line in objs:
            if line.purged:
                continue
            model_pool = self.env[line.model_id.model]
            # Check whether the column actually still exists.
            # Inheritance such as stock.picking.in from stock.picking
            # can lead to double attempts at removal
            self.env.cr.execute(
                "SELECT count(attname) FROM pg_attribute "
                "WHERE attrelid = "
                "( SELECT oid FROM pg_class WHERE relname = %s ) "
                "AND attname = %s",
                (model_pool._table, line.name),
            )
            if not self.env.cr.fetchone()[0]:
                continue
            self.logger.info(
                "Dropping column %s from table %s", line.name, model_pool._table
            )
            self.env.cr.execute(
                "ALTER TABLE %s DROP COLUMN %s",
                (IdentifierAdapter(model_pool._table), IdentifierAdapter(line.name)),
            )
            line.write({"purged": True})
            # we need this commit because the ORM will deadlock if
            # we still have a pending transaction
            self.env.cr.commit()  # pylint: disable=invalid-commit
        return True
class CleanupPurgeWizardColumn(models.TransientModel):
    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.column"
    _description = "Purge columns"

    # List of known columns in use without corresponding fields
    # Format: {table: [fields]}
    blacklist = {
        "wkf_instance": ["uid"],  # lp:1277899
        "res_users": ["password", "password_crypt"],
    }

    @api.model
    def get_orphaned_columns(self, model_pools):
        """
        From openobject-server/openerp/osv/orm.py
        Iterate on the database columns to identify columns
        of fields which have been removed

        :param model_pools: list of model objects sharing one table
        :return: list of column names present in the table but backed by
            no (stored) field on any of the models, excluding magic and
            blacklisted columns
        """
        # Names of all stored fields across the models sharing this table;
        # non-stored computed fields have no column and are excluded.
        columns = list(
            {
                column.name
                for model_pool in model_pools
                for column in model_pool._fields.values()
                if not (column.compute is not None and not column.store)
            }
        )
        columns += models.MAGIC_COLUMNS
        columns += self.blacklist.get(model_pools[0]._table, [])
        # Compare against the live catalog; system-typed attributes are
        # filtered out by their format_type.
        self.env.cr.execute(
            "SELECT a.attname FROM pg_class c, pg_attribute a "
            "WHERE c.relname=%s AND c.oid=a.attrelid AND a.attisdropped=False "
            "AND pg_catalog.format_type(a.atttypid, a.atttypmod) "
            "NOT IN ('cid', 'tid', 'oid', 'xid') "
            "AND a.attname NOT IN %s",
            (model_pools[0]._table, tuple(columns)),
        )
        return [column for column, in self.env.cr.fetchall()]

    @api.model
    def find(self):
        """
        Search for columns that are not in the corresponding model.
        Group models by table to prevent false positives for columns
        that are only in some of the models sharing the same table.
        Example of this is 'sale_id' not being a field of stock.picking.in
        """
        res = []
        # mapping of tables to tuples (model id, [pool1, pool2, ...])
        table2model = {}
        for model in self.env["ir.model"].search([]):
            if model.model not in self.env:
                continue
            model_pool = self.env[model.model]
            # _auto=False models have no table managed by the ORM.
            if not model_pool._auto:
                continue
            table2model.setdefault(model_pool._table, (model.id, []))[1].append(
                model_pool
            )
        for _table, model_spec in table2model.items():
            for column in self.get_orphaned_columns(model_spec[1]):
                res.append((0, 0, {"name": column, "model_id": model_spec[0]}))
        if not res:
            raise UserError(_("No orphaned columns found"))
        return res

    # Lines proposed by find() and confirmed by the user.
    purge_line_ids = fields.One2many(
        "cleanup.purge.line.column", "wizard_id", "Columns to purge"
    )
| 36.511111
| 4,929
|
5,759
|
py
|
PYTHON
|
15.0
|
# Copyright 2017 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import api, fields, models
REASON_DUPLICATE = "REASON_DUPLICATE"
REASON_DEFAULT = "REASON_DEFAULT"
REASON_DEFAULT_FALSE = "REASON_DEFAULT_FALSE"
REASON_UNKNOWN_MODEL = "REASON_UNKNOWN_MODEL"
class CleanupPurgeLineProperty(models.TransientModel):
    """One purgeable ``ir.property``, with the reason it is redundant."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.property"
    _description = "Cleanup Purge Line Property"

    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.property", "Purge Wizard", readonly=True
    )
    property_id = fields.Many2one("ir.property")
    reason = fields.Selection(
        [
            (REASON_DUPLICATE, "Duplicated property"),
            (REASON_DEFAULT, "Same value as default"),
            (REASON_DEFAULT_FALSE, "Empty default property"),
            (REASON_UNKNOWN_MODEL, "Unknown model"),
        ]
    )

    def purge(self):
        """Delete properties"""
        properties = self.mapped("property_id")
        self.write({"purged": True})
        return properties.unlink()
class CleanupPurgeWizardProperty(models.TransientModel):
    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.property"
    _description = "Purge properties"

    @api.model
    def find(self):
        """
        Search property records which are duplicated or the same as the default

        Returns a list of value dicts for ``purge_line_ids``, one per
        purgeable ``ir.property``, each tagged with a reason:

        * ``REASON_UNKNOWN_MODEL``: default property whose model is gone
          (``get_by_record`` raises ``KeyError``).
        * ``REASON_DEFAULT_FALSE``: default property with an empty value.
        * ``REASON_DEFAULT``: record property equal to the applicable default.
        * ``REASON_DUPLICATE``: extra properties sharing the same
          (res_id, company_id, fields_id) key.
        """
        result = []
        default_properties = self.env["ir.property"].search(
            [
                ("res_id", "=", False),
            ]
        )
        handled_field_ids = []
        for prop in default_properties:
            value = None
            try:
                value = prop.get_by_record()
            except KeyError:
                # The default property points at a model missing from the
                # registry.
                result.append(
                    {
                        "name": "{}@{}: {}".format(prop.name, prop.res_id, value),
                        "property_id": prop.id,
                        "reason": REASON_UNKNOWN_MODEL,
                    }
                )
                continue
            if not value:
                # An empty default is equivalent to having no default at all.
                result.append(
                    {
                        "name": "{}@{}: {}".format(prop.name, prop.res_id, value),
                        "property_id": prop.id,
                        "reason": REASON_DEFAULT_FALSE,
                    }
                )
                continue
            if prop.fields_id.id in handled_field_ids:
                # Defaults for this field were already processed.
                continue
            domain = [
                ("id", "!=", prop.id),
                ("fields_id", "=", prop.fields_id.id),
                # =? explicitly tests for None or False, not falsyness
                ("value_float", "=?", prop.value_float or False),
                ("value_integer", "=?", prop.value_integer or False),
                ("value_text", "=?", prop.value_text or False),
                ("value_binary", "=?", prop.value_binary or False),
                ("value_reference", "=?", prop.value_reference or False),
                ("value_datetime", "=?", prop.value_datetime or False),
            ]
            if prop.company_id:
                domain.append(("company_id", "=", prop.company_id.id))
            else:
                # A company-less default applies to properties without a
                # company and to companies that have no company-specific
                # default of their own for this field.
                domain.extend(
                    [
                        "|",
                        ("company_id", "=", False),
                        (
                            "company_id",
                            "in",
                            self.env["res.company"]
                            .search(
                                [
                                    (
                                        "id",
                                        "not in",
                                        # Bugfix: compare company ids, not
                                        # ir.property ids, when excluding
                                        # companies that have their own
                                        # default for this field.
                                        default_properties.filtered(
                                            lambda x: x.company_id
                                            and x.fields_id == prop.fields_id
                                        )
                                        .mapped("company_id")
                                        .ids,
                                    )
                                ]
                            )
                            .ids,
                        ),
                    ]
                )
            for redundant_property in self.env["ir.property"].search(domain):
                result.append(
                    {
                        "name": "{}@{}: {}".format(
                            prop.name, redundant_property.res_id, prop.get_by_record()
                        ),
                        "property_id": redundant_property.id,
                        "reason": REASON_DEFAULT,
                    }
                )
            handled_field_ids.append(prop.fields_id.id)
        # Exact duplicates: several properties for one
        # (res_id, company_id, fields_id) key.
        self.env.cr.execute(
            """
            with grouped_properties(ids, cnt) as (
                select array_agg(id), count(*)
                from ir_property group by res_id, company_id, fields_id
            )
            select ids from grouped_properties where cnt > 1
            """
        )
        for (ids,) in self.env.cr.fetchall():
            # odoo uses the first property found by search
            for prop in self.env["ir.property"].search([("id", "in", ids)])[1:]:
                result.append(
                    {
                        "name": "{}@{}: {}".format(
                            prop.name, prop.res_id, prop.get_by_record()
                        ),
                        "property_id": prop.id,
                        "reason": REASON_DUPLICATE,
                    }
                )
        return result

    purge_line_ids = fields.One2many(
        "cleanup.purge.line.property", "wizard_id", "Properties to purge"
    )
| 37.396104
| 5,759
|
3,197
|
py
|
PYTHON
|
15.0
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# Copyright 2021 Camptocamp <https://camptocamp.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from odoo.modules.module import get_module_path
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
class IrModelData(models.Model):
    _inherit = "ir.model.data"

    @api.model
    def _module_data_uninstall(self, modules_to_remove):
        """this function crashes for xmlids on undefined models or fields
        referring to undefined models

        Pre-clean such xmlids before delegating to the standard
        uninstall, so super() never touches a dangling reference:
        * ``ir.model.fields`` xmlids whose field record is gone, or whose
          field's model is gone, are unlinked.
        * xmlids on models missing from the registry are unlinked.
        """
        for this in self.search([("module", "in", modules_to_remove)]):
            if this.model == "ir.model.fields":
                # MODULE_UNINSTALL_FLAG lets the browse/unlink proceed
                # during module uninstallation.
                field = (
                    self.env[this.model]
                    .with_context(**{MODULE_UNINSTALL_FLAG: True})
                    .browse(this.res_id)
                )
                if not field.exists() or field.model not in self.env:
                    this.unlink()
                    continue
            if this.model not in self.env:
                this.unlink()
        return super()._module_data_uninstall(modules_to_remove)
class CleanupPurgeLineModule(models.TransientModel):
    """One purgeable module entry, listed by the module purge wizard."""

    _inherit = "cleanup.purge.line"
    _name = "cleanup.purge.line.module"
    _description = "Cleanup Purge Line Module"

    wizard_id = fields.Many2one(
        "cleanup.purge.wizard.module", "Purge Wizard", readonly=True
    )

    def purge(self):
        """
        Uninstall modules upon manual confirmation, then reload
        the database.
        """
        names = self.filtered(lambda line: not line.purged).mapped("name")
        modules = self.env["ir.module.module"].search([("name", "in", names)])
        if not modules:
            return True
        self.logger.info("Purging modules %s", ", ".join(names))
        # Uninstall whatever is still installed before deleting the records.
        installed = modules.filtered(
            lambda module: module.state not in ("uninstallable", "uninstalled")
        )
        installed.button_immediate_uninstall()
        modules.invalidate_cache()
        modules.unlink()
        return self.write({"purged": True})
class CleanupPurgeWizardModule(models.TransientModel):
    _inherit = "cleanup.purge.wizard"
    _name = "cleanup.purge.wizard.module"
    _description = "Purge modules"

    @api.model
    def find(self):
        """Collect modules whose source code is gone from the addons path.

        Modules still marked installed get a one2many create tuple for the
        wizard; already-uninstalled ones get a standalone purge line.
        Raises :class:`UserError` when nothing is found.
        """
        res = []
        IrModule = self.env["ir.module.module"]
        for module in IrModule.search(
            [("to_buy", "=", False), ("name", "!=", "studio_customization")]
        ):
            # Source still present on disk: nothing to purge.
            if get_module_path(module.name, display_warning=False):
                continue
            if module.state == "uninstalled":
                # NOTE(review): this line is created without a wizard_id, so
                # it is not attached to purge_line_ids — presumably picked up
                # elsewhere; confirm against the purge wizard views.
                self.env["cleanup.purge.line.module"].create(
                    {
                        "name": module.name,
                    }
                )
                continue
            res.append((0, 0, {"name": module.name}))
        if not res:
            raise UserError(_("No modules found to purge"))
        return res

    # Lines proposed by find() and confirmed by the user.
    purge_line_ids = fields.One2many(
        "cleanup.purge.line.module", "wizard_id", "Modules to purge"
    )
| 35.522222
| 3,197
|
1,111
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Odoo addon manifest for base_changeset.
{
    "name": "Track record changesets",
    "version": "15.0.1.0.0",
    "development_status": "Alpha",
    "author": "Onestein, Camptocamp, Odoo Community Association (OCA)",
    "maintainers": ["astirpe"],
    "license": "AGPL-3",
    "category": "Tools",
    "depends": ["web"],
    "website": "https://github.com/OCA/server-tools",
    # Security first, then views, then menus.
    "data": [
        "security/groups.xml",
        "security/ir.model.access.csv",
        "security/rules.xml",
        "views/record_changeset_views.xml",
        "views/record_changeset_change_views.xml",
        "views/changeset_field_rule_views.xml",
        "views/menu.xml",
    ],
    # Backend JS/SCSS bundles and QWeb templates.
    "assets": {
        "web.assets_backend": [
            "base_changeset/static/src/js/backend.js",
            "base_changeset/static/src/scss/backend.scss",
        ],
        "web.assets_qweb": ["base_changeset/static/src/xml/backend.xml"],
    },
    "demo": ["demo/changeset_field_rule.xml"],
    "installable": True,
}
| 33.666667
| 1,111
|
11,185
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from markupsafe import Markup
from odoo import fields
from odoo.tests.common import TransactionCase
from ..models.base import disable_changeset
from .common import ChangesetTestCommon
class TestChangesetFieldType(ChangesetTestCommon, TransactionCase):
    """Check that changeset changes are stored expectingly to their types"""

    def _setup_rules(self):
        # One 'validate' changeset rule per field type under test; the
        # res.partner field used as representative is listed next to it.
        ChangesetFieldRule = self.env["changeset.field.rule"]
        ChangesetFieldRule.search([]).unlink()
        fields = (
            ("char", "ref"),
            ("text", "comment"),
            ("boolean", "is_company"),
            ("date", "date"),
            ("integer", "color"),
            ("float", "credit_limit"),
            ("selection", "type"),
            ("many2one", "country_id"),
            ("many2many", "category_id"),
            ("one2many", "user_ids"),
            ("binary", "image_1920"),
        )
        for field_type, field in fields:
            attr_name = "field_%s" % field_type
            field_record = self.env["ir.model.fields"].search(
                [("model", "=", "res.partner"), ("name", "=", field)]
            )
            self.assertTrue(field_record, "Field %s not available" % field)
            # set attribute such as 'self.field_char' is a
            # ir.model.fields record of the field res_partner.ref
            setattr(self, attr_name, field_record)
            ChangesetFieldRule.create(
                {"field_id": field_record.id, "action": "validate"}
            )

    def setUp(self):
        super().setUp()
        self._setup_rules()
        self.partner = self.env["res.partner"].create(
            {"name": "Original Name", "street": "Original Street"}
        )
        # Add context for this test for compatibility with other modules' tests
        self.partner = self.partner.with_context(test_record_changeset=True)

    # --- creating changesets: one test per supported field type ---

    def test_new_changeset_char(self):
        """Add a new changeset on a Char field"""
        self.partner.write({self.field_char.name: "New value"})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_char,
                    self.partner[self.field_char.name],
                    "New value",
                    "draft",
                )
            ],
        )

    def test_new_changeset_text(self):
        """Add a new changeset on a Text field"""
        self.partner.write({self.field_text.name: "New comment\non 2 lines"})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_text,
                    self.partner[self.field_text.name],
                    "New comment\non 2 lines",
                    "draft",
                )
            ],
        )

    def test_new_changeset_boolean(self):
        """Add a new changeset on a Boolean field"""
        # ensure the changeset has to change the value
        self.partner.with_context(__no_changeset=disable_changeset).write(
            {self.field_boolean.name: False}
        )
        self.partner.write({self.field_boolean.name: True})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_boolean,
                    self.partner[self.field_boolean.name],
                    True,
                    "draft",
                )
            ],
        )

    def test_new_changeset_date(self):
        """Add a new changeset on a Date field"""
        self.partner.write({self.field_date.name: "2015-09-15"})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_date,
                    self.partner[self.field_date.name],
                    fields.Date.from_string("2015-09-15"),
                    "draft",
                )
            ],
        )

    def test_new_changeset_integer(self):
        """Add a new changeset on a Integer field"""
        self.partner.write({self.field_integer.name: 42})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [(self.field_integer, self.partner[self.field_integer.name], 42, "draft")],
        )

    def test_new_changeset_float(self):
        """Add a new changeset on a Float field"""
        self.partner.write({self.field_float.name: 3.1415})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [(self.field_float, self.partner[self.field_float.name], 3.1415, "draft")],
        )

    def test_new_changeset_selection(self):
        """Add a new changeset on a Selection field"""
        self.partner.write({self.field_selection.name: "delivery"})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_selection,
                    self.partner[self.field_selection.name],
                    "delivery",
                    "draft",
                )
            ],
        )

    def test_new_changeset_many2one(self):
        """Add a new changeset on a Many2one field"""
        self.partner.with_context(__no_changeset=disable_changeset).write(
            {self.field_many2one.name: self.env.ref("base.fr").id}
        )
        self.partner.write({self.field_many2one.name: self.env.ref("base.ch").id})
        self.assert_changeset(
            self.partner,
            self.env.user,
            [
                (
                    self.field_many2one,
                    self.partner[self.field_many2one.name],
                    self.env.ref("base.ch"),
                    "draft",
                )
            ],
        )

    def test_new_changeset_many2many(self):
        """Add a new changeset on a Many2many field is not supported"""
        with self.assertRaises(NotImplementedError):
            self.partner.write(
                {self.field_many2many.name: [self.env.ref("base.ch").id]}
            )

    def test_new_changeset_one2many(self):
        """Add a new changeset on a One2many field is not supported"""
        with self.assertRaises(NotImplementedError):
            self.partner.write(
                {self.field_one2many.name: [self.env.ref("base.user_root").id]}
            )

    def test_new_changeset_binary(self):
        """Add a new changeset on a Binary field is not supported"""
        with self.assertRaises(NotImplementedError):
            self.partner.write({self.field_binary.name: "xyz"})

    # --- applying changeset changes: one test per supported field type ---

    def test_apply_char(self):
        """Apply a change on a Char field"""
        changes = [(self.field_char, "New Ref", "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertEqual(self.partner[self.field_char.name], "New Ref")

    def test_apply_text(self):
        """Apply a change on a Text field"""
        changes = [(self.field_text, "New comment\non 2 lines", "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertEqual(
            self.partner[self.field_text.name], Markup("<p>New comment\non 2 lines</p>")
        )

    def test_apply_boolean(self):
        """Apply a change on a Boolean field"""
        # ensure the changeset has to change the value
        self.partner.write({self.field_boolean.name: False})
        changes = [(self.field_boolean, True, "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertEqual(self.partner[self.field_boolean.name], True)
        # Cannot do this while it is on the same transaction. The cache may not
        # be updated
        # changes = [(self.field_boolean, False, 'draft')]
        # changeset = self._create_changeset(self.partner, changes)
        # changeset.change_ids.apply()
        # self.assertEqual(self.partner[self.field_boolean.name], False)

    def test_apply_date(self):
        """Apply a change on a Date field"""
        changes = [(self.field_date, "2015-09-15", "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertAlmostEqual(
            self.partner[self.field_date.name], fields.Date.from_string("2015-09-15")
        )

    def test_apply_integer(self):
        """Apply a change on a Integer field"""
        changes = [(self.field_integer, 42, "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertAlmostEqual(self.partner[self.field_integer.name], 42)

    def test_apply_float(self):
        """Apply a change on a Float field"""
        changes = [(self.field_float, 52.47, "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertAlmostEqual(self.partner[self.field_float.name], 52.47)

    def test_apply_selection(self):
        """Apply a change on a Selection field"""
        changes = [(self.field_selection, "delivery", "draft")]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertAlmostEqual(self.partner[self.field_selection.name], "delivery")

    def test_apply_many2one(self):
        """Apply a change on a Many2one field"""
        self.partner.with_context(__no_changeset=disable_changeset).write(
            {self.field_many2one.name: self.env.ref("base.fr").id}
        )
        changes = [
            (
                self.field_many2one,
                "res.country,%d" % self.env.ref("base.ch").id,
                "draft",
            )
        ]
        changeset = self._create_changeset(self.partner, changes)
        changeset.change_ids.apply()
        self.assertEqual(
            self.partner[self.field_many2one.name], self.env.ref("base.ch")
        )

    def test_apply_many2many(self):
        """Apply a change on a Many2many field is not supported"""
        changes = [(self.field_many2many, self.env.ref("base.ch").id, "draft")]
        with self.assertRaises(NotImplementedError):
            self._create_changeset(self.partner, changes)

    def test_apply_one2many(self):
        """Apply a change on a One2many field is not supported"""
        changes = [
            (
                self.field_one2many,
                [self.env.ref("base.user_root").id, self.env.ref("base.user_demo").id],
                "draft",
            )
        ]
        with self.assertRaises(NotImplementedError):
            self._create_changeset(self.partner, changes)

    def test_apply_binary(self):
        """Apply a change on a Binary field is not supported"""
        changes = [(self.field_one2many, "", "draft")]
        with self.assertRaises(NotImplementedError):
            self._create_changeset(self.partner, changes)
| 37.408027
| 11,185
|
20,690
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from datetime import datetime, timedelta
from odoo import fields
from odoo.exceptions import UserError
from odoo.tests.common import TransactionCase
from ..models.base import disable_changeset
from .common import ChangesetTestCommon
class TestChangesetFlow(ChangesetTestCommon, TransactionCase):
"""Check how changeset are generated and applied based on the rules.
We do not really care about the types of the fields in this test
suite, so we only use 'char' fields. We have to ensure that the
general changeset flows work as expected, that is:
* create a changeset when a manual/system write is made on partner
* create a changeset according to the changeset rules when a source model
is specified
* apply a changeset change writes the value on the partner
* apply a whole changeset writes all the changes' values on the partner
* changes in state 'cancel' or 'done' do not write on the partner
* when all the changes are either 'cancel' or 'done', the changeset
becomes 'done'
"""
def _setup_rules(self):
ChangesetFieldRule = self.env["changeset.field.rule"]
ChangesetFieldRule.search([]).unlink()
self.field_name = self.env.ref("base.field_res_partner__name")
self.field_street = self.env.ref("base.field_res_partner__street")
self.field_street2 = self.env.ref("base.field_res_partner__street2")
ChangesetFieldRule.create({"field_id": self.field_name.id, "action": "auto"})
ChangesetFieldRule.create(
{"field_id": self.field_street.id, "action": "validate"}
)
ChangesetFieldRule.create(
{"field_id": self.field_street2.id, "action": "never"}
)
def setUp(self):
super().setUp()
self._setup_rules()
self.partner = self.env["res.partner"].create(
{"name": "X", "street": "street X", "street2": "street2 X"}
)
# Add context for this test for compatibility with other modules' tests
self.partner = self.partner.with_context(test_record_changeset=True)
def test_new_changeset(self):
"""Add a new changeset on a partner
A new changeset is created when we write on a partner
"""
self.partner.write({"name": "Y", "street": "street Y", "street2": "street2 Y"})
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
self.assertEqual(self.partner.count_pending_changeset_changes, 1)
self.assert_changeset(
self.partner,
self.env.user,
[
(self.field_name, "X", "Y", "done"),
(self.field_street, "street X", "street Y", "draft"),
(self.field_street2, "street2 X", "street2 Y", "cancel"),
],
)
self.assertEqual(self.partner.name, "Y")
self.assertEqual(self.partner.street, "street X")
self.assertEqual(self.partner.street2, "street2 X")
def test_create_new_changeset(self):
"""Create a new partner with a changeset"""
new = (
self.env["res.partner"]
.with_context(test_record_changeset=True)
.create(
{
"name": "partner",
"street": "street",
"street2": "street2",
}
)
)
new._compute_changeset_ids()
new._compute_count_pending_changesets()
self.assertEqual(new.count_pending_changesets, 1)
self.assert_changeset(
new,
self.env.user,
[
(self.field_name, False, "partner", "done"),
(self.field_street, False, "street", "draft"),
(self.field_street2, False, "street2", "cancel"),
],
)
self.assertEqual(new.name, "partner")
self.assertFalse(new.street)
self.assertFalse(new.street2)
def test_create_new_changeset_empty_value(self):
"""No change is created for empty values on create"""
new = (
self.env["res.partner"]
.with_context(test_record_changeset=True)
.create(
{
"name": "partner",
"street": "street",
"street2": False,
}
)
)
new._compute_changeset_ids()
new._compute_count_pending_changesets()
self.assertEqual(new.count_pending_changesets, 1)
self.assert_changeset(
new,
self.env.user,
[
(self.field_name, False, "partner", "done"),
(self.field_street, False, "street", "draft"),
],
)
self.assertEqual(new.name, "partner")
self.assertFalse(new.street)
self.assertFalse(new.street2)
def test_new_changeset_empty_value(self):
"""Create a changeset change that empty a value"""
self.partner.write({"street": False})
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
self.assert_changeset(
self.partner,
self.env.user,
[(self.field_street, "street X", False, "draft")],
)
def test_no_changeset_empty_value_both_sides(self):
"""No changeset created when both sides have an empty value"""
# we have to ensure that even if we write '' to a False field, we won't
# write a changeset
self.partner.with_context(__no_changeset=disable_changeset).write(
{"street": False}
)
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.partner.write({"street": ""})
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertFalse(self.partner.changeset_ids)
def test_apply_change(self):
"""Apply a changeset change on a partner"""
changes = [(self.field_name, "Y", "draft")]
changeset = self._create_changeset(self.partner, changes)
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
for change in changeset.change_ids:
change.get_fields_changeset_changes(changeset.model, changeset.res_id)
changeset.change_ids.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.name, "Y")
self.assertEqual(changeset.change_ids.state, "done")
# All computed fields are assigned
changeset.change_ids.read()
def test_apply_change_with_prevent_self_validation(self):
"""Don't apply a changeset change and prevent self validation"""
self.partner.write({"street": "street Z"})
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
self.assertEqual(self.partner.count_pending_changeset_changes, 1)
self.partner.changeset_ids.change_ids.rule_id.prevent_self_validation = True
with self.assertRaises(
UserError, msg="You don't have the rights to reject the changes."
):
self.partner.changeset_ids.change_ids.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
self.assertEqual(self.partner.count_pending_changeset_changes, 1)
self.assertEqual(self.partner.street, "street X")
self.assertEqual(self.partner.changeset_ids.change_ids.state, "draft")
user = self.env.ref("base.user_demo")
user.groups_id += self.env.ref("base_changeset.group_changeset_user")
self.partner.changeset_ids.change_ids.with_user(user).apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.count_pending_changeset_changes, 0)
self.assertEqual(self.partner.street, "street Z")
self.assertEqual(self.partner.changeset_ids.change_ids.state, "done")
def test_apply_done_change(self):
"""Done changes do not apply (already applied)"""
changes = [(self.field_name, "Y", "done")]
changeset = self._create_changeset(self.partner, changes)
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
with self.assertRaises(UserError):
changeset.change_ids.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.name, "X")
changeset.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.name, "X")
def test_apply_cancel_change(self):
"""Cancel changes do not apply"""
changes = [(self.field_name, "Y", "cancel")]
changeset = self._create_changeset(self.partner, changes)
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
with self.assertRaises(UserError):
changeset.change_ids.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.name, "X")
changeset.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertEqual(self.partner.name, "X")
def test_apply_empty_value(self):
"""Apply a change that empty a value"""
changes = [(self.field_street, False, "draft")]
changeset = self._create_changeset(self.partner, changes)
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 1)
for change in changeset.change_ids:
change.get_fields_changeset_changes(changeset.model, changeset.res_id)
changeset.change_ids.apply()
self.partner._compute_changeset_ids()
self.partner._compute_count_pending_changesets()
self.assertEqual(self.partner.count_pending_changesets, 0)
self.assertFalse(self.partner.street)
def test_apply_change_loop(self):
    """All changes of a changeset are applied when iterated."""

    def refresh_counters():
        self.partner._compute_changeset_ids()
        self.partner._compute_count_pending_changesets()

    pending = [
        (self.field_name, "Y", "draft"),
        (self.field_street, "street Y", "draft"),
        (self.field_street2, "street2 Y", "draft"),
    ]
    cs = self._create_changeset(self.partner, pending)
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 1)
    for change in cs.change_ids:
        change.get_fields_changeset_changes(cs.model, cs.res_id)
    cs.change_ids.apply()
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 0)
    self.assertEqual(self.partner.name, "Y")
    self.assertEqual(self.partner.street, "street Y")
    self.assertEqual(self.partner.street2, "street2 Y")
def test_apply(self):
    """Apply a full changeset on a partner."""

    def refresh_counters():
        self.partner._compute_changeset_ids()
        self.partner._compute_count_pending_changesets()

    pending = [
        (self.field_name, "Y", "draft"),
        (self.field_street, "street Y", "draft"),
        (self.field_street2, "street2 Y", "draft"),
    ]
    cs = self._create_changeset(self.partner, pending)
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 1)
    self.assertEqual(self.partner.count_pending_changeset_changes, 3)
    for change in cs.change_ids:
        change.get_fields_changeset_changes(cs.model, cs.res_id)
    cs.apply()
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 0)
    self.assertEqual(self.partner.count_pending_changeset_changes, 0)
    self.assertEqual(self.partner.name, "Y")
    self.assertEqual(self.partner.street, "street Y")
    self.assertEqual(self.partner.street2, "street2 Y")
def test_changeset_state_on_done(self):
    """Changeset flips to 'done' once its changes are applied."""

    def refresh_counters():
        self.partner._compute_changeset_ids()
        self.partner._compute_count_pending_changesets()

    cs = self._create_changeset(self.partner, [(self.field_name, "Y", "draft")])
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 1)
    self.assertEqual(cs.state, "draft")
    cs.change_ids.apply()
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 0)
    self.assertEqual(cs.state, "done")
def test_changeset_state_on_cancel(self):
    """Changeset flips to 'done' when its changes are rejected."""

    def refresh_counters():
        self.partner._compute_changeset_ids()
        self.partner._compute_count_pending_changesets()

    cs = self._create_changeset(self.partner, [(self.field_name, "Y", "draft")])
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 1)
    self.assertEqual(cs.state, "draft")
    cs.change_ids.cancel()
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 0)
    self.assertEqual(cs.state, "done")
def test_changeset_state(self):
    """Changeset becomes 'done' only once all of its changes are done."""

    def refresh_counters():
        self.partner._compute_changeset_ids()
        self.partner._compute_count_pending_changesets()

    pending = [
        (self.field_name, "Y", "draft"),
        (self.field_street, "street Y", "draft"),
        (self.field_street2, "street2 Y", "draft"),
    ]
    cs = self._create_changeset(self.partner, pending)
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 1)
    self.assertEqual(self.partner.count_pending_changeset_changes, 3)
    self.assertEqual(cs.state, "draft")
    cs.apply()
    refresh_counters()
    self.assertEqual(self.partner.count_pending_changesets, 0)
    self.assertEqual(self.partner.count_pending_changeset_changes, 0)
    self.assertEqual(cs.state, "done")
def test_apply_changeset_with_other_pending(self):
    """Opt-in check refuses to apply while an older changeset is pending."""
    older = self._create_changeset(self.partner, [(self.field_name, "Y", "draft")])
    # backdate the first changeset: changesets sharing the same date may
    # both be applied, so the check only triggers on strictly older ones
    older.date = fields.Datetime.to_string(datetime.now() - timedelta(days=1))
    newer = self._create_changeset(self.partner, [(self.field_name, "Z", "draft")])
    with self.assertRaises(UserError):
        newer.change_ids.with_context(
            require_previous_changesets_done=True
        ).apply()
    # without the context flag, the newer changeset applies normally
    newer.change_ids.apply()
def test_apply_different_changesets(self):
    """A recordset of changesets on distinct records applies in one call."""

    def refresh_counters(record):
        record._compute_changeset_ids()
        record._compute_count_pending_changesets()

    other = self.env["res.partner"].create({"name": "P2"})
    pending = [
        (self.field_name, "Y", "draft"),
        (self.field_street, "street Y", "draft"),
        (self.field_street2, "street2 Y", "draft"),
    ]
    cs1 = self._create_changeset(self.partner, pending)
    refresh_counters(self.partner)
    self.assertEqual(self.partner.count_pending_changesets, 1)
    self.assertEqual(self.partner.count_pending_changeset_changes, 3)
    for change in cs1.change_ids:
        change.get_fields_changeset_changes(cs1.model, cs1.res_id)
    cs2 = self._create_changeset(other, pending)
    refresh_counters(other)
    self.assertEqual(cs1.state, "draft")
    self.assertEqual(cs2.state, "draft")
    self.assertEqual(other.count_pending_changesets, 1)
    self.assertEqual(other.count_pending_changeset_changes, 3)
    for change in cs2.change_ids:
        change.get_fields_changeset_changes(cs2.model, cs2.res_id)
    (cs1 + cs2).apply()
    # both records received the same set of values
    for record in (self.partner, other):
        self.assertEqual(record.name, "Y")
        self.assertEqual(record.street, "street Y")
        self.assertEqual(record.street2, "street2 Y")
    self.assertEqual(cs1.state, "done")
    self.assertEqual(cs2.state, "done")
def test_new_changeset_source(self):
    """The change's source defaults to the user who wrote the value."""
    self.partner.write({"street": False})
    self.partner._compute_changeset_ids()
    self.assertEqual(self.partner.changeset_ids.source, self.env.user)
def test_new_changeset_source_other_model(self):
    """The source can be forced to a record of another model via context."""
    company = self.env.ref("base.main_company")
    ctx = {
        "force_changeset_for_partners": True,
        "__changeset_rules_source_model": "res.company",
        "__changeset_rules_source_id": company.id,
    }
    self.partner.with_context(**ctx).write({"street": False})
    self.partner._compute_changeset_ids()
    self.assertEqual(self.partner.changeset_ids.source, company)
def test_name_get(self):
    """name_get of a changeset on a model that has no 'name' field."""
    rule_vals = {
        "field_id": self.env.ref("base.field_res_partner_bank__active").id,
        "action": "validate",
    }
    self.env["changeset.field.rule"].create(rule_vals)
    bank = self.env.ref("base.bank_partner_demo").with_context(
        test_record_changeset=True
    )
    bank.active = False
    self.assertTrue(bank.changeset_ids)
    # display name falls back to the record's display_name (acc_number)
    self.assertIn(bank.acc_number, bank.changeset_ids.name_get()[0][1])
def test_new_changeset_expression(self):
    """A rule expression restricts which writes create a change."""
    rule = self.env["changeset.field.rule"].search(
        [("field_id", "=", self.field_street.id)]
    )
    rule.expression = "object.street != 'street X'"
    # expression is false on the current record: the write goes through
    self.partner.street = "street Y"
    self.partner.refresh()
    self.assertEqual(self.partner.street, "street Y")
    self.assertFalse(self.partner.changeset_ids)
    # expression is now true: the write is intercepted into a changeset
    self.partner.street = "street Z"
    self.partner.refresh()
    self.assertTrue(self.partner.changeset_ids)
    self.assertEqual(self.partner.street, "street Y")
| 45.673289
| 20,690
|
5,971
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import Form, TransactionCase
from ..models.base import disable_changeset
from .common import ChangesetTestCommon
class TestChangesetOrigin(ChangesetTestCommon, TransactionCase):
    """Check that origin - old fields are stored as expected.

    'origin' fields dynamically read fields from the partner when the state
    of the change is 'draft'. Once a change becomes 'done' or 'cancel', the
    'old' field copies the value from the partner and then the 'origin' field
    displays the 'old' value.
    """

    def _setup_rules(self):
        """Start from a clean slate: one 'validate' rule on partner name."""
        ChangesetFieldRule = self.env["changeset.field.rule"]
        ChangesetFieldRule.search([]).unlink()
        self.field_name = self.env.ref("base.field_res_partner__name")
        ChangesetFieldRule.create(
            {"field_id": self.field_name.id, "action": "validate"}
        )

    def setUp(self):
        super().setUp()
        self._setup_rules()
        self.partner = self.env["res.partner"].create({"name": "X"})
        # Add context for this test for compatibility with other modules' tests
        self.partner = self.partner.with_context(test_record_changeset=True)

    def test_origin_value_of_change_with_apply(self):
        """Origin field is read from the partner or 'old' - with apply

        According to the state of the change.
        """
        # writing through the form triggers the 'validate' rule: the value
        # is diverted into a pending change instead of being written
        with Form(self.partner) as partner_form:
            partner_form.name = "Y"
        self.assertEqual(self.partner.count_pending_changesets, 1)
        changeset = self.partner.changeset_ids
        change = changeset.change_ids
        self.assertEqual(self.partner.name, "X")
        # while the change is draft, 'origin' mirrors the live record value
        self.assertEqual(change.origin_value_char, "X")
        self.assertEqual(change.origin_value_display, "X")
        # bypass the changeset machinery to modify the record directly
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "A"
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        change.apply()
        # after apply, 'origin' is frozen to the 'old' snapshot ("A")
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "B"
        # further record edits no longer affect the frozen origin value
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        self.assertEqual(self.partner.count_pending_changesets, 0)

    def test_origin_value_of_change_with_cancel(self):
        """Origin field is read from the partner or 'old' - with cancel

        According to the state of the change.
        """
        with Form(self.partner) as partner_form:
            partner_form.name = "Y"
        self.assertEqual(self.partner.count_pending_changesets, 1)
        changeset = self.partner.changeset_ids
        change = changeset.change_ids
        self.assertEqual(self.partner.name, "X")
        self.assertEqual(change.origin_value_char, "X")
        self.assertEqual(change.origin_value_display, "X")
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "A"
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        change.cancel()
        # cancel freezes the origin value just like apply does
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "B"
        self.assertEqual(change.origin_value_char, "A")
        self.assertEqual(change.origin_value_display, "A")
        self.assertEqual(self.partner.count_pending_changesets, 0)

    def test_old_field_of_change_with_apply(self):
        """Old field is stored when the change is applied"""
        with Form(self.partner) as partner_form:
            partner_form.name = "Y"
        self.assertEqual(self.partner.count_pending_changesets, 1)
        changeset = self.partner.changeset_ids
        change = changeset.change_ids
        self.assertEqual(self.partner.name, "X")
        # 'old' stays empty while the change is draft
        self.assertFalse(change.old_value_char)
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "A"
        self.assertFalse(change.old_value_char)
        change.apply()
        # 'old' snapshots the record value at apply time
        self.assertEqual(change.old_value_char, "A")
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "B"
        self.assertEqual(change.old_value_char, "A")
        self.assertEqual(self.partner.count_pending_changesets, 0)

    def test_old_field_of_change_with_cancel(self):
        """Old field is stored when the change is canceled"""
        with Form(self.partner) as partner_form:
            partner_form.name = "Y"
        self.assertEqual(self.partner.count_pending_changesets, 1)
        changeset = self.partner.changeset_ids
        change = changeset.change_ids
        self.assertEqual(self.partner.name, "X")
        self.assertFalse(change.old_value_char)
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "A"
        self.assertFalse(change.old_value_char)
        change.cancel()
        # 'old' snapshots the record value at cancel time
        self.assertEqual(change.old_value_char, "A")
        with Form(
            self.partner.with_context(__no_changeset=disable_changeset)
        ) as partner_form:
            partner_form.name = "B"
        self.assertEqual(change.old_value_char, "A")
        self.assertEqual(self.partner.count_pending_changesets, 0)
| 42.956835
| 5,971
|
3,298
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
class ChangesetTestCommon(object):
    """Mixin with helpers shared by the changeset test cases."""

    def assert_changeset(self, record, expected_source, expected_changes):
        """Check if a changeset has been created according to expected values

        The record should have no prior changeset than the one created in the
        test (so it has exactly 1 changeset).

        The expected changes are tuples with (field, origin_value,
        new_value, state)

        :param record: record having a changeset
        :param expected_changes: contains tuples with the changes
        :type expected_changes: list(tuple))
        """
        changeset = self.env["record.changeset"].search(
            [("model", "=", record._name), ("res_id", "=", record.id)]
        )
        self.assertEqual(
            len(changeset), 1, "1 changeset expected, got {}".format(changeset)
        )
        self.assertEqual(changeset.source, expected_source)
        changes = changeset.change_ids
        missing = []
        # match each expected change against the actual ones, removing
        # matched records from `changes` so leftovers can be reported
        for expected_change in expected_changes:
            for change in changes:
                if (
                    change.field_id,
                    change.get_origin_value(),
                    change.get_new_value(),
                    change.state,
                ) == expected_change:
                    changes -= change
                    break
            else:
                # inner loop did not break: no actual change matched
                missing.append(expected_change)
        # build a diff-style message: '-' expected but absent, '+' unexpected
        message = ""
        for field, origin_value, new_value, state in missing:
            message += (
                "- field: '%s', origin_value: '%s', "
                "new_value: '%s', state: '%s'\n"
                % (field.name, origin_value, new_value, state)
            )
        for change in changes:
            message += (
                "+ field: '%s', origin_value: '%s', "
                "new_value: '%s', state: '%s'\n"
                % (
                    change.field_id.name,
                    change.get_origin_value(),
                    change.get_new_value(),
                    change.state,
                )
            )
        if message:
            raise AssertionError("Changes do not match\n\n:%s" % message)

    def _create_changeset(self, record, changes):
        """Create a changeset and its associated changes

        :param record: 'record' record
        :param changes: list of changes [(field, new value, state)]
        :returns: 'record.changeset' record
        """
        ChangesetChange = self.env["record.changeset.change"]
        get_field = ChangesetChange.get_field_for_type
        change_values = []
        for field, value, state in changes:
            change = {
                "field_id": field.id,
                # write in the field of the appropriate type for the
                # origin field (char, many2one, ...)
                get_field(field, "new"): value,
                "state": state,
            }
            change_values.append((0, 0, change))
        values = {
            "model": record._name,
            "res_id": record.id,
            "change_ids": change_values,
        }
        return self.env["record.changeset"].create(values)
| 38.348837
| 3,298
|
1,346
|
py
|
PYTHON
|
15.0
|
# Copyright 2021 Hunki Enterprises BV (<https://hunki-enterprises.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
from .common import ChangesetTestCommon
class TestChangesetFlow(ChangesetTestCommon, TransactionCase):
    """Check that changesets don't leak information"""

    def setUp(self):
        super().setUp()
        # replace any pre-existing rules with a single 'auto' rule on
        # ir.config_parameter.key so the test is self-contained
        Rule = self.env["changeset.field.rule"]
        Rule.search([]).unlink()
        self.rule = Rule.create(
            {
                "model_id": self.env.ref("base.model_ir_config_parameter").id,
                "field_id": self.env.ref("base.field_ir_config_parameter__key").id,
                "action": "auto",
            }
        )

    def test_change_unprivileged_user(self):
        """
        Check that unprivileged users can't see changesets they didn't create
        """
        demo_user = self.env.ref("base.user_demo")
        self.env["ir.config_parameter"].with_context(
            test_record_changeset=True,
        ).set_param("hello", "world")
        created = self.env["record.changeset.change"].search(
            [("rule_id", "=", self.rule.id)]
        )
        self.assertTrue(created)
        # the demo user must not be able to find the admin-created change
        self.assertFalse(
            created.with_user(demo_user).search([("id", "=", created.id)])
        )
| 36.378378
| 1,346
|
2,923
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests import common
class TestChangesetFieldRule(common.TransactionCase):
    """Tests for rule lookup, source-model precedence and the ormcache."""

    def setUp(self):
        super().setUp()
        self.company_model_id = self.env.ref("base.model_res_company").id
        self.field_name = self.env.ref("base.field_res_partner__name")
        self.field_street = self.env.ref("base.field_res_partner__street")

    def test_get_rules(self):
        """Global rules (no source model) are returned keyed by field name."""
        ChangesetFieldRule = self.env["changeset.field.rule"]
        ChangesetFieldRule.search([]).unlink()
        rule1 = ChangesetFieldRule.create(
            {"field_id": self.field_name.id, "action": "validate"}
        )
        rule2 = ChangesetFieldRule.create(
            {"field_id": self.field_street.id, "action": "never"}
        )
        get_rules = ChangesetFieldRule.get_rules(None, "res.partner")
        self.assertEqual(get_rules, {"name": rule1, "street": rule2})

    def test_get_rules_source(self):
        """A source-specific rule takes precedence over the global one."""
        ChangesetFieldRule = self.env["changeset.field.rule"]
        ChangesetFieldRule.search([]).unlink()
        rule1 = ChangesetFieldRule.create(
            {"field_id": self.field_name.id, "action": "validate"}
        )
        rule2 = ChangesetFieldRule.create(
            {"field_id": self.field_street.id, "action": "never"}
        )
        # same field as rule2 but scoped to changes coming from res.company
        rule3 = ChangesetFieldRule.create(
            {
                "source_model_id": self.company_model_id,
                "field_id": self.field_street.id,
                "action": "never",
            }
        )
        model = ChangesetFieldRule
        rules = model.get_rules(None, "res.partner")
        self.assertEqual(rules, {"name": rule1, "street": rule2})
        rules = model.get_rules("res.company", "res.partner")
        self.assertEqual(rules, {"name": rule1, "street": rule3})

    def test_get_rules_cache(self):
        """get_rules is cached; the cache invalidates on write/unlink only."""
        ChangesetFieldRule = self.env["changeset.field.rule"]
        ChangesetFieldRule.search([]).unlink()
        rule = ChangesetFieldRule.create(
            {"field_id": self.field_name.id, "action": "validate"}
        )
        self.assertEqual(
            ChangesetFieldRule.get_rules(None, "res.partner")["name"].action, "validate"
        )
        # Write on cursor to bypass the cache invalidation for the
        # matter of the test
        self.env.cr.execute(
            "UPDATE changeset_field_rule " "SET action = 'never' " "WHERE id = %s",
            (rule.id,),
        )
        # stale value expected: the ORM-level cache was not invalidated
        self.assertEqual(
            ChangesetFieldRule.get_rules(None, "res.partner")["name"].action, "validate"
        )
        rule.action = "auto"
        # an ORM write clears the cache, so the fresh value is returned
        self.assertEqual(
            ChangesetFieldRule.get_rules(None, "res.partner")["name"].action, "auto"
        )
        rule.unlink()
        self.assertFalse(ChangesetFieldRule.get_rules(None, "res.partner"))
| 40.597222
| 2,923
|
6,283
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models, tools
from odoo.tools.cache import ormcache
class ChangesetFieldRule(models.Model):
    """Per-field configuration deciding how a change must be handled."""

    _name = "changeset.field.rule"
    _description = "Changeset Field Rules"
    _rec_name = "field_id"

    # stored related so rules can be searched/grouped by tracked model
    model_id = fields.Many2one(related="field_id.model_id", store=True)
    field_id = fields.Many2one(
        comodel_name="ir.model.fields", ondelete="cascade", required=True
    )
    action = fields.Selection(
        selection="_selection_action",
        required=True,
        help="Auto: always apply a change.\n"
        "Validate: manually applied by an administrator.\n"
        "Never: change never applied.",
    )
    source_model_id = fields.Many2one(
        comodel_name="ir.model",
        string="Source Model",
        ondelete="cascade",
        domain=lambda self: [("id", "in", self._domain_source_models().ids)],
        help="If a source model is defined, the rule will be applied only "
        "when the change is made from this origin. "
        "Rules without source model are global and applies to all "
        "backends.\n"
        "Rules with a source model have precedence over global rules, "
        "but if a field has no rule with a source model, the global rule "
        "is used.",
    )
    company_id = fields.Many2one("res.company", default=lambda self: self.env.company)
    active = fields.Boolean(default=True)
    # when set, the user who wrote the value may not validate their own change
    prevent_self_validation = fields.Boolean(default=False)
    expression = fields.Text(
        help="Use this rule only on records where this is true. "
        "Available variables: object, user",
    )
    validator_group_ids = fields.Many2many(
        "res.groups",
        "changeset_field_rule_validator_group_rel",
        string="Validator Groups",
        default=lambda self: self.env.ref(
            "base_changeset.group_changeset_user",
            raise_if_not_found=False,
        )
        or self.env["res.groups"],
    )

    def init(self):
        """Ensure there is at most one rule with source_model_id NULL."""
        # The SQL constraint 'model_field_uniq' below does not cover rows
        # where source_model_id IS NULL (NULLs never collide in a UNIQUE
        # constraint), hence this partial unique index.
        self.env.cr.execute(
            """
            CREATE UNIQUE INDEX IF NOT EXISTS source_model_null_field_uniq
            ON %s (field_id)
            WHERE source_model_id IS NULL
            """
            % self._table
        )

    _sql_constraints = [
        (
            "model_field_uniq",
            "unique (source_model_id, field_id)",
            "A rule already exists for this field.",
        )
    ]

    @api.model
    def _domain_source_models(self):
        """Returns the models for which we can define rules.

        Example for submodules (replace by the xmlid of the model):

        ::
            models = super()._domain_source_models()
            return models | self.env.ref('base.model_res_users')

        Rules without model are global and apply for all models.
        """
        return self.env.ref("base.model_res_users")

    @api.model
    def _selection_action(self):
        return [("auto", "Auto"), ("validate", "Validate"), ("never", "Never")]

    @api.constrains("expression")
    def _check_expression(self):
        # evaluate against an empty virtual record so a broken expression
        # fails at save time rather than on the first tracked write
        for this in self:
            this._evaluate_expression(self.env[this.model_id.model].new({}))

    # NOTE(review): `skiparg` is deprecated in recent Odoo versions in
    # favour of listing the cached argument names — confirm against the
    # target Odoo release.
    @ormcache(skiparg=1)
    @api.model
    def _get_rules(self, source_model_name, record_model_name):
        """Cache rules

        Keep only the id of the rules, because if we keep the recordsets
        in the ormcache, we won't be able to browse them once their
        cursor is closed.

        The public method ``get_rules`` return the rules with the recordsets
        when called.
        """
        domain = self._get_rules_search_domain(record_model_name, source_model_name)
        model_rules = self.search(
            domain,
            # using 'ASC' means that 'NULLS LAST' is the default
            order="source_model_id ASC",
        )
        # model's rules have precedence over global ones so we iterate
        # over rules which have a source model first, then we complete
        # them with the global rules
        result = {}
        for rule in model_rules:
            # we already have a model's rule
            if result.get(rule.field_id.name):
                continue
            result[rule.field_id.name] = rule.id
        return result

    def _get_rules_search_domain(self, record_model_name, source_model_name):
        # rules for the tracked model, either global or scoped to the source
        return [
            ("model_id.model", "=", record_model_name),
            "|",
            ("source_model_id.model", "=", source_model_name),
            ("source_model_id", "=", False),
        ]

    @api.model
    def get_rules(self, source_model_name, record_model_name):
        """Return the rules for a model

        When a model is specified, it will return the rules for this
        model. Fields that have no rule for this model will use the
        global rules (those without source).

        The source model is the model which ask for a change, it will be
        for instance ``res.users``, ``lefac.backend`` or
        ``magellan.backend``.

        The second argument (``source_model_name``) is optional but
        cannot be an optional keyword argument otherwise it would not be
        in the key for the cache. The callers have to pass ``None`` if
        they want only global rules.
        """
        rules = {}
        cached_rules = self._get_rules(source_model_name, record_model_name)
        for field, rule_id in cached_rules.items():
            rules[field] = self.browse(rule_id)
        return rules

    def _evaluate_expression(self, record):
        """Evaluate expression if set"""
        self.ensure_one()
        # an empty expression means the rule always applies
        return not self.expression or tools.safe_eval.safe_eval(
            self.expression, {"object": record, "user": self.env.user}
        )

    # any create/write/unlink may change which rule wins for a field,
    # so the ormcache must be cleared on every CUD operation

    @api.model
    def create(self, vals):
        record = super().create(vals)
        self.clear_caches()
        return record

    def write(self, vals):
        result = super().write(vals)
        self.clear_caches()
        return result

    def unlink(self):
        result = super().unlink()
        self.clear_caches()
        return result
| 34.712707
| 6,283
|
6,777
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class RecordChangeset(models.Model):
    """Group of pending/applied changes made to one record at one time."""

    _name = "record.changeset"
    _description = "Record Changeset"
    _order = "date desc"
    _rec_name = "date"

    model = fields.Char(index=True, required=True, readonly=True)
    res_id = fields.Many2oneReference(
        string="Record ID",
        index=True,
        required=True,
        readonly=True,
        model_field="model",
    )
    change_ids = fields.One2many(
        comodel_name="record.changeset.change",
        inverse_name="changeset_id",
        string="Changes",
        readonly=True,
    )
    # BUG FIX: the default must be the *callable* fields.Datetime.now, not
    # the result of calling it — fields.Datetime.now() would be evaluated
    # once at class-definition time, freezing the default date at server
    # start for every changeset created afterwards.
    date = fields.Datetime(
        string="Modified on", default=fields.Datetime.now, index=True, readonly=True
    )
    modified_by_id = fields.Many2one(
        "res.users", default=lambda self: self.env.user, readonly=True
    )
    state = fields.Selection(
        compute="_compute_state",
        selection=[("draft", "Pending"), ("done", "Done")],
        store=True,
    )
    note = fields.Text()
    source = fields.Reference(
        string="Source of the change", selection="_reference_models", readonly=True
    )
    company_id = fields.Many2one("res.company")
    record_id = fields.Reference(
        selection="_reference_models", compute="_compute_resource_record", readonly=True
    )

    @api.depends("model", "res_id")
    def _compute_resource_record(self):
        for changeset in self:
            changeset.record_id = "{},{}".format(changeset.model, changeset.res_id or 0)

    @api.model
    def _reference_models(self):
        """Selection for the Reference fields: every model in the registry."""
        models = self.env["ir.model"].sudo().search([])
        return [(model.model, model.name) for model in models]

    @api.depends("change_ids", "change_ids.state")
    def _compute_state(self):
        # a changeset is done once no change is left in draft
        for rec in self:
            changes = rec.mapped("change_ids")
            if all(change.state in ("done", "cancel") for change in changes):
                rec.state = "done"
            else:
                rec.state = "draft"

    def name_get(self):
        result = []
        for changeset in self:
            name = "{} ({})".format(changeset.date, changeset.record_id.display_name)
            result.append((changeset.id, name))
        return result

    def apply(self):
        # skip_pending_status_check: a whole-changeset apply must not fail
        # on changes that are already done or cancelled
        self.with_context(skip_pending_status_check=True).mapped("change_ids").apply()

    def cancel(self):
        self.with_context(skip_pending_status_check=True).mapped("change_ids").cancel()

    @api.model
    def add_changeset(self, record, values, create=False):
        """Add a changeset on a record

        By default, when a record is modified by a user or by the
        system, the changeset will follow the rules configured for
        the global rules.

        A caller should pass the following keys in the context:

        * ``__changeset_rules_source_model``: name of the model which
          asks for the change
        * ``__changeset_rules_source_id``: id of the record which asks
          for the change

        When the source model and id are not defined, the current user
        is considered as the origin of the change.

        Should be called before the execution of ``write`` on the record
        so we can keep track of the existing value and also because the
        returned values should be used for ``write`` as some of the
        values may have been removed.

        :param values: the values being written on the record
        :type values: dict
        :param create: in create mode, no check is made to see if the field
                       value constitutes a change.
        :type create: boolean
        :returns: dict of values that should be wrote on the record
                  (fields with a 'Validate' or 'Never' rule are excluded)
        """
        record.ensure_one()
        source_model = self.env.context.get("__changeset_rules_source_model")
        source_id = self.env.context.get("__changeset_rules_source_id")
        if not source_model:
            # if the changes source is not defined, log the user who
            # made the change
            source_model = "res.users"
            if not source_id:
                source_id = self.env.uid
        if source_model and source_id:
            source = "{},{}".format(source_model, source_id)
        else:
            source = False
        change_model = self.env["record.changeset.change"]
        write_values = values.copy()
        changes = []
        rules = self.env["changeset.field.rule"].get_rules(
            source_model_name=source_model, record_model_name=record._name
        )
        for field in values:
            rule = rules.get(field)
            if (
                not rule
                or not rule._evaluate_expression(record)
                or (create and not values[field])
            ):
                continue
            if field in values:
                # skip no-op writes: they are not changes worth tracking
                if not create and not change_model._has_field_changed(
                    record, field, values[field]
                ):
                    continue
            change, pop_value = change_model._prepare_changeset_change(
                record,
                rule,
                field,
                values[field],
                create=create,
            )
            if pop_value:
                # the value awaits validation: keep it out of the write
                write_values.pop(field)
                if create:
                    # overwrite with null value for new records
                    write_values[field] = (
                        # but create some default for required text fields
                        record._fields[field].required
                        and record._fields[field].type in ("char", "text")
                        and "/"
                        or record._fields[field].null(record)
                    )
            changes.append(change)
        if changes:
            changeset_vals = self._prepare_changeset_vals(changes, record, source)
            self.env["record.changeset"].create(changeset_vals)
        return write_values

    @api.model
    def _prepare_changeset_vals(self, changes, record, source):
        """Build the create() values for a changeset and its changes."""
        has_company = "company_id" in self.env[record._name]._fields
        has_company = has_company and record.company_id
        company = record.company_id if has_company else self.env.company
        return {
            # newly created records are passed as newid records with the id in ref
            "res_id": record.id or record.id.ref,
            "model": record._name,
            "company_id": company.id,
            "change_ids": [(0, 0, vals) for vals in changes],
            "date": fields.Datetime.now(),
            "source": source,
        }
| 36.831522
| 6,777
|
16,486
|
py
|
PYTHON
|
15.0
|
# Copyright 2015-2017 Camptocamp SA
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from itertools import groupby
from operator import attrgetter
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from .base import disable_changeset
# sentinel object to be sure that no empty value was passed to
# RecordChangesetChange._value_for_changeset
_NO_VALUE = object()
class RecordChangesetChange(models.Model):
    """Store the change of one field for one changeset on one record

    This model is composed of 3 sets of fields:

    * 'origin'
    * 'old'
    * 'new'

    The 'new' fields contain the value that needs to be validated.
    The 'old' field copies the actual value of the record when the
    change is either applied or canceled. This field is used as a storage
    place but never shown by itself.
    The 'origin' fields is a related field towards the actual values of
    the record until the change is either applied or canceled, past
    that it shows the 'old' value.

    The reason behind this is that the values may change on a record between
    the moment when the changeset is created and when it is applied.

    On the views, we show the origin fields which represent the actual
    record values or the old values and we show the new fields.

    The 'origin' and 'new_value_display' are displayed on
    the tree view where we need a unique of field, the other fields are
    displayed on the form view so we benefit from their widgets.
    """

    _name = "record.changeset.change"
    _description = "Record Changeset Change"
    _rec_name = "field_id"

    changeset_id = fields.Many2one(
        comodel_name="record.changeset",
        required=True,
        ondelete="cascade",
        readonly=True,
    )
    # the tracked field on the target model
    field_id = fields.Many2one(
        comodel_name="ir.model.fields",
        required=True,
        readonly=True,
        ondelete="cascade",
    )
    field_name = fields.Char(related="field_id.name", readonly=True)
    field_type = fields.Selection(related="field_id.ttype", readonly=True)
    model = fields.Char(related="field_id.model", readonly=True, store=True)
    # human-readable renderings used on the tree view
    origin_value_display = fields.Char(
        string="Previous", compute="_compute_value_display"
    )
    new_value_display = fields.Char(string="New", compute="_compute_value_display")
    # Fields showing the origin record's value or the 'old' value if
    # the change is applied or canceled.
    origin_value_char = fields.Char(compute="_compute_origin_values", readonly=True)
    origin_value_date = fields.Date(compute="_compute_origin_values", readonly=True)
    origin_value_datetime = fields.Datetime(
        compute="_compute_origin_values", readonly=True
    )
    origin_value_float = fields.Float(compute="_compute_origin_values", readonly=True)
    # NOTE(review): monetary values are stored in plain Float columns, so
    # currency rounding/display is not handled here — confirm intended.
    origin_value_monetary = fields.Float(
        compute="_compute_origin_values", readonly=True
    )
    origin_value_integer = fields.Integer(
        compute="_compute_origin_values", readonly=True
    )
    origin_value_text = fields.Text(compute="_compute_origin_values", readonly=True)
    origin_value_boolean = fields.Boolean(
        compute="_compute_origin_values", readonly=True
    )
    origin_value_reference = fields.Reference(
        compute="_compute_origin_values", selection="_reference_models", readonly=True
    )
    # Fields storing the previous record's values (saved when the
    # changeset is applied)
    old_value_char = fields.Char(readonly=True)
    old_value_date = fields.Date(readonly=True)
    old_value_datetime = fields.Datetime(readonly=True)
    old_value_float = fields.Float(readonly=True)
    old_value_monetary = fields.Float(readonly=True)
    old_value_integer = fields.Integer(readonly=True)
    old_value_text = fields.Text(readonly=True)
    old_value_boolean = fields.Boolean(readonly=True)
    old_value_reference = fields.Reference(selection="_reference_models", readonly=True)
    # Fields storing the value applied on the record
    new_value_char = fields.Char(readonly=True)
    new_value_date = fields.Date(readonly=True)
    new_value_datetime = fields.Datetime(readonly=True)
    new_value_float = fields.Float(readonly=True)
    new_value_monetary = fields.Float(readonly=True)
    new_value_integer = fields.Integer(readonly=True)
    new_value_text = fields.Text(readonly=True)
    new_value_boolean = fields.Boolean(readonly=True)
    new_value_reference = fields.Reference(selection="_reference_models", readonly=True)
    state = fields.Selection(
        selection=[("draft", "Pending"), ("done", "Approved"), ("cancel", "Rejected")],
        required=True,
        default="draft",
        readonly=True,
    )
    record_id = fields.Reference(related="changeset_id.record_id")
    rule_id = fields.Many2one("changeset.field.rule", readonly=True)
    user_can_validate_changeset = fields.Boolean(
        compute="_compute_user_can_validate_changeset"
    )
    date = fields.Datetime(related="changeset_id.date")
    modified_by_id = fields.Many2one(related="changeset_id.modified_by_id")
    verified_on_date = fields.Datetime(string="Verified on", readonly=True)
    verified_by_id = fields.Many2one("res.users", readonly=True)
@api.model
def _reference_models(self):
models = self.env["ir.model"].search([])
return [(model.model, model.name) for model in models]
_suffix_to_types = {
"char": ("char", "selection"),
"date": ("date",),
"datetime": ("datetime",),
"float": ("float",),
"monetary": ("monetary",),
"integer": ("integer",),
"text": ("text", "html"),
"boolean": ("boolean",),
"reference": ("many2one",),
}
_type_to_suffix = {
ftype: suffix for suffix, ftypes in _suffix_to_types.items() for ftype in ftypes
}
_origin_value_fields = ["origin_value_%s" % suffix for suffix in _suffix_to_types]
_old_value_fields = ["old_value_%s" % suffix for suffix in _suffix_to_types]
_new_value_fields = ["new_value_%s" % suffix for suffix in _suffix_to_types]
_value_fields = _origin_value_fields + _old_value_fields + _new_value_fields
    @api.depends("changeset_id.res_id", "changeset_id.model")
    def _compute_origin_values(self):
        """Fill the origin_value_* column matching the tracked field's type.

        While the change is still pending, the origin value is read live
        from the target record; once the change has been applied or
        rejected, the snapshot saved in the old_value_* column is used so
        the history survives later edits of the record.
        """
        states = self.get_pending_changes_states()
        field_names = [
            field_name
            for field_name in self._fields.keys()
            if field_name.startswith("origin_value_")
            and field_name != "origin_value_display"
        ]
        for rec in self:
            # Only the column matching the field's type gets a value; every
            # other origin_value_* column is reset to False.
            field_name = rec.get_field_for_type(rec.field_id, "origin")
            for fname in field_names:
                if fname == field_name:
                    if rec.state in states:
                        value = rec.record_id[rec.field_id.name]
                    else:
                        old_field = rec.get_field_for_type(rec.field_id, "old")
                        value = rec[old_field]
                    setattr(rec, fname, value)
                else:
                    setattr(rec, fname, False)
    @api.depends(lambda self: self._value_fields)
    def _compute_value_display(self):
        """Compute human-readable text for the origin and new values."""
        for rec in self:
            for prefix in ("origin", "new"):
                value = getattr(rec, "get_%s_value" % prefix)()
                # many2one values come back as records: show the display name
                if rec.field_id.ttype == "many2one" and value:
                    value = value.display_name
                setattr(rec, "%s_value_display" % prefix, value)
@api.model
def get_field_for_type(self, field, prefix):
assert prefix in ("origin", "old", "new")
field_type = self._type_to_suffix.get(field.ttype)
if not field_type:
raise NotImplementedError("field type %s is not supported" % field_type)
return "{}_value_{}".format(prefix, field_type)
def get_origin_value(self):
self.ensure_one()
field_name = self.get_field_for_type(self.field_id, "origin")
return self[field_name]
def get_new_value(self):
self.ensure_one()
field_name = self.get_field_for_type(self.field_id, "new")
return self[field_name]
    def set_old_value(self):
        """Copy the value of the record to the 'old' field"""
        for change in self:
            # copy the existing record's value for the history, converted to
            # the changeset storage format (e.g. many2one -> "model,id")
            old_value_for_write = self._value_for_changeset(
                change.record_id, change.field_id.name
            )
            old_field_name = self.get_field_for_type(change.field_id, "old")
            change.write({old_field_name: old_value_for_write})
    def apply(self):
        """Apply the change on the changeset's record
        It is optimized thus that it makes only one write on the record
        per changeset if many changes are applied at once.
        """
        for change in self:
            if not change.user_can_validate_changeset:
                raise UserError(_("You don't have the rights to apply the changes."))
        changes_ok = self.browse()
        key = attrgetter("changeset_id")
        # group the changes by changeset (sorted first, as required by
        # itertools.groupby) so each record is written only once
        for changeset, changes in groupby(
            self.with_context(__no_changeset=disable_changeset).sorted(key=key), key=key
        ):
            values = {}
            for change in changes:
                # already approved/rejected changes are left untouched
                if change.state in ("cancel", "done"):
                    continue
                field = change.field_id
                new_value = change.get_new_value()
                value_for_write = change._convert_value_for_write(new_value)
                values[field.name] = value_for_write
                # snapshot the record's current value before overwriting it
                change.set_old_value()
                changes_ok |= change
            if not values:
                continue
            self._check_previous_changesets(changeset)
            # __no_changeset prevents this write from spawning a new changeset
            changeset.record_id.with_context(__no_changeset=disable_changeset).write(
                values
            )
        changes_ok._finalize_change_approval()
def _check_previous_changesets(self, changeset):
if self.env.context.get("require_previous_changesets_done"):
states = self.get_pending_changes_states()
previous_changesets = self.env["record.changeset"].search(
[
("date", "<", changeset.date),
("state", "in", states),
("model", "=", changeset.model),
("res_id", "=", changeset.res_id),
],
limit=1,
)
if previous_changesets:
raise UserError(
_(
"This change cannot be applied because a previous "
"changeset for the same record is pending.\n"
"Apply all the anterior changesets before applying "
"this one."
)
)
def cancel(self):
"""Reject the change"""
for change in self:
if not change.user_can_validate_changeset:
raise UserError(_("You don't have the rights to reject the changes."))
if any(change.state == "done" for change in self):
raise UserError(_("This change has already be applied."))
self.set_old_value()
self._finalize_change_rejection()
def _finalize_change_approval(self):
self.write(
{
"state": "done",
"verified_by_id": self.env.user.id,
"verified_on_date": fields.Datetime.now(),
}
)
def _finalize_change_rejection(self):
self.write(
{
"state": "cancel",
"verified_by_id": self.env.user.id,
"verified_on_date": fields.Datetime.now(),
}
)
@api.model
def _has_field_changed(self, record, field, value):
field_def = record._fields[field]
current_value = field_def.convert_to_write(record[field], record)
if not (current_value or value):
return False
return current_value != value
def _convert_value_for_write(self, value):
if not value:
return value
model = self.env[self.field_id.model_id.model]
model_field_def = model._fields[self.field_id.name]
return model_field_def.convert_to_write(value, self.record_id)
    @api.model
    def _value_for_changeset(self, record, field_name, value=_NO_VALUE):
        """Return a value from the record ready to write in a changeset field
        :param record: modified record
        :param field_name: name of the modified field
        :param value: if no value is given, it is read from the record
            (``_NO_VALUE`` is a module-level sentinel so that False/None can
            be passed as genuine values)
        """
        field_def = record._fields[field_name]
        if value is _NO_VALUE:
            # when the value is read from the record, we need to prepare
            # it for the write (e.g. extract .id from a many2one record)
            value = field_def.convert_to_write(record[field_name], record)
        if field_def.type == "many2one":
            # store as 'reference' ("model,id"), the changeset's generic
            # storage format for relational values
            comodel = field_def.comodel_name
            return "{},{}".format(comodel, value) if value else False
        else:
            return value
    @api.model
    def _prepare_changeset_change(self, record, rule, field_name, value, create=False):
        """Prepare data for a changeset change
        It returns a dict of the values to write on the changeset change
        and a boolean that indicates if the value should be popped out
        of the values to write on the model.
        :returns: dict of values, boolean
        """
        new_field_name = self.get_field_for_type(rule.field_id, "new")
        new_value = self._value_for_changeset(record, field_name, value=value)
        change = {
            new_field_name: new_value,
            "field_id": rule.field_id.id,
            "rule_id": rule.id,
        }
        # the rule's action decides the change's lifecycle:
        #   auto     -> applied immediately (state done, value kept in vals)
        #   validate -> needs manual approval (value removed from vals)
        #   never    -> recorded but never applied (value removed from vals)
        if rule.action == "auto":
            change["state"] = "done"
            pop_value = False
        elif rule.action == "validate":
            change["state"] = "draft"
            pop_value = True  # change to apply manually
        elif rule.action == "never":
            change["state"] = "cancel"
            pop_value = True  # change never applied
        if create or change["state"] in ("cancel", "done"):
            # Normally the 'old' value is set when we use the 'apply'
            # button, but since we short circuit the 'apply', we
            # directly set the 'old' value here
            old_field_name = self.get_field_for_type(rule.field_id, "old")
            # get values ready to write as expected by the changeset
            # (for instance, a many2one is written in a reference
            # field); on creation there is no previous value, hence False
            origin_value = self._value_for_changeset(
                record, field_name, value=False if create else _NO_VALUE
            )
            change[old_field_name] = origin_value
        return change, pop_value
@api.model
def get_fields_changeset_changes(self, model, res_id):
fields = [
"new_value_display",
"origin_value_display",
"field_name",
"user_can_validate_changeset",
]
states = self.get_pending_changes_states()
domain = [
("changeset_id.model", "=", model),
("changeset_id.res_id", "=", res_id),
("state", "in", states),
]
return self.search_read(domain, fields)
    def _compute_user_can_validate_changeset(self):
        """A user may validate a pending change when they are superuser, a
        member of the rule's validator groups, or in the generic changeset
        user group; the rule can additionally forbid validating one's own
        modifications."""
        is_superuser = self.env.is_superuser()
        has_group = self.user_has_groups("base_changeset.group_changeset_user")
        user_groups = self.env.user.groups_id
        for rec in self:
            can_validate = rec._is_change_pending() and (
                is_superuser
                or rec.rule_id.validator_group_ids & user_groups
                or has_group
            )
            if rec.rule_id.prevent_self_validation:
                # the author of the change may not approve it themselves
                can_validate = can_validate and rec.modified_by_id != self.env.user
            rec.user_can_validate_changeset = can_validate
@api.model
def get_pending_changes_states(self):
return ["draft"]
def _is_change_pending(self):
self.ensure_one()
skip_status_check = self.env.context.get("skip_pending_status_check")
return skip_status_check or self.state in self.get_pending_changes_states()
| 39.346062
| 16,486
|
6,982
|
py
|
PYTHON
|
15.0
|
# Copyright 2020 Onestein (<https://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from lxml import etree
from odoo import _, api, fields, models
from odoo.tools import config, ormcache
# put this object into context key '__no_changeset' to disable changeset
# functionality; a unique sentinel object (rather than True) is used so the
# flag cannot be set accidentally from e.g. an XML/RPC call
disable_changeset = object()
class Base(models.AbstractModel):
    """Extend every model with changeset tracking.

    Writes on models that have at least one ``changeset.field.rule`` are
    intercepted and turned into ``record.changeset`` /
    ``record.changeset.change`` entries; a stat button showing the pending
    changes is injected in form views.
    """

    _inherit = "base"

    # changesets/changes of this record (computed on the fly, not stored)
    changeset_ids = fields.One2many(
        comodel_name="record.changeset",
        compute="_compute_changeset_ids",
        string="Changesets",
    )
    changeset_change_ids = fields.One2many(
        comodel_name="record.changeset.change",
        compute="_compute_changeset_ids",
        string="Changeset Changes",
    )
    count_pending_changesets = fields.Integer(
        compute="_compute_count_pending_changesets"
    )
    count_pending_changeset_changes = fields.Integer(
        compute="_compute_count_pending_changesets"
    )
    user_can_see_changeset = fields.Boolean(compute="_compute_user_can_see_changeset")

    def _compute_changeset_ids(self):
        """Collect the changesets targeting each record via (model, res_id)."""
        model_name = self._name
        for record in self:
            changesets = self.env["record.changeset"].search(
                [("model", "=", model_name), ("res_id", "=", record.id)]
            )
            record.changeset_ids = changesets
            record.changeset_change_ids = changesets.mapped("change_ids")

    def _compute_count_pending_changesets(self):
        """Count draft changesets and their pending changes for the badge."""
        model_name = self._name
        if model_name in self.models_to_track_changeset():
            for rec in self:
                changesets = rec.changeset_ids.filtered(
                    lambda rev: rev.state == "draft"
                    and rev.res_id == rec.id
                    and rev.model == model_name
                )
                changes = changesets.mapped("change_ids")
                changes = changes.filtered(
                    lambda c: c.state in c.get_pending_changes_states()
                )
                rec.count_pending_changesets = len(changesets)
                rec.count_pending_changeset_changes = len(changes)
        else:
            # untracked models always show zero
            for rec in self:
                rec.count_pending_changesets = 0.0
                rec.count_pending_changeset_changes = 0.0

    @api.model
    @ormcache(skiparg=1)
    def models_to_track_changeset(self):
        """Models to be tracked for changes
        :args:
        :returns: list of models
        """
        # cached across calls (ormcache); invalidated with the registry cache
        models = self.env["changeset.field.rule"].search([]).mapped("model_id.model")
        if config["test_enable"] and self.env.context.get("test_record_changeset"):
            if "res.partner" not in models:
                models += ["res.partner"]  # Used in tests
        return models

    @api.model_create_multi
    def create(self, vals_list):
        """Create records, then re-apply through changeset rules.

        add_changeset() may rewrite some values (e.g. rules with action
        'validate'/'never'); differing values are written back with the
        changeset machinery disabled to avoid recursion.
        """
        result = super().create(vals_list)
        if self._changeset_disabled():
            return result
        for this, vals in zip(result, vals_list):
            local_vals = self.env["record.changeset"].add_changeset(
                this, vals, create=True
            )
            # only write back the values that the rules actually altered
            local_vals = {
                key: value for key, value in local_vals.items() if vals[key] != value
            }
            if local_vals:
                this.with_context(
                    __no_changeset=disable_changeset,
                    tracking_disable=True,
                ).write(local_vals)
        return result

    def write(self, values):
        """Route writes through the changeset rules (one record at a time,
        since each record may end up with different allowed values)."""
        if self._changeset_disabled():
            return super().write(values)
        for record in self:
            local_values = self.env["record.changeset"].add_changeset(record, values)
            super(Base, record).write(local_values)
        return True

    def _changeset_disabled(self):
        """Return True when changeset interception must be skipped."""
        if self.env.context.get("__no_changeset") == disable_changeset:
            return True
        # To avoid conflicts with tests of other modules
        if config["test_enable"] and not self.env.context.get("test_record_changeset"):
            return True
        if self._name not in self.models_to_track_changeset():
            return True
        return False

    def action_record_changeset_change_view(self):
        """Open the list of changeset changes for this record (stat button)."""
        self.ensure_one()
        res = {
            "type": "ir.actions.act_window",
            "res_model": "record.changeset.change",
            "view_mode": "tree",
            "views": [
                [
                    self.env.ref("base_changeset.view_record_changeset_change_tree").id,
                    "list",
                ]
            ],
            "context": self.env.context,
            "name": _("Record Changes"),
            "search_view_id": [
                self.env.ref("base_changeset.view_record_changeset_change_search").id,
                "search",
            ],
        }
        record_id = self.env.context.get("search_default_record_id")
        if record_id:
            res.update(
                {
                    "domain": [
                        ("model", "=", self._name),
                        ("changeset_id.res_id", "=", record_id),
                    ]
                }
            )
        return res

    @api.model
    def _fields_view_get(
        self, view_id=None, view_type="form", toolbar=False, submenu=False
    ):
        """Inject a 'Changes' stat button in the form view's button box for
        tracked models, when the user is allowed to see changesets."""
        res = super()._fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu
        )
        to_track_changeset = self._name in self.models_to_track_changeset()
        # NOTE(review): user_can_see_changeset is only checked when exactly
        # one record is in self — confirm multi-record form loads are fine
        can_see = len(self) == 1 and self.user_can_see_changeset
        button_label = _("Changes")
        if to_track_changeset and can_see and view_type == "form":
            doc = etree.XML(res["arch"])
            for node in doc.xpath("//div[@name='button_box']"):
                xml_field = etree.Element(
                    "field",
                    {
                        "name": "count_pending_changeset_changes",
                        "string": button_label,
                        "widget": "statinfo",
                    },
                )
                xml_button = etree.Element(
                    "button",
                    {
                        "type": "object",
                        "name": "action_record_changeset_change_view",
                        "icon": "fa-code-fork",
                        "context": "{'search_default_draft': 1, "
                        "'search_default_record_id': active_id}",
                    },
                )
                xml_button.insert(0, xml_field)
                node.insert(0, xml_button)
            res["arch"] = etree.tostring(doc, encoding="unicode")
        return res

    def _compute_user_can_see_changeset(self):
        """Changesets are visible to superusers and changeset-group members."""
        is_superuser = self.env.is_superuser()
        has_changeset_group = self.user_has_groups(
            "base_changeset.group_changeset_user"
        )
        for rec in self:
            rec.user_can_see_changeset = is_superuser or has_changeset_group
| 37.138298
| 6,982
|
1,500
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 Eficent Business and IT Consulting Services S.L.
# Copyright 2016 Serpent Consulting Services Pvt. Ltd.
# Copyright 2017 LasLabs Inc.
# Copyright 2021 Tecnativa - Jairo Llopis
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from functools import wraps
from odoo.osv import expression
def patch_leaf_trgm(original):
    """Decorator for odoo's expression.__leaf_to_sql that adds support for
    the PostgreSQL trigram similarity operator ``%``.

    Non-``%`` leaves are delegated to the original implementation (with the
    ``%`` inside translation "inselect" sub-queries escaped to ``%%`` so
    psycopg2 does not treat it as a placeholder).
    """

    @wraps(original)
    def _wrapper(self, leaf, model, alias):
        left, operator, right = leaf
        # No overload for other operators...
        if operator != "%":
            # Except translation "inselect" queries
            if operator == "inselect":
                right = (right[0].replace(" % ", " %% "), right[1])
                leaf = (left, operator, right)
            return original(self, leaf, model, alias)
        # The field must exist
        if left not in model._fields:
            raise ValueError(
                "Invalid field {!r} in domain term {!r}".format(left, leaf)
            )
        # Generate correct WHERE clause part; '%%' is the escaped trigram
        # operator, the column_format placeholder receives the right operand
        query = '("{}"."{}" %% {})'.format(
            alias,
            left,
            model._fields[left].column_format,
        )
        params = [right]
        return query, params

    return _wrapper
def post_load():
    """Patch expression generators to enable the fuzzy search operator."""
    # register '%' as a valid domain operator, then wrap the SQL leaf
    # builder so it knows how to render it (module post_load hook, runs
    # once at server start when the module is installed)
    expression.TERM_OPERATORS += ("%",)
    expression.expression._expression__leaf_to_sql = patch_leaf_trgm(
        expression.expression._expression__leaf_to_sql
    )
| 33.333333
| 1,500
|
739
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 ForgeFlow S.L.
# Copyright 2016 Serpent Consulting Services Pvt. Ltd.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    "name": "Fuzzy Search",
    "summary": "Fuzzy search with the PostgreSQL trigram extension",
    "category": "Uncategorized",
    "version": "15.0.1.0.0",
    "website": "https://github.com/OCA/server-tools",
    "author": "bloopark systems GmbH & Co. KG, "
    "ForgeFlow, "
    "Serpent CS, "
    "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "depends": ["base"],
    "data": ["views/trgm_index.xml", "security/ir.model.access.csv"],
    "demo": ["demo/res_partner_demo.xml", "demo/TrgmIndex_demo.xml"],
    "installable": True,
    # server-start hook: patches odoo's SQL expression builder so the '%'
    # trigram operator is usable in search domains
    "post_load": "post_load",
}
| 36.95
| 739
|
3,989
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 ForgeFlow S.L.
# Copyright 2016 Serpent Consulting Services Pvt. Ltd.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.osv import expression
from odoo.tests.common import TransactionCase
class QueryGenerationCase(TransactionCase):
    """Tests for the SQL generated by the '%' fuzzy-search operator."""

    def setUp(self):
        super(QueryGenerationCase, self).setUp()
        self.ResPartner = self.env["res.partner"]
        self.TrgmIndex = self.env["trgm.index"]
        self.ResPartnerCategory = self.env["res.partner.category"]

    def test_fuzzy_where_generation(self):
        """Check the generation of the where clause."""
        # the added fuzzy search operator should be available in the allowed
        # operators
        self.assertIn("%", expression.TERM_OPERATORS)
        # create new query with fuzzy search operator
        query = self.ResPartner._where_calc([("name", "%", "test")], active_test=False)
        from_clause, where_clause, where_clause_params = query.get_sql()
        # the % parameter has to be escaped (%%) for the string replation
        self.assertEqual(where_clause, """("res_partner"."name" %% %s)""")
        # test the right sql query statement creation
        # now there should be only one '%'
        complete_where = self.env.cr.mogrify(
            "SELECT FROM {} WHERE {}".format(from_clause, where_clause),
            where_clause_params,
        )
        self.assertEqual(
            complete_where,
            b'SELECT FROM "res_partner" WHERE ' b'("res_partner"."name" % \'test\')',
        )

    def test_fuzzy_where_generation_translatable(self):
        """Check the generation of the where clause for translatable fields."""
        # create new query with fuzzy search operator; translatable fields
        # go through a COALESCE over ir_translation
        query = self.ResPartnerCategory.with_context(lang="de_DE")._where_calc(
            [("name", "%", "Goschaeftlic")], active_test=False
        )
        from_clause, where_clause, where_clause_params = query.get_sql()
        # the % parameter has to be escaped (%%) for the string replation
        self.assertIn(
            """COALESCE("res_partner_category__name"."value", "res_partner_category"."name") %% %s""",  # noqa
            where_clause,
        )
        complete_where = self.env.cr.mogrify(
            "SELECT FROM {} WHERE {}".format(from_clause, where_clause),
            where_clause_params,
        )
        self.assertIn(
            b"""SELECT FROM "res_partner_category" LEFT JOIN "ir_translation" AS "res_partner_category__name" ON ("res_partner_category"."id" = "res_partner_category__name"."res_id" AND "res_partner_category__name"."type" = \'model\' AND "res_partner_category__name"."name" = \'res.partner.category,name\' AND "res_partner_category__name"."lang" = \'de_DE\' AND "res_partner_category__name"."value" != \'\') WHERE COALESCE("res_partner_category__name"."value", "res_partner_category"."name") % \'Goschaeftlic\'""",  # noqa
            complete_where,
        )

    def test_fuzzy_search(self):
        """Test the fuzzy search itself."""
        # skip silently when the pg_trgm extension is not available
        if self.TrgmIndex._trgm_extension_exists() != "installed":
            return
        if not self.TrgmIndex.index_exists("res.partner", "name"):
            field_partner_name = self.env.ref("base.field_res_partner__name")
            self.TrgmIndex.create(
                {"field_id": field_partner_name.id, "index_type": "gin"}
            )
        partner1 = self.ResPartner.create({"name": "John Smith"})
        partner2 = self.ResPartner.create({"name": "John Smizz"})
        partner3 = self.ResPartner.create({"name": "Linus Torvalds"})
        # similar spellings match, unrelated names do not
        res = self.ResPartner.search([("name", "%", "Jon Smith")])
        self.assertIn(partner1.id, res.ids)
        self.assertIn(partner2.id, res.ids)
        self.assertNotIn(partner3.id, res.ids)
        # trigram similarity is word-order insensitive
        res = self.ResPartner.search([("name", "%", "Smith John")])
        self.assertIn(partner1.id, res.ids)
        self.assertIn(partner2.id, res.ids)
        self.assertNotIn(partner3.id, res.ids)
| 46.383721
| 3,989
|
5,669
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 ForgeFlow S.L.
# Copyright 2016 Serpent Consulting Services Pvt. Ltd.
# Copyright 2017 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from psycopg2.extensions import AsIs
from odoo import _, api, exceptions, fields, models
_logger = logging.getLogger(__name__)
class TrgmIndex(models.Model):
    """Model for Trigram Index.

    Each record describes one PostgreSQL pg_trgm index on a char/text
    column; the index is created in the database when the record is
    created and dropped when it is deleted.
    """

    _name = "trgm.index"
    _rec_name = "field_id"
    _description = "Trigram Index"

    field_id = fields.Many2one(
        comodel_name="ir.model.fields",
        string="Field",
        ondelete="set default",
        required=True,
        help='You can either select a field of type "text" or "char".',
    )
    index_name = fields.Char(
        readonly=True,
        help="The index name is automatically generated like "
        "fieldname_indextype_idx. If the index already exists and the "
        "index is located in the same table then this index is reused. "
        "If the index is located in another table then a number is added "
        "at the end of the index name.",
    )
    index_type = fields.Selection(
        selection=[("gin", "GIN"), ("gist", "GiST")],
        default="gin",
        required=True,
        ondelete={"gin": "set default", "gist": "set default"},
        help="Cite from PostgreSQL documentation: GIN indexes are "
        "the preferred text search index type."
        "See: https://www.postgresql.org/docs/current/textsearch-indexes.html",
    )

    def _trgm_extension_exists(self):
        """Return 'missing', 'uninstalled' or 'installed' for pg_trgm."""
        self.env.cr.execute(
            """
            SELECT name, installed_version
            FROM pg_available_extensions
            WHERE name = 'pg_trgm'
            LIMIT 1;
        """
        )
        extension = self.env.cr.fetchone()
        if extension is None:
            return "missing"
        if extension[1] is None:
            return "uninstalled"
        return "installed"

    def _is_postgres_superuser(self):
        """Return True when the current DB connection has superuser rights."""
        self.env.cr.execute("SHOW is_superuser;")
        superuser = self.env.cr.fetchone()
        # Fix: dropped the redundant trailing "or False" — the boolean
        # expression already evaluates to True/False.
        return superuser is not None and superuser[0] == "on"

    def _install_trgm_extension(self):
        """Try to make pg_trgm usable; return True when it is available."""
        extension = self._trgm_extension_exists()
        if extension == "missing":
            _logger.warning(
                "To use pg_trgm you have to install the postgres-contrib module."
            )
        elif extension == "uninstalled":
            # creating an extension requires superuser rights
            if self._is_postgres_superuser():
                self.env.cr.execute("CREATE EXTENSION IF NOT EXISTS pg_trgm;")
                return True
            else:
                _logger.warning(
                    "To use pg_trgm you have to create the "
                    "extension pg_trgm in your database or you "
                    "have to be the superuser."
                )
        else:
            return True
        return False

    def _auto_init(self):
        """Attempt the extension install at module init time."""
        res = super()._auto_init()
        if self._install_trgm_extension():
            _logger.info(
                "The pg_trgm is loaded in the database and the "
                "fuzzy search can be used."
            )
        return res

    def get_not_used_index(self, index_name, table_name, inc=1):
        """Return (exists_on_table, free_index_name).

        Recursively appends a numeric suffix until the name is either free
        or already used on the target table (in which case it is reused).
        """
        if inc > 1:
            new_index_name = index_name + str(inc)
        else:
            new_index_name = index_name
        self.env.cr.execute(
            """
            SELECT tablename, indexname
            FROM pg_indexes
            WHERE indexname = %(index)s;
        """,
            {"index": new_index_name},
        )
        indexes = self.env.cr.fetchone()
        if indexes is not None and indexes[0] == table_name:
            return True, index_name
        elif indexes is not None:
            return self.get_not_used_index(index_name, table_name, inc + 1)
        return False, new_index_name

    def create_index(self):
        """Create the trigram index in the database; return its name."""
        self.ensure_one()
        if not self._install_trgm_extension():
            raise exceptions.UserError(
                _("The pg_trgm extension does not exists or cannot be installed.")
            )
        table_name = self.env[self.field_id.model_id.model]._table
        column_name = self.field_id.name
        index_type = self.index_type
        index_name = "{}_{}_idx".format(column_name, index_type)
        index_exists, index_name = self.get_not_used_index(index_name, table_name)
        if not index_exists:
            # NOTE(security): AsIs skips psycopg2 escaping; the identifiers
            # come from ORM metadata (table/column names), not user free
            # text, but must never be fed from untrusted input.
            self.env.cr.execute(
                """
                CREATE INDEX %(index)s
                ON %(table)s
                USING %(indextype)s (%(column)s %(indextype)s_trgm_ops);
            """,
                {
                    "table": AsIs(table_name),
                    "index": AsIs(index_name),
                    "column": AsIs(column_name),
                    "indextype": AsIs(index_type),
                },
            )
        return index_name

    @api.model
    def index_exists(self, model_name, field_name):
        """Return True if a trgm.index record exists for model/field."""
        field = self.env["ir.model.fields"].search(
            [("model", "=", model_name), ("name", "=", field_name)], limit=1
        )
        if not field:
            return False
        trgm_index = self.search([("field_id", "=", field.id)], limit=1)
        return bool(trgm_index)

    @api.model
    def create(self, vals):
        """Create the record, then create the DB index and store its name."""
        rec = super().create(vals)
        rec.index_name = rec.create_index()
        return rec

    def unlink(self):
        """Drop the database index before deleting the record."""
        for rec in self:
            self.env.cr.execute(
                """
                DROP INDEX IF EXISTS %(index)s;
            """,
                {"index": AsIs(rec.index_name)},
            )
        return super().unlink()
| 31.494444
| 5,669
|
1,529
|
py
|
PYTHON
|
15.0
|
# Copyright 2016 ForgeFlow S.L.
# Copyright 2016 Serpent Consulting Services Pvt. Ltd.
# Copyright 2017 LasLabs Inc.
# Copyright 2020 NextERP SRL.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.osv import query
# Keep a handle on odoo's Query class so we can monkey-patch it below
# (name kept as-is — "Oridinal" is a historical typo in this module).
Oridinal_Query_obj = query.Query
def percent_search_fuzzy(self, where_claus):
    """Escape bare trigram ``%`` operators in the WHERE fragments.

    psycopg2 treats ``%`` as the start of a placeholder, so a fuzzy-search
    clause like ``name % 'foo'`` must be rewritten to ``name %% 'foo'``
    before string interpolation.

    :param where_claus: iterable of WHERE clause fragments
    :return: tuple of fragments (fix: the previous version returned the
        untouched input *list* when no operator was found, giving an
        inconsistent return type)
    """
    if " % " in " ".join(where_claus):
        return tuple(x.replace(" % ", " %% ") for x in where_claus)
    return tuple(where_claus)
# Attach the helper as a method on Query so instances can call it.
Oridinal_Query_obj.percent_search_fuzzy = percent_search_fuzzy
# @property
# NOTE(review): where_clause below is installed as a plain method, not a
# property (the decorator above was left commented out) — confirm callers
# invoke it as Query.where_clause().
def where_clause_new(self):
    """Replacement for ``Query.where_clause``: the WHERE fragments with
    fuzzy-search ``%`` operators escaped, as a tuple.

    Fix: the previous version read ``self._where_clausess`` (typo) — odoo's
    Query stores the fragments in ``_where_clauses``, so calling it raised
    AttributeError.
    """
    return tuple(self.percent_search_fuzzy(self._where_clauses))
# Install the escaped accessor in place of Query.where_clause.
Oridinal_Query_obj.where_clause = where_clause_new
def get_sql_new(self):
    """Returns (query_from, query_where, query_params)."""
    # Re-implementation of odoo's Query.get_sql() that escapes the fuzzy
    # '%' trigram operator in the WHERE fragments before joining them.
    tables = [query._from_table(table, alias) for alias, table in self._tables.items()]
    joins = []
    params = []
    for alias, (kind, table, condition, condition_params) in self._joins.items():
        joins.append(f"{kind} {query._from_table(table, alias)} ON ({condition})")
        params.extend(condition_params)
    from_clause = " ".join([", ".join(tables)] + joins)
    ok_where = self.percent_search_fuzzy(self._where_clauses)
    where_clause = " AND ".join(ok_where)
    # original where_clause = " AND ".join(self._where_clauses)
    return from_clause, where_clause, params + self._where_params


# Replace Query.get_sql with the escaping variant.
Oridinal_Query_obj.get_sql = get_sql_new
| 31.204082
| 1,529
|
1,014
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
{
    "name": "Excel Import/Export/Report",
    "summary": "Base module for developing Excel import/export/report",
    "version": "15.0.1.0.0",
    "author": "Ecosoft,Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "website": "https://github.com/OCA/server-tools",
    "category": "Tools",
    "depends": ["mail"],
    # openpyxl does the actual xlsx reading/writing
    "external_dependencies": {"python": ["openpyxl"]},
    "data": [
        "security/ir.model.access.csv",
        "wizard/export_xlsx_wizard.xml",
        "wizard/import_xlsx_wizard.xml",
        "wizard/report_xlsx_wizard.xml",
        "views/xlsx_template_view.xml",
        "views/xlsx_report.xml",
    ],
    "installable": True,
    "development_status": "Beta",
    "maintainers": ["kittiu"],
    "assets": {
        # patches the web client's report action handler for excel reports
        "web.assets_backend": [
            "/excel_import_export/static/src/js/report/action_manager_report.esm.js"
        ]
    },
}
| 33.8
| 1,014
|
711
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import ast
from odoo import fields, models
class ReportXLSXWizard(models.TransientModel):
    _name = "report.xlsx.wizard"
    _description = "Generic Report Wizard, used with template reporting option"

    res_model = fields.Char()
    # domain string, evaluated with safe_domain() before use
    domain = fields.Char(string="Search Criterias")

    def action_report(self):
        """Re-dispatch to the report action passed in the context."""
        action_id = self._context.get("report_action_id")
        action = self.env["ir.actions.report"].browse(action_id)
        res = action.read()[0]
        return res

    def safe_domain(self, str_domain):
        """Parse the user-provided domain string without eval()
        (ast.literal_eval only accepts Python literals)."""
        return ast.literal_eval(str_domain or "[]")
| 32.318182
| 711
|
6,155
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import _, api, fields, models
from odoo.exceptions import RedirectWarning, ValidationError
class ImportXLSXWizard(models.TransientModel):
    """This wizard is used with the template (xlsx.template) to import
    xlsx template back to active record"""

    _name = "import.xlsx.wizard"
    _description = "Wizard for importing excel"

    import_file = fields.Binary(string="Import File (*.xlsx)")
    template_id = fields.Many2one(
        "xlsx.template",
        string="Template",
        required=True,
        ondelete="cascade",
        # templates offered are restricted by the caller via context
        domain=lambda self: self._context.get("template_domain", []),
    )
    # target record (set from the active_model/active_id context)
    res_id = fields.Integer(string="Resource ID", readonly=True)
    res_model = fields.Char(string="Resource Model", readonly=True, size=500)
    datas = fields.Binary(string="Sample", related="template_id.datas", readonly=True)
    fname = fields.Char(
        string="Template Name", related="template_id.fname", readonly=True
    )
    attachment_ids = fields.Many2many(
        "ir.attachment",
        string="Import File(s) (*.xlsx)",
        required=True,
        help="You can select multiple files to import.",
    )
    state = fields.Selection(
        [("choose", "Choose"), ("get", "Get")],
        default="choose",
        help="* Choose: wizard show in user selection mode"
        "\n* Get: wizard show results from user action",
    )

    @api.model
    def view_init(self, fields_list):
        """This template only works on some context of active record"""
        res = super(ImportXLSXWizard, self).view_init(fields_list)
        res_model = self._context.get("active_model", False)
        res_id = self._context.get("active_id", False)
        if not res_model or not res_id:
            return res
        record = self.env[res_model].browse(res_id)
        messages = []
        valid = True
        # For all import, only allow import in draft state (for documents)
        import_states = self._context.get("template_import_states", [])
        if import_states:  # states specified in context, test this
            if "state" in record and record["state"] not in import_states:
                messages.append(_("Document must be in %s states") % import_states)
                valid = False
        else:  # no specific state specified, test with draft
            if "state" in record and "draft" not in record["state"]:  # not in
                messages.append(_("Document must be in draft state"))
                valid = False
        # Context testing: every key/value in template_context must match the
        # record (many2one fields are compared by id)
        if self._context.get("template_context", False):
            template_context = self._context["template_context"]
            for key, value in template_context.items():
                if (
                    key not in record
                    or (
                        record._fields[key].type == "many2one"
                        and record[key].id
                        or record[key]
                    )
                    != value
                ):
                    valid = False
                    messages.append(
                        _(
                            "This import action is not usable "
                            "in this document context"
                        )
                    )
                    break
        if not valid:
            raise ValidationError("\n".join(messages))
        return res

    @api.model
    def default_get(self, fields):
        """Resolve the target record and template from the context; make
        sure every candidate template actually carries a file."""
        res_model = self._context.get("active_model", False)
        res_id = self._context.get("active_id", False)
        template_domain = self._context.get("template_domain", [])
        templates = self.env["xlsx.template"].search(template_domain)
        if not templates:
            raise ValidationError(_("No template found"))
        defaults = super(ImportXLSXWizard, self).default_get(fields)
        for template in templates:
            if not template.datas:
                # point the user to the template configuration screen
                act = self.env.ref("excel_import_export.action_xlsx_template")
                raise RedirectWarning(
                    _(
                        'File "%(fname)s" not found in template, %(name)s.',
                        fname=template.fname,
                        name=template.name,
                    ),
                    act.id,
                    _("Set Templates"),
                )
        # preselect the template only when it is unambiguous
        defaults["template_id"] = len(templates) == 1 and template.id or False
        defaults["res_id"] = res_id
        defaults["res_model"] = res_model
        return defaults

    def get_import_sample(self):
        """Reopen this wizard so the user can download the sample file."""
        self.ensure_one()
        return {
            "name": _("Import Excel"),
            "type": "ir.actions.act_window",
            "res_model": "import.xlsx.wizard",
            "view_mode": "form",
            "res_id": self.id,
            "views": [(False, "form")],
            "target": "new",
            "context": self._context.copy(),
        }

    def action_import(self):
        """Run the import from either the single uploaded file or the
        selected attachments, then redirect or show the result screen."""
        self.ensure_one()
        Import = self.env["xlsx.import"]
        res_ids = []
        if self.import_file:
            # single-file mode: import into the wizard's target record
            record = Import.import_xlsx(
                self.import_file, self.template_id, self.res_model, self.res_id
            )
            res_ids = [record.id]
        elif self.attachment_ids:
            # multi-file mode: each attachment creates its own record
            for attach in self.attachment_ids:
                record = Import.import_xlsx(attach.datas, self.template_id)
                res_ids.append(record.id)
        else:
            raise ValidationError(_("Please select Excel file to import"))
        # If redirect_action is specified, do redirection
        if self.template_id.redirect_action:
            vals = self.template_id.redirect_action.read()[0]
            vals["domain"] = [("id", "in", res_ids)]
            return vals
        self.write({"state": "get"})
        return {
            "type": "ir.actions.act_window",
            "res_model": self._name,
            "view_mode": "form",
            "res_id": self.id,
            "views": [(False, "form")],
            "target": "new",
        }
| 39.709677
| 6,155
|
2,638
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools.safe_eval import safe_eval
class ExportXLSXWizard(models.TransientModel):
    """This wizard is used with the template (xlsx.template) to export
    xlsx template filled with data form the active record"""

    _name = "export.xlsx.wizard"
    _description = "Wizard for exporting excel"

    # resulting file (filled in by action_export)
    name = fields.Char(string="File Name", readonly=True, size=500)
    data = fields.Binary(string="File", readonly=True)
    template_id = fields.Many2one(
        "xlsx.template",
        string="Template",
        required=True,
        ondelete="cascade",
        domain=lambda self: self._context.get("template_domain", []),
    )
    # ids of the exported records, stored as a comma-separated string
    res_ids = fields.Char(string="Resource IDs", readonly=True, required=True)
    res_model = fields.Char(
        string="Resource Model", readonly=True, required=True, size=500
    )
    state = fields.Selection(
        [("choose", "Choose"), ("get", "Get")],
        default="choose",
        help="* Choose: wizard show in user selection mode"
        "\n* Get: wizard show results from user action",
    )

    @api.model
    def default_get(self, fields):
        """Resolve the exported records and template from the context."""
        res_model = self._context.get("active_model", False)
        res_ids = self._context.get("active_ids", False)
        template_domain = self._context.get("template_domain", [])
        templates = self.env["xlsx.template"].search(template_domain)
        if not templates:
            raise ValidationError(_("No template found"))
        defaults = super(ExportXLSXWizard, self).default_get(fields)
        for template in templates:
            if not template.datas:
                raise ValidationError(_("No file in %s") % (template.name,))
        # preselect the template only when it is unambiguous
        defaults["template_id"] = len(templates) == 1 and templates.id or False
        defaults["res_ids"] = ",".join([str(x) for x in res_ids])
        defaults["res_model"] = res_model
        return defaults

    def action_export(self):
        """Generate the xlsx file and reopen the wizard in 'get' state so
        the user can download it."""
        self.ensure_one()
        Export = self.env["xlsx.export"]
        out_file, out_name = Export.export_xlsx(
            # res_ids was stored as text; safe_eval turns it back into ids
            self.template_id, self.res_model, safe_eval(self.res_ids)
        )
        self.write({"state": "get", "data": out_file, "name": out_name})
        return {
            "type": "ir.actions.act_window",
            "res_model": "export.xlsx.wizard",
            "view_mode": "form",
            "res_id": self.id,
            "views": [(False, "form")],
            "target": "new",
        }
| 39.373134
| 2,638
|
2,110
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class XLSXReport(models.AbstractModel):
    """Common abstract wizard for xlsx reporting; concrete report wizards
    inherit this to get template selection and rendering for free."""
    _name = "xlsx.report"
    _description = "Excel Report AbstractModel"
    # Output file name and binary content, filled by report_xlsx()
    name = fields.Char(string="File Name", readonly=True, size=500)
    data = fields.Binary(string="File", readonly=True)
    template_id = fields.Many2one(
        "xlsx.template",
        string="Template",
        required=True,
        ondelete="cascade",
        domain=lambda self: self._context.get("template_domain", []),
    )
    choose_template = fields.Boolean(string="Allow Choose Template", default=False)
    state = fields.Selection(
        [("choose", "Choose"), ("get", "Get")],
        default="choose",
        help="* Choose: wizard show in user selection mode"
        "\n* Get: wizard show results from user action",
    )
    @api.model
    def default_get(self, fields):
        """Preselect the template when exactly one matches template_domain.

        :raises ValidationError: no matching template, or a matching
            template has no file content
        """
        template_domain = self._context.get("template_domain", [])
        templates = self.env["xlsx.template"].search(template_domain)
        if not templates:
            raise ValidationError(_("No template found"))
        defaults = super(XLSXReport, self).default_get(fields)
        for template in templates:
            if not template.datas:
                raise ValidationError(_("No file in %s") % (template.name,))
        defaults["template_id"] = len(templates) == 1 and templates.id or False
        return defaults
    def report_xlsx(self):
        """Render this wizard record through its template, then re-open
        the wizard showing the produced file."""
        self.ensure_one()
        Export = self.env["xlsx.export"]
        out_file, out_name = Export.export_xlsx(self.template_id, self._name, self.id)
        self.write({"state": "get", "data": out_file, "name": out_name})
        return {
            "type": "ir.actions.act_window",
            "res_model": self._name,
            "view_mode": "form",
            "res_id": self.id,
            "views": [(False, "form")],
            "target": "new",
        }
| 37.678571
| 2,110
|
1,517
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class ReportAction(models.Model):
    """Register 'excel' as an additional report type, rendered through
    the xlsx.template matching the report's name and model."""
    _inherit = "ir.actions.report"
    report_type = fields.Selection(
        selection_add=[("excel", "Excel")], ondelete={"excel": "cascade"}
    )
    @api.model
    def _render_excel(self, docids, data):
        """Render one record with the unique matching xlsx.template.

        :raises UserError: more than one id requested, or the template
            matching (fname, res_model) is missing or not unique
        """
        if len(docids) != 1:
            raise UserError(_("Only one id is allowed for excel_import_export"))
        xlsx_template = self.env["xlsx.template"].search(
            [("fname", "=", self.report_name), ("res_model", "=", self.model)]
        )
        if not xlsx_template or len(xlsx_template) != 1:
            raise UserError(
                _("Template %(report_name)s on model %(model)s is not unique!")
                % {"report_name": self.report_name, "model": self.model}
            )
        Export = self.env["xlsx.export"]
        return Export.export_xlsx(xlsx_template, self.model, docids[0])
    @api.model
    def _get_report_from_name(self, report_name):
        """Extend name lookup so 'excel' type reports are also resolved."""
        res = super(ReportAction, self)._get_report_from_name(report_name)
        if res:
            return res
        report_obj = self.env["ir.actions.report"]
        qwebtypes = ["excel"]
        conditions = [
            ("report_type", "in", qwebtypes),
            ("report_name", "=", report_name),
        ]
        return report_obj.search(conditions, limit=1)
| 37
| 1,517
|
12,675
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import base64
import uuid
from ast import literal_eval
from datetime import date, datetime as dt
from io import BytesIO
import xlrd
import xlwt
from odoo import _, api, models
from odoo.exceptions import ValidationError
from odoo.tools.float_utils import float_compare
from odoo.tools.safe_eval import safe_eval
from . import common as co
class XLSXImport(models.AbstractModel):
    """Import engine: fill records of any model from an Excel file,
    driven by the ``__IMPORT__`` instruction dict of an xlsx.template."""

    _name = "xlsx.import"
    _description = "Excel Import AbstractModel"

    @api.model
    def get_eval_context(self, model=False, value=False):
        """Return the evaluation context available to ``${...}`` expressions.

        :param model: optional model name, exposed as ``model``
        :param value: optional cell value, exposed as ``value``
        """
        eval_context = {
            "float_compare": float_compare,
            "datetime": dt,
            "date": date,
            "env": self.env,
            "context": self._context,
            "value": False,
            "model": False,
        }
        if model:
            eval_context["model"] = self.env[model]
        if value:
            if isinstance(value, str):
                # Replace non-ASCII characters with spaces before evaluation
                value = "".join(i if ord(i) < 128 else " " for i in value)
            eval_context["value"] = value
        return eval_context

    @api.model
    def get_external_id(self, record):
        """Get external ID of the record; if none exists yet, create one."""
        ModelData = self.env["ir.model.data"]
        xml_id = record.get_external_id()
        if not xml_id or (record.id in xml_id and xml_id[record.id] == ""):
            ModelData.create(
                {
                    "name": "{}_{}".format(record._table, record.id),
                    "module": "__excel_import_export__",
                    "model": record._name,
                    "res_id": record.id,
                }
            )
            xml_id = record.get_external_id()
        return xml_id[record.id]

    @api.model
    def _get_field_type(self, model, field):
        """Return the Odoo field type for a (possibly nested) field path.

        ``field`` may use ``/`` to traverse *2many fields, e.g.
        ``line_ids/product_id``.  Note: returns None implicitly when the
        path ends on a one2many/many2many field itself.

        :raises ValidationError: when the path is not valid for ``model``
        """
        try:
            record = self.env[model].new()
            for f in field.split("/"):
                field_type = record._fields[f].type
                if field_type in ("one2many", "many2many"):
                    record = record[f]
                else:
                    return field_type
        except Exception as exc:
            raise ValidationError(
                _("Invalid declaration, %s has no valid field type") % field
            ) from exc

    @api.model
    def _delete_record_data(self, record, data_dict):
        """If no _NODEL_, delete existing lines before importing.

        Also strips the ``_NODEL_`` markers from ``data_dict`` in place so
        the remaining import steps see plain field names.
        """
        if not record or not data_dict:
            return
        try:
            for sheet_name in data_dict:
                worksheet = data_dict[sheet_name]
                line_fields = filter(lambda x: x != "_HEAD_", worksheet)
                for line_field in line_fields:
                    if "_NODEL_" not in line_field:
                        if line_field in record and record[line_field]:
                            record[line_field].unlink()
            # Remove _NODEL_ from dict
            for s, _sv in data_dict.copy().items():
                for f, _fv in data_dict[s].copy().items():
                    if "_NODEL_" in f:
                        new_fv = data_dict[s].pop(f)
                        data_dict[s][f.replace("_NODEL_", "")] = new_fv
        except Exception as e:
            raise ValidationError(_("Error deleting data\n%s") % e) from e

    @api.model
    def _get_end_row(self, st, worksheet, line_field):
        """Get max row or next empty row as the ending row."""
        _x, max_row = co.get_line_max(line_field)
        test_rows = {}
        max_end_row = 0
        for rc, _col in worksheet.get(line_field, {}).items():
            rc, key_eval_cond = co.get_field_condition(rc)
            row, col = co.pos2idx(rc)
            # Use max_row, i.e., order_line[5], use it. Otherwise, use st.nrows
            max_end_row = st.nrows if max_row is False else (row + max_row)
            for idx in range(row, max_row and max_end_row or st.nrows):
                cell_type = st.cell_type(idx, col)  # empty type = 0
                r_types = test_rows.get(idx, [])
                r_types.append(cell_type)
                test_rows[idx] = r_types
        # A row is "empty" when every tracked column cell is of empty type
        empty_list = filter(lambda y: all(i == 0 for i in y[1]), test_rows.items())
        empty_rows = list(map(lambda z: z[0], empty_list))
        next_empty_row = empty_rows and min(empty_rows) or max_end_row
        return next_empty_row

    @api.model
    def _get_line_vals(self, st, worksheet, model, line_field):
        """Get values of this one2many field from the excel sheet."""
        vals = {}
        end_row = self._get_end_row(st, worksheet, line_field)
        for rc, columns in worksheet.get(line_field, {}).items():
            if not isinstance(columns, list):
                columns = [columns]
            for field in columns:
                rc, key_eval_cond = co.get_field_condition(rc)
                x_field, val_eval_cond = co.get_field_condition(field)
                row, col = co.pos2idx(rc)
                new_line_field, _x = co.get_line_max(line_field)
                out_field = "{}/{}".format(new_line_field, x_field)
                field_type = self._get_field_type(model, out_field)
                vals.update({out_field: []})
                for idx in range(row, end_row):
                    value = co._get_cell_value(st.cell(idx, col), field_type=field_type)
                    eval_context = self.get_eval_context(model=model, value=value)
                    if key_eval_cond:
                        value = safe_eval(key_eval_cond, eval_context)
                    if val_eval_cond:
                        value = safe_eval(val_eval_cond, eval_context)
                    vals[out_field].append(value)
                # Drop the column entirely when every cell is empty.
                # BUGFIX: the old `if not filter(...)` never fired on
                # Python 3 — a filter object is always truthy.
                if not any(x != "" for x in vals[out_field]):
                    vals.pop(out_field)
        return vals

    @api.model
    def _process_worksheet(self, wb, out_wb, out_st, model, data_dict, header_fields):
        """Copy head values and line columns from each source sheet into
        the flat temp sheet ``out_st`` used by base_import."""
        col_idx = 1
        for sheet_name in data_dict:  # For each Sheet
            worksheet = data_dict[sheet_name]
            st = False
            if isinstance(sheet_name, str):
                st = co.xlrd_get_sheet_by_name(wb, sheet_name)
            elif isinstance(sheet_name, int):
                # Instruction sheets are 1-based; xlrd indexes are 0-based
                st = wb.sheet_by_index(sheet_name - 1)
            if not st:
                raise ValidationError(_("Sheet %s not found") % sheet_name)
            # HEAD updates
            for rc, field in worksheet.get("_HEAD_", {}).items():
                rc, key_eval_cond = co.get_field_condition(rc)
                field, val_eval_cond = co.get_field_condition(field)
                field_type = self._get_field_type(model, field)
                try:
                    row, col = co.pos2idx(rc)
                    value = co._get_cell_value(st.cell(row, col), field_type=field_type)
                except Exception:
                    value = False
                eval_context = self.get_eval_context(model=model, value=value)
                if key_eval_cond:
                    value = str(safe_eval(key_eval_cond, eval_context))
                if val_eval_cond:
                    value = str(safe_eval(val_eval_cond, eval_context))
                out_st.write(0, col_idx, field)  # Next Column
                out_st.write(1, col_idx, value)  # Next Value
                header_fields.append(field)
                col_idx += 1
            # Line Items
            line_fields = filter(lambda x: x != "_HEAD_", worksheet)
            for line_field in line_fields:
                vals = self._get_line_vals(st, worksheet, model, line_field)
                for field in vals:
                    # Columns, i.e., line_ids/field_id
                    out_st.write(0, col_idx, field)
                    header_fields.append(field)
                    # Data
                    i = 1
                    for value in vals[field]:
                        out_st.write(i, col_idx, value)
                        i += 1
                    col_idx += 1

    @api.model
    def _import_record_data(self, import_file, record, data_dict):
        """From complex excel, create a temp simple excel and run the
        standard base_import on it; return the imported record."""
        if not data_dict:
            return
        try:
            header_fields = []
            model = record._name
            decoded_data = base64.decodebytes(import_file)
            wb = xlrd.open_workbook(file_contents=decoded_data)
            out_wb = xlwt.Workbook()
            out_st = out_wb.add_sheet("Sheet 1")
            # Reuse the record's external id so base_import updates it;
            # otherwise make a fresh one (import will create the record)
            xml_id = (
                record
                and self.get_external_id(record)
                or "{}.{}".format("__excel_import_export__", uuid.uuid4())
            )
            out_st.write(0, 0, "id")  # id and xml_id on first column
            out_st.write(1, 0, xml_id)
            header_fields.append("id")
            # Process on all worksheets
            self._process_worksheet(wb, out_wb, out_st, model, data_dict, header_fields)
            # --
            content = BytesIO()
            out_wb.save(content)
            content.seek(0)  # Set index to 0, and start reading
            xls_file = content.read()
            # Do the import
            Import = self.env["base_import.import"]
            imp = Import.create(
                {
                    "res_model": model,
                    "file": xls_file,
                    "file_type": "application/vnd.ms-excel",
                    "file_name": "temp.xls",
                }
            )
            errors = imp.execute_import(
                header_fields,
                header_fields,
                {
                    "has_headers": True,
                    "advanced": True,
                    "keep_matches": False,
                    "encoding": "",
                    "separator": "",
                    "quoting": '"',
                    "date_format": "%Y-%m-%d",
                    "datetime_format": "%Y-%m-%d %H:%M:%S",
                    "float_thousand_separator": ",",
                    "float_decimal_separator": ".",
                    "fields": [],
                },
            )
            if errors.get("messages"):
                message = _("Error importing data")
                messages = errors["messages"]
                if isinstance(messages, dict):
                    message = messages["message"]
                if isinstance(messages, list):
                    message = ", ".join([x["message"] for x in messages])
                # BUGFIX: pass the str directly; the old .encode("utf-8")
                # raised a bytes payload, shown to the user as b'...'
                raise ValidationError(message)
            return self.env.ref(xml_id)
        except xlrd.XLRDError as exc:
            raise ValidationError(
                _("Invalid file style, only .xls or .xlsx file allowed")
            ) from exc
        # Any other exception simply propagates (the old
        # `except Exception as e: raise e` was a no-op).

    @api.model
    def _post_import_operation(self, record, operation):
        """Run python code after import, e.g. '${object.do_cleanup()}'."""
        if not record or not operation:
            return
        try:
            if "${" in operation:
                code = (operation.split("${"))[1].split("}")[0]
                eval_context = {"object": record}
                safe_eval(code, eval_context)
        except Exception as e:
            raise ValidationError(_("Post import operation error\n%s") % e) from e

    @api.model
    def import_xlsx(self, import_file, template, res_model=False, res_id=False):
        """Import ``import_file`` through ``template`` and return the record.

        - If res_id = False, we want to create a new document first
        - Delete fields' data according to data_dict['__IMPORT__']
        - Import data from excel according to data_dict['__IMPORT__']

        :raises ValidationError: model mismatch or missing __IMPORT__ dict
        """
        self = self.sudo()
        if res_model and template.res_model != res_model:
            raise ValidationError(_("Template's model mismatch"))
        record = self.env[template.res_model].browse(res_id)
        data_dict = literal_eval(template.instruction.strip())
        if not data_dict.get("__IMPORT__"):
            raise ValidationError(
                _("No data_dict['__IMPORT__'] in template %s") % template.name
            )
        if record:
            # Delete existing data first
            self._delete_record_data(record, data_dict["__IMPORT__"])
        # Fill up record with data from excel sheets
        record = self._import_record_data(import_file, record, data_dict["__IMPORT__"])
        # Post Import Operation, i.e., cleanup some data
        if data_dict.get("__POST_IMPORT__", False):
            self._post_import_operation(record, data_dict["__POST_IMPORT__"])
        return record
| 42.25
| 12,675
|
10,693
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import csv
import itertools
import logging
import re
import string
import uuid
from ast import literal_eval
from datetime import datetime as dt
from io import StringIO
from dateutil.parser import parse
from odoo import _
from odoo.exceptions import ValidationError
_logger = logging.getLogger(__name__)
try:
import xlrd
except ImportError:
_logger.debug('Cannot import "xlrd". Please make sure it is installed.')
def adjust_cell_formula(value, k):
    """Shift row references in a cell formula by *k* rows.

    e.g. with k=5, 'val=?(A11)+?(B12)' becomes 'val=A16+B17'.
    Non-string values pass through untouched.
    """
    if not isinstance(value, str):
        return value
    for _n in range(value.count("?(")):
        if value and "?(" in value and ")" in value:
            start = value.index("?(")
            end = value.index(")", start)
            ref = value[start + 2 : end]
            col, row = split_row_col(ref)
            shifted = "{}{}".format(col, row + k)
            value = value.replace("?(%s)" % ref, shifted)
    return value
def get_field_aggregation(field):
    """Split an aggregation directive from a field, e.g. 'field@{sum}'
    -> ('field', 'sum'); return (field, False) when none present."""
    if not field or "@{" not in field or "}" not in field:
        return (field, False)
    start = field.index("@{")
    end = field.index("}", start)
    directive = field[start + 2 : end]
    try:
        if directive or directive == "":
            return (field[:start], directive)
    except Exception:
        return (field.replace("@{%s}" % directive, ""), False)
    return (field, False)
def get_field_condition(field):
    """Split an eval condition from a field, e.g.
    'field${value > 0 and value or False}' -> ('field', 'value > 0 ...');
    return (field, False) when none present."""
    if not field or "${" not in field or "}" not in field:
        return (field, False)
    start = field.index("${")
    end = field.index("}", start)
    expr = field[start + 2 : end]
    try:
        if expr or expr == "":
            return (field.replace("${%s}" % expr, ""), expr)
    except Exception:
        return (field, False)
    return (field, False)
def get_field_style(field):
    """Split a style directive from a field name.

    Available styles
    - font = bold, bold_red
    - fill = red, blue, yellow, green, grey
    - align = left, center, right
    - number = true, false
    i.e., 'field#{font=bold;fill=red;align=center;style=number}'
    -> ('field', 'font=bold;fill=red;align=center;style=number')
    """
    if not field or "#{" not in field or "}" not in field:
        return (field, False)
    start = field.index("#{")
    end = field.index("}", start)
    directive = field[start + 2 : end]
    try:
        if directive or directive == "":
            return (field.replace("#{%s}" % directive, ""), directive)
    except Exception:
        return (field, False)
    return (field, False)
def get_field_style_cond(field):
    """Split a conditional style from a field, e.g.
    'field#?object.partner_id and #{font=bold} or #{}?'
    -> ('field', 'object.partner_id and #{font=bold} or #{}')."""
    if not field or "#?" not in field or "?" not in field:
        return (field, False)
    start = field.index("#?")
    end = field.index("?", start + 2)
    expr = field[start + 2 : end]
    try:
        if expr or expr == "":
            return (field.replace("#?%s?" % expr, ""), expr)
    except Exception:
        return (field, False)
    return (field, False)
def fill_cell_style(field, field_style, styles):
    """Apply a ';'-separated style directive (e.g. 'font=bold;fill=red')
    to an openpyxl cell *field*, mutating it in place.

    :param field: openpyxl cell object
    :param field_style: directive string, or falsy for no styling
    :param styles: style map as returned by xlsx.styles get_openpyxl_styles()
    :raises ValidationError: unknown style type or unknown style value
    """
    field_styles = field_style.split(";") if field_style else []
    for f in field_styles:
        (key, value) = f.split("=")
        if key not in styles.keys():
            raise ValidationError(_("Invalid style type %s") % key)
        if value.lower() not in styles[key].keys():
            raise ValidationError(
                _("Invalid value %(value)s for style type %(key)s")
                % {"value": value, "key": key}
            )
        cell_style = styles[key][value]
        if key == "font":
            field.font = cell_style
        if key == "fill":
            field.fill = cell_style
        if key == "align":
            field.alignment = cell_style
        if key == "style":
            # number-format style; 'text' additionally coerces the value
            if value == "text":
                try:
                    # In case value can't be encoded as utf, we do normal str()
                    field.value = field.value.encode("utf-8")
                except Exception:
                    field.value = str(field.value)
            field.number_format = cell_style
def get_line_max(line_field):
    """Split a trailing max-row spec: 'line_ids[100]' -> ('line_ids', 100).

    When there is no bracket pair, or the bracket content is empty or not
    an integer, the input is returned unchanged with False.
    """
    if not (line_field and "[" in line_field and "]" in line_field):
        return (line_field, False)
    open_idx = line_field.index("[")
    close_idx = line_field.index("]")
    spec = line_field[open_idx + 1 : close_idx]
    try:
        if spec:
            return (line_field[:open_idx], int(spec))
        return (line_field, False)
    except Exception:
        return (line_field, False)
def get_groupby(line_field):
    """Extract a groupby list, e.g. 'line_ids["a_id", "b_id"]'
    -> ["a_id", "b_id"]; False when no bracketed list is present."""
    if not (line_field and "[" in line_field and "]" in line_field):
        return False
    open_idx = line_field.index("[")
    close_idx = line_field.index("]")
    return literal_eval(line_field[open_idx : close_idx + 1])
def split_row_col(pos):
    """Split an Excel position like 'AB12' into ('AB', 12).

    :raises ValidationError: when *pos* is not letters followed by digits
    """
    parsed = re.match(r"([a-z]+)([0-9]+)", pos, re.I)
    if not parsed:
        raise ValidationError(_("Position %s is not valid") % pos)
    col, row = parsed.groups()
    return col, int(row)
def openpyxl_get_sheet_by_name(book, name):
    """Return the openpyxl worksheet whose title equals *name*.

    :raises ValidationError: when no sheet with that name exists
    """
    for idx, sheetname in enumerate(book.sheetnames):
        if sheetname == name:
            return book.worksheets[idx]
    raise ValidationError(_("'%s' sheet not found") % (name,))
def xlrd_get_sheet_by_name(book, name):
    """Return the xlrd sheet whose name equals *name*.

    Probes sheets by index until xlrd raises IndexError.
    :raises ValidationError: when no sheet with that name exists
    """
    idx = 0
    try:
        while True:
            sheet = book.sheet_by_index(idx)
            if sheet.name == name:
                return sheet
            idx += 1
    except IndexError as exc:
        raise ValidationError(_("'%s' sheet not found") % (name,)) from exc
def isfloat(input_val):
    """Return True when *input_val* is convertible to float.

    Also catches TypeError so non-numeric objects (e.g. None) report
    False instead of crashing (the original caught ValueError only).
    """
    try:
        float(input_val)
        return True
    except (ValueError, TypeError):
        return False
def isinteger(input_val):
    """Return True when *input_val* is convertible to int.

    Also catches TypeError so non-numeric objects (e.g. None) report
    False instead of crashing (the original caught ValueError only).
    """
    try:
        int(input_val)
        return True
    except (ValueError, TypeError):
        return False
def isdatetime(input_val):
    """Return True when *input_val* is a '%Y-%m-%d' date (len 10) or a
    '%Y-%m-%d %H:%M:%S' datetime (len 19) string.

    Also catches TypeError so non-string input (no len / not parseable)
    reports False instead of crashing.
    """
    try:
        if len(input_val) == 10:
            dt.strptime(input_val, "%Y-%m-%d")
        elif len(input_val) == 19:
            dt.strptime(input_val, "%Y-%m-%d %H:%M:%S")
        else:
            return False
        return True
    except (ValueError, TypeError):
        return False
def str_to_number(input_val):
    """Best-effort conversion of a string to datetime/int/float.

    Strings containing a space, leading-zero integers (e.g. '007'),
    odd leading-zero floats, and non-strings are returned unchanged.
    """
    if not isinstance(input_val, str) or " " in input_val:
        return input_val
    if isdatetime(input_val):
        return parse(input_val)
    if isinteger(input_val):
        keeps_leading_zero = len(input_val) > 1 and input_val[:1] == "0"
        return input_val if keeps_leading_zero else int(input_val)
    if isfloat(input_val):
        odd_leading_zero = input_val.find(".") > 2 and input_val[:1] == "0"
        return input_val if odd_leading_zero else float(input_val)
    return input_val
def csv_from_excel(excel_content, delimiter, quote):
    """Convert the first sheet of an xls/xlsx binary into utf-8 CSV bytes.

    :param quote: True -> QUOTE_ALL; False -> QUOTE_NONE, except a space
        delimiter which falls back to QUOTE_MINIMAL
    :raises ValidationError: unquoted data contains the delimiter char
    """
    book = xlrd.open_workbook(file_contents=excel_content)
    sheet = book.sheet_by_index(0)
    buff = StringIO()
    if quote:
        quoting = csv.QUOTE_ALL
    elif delimiter == " ":
        quoting = csv.QUOTE_MINIMAL
    else:
        quoting = csv.QUOTE_NONE
    writer = csv.writer(buff, delimiter=delimiter, quoting=quoting)
    for row_idx in range(sheet.nrows):
        row = []
        for cell_val in sheet.row_values(row_idx):
            if quoting == csv.QUOTE_NONE and delimiter in cell_val:
                raise ValidationError(
                    _(
                        "Template with CSV Quoting = False, data must not "
                        'contain the same char as delimiter -> "%s"'
                    )
                    % delimiter
                )
            row.append(cell_val)
        writer.writerow(row)
    buff.seek(0)  # Set index to 0, and start reading
    return buff.getvalue().encode("utf-8")
def pos2idx(pos):
    """Convert an Excel position like 'A1' or 'AA10' to a zero-based
    (row, col) tuple, e.g. 'A1' -> (0, 0).

    :raises ValidationError: when *pos* is not letters followed by digits
    """
    parsed = re.match(r"([a-z]+)([0-9]+)", pos, re.I)
    if not parsed:
        raise ValidationError(_("Position %s is not valid") % (pos,))
    col_letters, row_digits = parsed.groups()
    # Base-26 "bijective" column number: A=1, Z=26, AA=27, ...
    col_num = 0
    for letter in col_letters:
        if letter in string.ascii_letters:
            col_num = col_num * 26 + (ord(letter.upper()) - ord("A")) + 1
    return (int(row_digits) - 1, col_num - 1)
def _get_cell_value(cell, field_type=False):
    """Convert an xlrd cell into a value suitable for Odoo import.

    If Odoo's field type is known, convert to a valid string for import;
    if not known, just take the value as-is.
    :param cell: xlrd cell object (has .ctype and .value)
    :param field_type: Odoo field type name ('date', 'integer', ...) or False
    """
    value = False
    datemode = 0  # From book.datemode, but we fix it for simplicity
    if field_type in ["date", "datetime"]:
        ctype = xlrd.sheet.ctype_text.get(cell.ctype, "unknown type")
        if ctype in ("xldate", "number"):
            # A fractional part means the serial also carries a time part
            is_datetime = cell.value % 1 != 0.0
            time_tuple = xlrd.xldate_as_tuple(cell.value, datemode)
            date = dt(*time_tuple)
            value = (
                date.strftime("%Y-%m-%d %H:%M:%S")
                if is_datetime
                else date.strftime("%Y-%m-%d")
            )
        else:
            value = cell.value
    elif field_type in ["integer", "float"]:
        value_str = str(cell.value).strip().replace(",", "")
        if len(value_str) == 0:
            value = ""
        elif value_str.replace(".", "", 1).isdigit():  # Is number
            if field_type == "integer":
                value = int(float(value_str))
            elif field_type == "float":
                value = float(value_str)
        else:  # Is string, no conversion
            value = value_str
    elif field_type in ["many2one"]:
        # If number, change to string
        if isinstance(cell.value, (int, float, complex)):
            value = str(cell.value)
        else:
            value = cell.value
    else:  # text, char
        value = cell.value
    # If string, cleanup: drop a trailing '.0' left by float->str conversion
    if isinstance(value, str):
        if value[-2:] == ".0":
            value = value[:-2]
    # Except boolean, when no value, we should return as ''
    if field_type not in ["boolean"]:
        if not value:
            value = ""
    return value
def _add_column(column_name, column_value, file_txt):
i = 0
txt_lines = []
for line in file_txt.split("\n"):
if line and i == 0:
line = '"' + str(column_name) + '",' + line
elif line:
line = '"' + str(column_value) + '",' + line
txt_lines.append(line)
i += 1
file_txt = "\n".join(txt_lines)
return file_txt
def _add_id_column(file_txt):
i = 0
txt_lines = []
for line in file_txt.split("\n"):
if line and i == 0:
line = '"id",' + line
elif line:
line = f"__excel_import_export__.{uuid.uuid4()},{line}"
txt_lines.append(line)
i += 1
file_txt = "\n".join(txt_lines)
return file_txt
| 31.357771
| 10,693
|
1,623
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import logging
from odoo import api, models
_logger = logging.getLogger(__name__)
try:
from openpyxl.styles import Alignment, Font, PatternFill
except ImportError:
_logger.debug('Cannot import "openpyxl". Please make sure it is installed.')
class XLSXStyles(models.AbstractModel):
    """Registry of named openpyxl styles usable in #{...} directives."""
    _name = "xlsx.styles"
    _description = "Available styles for excel"
    @api.model
    def get_openpyxl_styles(self):
        """List all styles that can be used with the styling directive #{...}"""
        return {
            "font": {
                "bold": Font(name="Arial", size=10, bold=True),
                "bold_red": Font(name="Arial", size=10, color="FF0000", bold=True),
            },
            "fill": {
                "red": PatternFill("solid", fgColor="FF0000"),
                "grey": PatternFill("solid", fgColor="DDDDDD"),
                "yellow": PatternFill("solid", fgColor="FFFCB7"),
                "blue": PatternFill("solid", fgColor="9BF3FF"),
                "green": PatternFill("solid", fgColor="B0FF99"),
            },
            "align": {
                "left": Alignment(horizontal="left"),
                "center": Alignment(horizontal="center"),
                "right": Alignment(horizontal="right"),
            },
            # number_format strings, applied via cell.number_format
            "style": {
                "number": "#,##0.00",
                "date": "dd/mm/yyyy",
                "datestamp": "yyyy-mm-dd",
                "text": "@",
                "percent": "0.00%",
            },
        }
| 34.531915
| 1,623
|
23,462
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import base64
import os
from ast import literal_eval
from os.path import join as opj
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
from odoo.modules.module import get_module_path
from . import common as co
class XLSXTemplate(models.Model):
    """Master Data for XLSX Templates
    - Excel Template
    - Import/Export Meta Data (dict text)
    - Default values, etc.
    """
    _name = "xlsx.template"
    _description = "Excel template file and instruction"
    _order = "name"
    name = fields.Char(string="Template Name", required=True)
    res_model = fields.Char(
        string="Resource Model",
        help="The database object this attachment will be attached to.",
    )
    fname = fields.Char(string="File Name")
    gname = fields.Char(
        string="Group Name",
        help="Multiple template of same model, can belong to same group,\n"
        "result in multiple template selection",
    )
    description = fields.Char()
    # Raw instruction dict pasted by the user; parsed into export_ids /
    # import_ids / post_import_hook on create() and write()
    input_instruction = fields.Text(
        string="Instruction (Input)",
        help="This is used to construct instruction in tab Import/Export",
    )
    instruction = fields.Text(
        compute="_compute_output_instruction",
        help="Instruction on how to import/export, prepared by system.",
    )
    datas = fields.Binary(string="File Content")
    to_csv = fields.Boolean(
        string="Convert to CSV?",
        default=False,
        help="Convert file into CSV format on export",
    )
    csv_delimiter = fields.Char(
        string="CSV Delimiter",
        size=1,
        default=",",
        required=True,
        help="Optional for CSV, default is comma.",
    )
    csv_extension = fields.Char(
        string="CSV File Extension",
        size=5,
        default="csv",
        required=True,
        help="Optional for CSV, default is .csv",
    )
    csv_quote = fields.Boolean(
        string="CSV Quoting",
        default=True,
        help="Optional for CSV, default is full quoting.",
    )
    # Normalized instruction lines (sheet/head/row/data sections)
    export_ids = fields.One2many(
        comodel_name="xlsx.template.export", inverse_name="template_id"
    )
    import_ids = fields.One2many(
        comodel_name="xlsx.template.import", inverse_name="template_id"
    )
    post_import_hook = fields.Char(
        string="Post Import Function Hook",
        help="Call a function after successful import, i.e.,\n"
        "${object.post_import_do_something()}",
    )
    show_instruction = fields.Boolean(
        string="Show Output",
        default=False,
        help="This is the computed instruction based on tab Import/Export,\n"
        "to be used by xlsx import/export engine",
    )
    redirect_action = fields.Many2one(
        comodel_name="ir.actions.act_window",
        string="Return Action",
        domain=[("type", "=", "ir.actions.act_window")],
        help="Optional action, redirection after finish import operation",
    )
    # Utilities
    export_action_id = fields.Many2one(
        comodel_name="ir.actions.act_window",
        ondelete="set null",
    )
    import_action_id = fields.Many2one(
        comodel_name="ir.actions.act_window",
        ondelete="set null",
    )
    # Easy-reporting mode: reuse the common report.xlsx.wizard model and a
    # per-template dynamic results field instead of a dedicated wizard model
    use_report_wizard = fields.Boolean(
        string="Easy Reporting",
        help="Use common report wizard model, instead of create specific model",
    )
    result_model_id = fields.Many2one(
        comodel_name="ir.model",
        string="Report Model",
        help="When use commone wizard, choose the result model",
    )
    result_field = fields.Char(
        compute="_compute_result_field",
    )
    report_menu_id = fields.Many2one(
        comodel_name="ir.ui.menu",
        string="Report Menu",
        readonly=True,
    )
    report_action_id = fields.Many2one(
        comodel_name="ir.actions.report",
        string="Report Action",
    )
def _compute_result_field(self):
for rec in self:
rec.result_field = (
("x_%s_results" % rec.id) if rec.result_model_id else False
)
@api.constrains("redirect_action", "res_model")
def _check_action_model(self):
for rec in self:
if (
rec.res_model
and rec.redirect_action
and rec.res_model != rec.redirect_action.res_model
):
raise ValidationError(
_("The selected redirect action is " "not for model %s")
% rec.res_model
)
@api.model
def load_xlsx_template(self, template_ids, addon=False):
for template in self.browse(template_ids):
if not addon:
addon = list(template.get_external_id().values())[0].split(".")[0]
addon_path = get_module_path(addon)
file_path = False
for root, _dirs, files in os.walk(addon_path):
for name in files:
if name == template.fname:
file_path = os.path.abspath(opj(root, name))
if file_path:
template.datas = base64.b64encode(open(file_path, "rb").read())
return True
    @api.model
    def create(self, vals):
        """Create the template, then sync helper tables from the pasted
        instruction dict and provision easy-reporting structures."""
        rec = super().create(vals)
        if vals.get("input_instruction"):
            # Parse input_instruction into export/import lines and hook
            rec._compute_input_export_instruction()
            rec._compute_input_import_instruction()
            rec._compute_input_post_import_hook()
        if vals.get("result_model_id"):
            # Easy-reporting mode: dynamic results field + default export
            rec._update_result_field_common_wizard()
            rec._update_result_export_ids()
        return rec
    def write(self, vals):
        """Write, then re-sync helper tables when the instruction dict or
        the easy-reporting result model changed."""
        res = super().write(vals)
        if vals.get("input_instruction"):
            # Re-parse input_instruction into export/import lines and hook
            for rec in self:
                rec._compute_input_export_instruction()
                rec._compute_input_import_instruction()
                rec._compute_input_post_import_hook()
        if vals.get("result_model_id"):
            # Refresh dynamic results field + default export lines
            for rec in self:
                rec._update_result_field_common_wizard()
                rec._update_result_export_ids()
        return res
def unlink(self):
self.env["ir.model.fields"].search(
[
("model", "=", "report.xlsx.wizard"),
("name", "=", self.mapped("result_field")),
]
).unlink()
return super().unlink()
    def _update_result_field_common_wizard(self):
        """Create or refresh the dynamic x_<id>_results many2many field on
        the common report.xlsx.wizard model, pointing at this template's
        result model, with an on-the-fly compute based on the wizard domain."""
        self.ensure_one()
        _model = self.env["ir.model"].search([("model", "=", "report.xlsx.wizard")])
        _model.ensure_one()
        _field = self.env["ir.model.fields"].search(
            [("model", "=", "report.xlsx.wizard"), ("name", "=", self.result_field)]
        )
        if not _field:
            _field = self.env["ir.model.fields"].create(
                {
                    "model_id": _model.id,
                    "name": self.result_field,
                    "field_description": "Results",
                    "ttype": "many2many",
                    "relation": self.result_model_id.model,
                    "store": False,
                    "depends": "res_model",
                }
            )
        else:
            _field.ensure_one()
            # Re-point the existing field at the (possibly new) result model
            _field.write({"relation": self.result_model_id.model})
        # Computed on the fly: search the result model with the wizard's domain
        _field.compute = """
        self['{}'] = self.env['{}'].search(self.safe_domain(self.domain))
        """.format(
            self.result_field,
            self.result_model_id.model,
        )
    def _update_result_export_ids(self):
        """Reset the export instruction to the easy-reporting default:
        one sheet, rows from the dynamic results field, exporting 'id'
        starting at cell A1 — unless such lines already exist."""
        self.ensure_one()
        results = self.env["xlsx.template.export"].search(
            [("template_id", "=", self.id), ("row_field", "=", self.result_field)]
        )
        if not results:
            self.export_ids.unlink()
            self.write(
                {
                    "export_ids": [
                        (0, 0, {"sequence": 10, "section_type": "sheet", "sheet": 1}),
                        (
                            0,
                            0,
                            {
                                "sequence": 20,
                                "section_type": "row",
                                "row_field": self.result_field,
                            },
                        ),
                        (
                            0,
                            0,
                            {
                                "sequence": 30,
                                "section_type": "data",
                                "excel_cell": "A1",
                                "field_name": "id",
                            },
                        ),
                    ],
                }
            )
@api.onchange("use_report_wizard")
def _onchange_use_report_wizard(self):
self.res_model = "report.xlsx.wizard" if self.use_report_wizard else False
self.redirect_action = False
    def _compute_input_export_instruction(self):
        """Parse the __EXPORT__ part of input_instruction into ordered
        xlsx.template.export lines (sheet / head / row / data sections)."""
        self = self.with_context(compute_from_input=True)
        for rec in self:
            # Export Instruction
            input_dict = literal_eval(rec.input_instruction.strip())
            rec.export_ids.unlink()
            export_dict = input_dict.get("__EXPORT__")
            if not export_dict:
                continue
            export_lines = []
            sequence = 0
            # Sheet
            for sheet, rows in export_dict.items():
                sequence += 1
                vals = {
                    "sequence": sequence,
                    "section_type": "sheet",
                    "sheet": str(sheet),
                }
                export_lines.append((0, 0, vals))
                # Rows
                for row_field, lines in rows.items():
                    sequence += 1
                    # _CONT_ / _EXTEND_ markers become boolean flags
                    is_cont = False
                    if "_CONT_" in row_field:
                        is_cont = True
                        row_field = row_field.replace("_CONT_", "")
                    is_extend = False
                    if "_EXTEND_" in row_field:
                        is_extend = True
                        row_field = row_field.replace("_EXTEND_", "")
                    vals = {
                        "sequence": sequence,
                        "section_type": (row_field == "_HEAD_" and "head" or "row"),
                        "row_field": row_field,
                        "is_cont": is_cont,
                        "is_extend": is_extend,
                    }
                    export_lines.append((0, 0, vals))
                    for excel_cell, field_name in lines.items():
                        sequence += 1
                        vals = {
                            "sequence": sequence,
                            "section_type": "data",
                            "excel_cell": excel_cell,
                            "field_name": field_name,
                        }
                        export_lines.append((0, 0, vals))
            rec.write({"export_ids": export_lines})
    def _compute_input_import_instruction(self):
        """Parse the __IMPORT__ part of input_instruction into ordered
        xlsx.template.import lines (sheet / head / row / data sections)."""
        self = self.with_context(compute_from_input=True)
        for rec in self:
            # Import Instruction
            input_dict = literal_eval(rec.input_instruction.strip())
            rec.import_ids.unlink()
            import_dict = input_dict.get("__IMPORT__")
            if not import_dict:
                continue
            import_lines = []
            sequence = 0
            # Sheet
            for sheet, rows in import_dict.items():
                sequence += 1
                vals = {
                    "sequence": sequence,
                    "section_type": "sheet",
                    "sheet": str(sheet),
                }
                import_lines.append((0, 0, vals))
                # Rows
                for row_field, lines in rows.items():
                    sequence += 1
                    # _NODEL_ marker becomes the no_delete flag
                    no_delete = False
                    if "_NODEL_" in row_field:
                        no_delete = True
                        row_field = row_field.replace("_NODEL_", "")
                    vals = {
                        "sequence": sequence,
                        "section_type": (row_field == "_HEAD_" and "head" or "row"),
                        "row_field": row_field,
                        "no_delete": no_delete,
                    }
                    import_lines.append((0, 0, vals))
                    for excel_cell, field_name in lines.items():
                        sequence += 1
                        vals = {
                            "sequence": sequence,
                            "section_type": "data",
                            "excel_cell": excel_cell,
                            "field_name": field_name,
                        }
                        import_lines.append((0, 0, vals))
            rec.write({"import_ids": import_lines})
def _compute_input_post_import_hook(self):
self = self.with_context(compute_from_input=True)
for rec in self:
# Import Instruction
input_dict = literal_eval(rec.input_instruction.strip())
rec.post_import_hook = input_dict.get("__POST_IMPORT__")
    def _compute_output_instruction(self):
        """From database, compute back to dictionary"""
        # Inverse of the _compute_input_* methods: rebuild the full
        # instruction dict from the export_ids / import_ids lines and the
        # post-import hook.  prev_sheet / prev_row track the dict currently
        # being filled, so "data" lines attach to the last seen sheet/row.
        for rec in self:
            inst_dict = {}
            prev_sheet = False
            prev_row = False
            # Export Instruction
            itype = "__EXPORT__"
            inst_dict[itype] = {}
            for line in rec.export_ids:
                if line.section_type == "sheet":
                    # Sheet stored as text; restore int index when numeric
                    sheet = co.isinteger(line.sheet) and int(line.sheet) or line.sheet
                    sheet_dict = {sheet: {}}
                    inst_dict[itype].update(sheet_dict)
                    prev_sheet = sheet
                    continue
                if line.section_type in ("head", "row"):
                    row_field = line.row_field
                    # Re-prefix the row markers that were split on input
                    if line.section_type == "row" and line.is_cont:
                        row_field = "_CONT_%s" % row_field
                    if line.section_type == "row" and line.is_extend:
                        row_field = "_EXTEND_%s" % row_field
                    row_dict = {row_field: {}}
                    inst_dict[itype][prev_sheet].update(row_dict)
                    prev_row = row_field
                    continue
                if line.section_type == "data":
                    # Concatenate the split columns back into one expression:
                    # field${cond}#{style}#?style_cond?@{sum}
                    excel_cell = line.excel_cell
                    field_name = line.field_name or ""
                    field_name += line.field_cond or ""
                    field_name += line.style or ""
                    field_name += line.style_cond or ""
                    if line.is_sum:
                        field_name += "@{sum}"
                    cell_dict = {excel_cell: field_name}
                    inst_dict[itype][prev_sheet][prev_row].update(cell_dict)
                    continue
            # Import Instruction
            itype = "__IMPORT__"
            inst_dict[itype] = {}
            for line in rec.import_ids:
                if line.section_type == "sheet":
                    sheet = co.isinteger(line.sheet) and int(line.sheet) or line.sheet
                    sheet_dict = {sheet: {}}
                    inst_dict[itype].update(sheet_dict)
                    prev_sheet = sheet
                    continue
                if line.section_type in ("head", "row"):
                    row_field = line.row_field
                    if line.section_type == "row" and line.no_delete:
                        row_field = "_NODEL_%s" % row_field
                    row_dict = {row_field: {}}
                    inst_dict[itype][prev_sheet].update(row_dict)
                    prev_row = row_field
                    continue
                if line.section_type == "data":
                    excel_cell = line.excel_cell
                    field_name = line.field_name or ""
                    field_name += line.field_cond or ""
                    cell_dict = {excel_cell: field_name}
                    inst_dict[itype][prev_sheet][prev_row].update(cell_dict)
                    continue
            # Post-import hook (False when not set)
            itype = "__POST_IMPORT__"
            inst_dict[itype] = False
            if rec.post_import_hook:
                inst_dict[itype] = rec.post_import_hook
            rec.instruction = inst_dict
    def add_export_action(self):
        # Create a contextual "Export Excel" window action bound to the
        # template's model; it opens the export wizard restricted (via the
        # context's template_domain) to this template only.
        self.ensure_one()
        vals = {
            "name": "Export Excel",
            "res_model": "export.xlsx.wizard",
            "binding_model_id": self.env["ir.model"]
            .search([("model", "=", self.res_model)])
            .id,
            "binding_type": "action",
            "target": "new",
            "view_mode": "form",
            "context": """
            {'template_domain': [('res_model', '=', '%s'),
                                 ('fname', '=', '%s'),
                                 ('gname', '=', False)]}
            """
            % (self.res_model, self.fname),
        }
        action = self.env["ir.actions.act_window"].create(vals)
        self.export_action_id = action
def remove_export_action(self):
self.ensure_one()
if self.export_action_id:
self.export_action_id.unlink()
    def add_import_action(self):
        # Create a contextual "Import Excel" window action bound to the
        # template's model; it opens the import wizard restricted (via the
        # context's template_domain) to this template only.
        self.ensure_one()
        vals = {
            "name": "Import Excel",
            "res_model": "import.xlsx.wizard",
            "binding_model_id": self.env["ir.model"]
            .search([("model", "=", self.res_model)])
            .id,
            "binding_type": "action",
            "target": "new",
            "view_mode": "form",
            "context": """
            {'template_domain': [('res_model', '=', '%s'),
                                 ('fname', '=', '%s'),
                                 ('gname', '=', False)]}
            """
            % (self.res_model, self.fname),
        }
        action = self.env["ir.actions.act_window"].create(vals)
        self.import_action_id = action
def remove_import_action(self):
self.ensure_one()
if self.import_action_id:
self.import_action_id.unlink()
def add_report_menu(self):
self.ensure_one()
if not self.fname:
raise UserError(_("No file content!"))
# Create report action
vals = {
"name": self.name,
"report_type": "excel",
"model": "report.xlsx.wizard",
"report_name": self.fname,
"report_file": self.fname,
}
report_action = self.env["ir.actions.report"].create(vals)
self.report_action_id = report_action
# Create window action
vals = {
"name": self.name,
"res_model": "report.xlsx.wizard",
"binding_type": "action",
"target": "new",
"view_mode": "form",
"context": {
"report_action_id": report_action.id,
"default_res_model": self.result_model_id.model,
},
}
action = self.env["ir.actions.act_window"].create(vals)
# Create menu
vals = {
"name": self.name,
"action": "{},{}".format(action._name, action.id),
}
menu = self.env["ir.ui.menu"].create(vals)
self.report_menu_id = menu
def remove_report_menu(self):
self.ensure_one()
if self.report_action_id:
self.report_action_id.unlink()
if self.report_menu_id:
self.report_menu_id.action.unlink()
self.report_menu_id.unlink()
class XLSXTemplateImport(models.Model):
    # One line of a template's import instruction: a sheet / head / row /
    # data entry, ordered by sequence, owned by an xlsx.template record.
    _name = "xlsx.template.import"
    _description = "Detailed of how excel data will be imported"
    _order = "sequence"
    template_id = fields.Many2one(
        comodel_name="xlsx.template",
        string="XLSX Template",
        index=True,
        ondelete="cascade",
        readonly=True,
    )
    sequence = fields.Integer(default=10)
    # Worksheet name or numeric index, stored as text
    sheet = fields.Char()
    section_type = fields.Selection(
        [("sheet", "Sheet"), ("head", "Head"), ("row", "Row"), ("data", "Data")],
        required=True,
    )
    row_field = fields.Char(help="If section type is row, this field is required")
    no_delete = fields.Boolean(
        default=False,
        help="By default, all rows will be deleted before import.\n"
        "Select No Delete, otherwise",
    )
    excel_cell = fields.Char(string="Cell")
    field_name = fields.Char(string="Field")
    field_cond = fields.Char(string="Field Cond.")
    @api.model
    def create(self, vals):
        # Split any "${...}" condition out of field_name before storing.
        new_vals = self._extract_field_name(vals)
        return super().create(new_vals)
    @api.model
    def _extract_field_name(self, vals):
        # Only active while lines are being (re)computed from the raw input
        # instruction (context flag set by the template compute methods);
        # splits "field${cond}" into the field_name / field_cond columns.
        if self._context.get("compute_from_input") and vals.get("field_name"):
            field_name, field_cond = co.get_field_condition(vals["field_name"])
            field_cond = field_cond and "${%s}" % (field_cond or "") or False
            vals.update({"field_name": field_name, "field_cond": field_cond})
        return vals
class XLSXTemplateExport(models.Model):
    # One line of a template's export instruction: a sheet / head / row /
    # data entry, ordered by sequence, owned by an xlsx.template record.
    _name = "xlsx.template.export"
    _description = "Detailed of how excel data will be exported"
    _order = "sequence"
    template_id = fields.Many2one(
        comodel_name="xlsx.template",
        string="XLSX Template",
        index=True,
        ondelete="cascade",
        readonly=True,
    )
    sequence = fields.Integer(default=10)
    # Worksheet name or numeric index, stored as text
    sheet = fields.Char()
    section_type = fields.Selection(
        [("sheet", "Sheet"), ("head", "Head"), ("row", "Row"), ("data", "Data")],
        required=True,
    )
    row_field = fields.Char(help="If section type is row, this field is required")
    is_cont = fields.Boolean(
        string="Continue", default=False, help="Continue data rows after last data row"
    )
    is_extend = fields.Boolean(
        string="Extend",
        default=False,
        help="Extend a blank row after filling each record, to extend the footer",
    )
    excel_cell = fields.Char(string="Cell")
    field_name = fields.Char(string="Field")
    field_cond = fields.Char(string="Field Cond.")
    is_sum = fields.Boolean(string="Sum", default=False)
    style = fields.Char(string="Default Style")
    style_cond = fields.Char(string="Style w/Cond.")
    @api.model
    def create(self, vals):
        # Split condition / style / aggregation markers out of field_name
        # before storing.
        new_vals = self._extract_field_name(vals)
        return super().create(new_vals)
    @api.model
    def _extract_field_name(self, vals):
        # Only active while lines are being (re)computed from the raw input
        # instruction (context flag set by the template compute methods);
        # splits "field${cond}#{style}#?style_cond?@{sum}" into the separate
        # columns of this model.
        if self._context.get("compute_from_input") and vals.get("field_name"):
            field_name, field_cond = co.get_field_condition(vals["field_name"])
            field_cond = field_cond or 'value or ""'
            field_name, style = co.get_field_style(field_name)
            field_name, style_cond = co.get_field_style_cond(field_name)
            field_name, func = co.get_field_aggregation(field_name)
            vals.update(
                {
                    "field_name": field_name,
                    "field_cond": "${%s}" % (field_cond or ""),
                    "style": "#{%s}" % (style or ""),
                    "style_cond": "#?%s?" % (style_cond or ""),
                    "is_sum": func == "sum" and True or False,
                }
            )
        return vals
| 37.479233
| 23,462
|
12,675
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import base64
import logging
import os
import zipfile
from datetime import date, datetime as dt
from io import BytesIO
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.tools.float_utils import float_compare
from odoo.tools.safe_eval import safe_eval
from . import common as co
_logger = logging.getLogger(__name__)
try:
from openpyxl import load_workbook
from openpyxl.utils.exceptions import IllegalCharacterError
except ImportError:
_logger.debug('Cannot import "openpyxl". Please make sure it is installed.')
class XLSXExport(models.AbstractModel):
    # Service model that renders an xlsx.template's __EXPORT__ instruction
    # against one or more records using openpyxl.
    _name = "xlsx.export"
    _description = "Excel Export AbstractModel"
    @api.model
    def get_eval_context(self, model, record, value):
        # Namespace handed to safe_eval for field/style condition
        # expressions embedded in the export instruction.
        eval_context = {
            "float_compare": float_compare,
            "datetime": dt,
            "date": date,
            "value": value,
            "object": record,
            "model": self.env[model],
            "env": self.env,
            "context": self._context,
        }
        return eval_context
    @api.model
    def _get_line_vals(self, record, line_field, fields):
        """Get values of this field from record set and return as dict of vals
        - record: main object
        - line_field: rows object, i.e., line_ids
        - fields: fields in line_ids, i.e., partner_id.display_name
        """
        line_field, max_row = co.get_line_max(line_field)
        line_field = line_field.replace("_CONT_", "")  # Remove _CONT_ if any
        line_field = line_field.replace("_EXTEND_", "")  # Remove _EXTEND_ if any
        lines = record[line_field]
        if max_row > 0 and len(lines) > max_row:
            raise Exception(_("Records in %s exceed max records allowed") % line_field)
        vals = {field: [] for field in fields}  # value and do_style
        # Get field condition & aggre function
        field_cond_dict = {}
        aggre_func_dict = {}
        field_style_dict = {}
        style_cond_dict = {}
        pair_fields = []  # I.e., ('debit${value and . or .}@{sum}', 'debit')
        for field in fields:
            # Decompose each expression into raw field + cond/style/aggregate
            temp_field, eval_cond = co.get_field_condition(field)
            eval_cond = eval_cond or 'value or ""'
            temp_field, field_style = co.get_field_style(temp_field)
            temp_field, style_cond = co.get_field_style_cond(temp_field)
            raw_field, aggre_func = co.get_field_aggregation(temp_field)
            # Dict of all special conditions
            field_cond_dict.update({field: eval_cond})
            aggre_func_dict.update({field: aggre_func})
            field_style_dict.update({field: field_style})
            style_cond_dict.update({field: style_cond})
            # --
            pair_fields.append((field, raw_field))
        for line in lines:
            for field in pair_fields:  # (field, raw_field)
                value = self._get_field_data(field[1], line)
                eval_cond = field_cond_dict[field[0]]
                eval_context = self.get_eval_context(line._name, line, value)
                if eval_cond:
                    value = safe_eval(eval_cond, eval_context)
                # style w/Cond takes priority
                style_cond = style_cond_dict[field[0]]
                style = self._eval_style_cond(line._name, line, value, style_cond)
                if style is None:
                    style = False  # No style
                elif style is False:
                    style = field_style_dict[field[0]]  # Use default style
                vals[field[0]].append((value, style))
        return (vals, aggre_func_dict)
    @api.model
    def _eval_style_cond(self, model, record, value, style_cond):
        # Evaluate a conditional style expression ("#?cond?" with embedded
        # "#{style}" tokens).  Returns the selected style string, False when
        # there is no style token at all (caller falls back to the default
        # style), or None/False from the evaluated condition ("no style").
        eval_context = self.get_eval_context(model, record, value)
        field = style_cond = style_cond or "#??"
        styles = {}
        for i in range(style_cond.count("#{")):
            i += 1
            # Replace each "#{style}" token by its 1-based index so the
            # remaining expression can be safe_eval'ed to pick one.
            field, style = co.get_field_style(field)
            styles.update({i: style})
            style_cond = style_cond.replace("#{%s}" % style, str(i))
        if not styles:
            return False
        res = safe_eval(style_cond, eval_context)
        if res is None or res is False:
            return res
        return styles[res]
    @api.model
    def _fill_workbook_data(self, workbook, record, data_dict):
        """Fill data from record with style in data_dict to workbook"""
        if not record or not data_dict:
            return
        try:
            for sheet_name in data_dict:
                ws = data_dict[sheet_name]
                st = False
                # Sheet can be addressed by name (str) or 1-based index (int)
                if isinstance(sheet_name, str):
                    st = co.openpyxl_get_sheet_by_name(workbook, sheet_name)
                elif isinstance(sheet_name, int):
                    if sheet_name > len(workbook.worksheets):
                        raise Exception(_("Not enough worksheets"))
                    st = workbook.worksheets[sheet_name - 1]
                if not st:
                    raise ValidationError(_("Sheet %s not found") % sheet_name)
                # Fill data, header and rows
                self._fill_head(ws, st, record)
                self._fill_lines(ws, st, record)
        except KeyError as e:
            raise ValidationError(_("Key Error\n%s") % e) from e
        except IllegalCharacterError as e:
            raise ValidationError(
                _(
                    "IllegalCharacterError\n"
                    "Some exporting data contain special character\n%s"
                )
                % e
            ) from e
        except Exception as e:
            raise ValidationError(
                _("Error filling data into Excel sheets\n%s") % e
            ) from e
    @api.model
    def _get_field_data(self, _field, _line):
        """Get field data, and convert data type if needed"""
        if not _field:
            return None
        line_copy = _line
        # Walk dotted paths, e.g. "partner_id.display_name"
        for f in _field.split("."):
            line_copy = line_copy[f]
        if isinstance(line_copy, str):
            line_copy = line_copy.encode("utf-8")
        return line_copy
    @api.model
    def _fill_head(self, ws, st, record):
        # Write the header section (_HEAD_) cells of one worksheet from the
        # main record, applying conditions and styles per expression.
        for rc, field in ws.get("_HEAD_", {}).items():
            tmp_field, eval_cond = co.get_field_condition(field)
            eval_cond = eval_cond or 'value or ""'
            tmp_field, field_style = co.get_field_style(tmp_field)
            tmp_field, style_cond = co.get_field_style_cond(tmp_field)
            value = tmp_field and self._get_field_data(tmp_field, record)
            # Eval
            eval_context = self.get_eval_context(record._name, record, value)
            if eval_cond:
                value = safe_eval(eval_cond, eval_context)
            if value is not None:
                st[rc] = value
            fc = not style_cond and True or safe_eval(style_cond, eval_context)
            if field_style and fc:  # has style and pass style_cond
                styles = self.env["xlsx.styles"].get_openpyxl_styles()
                co.fill_cell_style(st[rc], field_style, styles)
    @api.model
    def _fill_lines(self, ws, st, record):
        # Write the row (one2many) sections of one worksheet, honouring the
        # _CONT_ (continue after last written data row) and _EXTEND_ (insert
        # blank rows to preserve a footer) markers, plus @{sum} footers.
        line_fields = list(ws)
        if "_HEAD_" in line_fields:
            line_fields.remove("_HEAD_")
        cont_row = 0  # last data row to continue
        for line_field in line_fields:
            fields = ws.get(line_field, {}).values()
            vals, func = self._get_line_vals(record, line_field, fields)
            is_cont = "_CONT_" in line_field and True or False  # continue row
            is_extend = "_EXTEND_" in line_field and True or False  # extend row
            cont_set = 0
            rows_inserted = False  # flag to insert row
            for rc, field in ws.get(line_field, {}).items():
                col, row = co.split_row_col(rc)  # starting point
                # Case continue, start from the last data row
                if is_cont and not cont_set:  # only once per line_field
                    cont_set = cont_row + 1
                if is_cont:
                    row = cont_set
                    rc = "{}{}".format(col, cont_set)
                i = 0
                new_row = 0
                new_rc = False
                row_count = len(vals[field])
                # Insert rows to preserve total line
                if is_extend and not rows_inserted:
                    rows_inserted = True
                    st.insert_rows(row + 1, row_count - 1)
                # --
                for (row_val, style) in vals[field]:
                    new_row = row + i
                    new_rc = "{}{}".format(col, new_row)
                    row_val = co.adjust_cell_formula(row_val, i)
                    if row_val not in ("None", None):
                        st[new_rc] = co.str_to_number(row_val)
                    if style:
                        styles = self.env["xlsx.styles"].get_openpyxl_styles()
                        co.fill_cell_style(st[new_rc], style, styles)
                    i += 1
                # Add footer line if at least one field have sum
                f = func.get(field, False)
                if f and new_row > 0:
                    new_row += 1
                    f_rc = "{}{}".format(col, new_row)
                    st[f_rc] = "={}({}:{})".format(f, rc, new_rc)
                    styles = self.env["xlsx.styles"].get_openpyxl_styles()
                    co.fill_cell_style(st[f_rc], style, styles)
                cont_row = cont_row < new_row and new_row or cont_row
        return
    @api.model
    def export_xlsx(self, template, res_model, res_ids):
        # Render one output file per record in res_ids from the template's
        # __EXPORT__ instruction.  Returns (base64-encoded content, name);
        # multiple outputs are bundled into a single zip file.
        if template.res_model != res_model:
            raise ValidationError(_("Template's model mismatch"))
        data_dict = co.literal_eval(template.instruction.strip())
        export_dict = data_dict.get("__EXPORT__", False)
        out_name = template.name
        if not export_dict:  # If there is not __EXPORT__ formula, just export
            out_name = template.fname
            out_file = template.datas
            return (out_file, out_name)
        # Prepare temp file (from now, only xlsx file works for openpyxl)
        decoded_data = base64.decodebytes(template.datas)
        ConfParam = self.env["ir.config_parameter"].sudo()
        ptemp = ConfParam.get_param("path_temp_file") or "/tmp"
        stamp = dt.utcnow().strftime("%H%M%S%f")[:-3]
        ftemp = "{}/temp{}.xlsx".format(ptemp, stamp)
        # Start working with workbook
        records = res_model and self.env[res_model].browse(res_ids) or False
        outputs = []
        for record in records:
            # Write the template bytes to the temp file for openpyxl to read
            f = open(ftemp, "wb")
            f.write(decoded_data)
            f.seek(0)
            f.close()
            # Workbook created, temp file removed
            wb = load_workbook(ftemp)
            os.remove(ftemp)
            self._fill_workbook_data(wb, record, export_dict)
            # Return file as .xlsx
            content = BytesIO()
            wb.save(content)
            content.seek(0)  # Set index to 0, and start reading
            out_file = content.read()
            # Output file name: record name when available, else template
            # name plus a localized timestamp; slashes/spaces stripped.
            if record and "name" in record and record.name:
                out_name = record.name.replace(" ", "").replace("/", "")
            else:
                fname = out_name.replace(" ", "").replace("/", "")
                ts = fields.Datetime.context_timestamp(self, dt.now())
                out_name = "{}_{}".format(fname, ts.strftime("%Y%m%d_%H%M%S"))
            if not out_name or len(out_name) == 0:
                out_name = "noname"
            out_ext = "xlsx"
            # CSV (convert only on 1st sheet)
            if template.to_csv:
                delimiter = template.csv_delimiter
                out_file = co.csv_from_excel(out_file, delimiter, template.csv_quote)
                out_ext = template.csv_extension
            outputs.append((out_file, "{}.{}".format(out_name, out_ext)))
        # If outputs > 1 files, zip it
        if len(outputs) > 1:
            zip_buffer = BytesIO()
            with zipfile.ZipFile(
                zip_buffer, "a", zipfile.ZIP_DEFLATED, False
            ) as zip_file:
                for data, file_name in outputs:
                    zip_file.writestr(file_name, data)
            zip_buffer.seek(0)
            out_file = base64.encodebytes(zip_buffer.read())
            out_name = "files.zip"
            return (out_file, out_name)
        else:
            (out_file, out_name) = outputs[0]
            return (base64.encodebytes(out_file), out_name)
| 43.556701
| 12,675
|
4,138
|
py
|
PYTHON
|
15.0
|
# Copyright 2019 Ecosoft Co., Ltd (http://ecosoft.co.th/)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
import base64
import json
import logging
from werkzeug.urls import url_decode
from odoo import http
from odoo.http import (
content_disposition,
request,
route,
serialize_exception as _serialize_exception,
)
from odoo.tools import html_escape
from odoo.tools.safe_eval import safe_eval, time
from odoo.addons.web.controllers import main as report
_logger = logging.getLogger(__name__)
class ReportController(report.ReportController):
    """Extend the web report controller to render ``excel`` report types."""

    @route()
    def report_routes(self, reportname, docids=None, converter=None, **data):
        """Render an ``excel`` report and return it as an HTTP response.

        Any other converter is delegated to the standard controller.

        :param reportname: technical report name (ir.actions.report.report_name)
        :param docids: comma-separated record ids as a string, or None
        :param converter: requested report type
        :param data: extra request parameters (``options``, ``context``)
        """
        if converter == "excel":
            report = request.env["ir.actions.report"]._get_report_from_name(reportname)
            context = dict(request.env.context)
            if docids:
                docids = [int(i) for i in docids.split(",")]
            if data.get("options"):
                data.update(json.loads(data.pop("options")))
            if data.get("context"):
                # Ignore 'lang' here, because the context in data is the one
                # from the webclient *but* if the user explicitly wants to
                # change the lang, this mechanism overwrites it.
                data["context"] = json.loads(data["context"])
                if data["context"].get("lang"):
                    del data["context"]["lang"]
                context.update(data["context"])
            excel, report_name = report.with_context(**context)._render_excel(
                docids, data=data
            )
            # BUGFIX: base64.decodestring() was removed in Python 3.9 and
            # raises AttributeError there; decodebytes() is the supported
            # replacement (same behavior).
            excel = base64.decodebytes(excel)
            if docids:
                records = request.env[report.model].browse(docids)
                if report.print_report_name and not len(records) > 1:
                    # Evaluate the custom file name only for single records;
                    # keep the original extension (this should only be .xlsx)
                    extension = report_name.split(".")[-1:].pop()
                    report_name = safe_eval(
                        report.print_report_name, {"object": records, "time": time}
                    )
                    report_name = f"{report_name}.{extension}"
            excelhttpheaders = [
                (
                    "Content-Type",
                    "application/vnd.openxmlformats-"
                    "officedocument.spreadsheetml.sheet",
                ),
                ("Content-Length", len(excel)),
                ("Content-Disposition", content_disposition(report_name)),
            ]
            return request.make_response(excel, headers=excelhttpheaders)
        return super().report_routes(reportname, docids, converter, **data)

    @http.route()
    def report_download(self, data, context=None):
        """Handle the report client action's download request for ``excel``
        reports; other report types are delegated to the standard handler.

        :param data: JSON-encoded ``[url, report_type]`` pair
        :param context: JSON-encoded context string, or None
        """
        requestcontent = json.loads(data)
        url, report_type = requestcontent[0], requestcontent[1]
        if report_type != "excel":
            return super().report_download(data, context)
        reportname = "???"
        try:
            # URL shape: /report/excel/<reportname>[/<docids>][?<params>]
            pattern = "/report/excel/"
            reportname = url.split(pattern)[1].split("?")[0]
            docids = None
            if "/" in reportname:
                reportname, docids = reportname.split("/")
            _logger.warning(reportname)
            if docids:
                return self.report_routes(
                    reportname, docids=docids, converter="excel", context=context
                )
            # No docids: forward the querystring parameters, merging any
            # context they carry into the request context.
            data = dict(url_decode(url.split("?")[1]).items())
            if "context" in data:
                context, data_context = json.loads(context or "{}"), json.loads(
                    data.pop("context")
                )
                context = json.dumps({**context, **data_context})
            return self.report_routes(
                reportname, converter="excel", context=context, **data
            )
        except Exception as e:
            _logger.exception("Error while generating report %s", reportname)
            se = _serialize_exception(e)
            error = {"code": 200, "message": "Odoo Server Error", "data": se}
            return request.make_response(html_escape(json.dumps(error)))
| 41.38
| 4,138
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
2,353
|
py
|
PYTHON
|
15.0
|
# Meta-package for the oca-server-tools repository: installing it pulls in
# every addon distribution listed below for the Odoo 15.0 series.  The
# package version is read from the repository's VERSION.txt.
import setuptools
with open('VERSION.txt', 'r') as f:
    version = f.read().strip()
setuptools.setup(
    name="odoo-addons-oca-server-tools",
    description="Meta package for oca-server-tools Odoo addons",
    version=version,
    install_requires=[
        'odoo-addon-auditlog>=15.0dev,<15.1dev',
        'odoo-addon-auto_backup>=15.0dev,<15.1dev',
        'odoo-addon-base_changeset>=15.0dev,<15.1dev',
        'odoo-addon-base_conditional_image>=15.0dev,<15.1dev',
        'odoo-addon-base_cron_exclusion>=15.0dev,<15.1dev',
        'odoo-addon-base_exception>=15.0dev,<15.1dev',
        'odoo-addon-base_fontawesome>=15.0dev,<15.1dev',
        'odoo-addon-base_model_restrict_update>=15.0dev,<15.1dev',
        'odoo-addon-base_multi_image>=15.0dev,<15.1dev',
        'odoo-addon-base_remote>=15.0dev,<15.1dev',
        'odoo-addon-base_search_fuzzy>=15.0dev,<15.1dev',
        'odoo-addon-base_technical_user>=15.0dev,<15.1dev',
        'odoo-addon-base_time_window>=15.0dev,<15.1dev',
        'odoo-addon-base_view_inheritance_extension>=15.0dev,<15.1dev',
        'odoo-addon-database_cleanup>=15.0dev,<15.1dev',
        'odoo-addon-datetime_formatter>=15.0dev,<15.1dev',
        'odoo-addon-dbfilter_from_header>=15.0dev,<15.1dev',
        'odoo-addon-excel_import_export>=15.0dev,<15.1dev',
        'odoo-addon-excel_import_export_demo>=15.0dev,<15.1dev',
        'odoo-addon-fetchmail_incoming_log>=15.0dev,<15.1dev',
        'odoo-addon-fetchmail_incoming_log_test>=15.0dev,<15.1dev',
        'odoo-addon-fetchmail_notify_error_to_sender>=15.0dev,<15.1dev',
        'odoo-addon-html_image_url_extractor>=15.0dev,<15.1dev',
        'odoo-addon-html_text>=15.0dev,<15.1dev',
        'odoo-addon-iap_alternative_provider>=15.0dev,<15.1dev',
        'odoo-addon-letsencrypt>=15.0dev,<15.1dev',
        'odoo-addon-module_analysis>=15.0dev,<15.1dev',
        'odoo-addon-module_auto_update>=15.0dev,<15.1dev',
        'odoo-addon-module_change_auto_install>=15.0dev,<15.1dev',
        'odoo-addon-onchange_helper>=15.0dev,<15.1dev',
        'odoo-addon-scheduler_error_mailer>=15.0dev,<15.1dev',
        'odoo-addon-sentry>=15.0dev,<15.1dev',
        'odoo-addon-upgrade_analysis>=15.0dev,<15.1dev',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Framework :: Odoo',
        'Framework :: Odoo :: 15.0',
    ]
)
| 47.06
| 2,353
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
100
|
py
|
PYTHON
|
15.0
|
# Packaging shim for a single OCA Odoo addon: the setuptools-odoo plugin
# (pulled in via setup_requires) reads the addon's __manifest__.py and
# generates name, version and dependency metadata automatically.
import setuptools
setuptools.setup(
    setup_requires=['setuptools-odoo'],
    odoo_addon=True,
)
| 16.666667
| 100
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.