index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
68,273 | hvac/hvac-cli | refs/heads/master | /tests/test_status_cmd.py | import json
from hvac_cli.cmd import main
def test_status(vault_server, capsys):
    # `status --format=json` against a live server must exit 0 and report
    # the server as initialized
    assert main(['--address', vault_server['http'],
                 'status',
                 '--format=json',
                 ]) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out)['initialized'] is True
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,274 | hvac/hvac-cli | refs/heads/master | /hvac_cli/kv.py | import json
import logging
import packaging.version
from cliff.show import ShowOne
from cliff.lister import Lister
from cliff.command import Command
from hvac_cli.cli import CLI
import re
import sys
logger = logging.getLogger(__name__)
class ReadSecretVersion(Exception):
    """Raised when a version-specific read is requested from a backend
    that does not support secret versioning (KV v1)."""
class SecretVersion(Exception):
    """Raised when an operation requires KV features (metadata, versions,
    cas, ...) that the selected backend version does not provide."""
def kvcli_factory(super_args, args):
    """Return a KVv1CLI or KVv2CLI matching the KV version of the mount point.

    When --kv-version is not given, the version is autodetected by reading
    sys/mounts on the server.
    """
    cli = CLI(super_args)
    if not args.kv_version:
        try:
            mounts = cli.list_mounts()
        except Exception:
            logger.error('failed to read sys/mount to determine the KV version, '
                         'try setting --kv-version')
            raise
        path = args.mount_point + '/'
        # bug fix: an assert is stripped when python runs with -O, silently
        # skipping this validation; raise explicitly instead
        if path not in mounts:
            raise KeyError(f'path {path} is not found in mounts {mounts}')
        args.kv_version = mounts[path]['options']['version']
    if args.kv_version == '1':
        return KVv1CLI(super_args, args)
    else:
        return KVv2CLI(super_args, args)
class KVCLI(CLI):
    """Behavior shared by the KV v1 and KV v2 secret engines.

    Subclasses bind self.kv to the matching hvac accessor and implement the
    version-specific operations (read_secret, delete, ...).
    """

    def __init__(self, args, parsed_args):
        super().__init__(args)
        self.kv_version = parsed_args.kv_version
        self.mount_point = parsed_args.mount_point
        # only the put/patch/load commands define --rewrite-key
        self.rewrite_key = getattr(parsed_args, 'rewrite_key', None)
        self.parsed_args = parsed_args
        # sanitize() needs the server version to decide which workarounds apply
        self.status = self.vault.sys.read_health_status(method='GET')

    @staticmethod
    def sanitize(path, status, args):
        """Return *path* rewritten so it is usable from the CLI and the web UI.

        Every rewrite that modifies the path is logged with its reason.
        """
        def log_sanitation(path, fun):
            new_path, reason = fun(path)
            if new_path != path:
                logger.info(f'{path} replaced by {new_path} to {reason}')
            return new_path

        def user_friendly(path):
            """replace control characters and DEL because they would be
            difficult for the user to type in the CLI or the web UI.
            Also replace % because it is used in URLs to express %20 etc.
            """
            return re.sub(r'[\x00-\x1f%\x7f]', '_', path), user_friendly.__doc__

        path = log_sanitation(path, user_friendly)
        if not args.no_workaround_6282:
            def bug_6282(path):
                "workaround https://github.com/hashicorp/vault/issues/6282"
                # NOTE(review): the substitution is applied regardless of the
                # server version; the >= 1.1.0 branch only logs a hint that
                # the workaround can be disabled — confirm this is intended
                if packaging.version.parse(status['version']) >= packaging.version.parse('1.1.0'):
                    logger.info("Applying workaround for the bug "
                                "https://github.com/hashicorp/vault/issues/6282")
                    logger.info("The bug 6282 was fixed in vault 1.1.0 and the workaround "
                                "can be disabled with --no-workaround-6282")
                return re.sub(r'[#*+(\\[]', '_', path), bug_6282.__doc__
            path = log_sanitation(path, bug_6282)

        def bug_6213(path):
            "workaround https://github.com/hashicorp/vault/issues/6213"
            path = re.sub(r'\s+/', '/', path)
            path = re.sub(r'\s+$', '', path)
            return path, bug_6213.__doc__

        path = log_sanitation(path, bug_6213)
        return path

    def list_secrets(self, path):
        """Return the list of keys directly under *path*."""
        return self.kv.list_secrets(path, mount_point=self.mount_point)['data']['keys']

    def dump(self):
        """Write every secret to stdout as one JSON object keyed by path."""
        r = {}
        self._dump(r, '')
        json.dump(r, sys.stdout)

    def _dump(self, r, prefix):
        # keys ending with '/' are folders: recurse; otherwise collect the leaf
        keys = self.list_secrets(prefix)
        for key in keys:
            path = prefix + key
            if path.endswith('/'):
                self._dump(r, path)
            else:
                r[path] = self.read_secret(path, version=None)

    def load(self, filepath):
        """Create or update every secret found in the JSON file *filepath*."""
        # bug fix: json.load(open(filepath)) leaked the file handle
        with open(filepath) as f:
            secrets = json.load(f)
        for k, v in secrets.items():
            self.create_or_update_secret(k, v, cas=None)

    def erase(self, prefix=''):
        """Recursively and permanently delete every secret under *prefix*."""
        keys = self.list_secrets(prefix)
        for key in keys:
            path = prefix + key
            if path.endswith('/'):
                self.erase(path)
            else:
                logger.debug(f'erase {path}')
                self.delete_metadata_and_all_versions(path)
class KVv1CLI(KVCLI):
    """KV version 1 backend: unversioned secrets, no metadata support."""

    def __init__(self, super_args, args):
        super().__init__(super_args, args)
        self.kv = self.vault.secrets.kv.v1

    def delete_metadata_and_all_versions(self, path):
        # KV v1 has no metadata nor versions: a plain delete is equivalent
        self.delete(path, versions=None)

    def read_secret_metadata(self, path):
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support metadata')

    def update_metadata(self, path, max_versions, cas_required):
        # consistency fix: parameter renamed max_version -> max_versions to
        # match KVv2CLI.update_metadata (callers pass positionally)
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support metadata')

    def create_or_update_secret(self, path, entry, cas):
        """Write *entry* at *path*; check-and-set is a KV v2 feature."""
        if cas:
            raise SecretVersion(
                f'{self.mount_point} is KV {self.kv_version} and does not support --cas')
        if self.rewrite_key:
            path = self.sanitize(path, self.status, self.parsed_args)
        logger.info(f'put {path} {list(entry.keys())}')
        if not self.args.dry_run:
            self.kv.create_or_update_secret(path, entry, mount_point=self.mount_point)
        return path

    def patch(self, path, entry):
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support patch')

    def read_secret(self, path, version):
        """Return the secret at *path*; *version* must be falsy on KV v1."""
        if version:
            raise ReadSecretVersion(
                f'{self.mount_point} is KV {self.kv_version} and does not support --from-version')
        return self.kv.read_secret(path, mount_point=self.mount_point)['data']

    def destroy(self, path, versions):
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support destroy')

    def delete(self, path, versions):
        """Permanently delete *path*; --versions is a KV v2 concept."""
        if versions:
            raise SecretVersion(
                f'{self.mount_point} is KV {self.kv_version} and does not support --versions')
        logger.info(f'permanently delete {path}')
        if not self.args.dry_run:
            self.kv.delete_secret(path, mount_point=self.mount_point)
        return 0

    def undelete(self, path, versions):
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support undelete')

    def rollback(self, path, version):
        raise SecretVersion(
            f'{self.mount_point} is KV {self.kv_version} and does not support rollback')
class KVv2CLI(KVCLI):
    """KV version 2 backend: versioned secrets with metadata support."""

    def __init__(self, super_args, args):
        super().__init__(super_args, args)
        self.kv = self.vault.secrets.kv.v2

    def delete_metadata_and_all_versions(self, path):
        """Remove the secret entirely: metadata and every version."""
        logger.info(f'permanently delete metadata and all versions for {path}')
        if self.args.dry_run:
            return 0
        self.kv.delete_metadata_and_all_versions(path, mount_point=self.mount_point)
        return 0

    def read_secret_metadata(self, path):
        """Return the metadata stored for *path*; log and re-raise on failure."""
        try:
            metadata = self.kv.read_secret_metadata(path, mount_point=self.mount_point)
        except Exception:
            logger.error(f'failed to read metadata for {path}')
            raise
        return metadata

    def update_metadata(self, path, max_versions, cas_required):
        """Write metadata for *path* and return the metadata now stored."""
        logger.info(f'set metadata for {path}')
        if not self.args.dry_run:
            self.kv.update_metadata(path, max_versions, cas_required,
                                    mount_point=self.mount_point)
        return self.read_secret_metadata(path)

    def create_or_update_secret(self, path, entry, cas):
        """Write *entry* at *path* (optionally check-and-set); return the path used."""
        if self.rewrite_key:
            path = self.sanitize(path, self.status, self.parsed_args)
        logger.info(f'put {path} {list(entry.keys())}')
        if not self.args.dry_run:
            self.kv.create_or_update_secret(
                path, entry, cas=cas, mount_point=self.mount_point)
        return path

    def patch(self, path, entry):
        """Merge *entry* into the existing secret at *path*; return the path used."""
        if self.rewrite_key:
            path = self.sanitize(path, self.status, self.parsed_args)
        logger.info(f'patch {path} {list(entry.keys())}')
        if not self.args.dry_run:
            self.kv.patch(path, entry, mount_point=self.mount_point)
        return path

    def read_secret(self, path, version):
        """Return the secret data at *path*, optionally at a given *version*."""
        response = self.kv.read_secret_version(
            path, version=version, mount_point=self.mount_point)
        return response['data']['data']

    def destroy(self, path, versions):
        """Irreversibly erase the listed *versions* of *path*."""
        logger.info(f'permanently delete (i.e. destroy) {path} at versions {versions}')
        if self.args.dry_run:
            return 0
        self.kv.destroy_secret_versions(path, versions, mount_point=self.mount_point)
        return 0

    def delete(self, path, versions):
        """Soft-delete the given *versions* of *path*, or the latest when none given."""
        if not versions:
            logger.info(f'delete (can undelete later) the most recent version of {path}')
            if not self.args.dry_run:
                self.kv.delete_latest_version_of_secret(path, mount_point=self.mount_point)
            return 0
        logger.info(f'delete (can undelete later) {path} at versions {versions}')
        if not self.args.dry_run:
            self.kv.delete_secret_versions(
                path, versions=versions, mount_point=self.mount_point)
        return 0

    def undelete(self, path, versions):
        """Restore previously soft-deleted *versions* of *path*."""
        logger.info(f'undelete {path} at versions {versions}')
        if not self.args.dry_run:
            self.kv.undelete_secret_versions(
                path, versions=versions, mount_point=self.mount_point)
        return 0

    def rollback(self, path, version):
        """Write the data of *version* back as a new, current version of *path*."""
        try:
            entry = self.read_secret(path, version=version)
        except Exception:
            logger.error(f'failed to read_secret {path} at version {version}')
            raise
        logger.info(f'rollback {path} from version {version}')
        if not self.args.dry_run:
            self.kv.create_or_update_secret(path, entry, mount_point=self.mount_point)
        return 0
class KvCommand(object):
    """Mixin providing the argparse options shared by the kv subcommands."""

    @staticmethod
    def set_rewrite_key(parser):
        """Register the key-rewriting options on *parser*."""
        parser.add_argument(
            '--rewrite-key',
            action='store_true',
            help=('Rewrite the key to avoid UI problems and print a warning. '
                  'Workaround https://github.com/hashicorp/vault/issues/6282; '
                  'https://github.com/hashicorp/vault/issues/6213; replace '
                  'control characters and percent with an underscore'))
        parser.add_argument(
            '--no-workaround-6282',
            action='store_true',
            help='Do not workaround bug https://github.com/hashicorp/vault/issues/6282')

    @staticmethod
    def set_common_options(parser):
        """Register the mount point and KV version options on *parser*."""
        parser.add_argument(
            '--mount-point',
            default='secret',
            help='KV path mount point, as found in vault read /sys/mounts')
        parser.add_argument(
            '--kv-version',
            choices=['1', '2'],
            required=False,
            help=('Force the Vault KV backend version (1 or 2). '
                  'Autodetect from `vault read /sys/mounts` if not set.'))
class Get(KvCommand, ShowOne):
    """
    Retrieves the value from Vault key-value store at the given key name
    If no key exists with that name, an error is returned. If a key exists with that
    name but has no data, nothing is returned.
    $ hvac-cli kv get secret/foo
    To view the given key name at a specific version in time, specify the "--from-version"
    flag:
    $ hvac-cli kv get --from-version=1 secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv get`;
    # editing it changes the CLI output.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--from-version',
            help='If passed, the value at the version number will be returned. (KvV2 only)',
        )
        parser.add_argument(
            'key',
            help='key to fetch',
        )
        return parser

    def take_action(self, parsed_args):
        # build the right KV backend, then render the secret as columns
        kv = kvcli_factory(self.app_args, parsed_args)
        return self.dict2columns(kv.read_secret(parsed_args.key, parsed_args.from_version))
class Delete(KvCommand, Command):
    """
    Deletes the data for the provided version and path in the key-value store
    The versioned data will not be fully removed, but marked as deleted and will no
    longer be returned in normal get requests.
    To delete the latest version of the key "foo":
    $ hvac-cli kv delete secret/foo
    To delete version 3 of key foo:
    $ hvac-cli kv delete --versions=3 secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv delete`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--versions',
            help='The comma separate list of version numbers to delete',
        )
        parser.add_argument(
            'key',
            help='key to delete',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        # --versions is a comma separated string; None means "latest version"
        if parsed_args.versions:
            versions = parsed_args.versions.split(',')
        else:
            versions = None
        return kv.delete(parsed_args.key, versions)
class Destroy(KvCommand, Command):
    """
    Permanently removes the specified versions data from the key-value store
    If no key exists at the path, no action is taken.
    To destroy version 3 of key foo:
    $ hvac-cli kv destroy --versions=3 secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv destroy`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--versions',
            required=True,
            help='The comma separate list of version numbers to destroy',
        )
        parser.add_argument(
            'key',
            help='key to destroy',
        )
        return parser

    def take_action(self, parsed_args):
        # --versions is required: destroy is always version-scoped
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.destroy(parsed_args.key, parsed_args.versions.split(','))
class Undelete(KvCommand, Command):
    """
    Undeletes the data for the provided version and path in the key-value store
    This restores the data, allowing it to be returned on get requests.
    To undelete version 3 of key "foo":
    $ hvac-cli kv undelete --versions=3 secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv undelete`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--versions',
            required=True,
            help='The comma separate list of version numbers to undelete',
        )
        parser.add_argument(
            'key',
            help='key to undelete',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.undelete(parsed_args.key, parsed_args.versions.split(','))
class Rollback(KvCommand, Command):
    """
    Restores a given previous version to the current version at the given path
    The value is written as a new version; for instance, if the current version
    is 5 and the rollback version is 2, the data from version 2 will become
    version 6.
    $ hvac-cli kv rollback --from-version=2 secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv rollback`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--from-version',
            required=True,
            help='The version number that should be made current again',
        )
        parser.add_argument(
            'key',
            help='key to rollback',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.rollback(parsed_args.key, parsed_args.from_version)
class PutOrPatch(KvCommand, ShowOne):
    """
    Writes the data to the given path in the key-value store
    The data can be of any type.
    $ hvac-cli kv put secret/foo bar=baz
    The data can also be consumed from a JSON file on disk. For example:
    $ hvac-cli kv put secret/foo --file=/path/data.json
    """
    # NOTE: the class docstring doubles as the cliff help text; subclasses
    # (Put, Patch) supply kv_action().
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        self.set_rewrite_key(parser)
        parser.add_argument(
            '--file',
            help='A JSON object containing the secrets',
        )
        parser.add_argument(
            'key',
            help='key to set',
        )
        parser.add_argument(
            'kvs',
            nargs='*',
            help='k=v secrets that can be repeated. They are ignored if --file is set.',
        )
        return parser

    def parse_kvs(self, kvs):
        """Parse ['k=v', ...] into a dict."""
        r = {}
        for kv in kvs:
            # bug fix: split on the first '=' only so values may themselves
            # contain '=' (e.g. url=https://host/?a=b)
            k, v = kv.split('=', 1)
            r[k] = v
        return r

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        if parsed_args.file:
            # bug fix: json.load(open(...)) leaked the file handle
            with open(parsed_args.file) as f:
                secrets = json.load(f)
        else:
            secrets = self.parse_kvs(parsed_args.kvs)
        path = self.kv_action(kv, parsed_args, secrets)
        # in dry-run mode nothing was written: show an empty object instead
        # of reading back a secret that may not exist
        if kv.args.dry_run:
            return self.dict2columns({})
        else:
            return self.dict2columns(kv.read_secret(path, version=None))
class Put(PutOrPatch):
    """
    Writes the data to the given path in the key-value store
    The data can be of any type.
    $ hvac-cli kv put secret/foo bar=baz
    The data can also be consumed from a JSON file on disk. For example:
    $ hvac-cli kv put secret/foo --file=/path/data.json
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv put`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument(
            '--cas',
            help=('Specifies to use a Check-And-Set operation. If not set the write will be '
                  'allowed. If set to 0 a write will only be allowed if the key doesn’t '
                  'exist. If the index is non-zero the write will only be allowed if '
                  'the key’s current version matches the version specified in the cas '
                  'parameter. (KvV2 only)'),
        )
        return parser

    def kv_action(self, kv, parsed_args, secrets):
        # delegates to the backend; KVv1CLI raises SecretVersion when --cas is set
        return kv.create_or_update_secret(parsed_args.key, secrets, cas=parsed_args.cas)
class Patch(PutOrPatch):
    """
    Read the data from the given path and merge it with the data provided
    If the existing data is a dictionary named OLD and the data provided
    is a dictionary named NEW, the data stored is the merge of OLD and NEW.
    If a key exists in both NEW and OLD, the one from NEW takes precedence.
    $ hvac-cli kv patch secret/foo bar=baz
    The data can also be consumed from a JSON file on disk. For example:
    $ hvac-cli kv patch secret/foo --file=/path/data.json
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv patch`.
    def kv_action(self, kv, parsed_args, secrets):
        # KVv1CLI raises SecretVersion: patch is a KV v2 operation
        return kv.patch(parsed_args.key, secrets)
class List(KvCommand, Lister):
    """
    Lists data from Vault key-value store at the given path.
    List values under the "my-app" folder of the key-value store:
    $ hvac-cli kv list secret/my-app/
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv list`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            'path',
            help='path to list',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        # Lister expects (column names, rows): one single-cell row per key
        r = [[x] for x in kv.list_secrets(parsed_args.path)]
        return (['Keys'], r)
class Dump(KvCommand, Command):
    """Dump all secrets as a JSON object where the keys are the path
    and the values are the secrets. For instance::
    {
    "a/secret/path": { "key1": "value1" },
    "another/secret/path": { "key2": "value2" }
    }
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv dump`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        return parser

    def take_action(self, parsed_args):
        # writes the JSON object to stdout
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.dump()
class Load(KvCommand, Command):
    """Load secrets from a JSON object for which the key is the path
    and the value is the secret. For instance::
    {
    "a/secret/path": { "key1": "value1" },
    "another/secret/path": { "key2": "value2" }
    }
    """
    # NOTE: the class docstring doubles as the cliff help text for `kv load`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        self.set_rewrite_key(parser)
        parser.add_argument(
            'path',
            help='path containing secrets in JSON',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.load(parsed_args.path)
class Erase(KvCommand, Command):
    "Erase all secrets"
    # NOTE: the one-line docstring is the cliff help text for `kv erase`.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        return parser

    def take_action(self, parsed_args):
        # recursively removes every secret under the mount point
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.erase()
class MetadataDelete(KvCommand, Command):
    """
    Deletes all versions and metadata for the provided key
    $ hvac-cli kv metadata delete secret/foo
    """
    # NOTE: the class docstring doubles as the cliff help text.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            'key',
            help='key to delete',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        return kv.delete_metadata_and_all_versions(parsed_args.key)
class MetadataGet(KvCommand, ShowOne):
    """
    Retrieves the metadata from Vault key-value store at the given key name
    If no key exists with that name, an error is returned.
    $ hvac-cli kv metadata get secret/foo
    This command only works with KVv2
    """
    # NOTE: the class docstring doubles as the cliff help text.
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            'key',
            help='get metadata for this key',
        )
        return parser

    def take_action(self, parsed_args):
        # KVv1CLI raises SecretVersion: metadata is a KV v2 concept
        kv = kvcli_factory(self.app_args, parsed_args)
        return self.dict2columns(kv.read_secret_metadata(parsed_args.key))
class MetadataPut(KvCommand, ShowOne):
    """
    Update the metadata associated with an existing key
    Set a max versions setting on the key:
    $ hvac-cli kv metadata put --max-versions=5 secret/foo
    Require Check-and-Set for this key:
    $ hvac-cli kv metadata put --cas-required=true secret/foo
    This command only works with KVv2
    """
    # NOTE: the class docstring doubles as the cliff help text.
    @staticmethod
    def _parse_bool(value):
        """argparse type callable accepting true/false spellings.

        Bug fix: the previous type=bool treated any non-empty string —
        including 'false' — as True, so --cas-required=false enabled
        check-and-set instead of disabling it.
        """
        lowered = value.lower()
        if lowered in ('true', '1', 'yes'):
            return True
        if lowered in ('false', '0', 'no'):
            return False
        # argparse turns a ValueError from a type callable into a clean
        # "invalid value" error message
        raise ValueError(f'{value} is not a boolean')

    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        self.set_common_options(parser)
        parser.add_argument(
            '--cas-required',
            type=self._parse_bool,
            default=False,
            help=('If true the key will require the cas parameter to be set on all write '
                  'requests. If false, the backend’s configuration will be used. The '
                  'default is false.')
        )
        parser.add_argument(
            '--max-versions',
            type=int,
            help=('The number of versions to keep. If not set, the backend’s configured '
                  'max version is used.')
        )
        parser.add_argument(
            'key',
            help='set metadata for this key',
        )
        return parser

    def take_action(self, parsed_args):
        kv = kvcli_factory(self.app_args, parsed_args)
        r = kv.update_metadata(parsed_args.key,
                               parsed_args.max_versions,
                               parsed_args.cas_required)
        return self.dict2columns(r)
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,275 | hvac/hvac-cli | refs/heads/master | /tests/test_kv_cmd.py | import json
import textwrap
from hvac_cli.cmd import main
def test_put_get(vault_server, capsys):
    # put two pairs, then check both appear in the tabular `kv get` output
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', key, 'a=b', 'c=d']) == 0
    capsys.readouterr()  # discard the put output
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', key]) == 0
    captured = capsys.readouterr()
    assert '| a | b |' in captured.out
    assert '| c | d |' in captured.out


def test_get_from_version(vault_server, capsys):
    # write two versions of the same key, then read back version 1
    key = 'KEY'
    for i in ('1', '2'):
        assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                     'kv', 'put', key, f'a={i}']) == 0
    capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', '--format=json', '--from-version', '1', key]) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == {'a': '1'}
def test_put_rewrite_key(vault_server, capsys):
    # --rewrite-key sanitizes 'A / B' and logs the replacement on stderr
    key = 'A / B'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', '--format=json', '--rewrite-key', key, 'a=b', 'c=d']) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == {'a': 'b', 'c': 'd'}
    assert 'replaced by' in captured.err


def test_put_dry_run(vault_server, capsys):
    # with --dry-run nothing is written and the JSON output is an empty object
    key = 'A/B'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 '--dry-run',
                 'kv', 'put', '--format=json', key, 'a=b', 'c=d']) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == {}
def test_patch(vault_server, capsys):
    # patch merges with the existing entry; the new value wins on conflicts
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', key, 'a=b', 'c=d']) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'patch', key, 'a=B', 'e=f']) == 0
    capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == {'a': 'B', 'c': 'd', 'e': 'f'}


def test_put_file(vault_server, capsys):
    # --file takes precedence over the k=v positional arguments
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', '--format=json', key, 'E=F', '--file=tests/secrets.json']) == 0
    captured = capsys.readouterr()
    print(captured.out)
    assert json.loads(captured.out) == {'DIR/SECRET': {'a': 'b'}}
def test_list(vault_server, capsys):
    # `kv list DIR` renders the keys under DIR as an ASCII table
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', 'DIR/SECRET', 'a=b']) == 0
    capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'list', 'DIR']) == 0
    captured = capsys.readouterr()
    expected = textwrap.dedent("""\
    +--------+
    | Keys |
    +--------+
    | SECRET |
    +--------+
    """)
    assert expected in captured.out


def test_load_dump(vault_server, capsys):
    # round-trip: `kv load` then `kv dump` reproduces the JSON file verbatim
    secrets_file = 'tests/secrets.json'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'load', secrets_file]) == 0
    capsys.readouterr()
    secrets = json.load(open('tests/secrets.json'))
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'dump']) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == secrets
def test_metadata_delete(vault_server):
    # after `metadata delete` the key is gone: `kv get` exits with 1
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', key, 'a=b']) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', key]) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'delete', key]) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', key]) == 1


def test_metadata_get_put(vault_server, capsys):
    # defaults are cas_required=False / max_versions=0; `metadata put` overrides both
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', key, 'a=b']) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    metadata = json.loads(captured.out)
    assert metadata['data']['cas_required'] is False
    assert metadata['data']['max_versions'] == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'put', '--format=json',
                 '--cas-required=true', '--max-versions=5', key]) == 0
    captured = capsys.readouterr()
    metadata = json.loads(captured.out)
    assert metadata['data']['cas_required'] is True
    assert metadata['data']['max_versions'] == 5


def test_erase(vault_server):
    # erase removes every key: a subsequent `kv get` exits with 1
    key = 'KEY'
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'put', key, 'a=b']) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', key]) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'erase']) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', key]) == 1
def test_delete(vault_server, capsys):
    # write two versions, soft-delete both, verify, then undelete and verify
    key = 'KEY'
    for i in ('1', '2'):
        assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                     'kv', 'put', key, 'a=b']) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    versions = json.loads(captured.out)['data']['versions']
    for i in ('1', '2'):
        # freshly written: neither deleted nor destroyed
        assert not versions[i]['deletion_time']
        assert not versions[i]['destroyed']
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'delete', key]) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'delete', '--versions=1,2', key]) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    versions = json.loads(captured.out)['data']['versions']
    for i in ('1', '2'):
        # soft-deleted: deletion_time set but data still recoverable
        assert versions[i]['deletion_time']
        assert not versions[i]['destroyed']
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'undelete', '--versions=1,2', key]) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    versions = json.loads(captured.out)['data']['versions']
    for i in ('1', '2'):
        # undeleted: back to the pristine state
        assert not versions[i]['deletion_time']
        assert not versions[i]['destroyed']
def test_destroy(vault_server, capsys):
    # destroyed versions get destroyed=True and keep an empty deletion_time
    key = 'KEY'
    for i in ('1', '2'):
        assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                     'kv', 'put', key, 'a=b']) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    versions = json.loads(captured.out)['data']['versions']
    for i in ('1', '2'):
        assert not versions[i]['deletion_time']
        assert not versions[i]['destroyed']
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'destroy', '--versions=1,2', key]) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'metadata', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    versions = json.loads(captured.out)['data']['versions']
    for i in ('1', '2'):
        assert not versions[i]['deletion_time']
        assert versions[i]['destroyed']


def test_rollback(vault_server, capsys):
    # rollback to version 1 makes its data the current version again
    key = 'KEY'
    for i in ('1', '2'):
        assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                     'kv', 'put', key, f'a={i}']) == 0
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'rollback', '--from-version=1', key]) == 0
    captured = capsys.readouterr()
    assert main(['--token', vault_server['token'], '--address', vault_server['http'],
                 'kv', 'get', '--format=json', key]) == 0
    captured = capsys.readouterr()
    assert json.loads(captured.out) == {'a': '1'}
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,276 | hvac/hvac-cli | refs/heads/master | /tests/conftest.py | import hvac
import logging
import os
import pytest
import requests
import sh
import time
@pytest.fixture(params=["1.0.3", "1.1.2"])
def vault_server(tmpdir, request):
    # Start a disposable Vault server in docker and yield its connection
    # details. Parametrized on the Vault version so every test runs against
    # both a pre-1.1.0 and a post-1.1.0 server.
    tmppath = str(tmpdir)
    # self-signed certificate used by the TLS listener on port 8300
    # NOTE(review): open(...).write(...) leaves the handle to be closed by GC;
    # consider a with-statement
    opensslconfig = tmppath + '/opensslconfig'
    open(opensslconfig, 'w').write("""
    [ req ]
    default_bits = 2048
    default_keyfile = keyfile.pem
    distinguished_name = req_distinguished_name
    attributes = req_attributes
    prompt = no
    output_password = mypass
    [ req_distinguished_name ]
    C = GB
    ST = Test State or Province
    L = Test Locality
    O = Organization Name
    OU = Organizational Unit Name
    CN = 127.0.0.1
    emailAddress = test@email.address
    [ req_attributes ]
    challengePassword = A challenge password
    """)
    sh.openssl.req(
        '-config', opensslconfig,
        '-nodes', '-new', '-x509', '-keyout', 'server.key', '-out', 'server.crt',
        _cwd=tmppath)
    # the key must be readable by the vault process inside the container
    os.chmod(tmppath + '/server.key', 0o644)
    # extra TLS listener requiring client certificates, used by test_cli.py
    config = tmppath + '/config.hcl'
    open(config, 'w').write("""
    listener tcp {
    address = "0.0.0.0:8300"
    tls_cert_file = "/etc/test_ssl/server.crt"
    tls_key_file = "/etc/test_ssl/server.key"
    tls_client_ca_file = "/etc/test_ssl/server.crt"
    tls_require_and_verify_client_cert = true
    }
    """)
    token = 'mytoken'
    container = 'test-hvac-cli'
    # remove any leftover container from a previous, interrupted run
    sh.docker('rm', '-f', container, _ok_code=[1, 0])
    sh.docker('run', '-e', f'VAULT_DEV_ROOT_TOKEN_ID={token}',
              '-p', '8200:8200',
              '-p', '8300:8300',
              '-v', f'{config}:/vault/config/config.hcl',
              '-v', f'{tmppath}:/etc/test_ssl',
              '-d',
              '--rm', '--cap-add=IPC_LOCK', f'--name={container}', f'vault:{request.param}')
    crt = tmppath + '/server.crt'
    key = tmppath + '/server.key'
    client = hvac.Client(
        url='http://127.0.0.1:8200', token=token, cert=(crt, key), verify=False
    )
    # poll for up to 60s until the server accepts connections
    for _ in range(60):
        try:
            client.sys.read_health_status()
            break
        except requests.exceptions.ConnectionError:
            time.sleep(1)
    # final call outside the loop: raises if the server never came up
    client.sys.read_health_status()
    yield {
        'token': token,
        'http': 'http://127.0.0.1:8200',
        'https': 'https://127.0.0.1:8300',
        'crt': crt,
        'key': key,
    }
    # reduce the sh verbosity so it does not try to read on file
    # descriptors that may have been closed by the capsys fixture
    logging.getLogger('sh').setLevel(logging.ERROR)
    sh.docker('rm', '-f', container, _ok_code=[1, 0])
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,277 | hvac/hvac-cli | refs/heads/master | /hvac_cli/status.py | import logging
from cliff.show import ShowOne
from hvac_cli.cli import CLI
logger = logging.getLogger(__name__)
class StatusCLI(CLI):
    """Thin wrapper exposing the Vault health endpoint."""

    def status(self):
        """Return the server health report."""
        health = self.vault.sys.read_health_status(method='GET')
        return health
class Status(ShowOne):
    """
    Prints the current state of Vault including whether it is sealed
    and if HA mode is enabled.
    This command prints regardless of whether the Vault is sealed.
    $ hvac-cli status
    """
    # NOTE: the class docstring doubles as the cliff help text for `status`.
    def take_action(self, parsed_args):
        # bind a CLI to the global arguments and render the health report
        status = StatusCLI(self.app_args)
        return self.dict2columns(status.status())
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,278 | hvac/hvac-cli | refs/heads/master | /tests/test_cli.py | from hvac_cli.cli import CLI
import mock
import pytest
import requests
def test_open(vault_server):
    """Smoke test: constructing a CLI against a live server must not raise."""
    cli_args = mock.MagicMock()
    cli_args.address = vault_server['http']
    cli_args.token = vault_server['token']
    CLI(cli_args)
def test_client_cert(vault_server):
    """Exercise TLS client-certificate handling of the CLI.

    Verifies the health check raises SSLError when the client certificate
    or the CA is missing, and succeeds when both are supplied (or when
    verification is explicitly skipped).
    """
    args = mock.MagicMock()
    args.address = vault_server['https']
    args.token = vault_server['token']
    # FAILURE with missing client certificate
    with pytest.raises(requests.exceptions.SSLError):
        args.tls_skip_verify = False
        args.ca_cert = vault_server['crt']
        args.client_cert = None
        args.client_key = None
        CLI(args).vault.sys.read_health_status()
    # FAILURE with missing CA
    with pytest.raises(requests.exceptions.SSLError):
        args.tls_skip_verify = False
        args.ca_cert = None
        args.client_cert = vault_server['crt']
        args.client_key = vault_server['key']
        CLI(args).vault.sys.read_health_status()
    # SUCCESS with CA and client certificate provided
    args.tls_skip_verify = False
    args.ca_cert = vault_server['crt']
    args.client_cert = vault_server['crt']
    args.client_key = vault_server['key']
    # BUG FIX: these comparisons were bare expressions and never asserted,
    # so the "success" paths could silently pass while failing.
    assert CLI(args).vault.sys.read_health_status().status_code == 200
    # SUCCESS with CA missing but tls_skip_verify True and client certificate provided
    args.tls_skip_verify = True
    args.ca_cert = None
    args.client_cert = vault_server['crt']
    args.client_key = vault_server['key']
    assert CLI(args).vault.sys.read_health_status().status_code == 200
| {"/tests/test_kv.py": ["/hvac_cli/kv.py"], "/hvac_cli/cmd.py": ["/hvac_cli/version.py"], "/tests/test_status_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/kv.py": ["/hvac_cli/cli.py"], "/tests/test_kv_cmd.py": ["/hvac_cli/cmd.py"], "/hvac_cli/status.py": ["/hvac_cli/cli.py"], "/tests/test_cli.py": ["/hvac_cli/cli.py"]} |
68,280 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/models.py | from django.db import models
class BaseModel(models.Model):
    """Abstract base adding creation/update timestamps to every model."""

    # Set once, when the row is first inserted.
    date_created = models.DateTimeField(auto_now_add=True)
    # Refreshed on every save().
    date_updated = models.DateTimeField(auto_now=True)

    class Meta:
        abstract = True
class Course(BaseModel):
    """A course offered by the academy ("Curso")."""

    description = models.TextField()
    duration = models.DurationField()
    holder_image = models.ImageField(blank=True)  # optional cover image
    name = models.CharField(max_length=255)

    class Meta:
        verbose_name = "Curso"
        verbose_name_plural = "Cursos"

    def __str__(self):
        # Human-readable identity; also embedded in Enrollment.__str__.
        return self.name
class Student(BaseModel):
    """A student of the academy ("Aluno")."""

    avatar = models.ImageField(blank=True)  # optional profile image
    name = models.CharField(max_length=255)
    nickname = models.CharField(max_length=25)
    phone = models.CharField(max_length=12)

    class Meta:
        verbose_name = "Aluno"
        verbose_name_plural = "Alunos"

    def __str__(self):
        # Human-readable identity; also embedded in Enrollment.__str__.
        return self.name
class Enrollment(BaseModel):
    """Links a Student to a Course with its status and score ("Matricula")."""

    # Two-character status codes stored in the `status` column.
    ANDAMENTO = "AN"  # in progress
    APROVADO = "AP"   # passed
    REPROVADO = "RE"  # failed
    STATUS_CHOICES = [
        (ANDAMENTO, "Andamento"),
        (APROVADO, "Aprovado"),
        (REPROVADO, "Reprovado"),
    ]

    # PROTECT: a course with enrollments cannot be deleted.
    course = models.ForeignKey(
        Course, related_name="matriculas", on_delete=models.PROTECT
    )
    date_close = models.DateField(blank=True, null=True)  # unset while in progress
    date_enroll = models.DateField(auto_now_add=True)
    # One decimal place, two digits total: largest representable score is 9.9.
    score = models.DecimalField(blank=True, decimal_places=1, max_digits=2, null=True)
    status = models.CharField(
        max_length=2,
        choices=STATUS_CHOICES,
        default=ANDAMENTO,
    )
    # PROTECT: a student with enrollments cannot be deleted.
    student = models.ForeignKey(
        Student, related_name="matriculas", on_delete=models.PROTECT
    )

    class Meta:
        verbose_name = "Matricula"
        verbose_name_plural = "Matriculas"

    def __str__(self):
        return f"{self.student.name}/{self.course.name}/{self.status}"
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,281 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/urls.py | from django.urls import path
from .views import CourseAPIView
from .views import CoursesAPIView
from .views import EnrollmentAPIView
from .views import EnrollmentsAPIView
from .views import StudentAPIView
from .views import StudentsAPIView
# REST routes: list/create at each collection path, retrieve/update/delete
# at the corresponding <pk> detail path.
urlpatterns = [
    path("courses/", CoursesAPIView.as_view(), name="courses"),
    path("courses/<int:pk>/", CourseAPIView.as_view(), name="course"),
    path("enrollments/", EnrollmentsAPIView.as_view(), name="enrollments"),
    path("enrollments/<int:pk>/", EnrollmentAPIView.as_view(), name="enrollment"),
    path("students/", StudentsAPIView.as_view(), name="students"),
    path("students/<int:pk>/", StudentAPIView.as_view(), name="student"),
]
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,282 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/apps.py | from django.apps import AppConfig
class ECampusConfig(AppConfig):
    """Django application configuration for the e_campus app."""

    default_auto_field = "django.db.models.BigAutoField"
    name = "e_campus"
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,283 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/migrations/0002_auto_20211003_2258.py | # Generated by Django 3.2.7 on 2021-10-04 01:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.2.7) — do not edit by hand.

    Alters Course.description to a plain TextField and re-declares the two
    image fields with an explicit empty upload_to.
    """

    dependencies = [
        ("e_campus", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="course",
            name="description",
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name="course",
            name="holder_image",
            field=models.ImageField(blank=True, upload_to=""),
        ),
        migrations.AlterField(
            model_name="student",
            name="avatar",
            field=models.ImageField(blank=True, upload_to=""),
        ),
    ]
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,284 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/admin.py | from django.contrib import admin
from .models import Course
from .models import Enrollment
from .models import Student
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
    """Admin list view for Course: all model fields plus the timestamps."""

    list_display = (
        "description",
        "duration",
        "holder_image",
        "name",
        "date_created",
        "date_updated",
    )
@admin.register(Enrollment)
class EnrollmentAdmin(admin.ModelAdmin):
    """Admin list view for Enrollment: all model fields plus the timestamps."""

    list_display = (
        "course",
        "date_close",
        "date_enroll",
        "score",
        "status",
        "student",
        "date_created",
        "date_updated",
    )
@admin.register(Student)
class StudentAdmin(admin.ModelAdmin):
    """Admin list view for Student: all model fields plus the timestamps."""

    list_display = (
        "avatar",
        "name",
        "nickname",
        "phone",
        "date_created",
        "date_updated",
    )
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,285 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/tests/test_models.py | from datetime import timedelta
from django.test import TestCase
from e_campus.models import Course
from e_campus.models import Enrollment
from e_campus.models import Student
class TestCourse(TestCase):
    """Creation and string representation of Course."""

    def setUp(self):
        self.course = Course.objects.create(
            description="Mock", duration=timedelta(hours=12), name="Foo"
        )
        self.course_str = "Foo"

    def test_course_creation(self):
        self.assertIsInstance(self.course, Course)
        # Idiom: .count() queries directly; str() instead of calling __str__().
        self.assertEqual(Course.objects.count(), 1)
        self.assertEqual(str(self.course), self.course_str)
class TestEnrollment(TestCase):
    """Creation and string representation of Enrollment."""

    def setUp(self):
        self.course = Course.objects.create(
            description="Mock", duration=timedelta(hours=12), name="Foo"
        )
        self.student = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.enroll = Enrollment.objects.create(
            course=self.course,
            date_close="2021-12-14",
            score=6,
            status="RE",
            student=self.student,
        )
        self.enroll_str = "Mock Silva/Foo/RE"

    def test_enroll_creation(self):
        self.assertIsInstance(self.enroll, Enrollment)
        # Idiom: .count() queries directly; str() instead of calling __str__().
        self.assertEqual(Enrollment.objects.count(), 1)
        self.assertEqual(str(self.enroll), self.enroll_str)
class TestStudent(TestCase):
    """Creation and string representation of Student."""

    def setUp(self):
        self.student = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.student_str = "Mock Silva"

    def test_student_creation(self):
        self.assertIsInstance(self.student, Student)
        # Idiom: .count() queries directly; str() instead of calling __str__().
        self.assertEqual(Student.objects.count(), 1)
        self.assertEqual(str(self.student), self.student_str)
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,286 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/views.py | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics
from e_campus.models import Course
from e_campus.models import Enrollment
from e_campus.models import Student
from .filters import EnrollmentsFilter
from .filters import StudentsFilter
from .serializers import CourseSerializer
from .serializers import EnrollmentSerializer
from .serializers import StudentSerializer
class CoursesAPIView(generics.ListCreateAPIView):
    """GET: list all courses. POST: create a course."""

    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class CourseAPIView(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single course by primary key."""

    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class EnrollmentsAPIView(generics.ListCreateAPIView):
    """GET: list enrollments (filterable via EnrollmentsFilter). POST: create one."""

    queryset = Enrollment.objects.all()
    serializer_class = EnrollmentSerializer
    filter_backends = [DjangoFilterBackend]
    filterset_class = EnrollmentsFilter
class EnrollmentAPIView(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single enrollment by primary key."""

    queryset = Enrollment.objects.all()
    serializer_class = EnrollmentSerializer
class StudentsAPIView(generics.ListCreateAPIView):
    """GET: list students (filterable via StudentsFilter). POST: create one."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer
    filter_backends = [DjangoFilterBackend]
    filterset_class = StudentsFilter
class StudentAPIView(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE a single student by primary key."""

    queryset = Student.objects.all()
    serializer_class = StudentSerializer
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,287 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/tests/test_serializers.py | from datetime import timedelta
from django.test import TestCase
from e_campus.models import Course
from e_campus.models import Enrollment
from e_campus.models import Student
from api.serializers import CourseSerializer
from api.serializers import EnrollmentSerializer
from api.serializers import StudentSerializer
class TestCourseSerializer(TestCase):
    """Field set and serialized content of CourseSerializer."""

    def setUp(self):
        self.course = Course.objects.create(
            description="Mock", duration=timedelta(hours=12), name="Foo"
        )
        self.course_serializer = CourseSerializer(self.course)
        self.course_expected_data = {
            "description": "Mock",
            "duration": "12:00:00",
            "name": "Foo",
        }

    def test_course_contains_expected_fields(self):
        courses_serialized = self.course_serializer.data
        # Idiom: set literal instead of set([...]).
        self.assertEqual(
            set(courses_serialized.keys()),
            {"description", "duration", "holder_image", "name"},
        )

    def test_course_field_content(self):
        data = self.course_serializer.data
        # Idiom: iterate items() instead of keys() plus a second lookup.
        for key, expected in self.course_expected_data.items():
            with self.subTest(key=key):
                self.assertEqual(data[key], expected)
class TestEnrollmentSerializer(TestCase):
    """Field set and serialized content of EnrollmentSerializer."""

    def setUp(self):
        self.course = Course.objects.create(
            description="Mock", duration=timedelta(hours=12), name="Foo"
        )
        self.student = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.enroll = Enrollment.objects.create(
            course=self.course,
            date_close="2021-12-14",
            score=6,
            status="RE",
            student=self.student,
        )
        self.enroll_serializer = EnrollmentSerializer(self.enroll)
        self.enroll_expected_data = {
            "course": self.course.pk,
            "date_close": "2021-12-14",
            "score": "6.0",
            "status": "RE",
            "student": self.student.pk,
        }

    def test_enroll_contains_expected_fields(self):
        data = self.enroll_serializer.data
        # Idiom: set literal instead of set([...]).
        self.assertEqual(
            set(data.keys()),
            {"course", "date_close", "date_enroll", "score", "status", "student"},
        )

    def test_enroll_field_content(self):
        data = self.enroll_serializer.data
        # Idiom: iterate items() instead of keys() plus a second lookup.
        for key, expected in self.enroll_expected_data.items():
            with self.subTest(key=key):
                self.assertEqual(data[key], expected)
class TestStudentSerializer(TestCase):
    """Field set and serialized content of StudentSerializer."""

    def setUp(self):
        self.student = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.student_serializer = StudentSerializer(self.student)
        self.student_expected_data = {
            "name": "Mock Silva",
            "nickname": "MS",
            "phone": "47999999999",
        }

    def test_student_contains_expected_fields(self):
        data = self.student_serializer.data
        # Idiom: set literal instead of set([...]).
        self.assertEqual(set(data.keys()), {"avatar", "name", "nickname", "phone"})

    def test_student_field_content(self):
        data = self.student_serializer.data
        # Idiom: iterate items() instead of keys() plus a second lookup.
        for key, expected in self.student_expected_data.items():
            with self.subTest(key=key):
                self.assertEqual(data[key], expected)
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,288 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/tests/test_views.py | from datetime import timedelta
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from e_campus.models import Course
from e_campus.models import Enrollment
from e_campus.models import Student
from api.serializers import CourseSerializer
from api.serializers import EnrollmentSerializer
from api.serializers import StudentSerializer
def image_default():
    """Return a minimal valid one-pixel GIF wrapped as an uploaded file."""
    gif_payload = (
        b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04"
        b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02"
        b"\x02\x4c\x01\x00\x3b"
    )
    return SimpleUploadedFile("small.gif", gif_payload, content_type="image/gif")
class TestCourseAPI(APITestCase):
    """End-to-end CRUD tests for the /courses/ endpoints.

    BUG FIX: two tests contained tautological ``assertEqual(result, result)``
    assertions that could never fail; they have been removed.
    """

    required_fields = ["description", "duration", "name"]
    invalid_course = {"duration": "xxx", "holder_image": "wrong.jpg"}

    def setUp(self):
        self.course_a = Course.objects.create(
            description="Mock 1", duration=timedelta(hours=12), name="Foo 1"
        )
        self.course_b = Course.objects.create(
            description="Mock 2", duration=timedelta(hours=24), name="Foo 2"
        )
        self.course_c = Course.objects.create(
            description="Mock 3", duration=timedelta(hours=36), name="Foo 3"
        )
        self.course_d = Course.objects.create(
            description="Mock 4", duration=timedelta(hours=48), name="Foo 4"
        )

    def test_get_all_courses(self):
        courses = Course.objects.all()
        courses_serialized = CourseSerializer(courses, many=True)
        response = self.client.get(reverse("courses"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, list)
        self.assertEqual(len(response.data), 4)
        self.assertEqual(response.data, courses_serialized.data)

    def test_get_valid_single_course(self):
        course = Course.objects.get(pk=self.course_c.pk)
        course_serialized = CourseSerializer(course)
        response = self.client.get(reverse("course", kwargs={"pk": self.course_c.pk}))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, dict)
        self.assertEqual(response.data, course_serialized.data)

    def test_get_nonexistent_single_course(self):
        response = self.client.get(reverse("course", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertIsInstance(response.data, dict)

    def test_post_create_valid_course(self):
        previous_number_courses = Course.objects.all().count()
        data = {
            "description": "Mock 5",
            "duration": 7200,
            "holder_image": "",
            "name": "Foo 5",
        }
        response = self.client.post(reverse("courses"), data)
        result = Course.objects.last()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Course.objects.all().count(), previous_number_courses + 1)
        self.assertEqual(data["description"], result.description)
        self.assertEqual(timedelta(seconds=data["duration"]), result.duration)
        self.assertEqual(data["name"], result.name)

    def test_post_create_invalid_course(self):
        previous_number_courses = Course.objects.all().count()
        data = {
            "description": "Mock 5",
            "duration": 7200,
            "holder_image": "",
            "name": "Foo 5",
        }
        # Drop one required field at a time and expect a 400 with no new row.
        for field in self.required_fields:
            with self.subTest(field=field):
                data.pop(field)
                response = self.client.post(reverse("courses"), data)
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(Course.objects.all().count(), previous_number_courses)

    def test_put_update_valid_course(self):
        course = Course.objects.get(pk=self.course_d.pk)
        data = {
            "description": "New Mock",
            "duration": "03:00:00",
            "holder_image": image_default(),
            "name": "New Foo",
        }
        self.assertNotEqual(course.description, data["description"])
        self.assertNotEqual(course.duration, data["duration"])
        self.assertNotEqual(course.name, data["name"])
        response = self.client.put(
            reverse("course", kwargs={"pk": self.course_d.pk}), data=data
        )
        result = Course.objects.last()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(result.description, data["description"])
        self.assertEqual(result.duration, timedelta(seconds=10800))
        self.assertEqual(result.name, data["name"])

    def test_put_update_invalid_course(self):
        duration_error = [
            "Formato inválido para Duração. Use um dos formatos a seguir: [DD] [HH:[MM:]]ss[.uuuuuu]."
        ]
        image_error = [
            "O dado submetido não é um arquivo. Certifique-se do tipo de codificação no formulário."
        ]
        required_error = ["Este campo é obrigatório."]
        response = self.client.put(
            reverse("course", kwargs={"pk": self.course_d.pk}), data=self.invalid_course
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data["description"], required_error)
        self.assertEqual(response.data["duration"], duration_error)
        self.assertEqual(response.data["holder_image"], image_error)
        self.assertEqual(response.data["name"], required_error)

    def test_put_update_nonexistent_course(self):
        data = {
            "description": "New Mock",
            "duration": "03:00:00",
            "holder_image": image_default(),
            "name": "New Foo",
        }
        response = self.client.put(reverse("course", kwargs={"pk": 99}), data=data)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_patch_update_valid_course(self):
        course = Course.objects.get(pk=self.course_d.pk)
        update_data = {
            "description": "New Mock",
            "duration": "03:00:00",
            "holder_image": image_default(),
            "name": "New Foo",
        }
        self.assertNotEqual(course.description, update_data["description"])
        self.assertNotEqual(course.duration, update_data["duration"])
        self.assertNotEqual(course.name, update_data["name"])
        for key, value in update_data.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("course", kwargs={"pk": self.course_d.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_patch_update_invalid_course(self):
        for key, value in self.invalid_course.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("course", kwargs={"pk": self.course_d.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_patch_update_nonexistent_course(self):
        for key, value in self.invalid_course.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("course", kwargs={"pk": 99}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_valid_course(self):
        previous_number_courses = Course.objects.all().count()
        response = self.client.delete(
            reverse("course", kwargs={"pk": self.course_d.pk})
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Course.objects.all().count(), previous_number_courses - 1)

    def test_delete_nonexistent_course(self):
        previous_number_courses = Course.objects.all().count()
        response = self.client.delete(reverse("course", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(Course.objects.all().count(), previous_number_courses)
class TestEnrollmentAPI(APITestCase):
    """End-to-end CRUD tests for the /enrollments/ endpoints.

    CONSISTENCY FIX: a local named ``previous_number_courses`` actually
    counted enrollments, and ``test_delete_nonexistent_course`` tested an
    enrollment; both are renamed accordingly.
    """

    required_fields = ["course", "status", "student"]
    invalid_enrollment = {
        "course": "z",
        "date_close": "xxx",
        "score": 1000,
        "status": "Foo",
        "student": "b",
    }

    def setUp(self):
        self.course_a = Course.objects.create(
            description="Mock 1", duration=timedelta(hours=12), name="Foo 1"
        )
        self.course_b = Course.objects.create(
            description="Mock 2", duration=timedelta(hours=24), name="Foo 2"
        )
        self.course_c = Course.objects.create(
            description="Mock 3", duration=timedelta(hours=36), name="Foo 3"
        )
        self.student_a = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.student_b = Student.objects.create(
            name="Mock Faria", nickname="MF", phone="47888888888"
        )
        self.enroll_a = Enrollment.objects.create(
            course=self.course_a, score=9, status="AN", student=self.student_a
        )
        self.enroll_b = Enrollment.objects.create(
            course=self.course_b,
            date_close="2021-12-14",
            score=6,
            status="RE",
            student=self.student_b,
        )

    def test_get_all_enrollments(self):
        enrollments = Enrollment.objects.all()
        enrollments_serialized = EnrollmentSerializer(enrollments, many=True)
        response = self.client.get(reverse("enrollments"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, list)
        self.assertEqual(len(response.data), 2)
        self.assertEqual(response.data, enrollments_serialized.data)

    def test_get_valid_single_enrollment(self):
        enroll = Enrollment.objects.get(pk=self.enroll_a.pk)
        enroll_serialized = EnrollmentSerializer(enroll)
        response = self.client.get(
            reverse("enrollment", kwargs={"pk": self.enroll_a.pk})
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, dict)
        self.assertEqual(response.data, enroll_serialized.data)

    def test_get_nonexistent_single_enrollment(self):
        response = self.client.get(reverse("enrollment", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertIsInstance(response.data, dict)

    def test_post_create_valid_enrollment(self):
        # Renamed from previous_number_courses: this counts enrollments.
        previous_number_enrollments = Enrollment.objects.all().count()
        data = {
            "course": self.course_a.pk,
            "date_close": "2022-06-15",
            "score": 3,
            "status": "AN",
            "student": self.student_b.pk,
        }
        response = self.client.post(reverse("enrollments"), data)
        result = Enrollment.objects.last()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(
            Enrollment.objects.all().count(), previous_number_enrollments + 1
        )
        self.assertEqual(data["course"], result.course.pk)
        self.assertEqual(data["date_close"], result.date_close.strftime("%Y-%m-%d"))
        self.assertEqual(data["score"], result.score)
        self.assertEqual(data["status"], result.status)
        self.assertEqual(data["student"], result.student.pk)

    def test_post_create_invalid_enrollment(self):
        previous_number_enrollments = Enrollment.objects.all().count()
        data = {
            "course": self.course_a.pk,
            "date_close": "2022-06-15",
            "score": 3,
            "status": "AN",
            "student": self.student_b.pk,
        }
        # Drop one required field at a time and expect a 400 with no new row.
        for field in self.required_fields:
            with self.subTest(field=field):
                data.pop(field)
                response = self.client.post(reverse("enrollments"), data)
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(
                    Enrollment.objects.all().count(), previous_number_enrollments
                )

    def test_put_update_valid_enrollment(self):
        enrollment = Enrollment.objects.get(pk=self.enroll_b.pk)
        data = {
            "course": self.course_c.pk,
            "date_close": "2022-06-15",
            "score": 3,
            "status": "AN",
            "student": self.student_a.pk,
        }
        self.assertNotEqual(enrollment.course.pk, data["course"])
        self.assertNotEqual(
            enrollment.date_close.strftime("%Y-%m-%d"), data["date_close"]
        )
        self.assertNotEqual(enrollment.score, data["score"])
        self.assertNotEqual(enrollment.status, data["status"])
        self.assertNotEqual(enrollment.student.pk, data["student"])
        response = self.client.put(
            reverse("enrollment", kwargs={"pk": self.enroll_b.pk}), data=data
        )
        result = Enrollment.objects.last()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(result.course.pk, data["course"])
        self.assertEqual(result.date_close.strftime("%Y-%m-%d"), data["date_close"])
        self.assertEqual(result.score, data["score"])
        self.assertEqual(result.status, data["status"])
        self.assertEqual(result.student.pk, data["student"])

    def test_put_update_invalid_enrollment(self):
        response = self.client.put(
            reverse("enrollment", kwargs={"pk": self.enroll_b.pk}),
            data=self.invalid_enrollment,
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_put_update_nonexistent_enrollment(self):
        data = {
            "course": self.course_c.pk,
            "date_close": "2022-06-15",
            "score": 3,
            "status": "AN",
            "student": self.student_a.pk,
        }
        response = self.client.put(reverse("enrollment", kwargs={"pk": 99}), data=data)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_patch_update_valid_enrollment(self):
        enrollment = Enrollment.objects.get(pk=self.enroll_b.pk)
        update_data = {
            "course": self.course_c.pk,
            "date_close": "2022-06-15",
            "score": 3,
            "status": "AN",
            "student": self.student_a.pk,
        }
        self.assertNotEqual(enrollment.course.pk, update_data["course"])
        self.assertNotEqual(
            enrollment.date_close.strftime("%Y-%m-%d"), update_data["date_close"]
        )
        self.assertNotEqual(enrollment.score, update_data["score"])
        self.assertNotEqual(enrollment.status, update_data["status"])
        self.assertNotEqual(enrollment.student.pk, update_data["student"])
        for key, value in update_data.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("enrollment", kwargs={"pk": self.enroll_b.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_patch_update_invalid_enrollment(self):
        for key, value in self.invalid_enrollment.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("enrollment", kwargs={"pk": self.enroll_b.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_patch_update_nonexistent_enrollment(self):
        for key, value in self.invalid_enrollment.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("enrollment", kwargs={"pk": 99}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_valid_enrollment(self):
        previous_number_enrollments = Enrollment.objects.all().count()
        response = self.client.delete(
            reverse("enrollment", kwargs={"pk": self.enroll_b.pk})
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(
            Enrollment.objects.all().count(), previous_number_enrollments - 1
        )

    def test_delete_nonexistent_enrollment(self):
        # Renamed from test_delete_nonexistent_course: this targets enrollments.
        previous_number_enrollments = Enrollment.objects.all().count()
        response = self.client.delete(reverse("enrollment", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(Enrollment.objects.all().count(), previous_number_enrollments)
class TestStudentAPI(APITestCase):
    """End-to-end CRUD tests for the /students/ endpoints.

    CONSISTENCY FIX: two PATCH tests were misnamed ``…_course``; they are
    renamed to ``…_student`` to match what they exercise.
    """

    invalid_student = {
        "avatar": "wrong.jpg",
        "name": 300 * "x",
        "nickname": 300 * "y",
        "phone": 9999999999999999,
    }
    required_fields = ["name", "nickname", "phone"]

    def setUp(self):
        self.student_a = Student.objects.create(
            name="Mock Silva", nickname="MS", phone="47999999999"
        )
        self.student_b = Student.objects.create(
            name="Mock Faria", nickname="MF", phone="47888888888"
        )
        self.student_c = Student.objects.create(
            name="Mock Pereira", nickname="MP", phone="476666666666"
        )

    def test_get_all_students(self):
        students = Student.objects.all()
        students_serialized = StudentSerializer(students, many=True)
        response = self.client.get(reverse("students"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, list)
        self.assertEqual(len(response.data), 3)
        self.assertEqual(response.data, students_serialized.data)

    def test_get_valid_single_student(self):
        student = Student.objects.get(pk=self.student_c.pk)
        student_serialized = StudentSerializer(student)
        response = self.client.get(reverse("student", kwargs={"pk": self.student_c.pk}))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIsInstance(response.data, dict)
        self.assertEqual(response.data, student_serialized.data)

    def test_get_nonexistent_single_student(self):
        response = self.client.get(reverse("student", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertIsInstance(response.data, dict)

    def test_post_create_valid_student(self):
        previous_number_students = Student.objects.all().count()
        data = {"name": "Mock Junior", "nickname": "MJ", "phone": "47555555555"}
        response = self.client.post(reverse("students"), data)
        result = Student.objects.last()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Student.objects.all().count(), previous_number_students + 1)
        self.assertEqual(data["name"], result.name)
        self.assertEqual(data["nickname"], result.nickname)
        self.assertEqual(data["phone"], result.phone)

    def test_post_create_invalid_student(self):
        previous_number_students = Student.objects.all().count()
        data = {"name": "Mock Junior", "nickname": "MJ", "phone": "47555555555"}
        # Drop one required field at a time and expect a 400 with no new row.
        for field in self.required_fields:
            with self.subTest(field=field):
                data.pop(field)
                response = self.client.post(reverse("students"), data)
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                self.assertEqual(
                    Student.objects.all().count(), previous_number_students
                )

    def test_put_update_valid_student(self):
        student = Student.objects.get(pk=self.student_c.pk)
        data = {
            "avatar": image_default(),
            "name": "Mock Junior",
            "nickname": "MJ",
            "phone": "47555555555",
        }
        self.assertNotEqual(student.name, data["name"])
        self.assertNotEqual(student.nickname, data["nickname"])
        self.assertNotEqual(student.phone, data["phone"])
        response = self.client.put(
            reverse("student", kwargs={"pk": self.student_c.pk}), data=data
        )
        result = Student.objects.last()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(result.name, data["name"])
        self.assertEqual(result.nickname, data["nickname"])
        self.assertEqual(result.phone, data["phone"])

    def test_put_update_invalid_student(self):
        response = self.client.put(
            reverse("student", kwargs={"pk": self.student_c.pk}),
            data=self.invalid_student,
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_put_update_nonexistent_student(self):
        data = {
            "avatar": image_default(),
            "name": "Mock Junior",
            "nickname": "MJ",
            "phone": "47555555555",
        }
        response = self.client.put(reverse("student", kwargs={"pk": 99}), data=data)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_patch_update_valid_student(self):
        update_data = {
            "avatar": image_default(),
            "name": "Mock Junior",
            "nickname": "MJ",
            "phone": "47555555555",
        }
        for key, value in update_data.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("student", kwargs={"pk": self.student_c.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_patch_update_invalid_student(self):
        # Renamed from test_patch_update_invalid_course.
        for key, value in self.invalid_student.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("student", kwargs={"pk": self.student_c.pk}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_patch_update_nonexistent_student(self):
        # Renamed from test_patch_update_nonexistent_course.
        for key, value in self.invalid_student.items():
            with self.subTest(key=key, value=value):
                data = {key: value}
                response = self.client.patch(
                    reverse("student", kwargs={"pk": 99}), data=data
                )
                self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_valid_student(self):
        previous_number_students = Student.objects.all().count()
        response = self.client.delete(
            reverse("student", kwargs={"pk": self.student_c.pk})
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Student.objects.all().count(), previous_number_students - 1)

    def test_delete_nonexistent_student(self):
        previous_number_students = Student.objects.all().count()
        response = self.client.delete(reverse("student", kwargs={"pk": 99}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(Student.objects.all().count(), previous_number_students)
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,289 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_academy/urls.py | from django.contrib import admin
from django.urls import include
from django.urls import path
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view: serves the auto-generated OpenAPI document and the
# Swagger UI for the whole project, readable without authentication.
schema_view = get_schema_view(
    openapi.Info(
        # NOTE(review): the trailing "I" in "E-academyI" looks like a typo
        # for "E-academy" — confirm the intended title before changing it.
        title="E-academyI",
        default_version="v1",
        description="E-academy documentation.",
    ),
    public=True,  # schema document is publicly visible
    permission_classes=(permissions.AllowAny,),
)
# Root URL configuration: versioned REST API, Django admin, Swagger UI at "/".
urlpatterns = [
    path("api/v1/", include("api.urls")),
    path("admin/", admin.site.urls),
    path("", schema_view.with_ui("swagger", cache_timeout=0)),
]
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,290 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/serializers.py | from rest_framework import serializers
from e_campus.models import Course
from e_campus.models import Student
from e_campus.models import Enrollment
class CourseSerializer(serializers.ModelSerializer):
    """Serialize ``Course`` objects, exposing only the writable model fields."""

    class Meta:
        model = Course
        fields = ("description", "duration", "holder_image", "name")
class StudentSerializer(serializers.ModelSerializer):
    """Serialize ``Student`` objects for the REST API.

    Timestamps (``date_created``/``date_updated``) are intentionally
    excluded from the field list.
    """

    class Meta:
        model = Student
        fields = (
            "avatar",
            "name",
            "nickname",
            "phone",
        )
class EnrollmentSerializer(serializers.ModelSerializer):
    """Serialize ``Enrollment`` objects, linking a student to a course.

    ``course`` and ``student`` are foreign keys; ``status`` carries the
    enrollment state and ``score`` the final grade, when present.
    """

    class Meta:
        model = Enrollment
        fields = (
            "course",
            "date_close",
            "date_enroll",
            "score",
            "status",
            "student",
        )
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,291 | LePiN/virtual_academy | refs/heads/main | /e_academy/api/filters.py | from django_filters import FilterSet
from django_filters.rest_framework import filters
from e_campus.models import Enrollment
from e_campus.models import Student
class EnrollmentsFilter(FilterSet):
    """Filter enrollments by course, student, status, and date ranges.

    The ``*_start``/``*_end`` pairs express inclusive (gte/lte) bounds on
    the ``date_enroll`` and ``date_close`` columns.
    """

    date_enroll_start = filters.DateFilter(field_name="date_enroll", lookup_expr="gte")
    date_enroll_end = filters.DateFilter(field_name="date_enroll", lookup_expr="lte")
    date_close_start = filters.DateFilter(field_name="date_close", lookup_expr="gte")
    date_close_end = filters.DateFilter(field_name="date_close", lookup_expr="lte")

    class Meta:
        model = Enrollment
        fields = [
            "course",
            "date_close_end",
            "date_close_start",
            "date_enroll_end",
            "date_enroll_start",
            "status",
            "student",
        ]
class StudentsFilter(FilterSet):
    """Filter students by an inclusive ``date_created`` range."""

    date_created_start = filters.DateFilter(
        field_name="date_created", lookup_expr="gte"
    )
    date_created_end = filters.DateFilter(field_name="date_created", lookup_expr="lte")

    class Meta:
        model = Student
        fields = ["date_created_end", "date_created_start"]
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,292 | LePiN/virtual_academy | refs/heads/main | /e_academy/e_campus/migrations/0001_initial.py | # Generated by Django 3.2.7 on 2021-10-01 14:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the Course, Student and Enrollment tables.

    Auto-generated by Django (makemigrations); avoid hand-editing except
    to squash or to apply framework-documented tweaks.
    """

    initial = True

    dependencies = []

    operations = [
        # Course: catalog entry with a duration and a holder image.
        migrations.CreateModel(
            name="Course",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_updated", models.DateTimeField(auto_now=True)),
                ("description", models.TextField(blank=True, default="")),
                ("duration", models.DurationField()),
                ("holder_image", models.ImageField(upload_to="")),
                ("name", models.CharField(max_length=255)),
            ],
            options={
                "verbose_name": "Curso",
                "verbose_name_plural": "Cursos",
            },
        ),
        # Student: person record with avatar and contact phone.
        migrations.CreateModel(
            name="Student",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_updated", models.DateTimeField(auto_now=True)),
                ("avatar", models.ImageField(upload_to="")),
                ("name", models.CharField(max_length=255)),
                ("nickname", models.CharField(max_length=25)),
                ("phone", models.CharField(max_length=12)),
            ],
            options={
                "verbose_name": "Aluno",
                "verbose_name_plural": "Alunos",
            },
        ),
        # Enrollment: join table between Student and Course, with status
        # and score; FKs use PROTECT so referenced rows cannot be deleted.
        migrations.CreateModel(
            name="Enrollment",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date_created", models.DateTimeField(auto_now_add=True)),
                ("date_updated", models.DateTimeField(auto_now=True)),
                ("date_close", models.DateField(blank=True, null=True)),
                ("date_enroll", models.DateField(auto_now_add=True)),
                (
                    "score",
                    models.DecimalField(
                        blank=True, decimal_places=1, max_digits=2, null=True
                    ),
                ),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("AN", "Andamento"),
                            ("AP", "Aprovado"),
                            ("RE", "Reprovado"),
                        ],
                        default="AN",
                        max_length=2,
                    ),
                ),
                (
                    "course",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="matriculas",
                        to="e_campus.course",
                    ),
                ),
                (
                    "student",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="matriculas",
                        to="e_campus.student",
                    ),
                ),
            ],
            options={
                "verbose_name": "Matricula",
                "verbose_name_plural": "Matriculas",
            },
        ),
    ]
| {"/e_academy/api/urls.py": ["/e_academy/api/views.py"], "/e_academy/e_campus/admin.py": ["/e_academy/e_campus/models.py"], "/e_academy/api/views.py": ["/e_academy/api/filters.py", "/e_academy/api/serializers.py"]} |
68,301 | ryanvisioncord/ubereats_scrapy_selenium_scraping | refs/heads/master | /ubereats/cities.py |
cities_list = [
"Alexander City,AL",
"Andalusia,AL",
"Anniston,AL",
"Athens,AL",
"Atmore,AL",
"Auburn,AL",
"Bessemer,AL",
"Birmingham,AL",
"Chickasaw,AL",
"Clanton,AL",
"Cullman,AL",
"Decatur,AL",
"Demopolis,AL",
"Dothan,AL",
"Enterprise,AL",
"Eufaula,AL",
"Florence,AL",
"Fort Payne,AL",
"Gadsden,AL",
"Greenville,AL",
"Guntersville,AL",
"Huntsville,AL",
"Jasper,AL",
"Marion,AL",
"Mobile,AL",
"Montgomery,AL",
"Opelika,AL",
"Ozark,AL",
"Phenix City,AL",
"Prichard,AL",
"Scottsboro,AL",
"Selma,AL",
"Sheffield,AL",
"Sylacauga,AL",
"Talladega,AL",
"Troy,AL",
"Tuscaloosa,AL",
"Tuscumbia,AL",
"Tuskegee,AL",
"Anchorage,AK",
"Cordova,AK",
"Fairbanks,AK",
"Haines,AK",
"Homer,AK",
"Juneau,AK",
"Ketchikan,AK",
"Kodiak,AK",
"Kotzebue,AK",
"Nome,AK",
"Palmer,AK",
"Seward,AK",
"Sitka,AK",
"Skagway,AK",
"Valdez,AK",
"Ajo,AZ",
"Avondale,AZ",
"Bisbee,AZ",
"Casa Grande,AZ",
"Chandler,AZ",
"Clifton,AZ",
"Douglas,AZ",
"Flagstaff,AZ",
"Florence,AZ",
"Gila Bend,AZ",
"Glendale,AZ",
"Globe,AZ",
"Kingman,AZ",
"Lake Havasu City,AZ",
"Mesa,AZ",
"Nogales,AZ",
"Oraibi,AZ",
"Phoenix,AZ",
"Prescott,AZ",
"Scottsdale,AZ",
"Sierra Vista,AZ",
"Tempe,AZ",
"Tombstone,AZ",
"Tucson,AZ",
"Walpi,AZ",
"Window Rock,AZ",
"Winslow,AZ",
"Yuma,AZ",
"Arkadelphia,AR",
"Arkansas Post,AR",
"Batesville,AR",
"Benton,AR",
"Blytheville,AR",
"Camden,AR",
"Conway,AR",
"Crossett,AR",
"El Dorado,AR",
"Fayetteville,AR",
"Forrest City,AR",
"Fort Smith,AR",
"Harrison,AR",
"Helena,AR",
"Hope,AR",
"Hot Springs,AR",
"Jacksonville,AR",
"Jonesboro,AR",
"Little Rock,AR",
"Magnolia,AR",
"Morrilton,AR",
"Newport,AR",
"North Little Rock,AR",
"Osceola,AR",
"Pine Bluff,AR",
"Rogers,AR",
"Searcy,AR",
"Stuttgart,AR",
"Van Buren,AR",
"West Memphis,AR",
"Alameda,CA",
"Alhambra,CA",
"Anaheim,CA",
"Antioch,CA",
"Arcadia,CA",
"Bakersfield,CA",
"Barstow,CA",
"Belmont,CA",
"Berkeley,CA",
"Beverly Hills,CA",
"Brea,CA",
"Buena Park,CA",
"Burbank,CA",
"Calexico,CA",
"Calistoga,CA",
"Carlsbad,CA",
"Carmel,CA",
"Chico,CA",
"Chula Vista,CA",
"Claremont,CA",
"Compton,CA",
"Concord,CA",
"Corona,CA",
"Coronado,CA",
"Costa Mesa,CA",
"Culver City,CA",
"Daly City,CA",
"Davis,CA",
"Downey,CA",
"El Centro,CA",
"El Cerrito,CA",
"El Monte,CA",
"Escondido,CA",
"Eureka,CA",
"Fairfield,CA",
"Fontana,CA",
"Fremont,CA",
"Fresno,CA",
"Fullerton,CA",
"Garden Grove,CA",
"Glendale,CA",
"Hayward,CA",
"Hollywood,CA",
"Huntington Beach,CA",
"Indio,CA",
"Inglewood,CA",
"Irvine,CA",
"La Habra,CA",
"Laguna Beach,CA",
"Lancaster,CA",
"Livermore,CA",
"Lodi,CA",
"Lompoc,CA",
"Long Beach,CA",
"Los Angeles,CA",
"Malibu,CA",
"Martinez,CA",
"Marysville,CA",
"Menlo Park,CA",
"Merced,CA",
"Modesto,CA",
"Monterey,CA",
"Mountain View,CA",
"Napa,CA",
"Needles,CA",
"Newport Beach,CA",
"Norwalk,CA",
"Novato,CA",
"Oakland,CA",
"Oceanside,CA",
"Ojai,CA",
"Ontario,CA",
"Orange,CA",
"Oroville,CA",
"Oxnard,CA",
"Pacific Grove,CA",
"Palm Springs,CA",
"Palmdale,CA",
"Palo Alto,CA",
"Pasadena,CA",
"Petaluma,CA",
"Pomona,CA",
"Port Hueneme,CA",
"Rancho Cucamonga,CA",
"Red Bluff,CA",
"Redding,CA",
"Redlands,CA",
"Redondo Beach,CA",
"Redwood City,CA",
"Richmond,CA",
"Riverside,CA",
"Roseville,CA",
"Sacramento,CA",
"Salinas,CA",
"San Bernardino,CA",
"San Clemente,CA",
"San Diego,CA",
"San Fernando,CA",
"San Francisco,CA",
"San Gabriel,CA",
"San Jose,CA",
"San Juan Capistrano,CA",
"San Leandro,CA",
"San Luis Obispo,CA",
"San Marino,CA",
"San Mateo,CA",
"San Pedro,CA",
"San Rafael,CA",
"San Simeon,CA",
"Santa Ana,CA",
"Santa Barbara,CA",
"Santa Clara,CA",
"Santa Clarita,CA",
"Santa Cruz,CA",
"Santa Monica,CA",
"Santa Rosa,CA",
"Sausalito,CA",
"Simi Valley,CA",
"Sonoma,CA",
"South San Francisco,CA",
"Stockton,CA",
"Sunnyvale,CA",
"Susanville,CA",
"Thousand Oaks,CA",
"Torrance,CA",
"Turlock,CA",
"Ukiah,CA",
"Vallejo,CA",
"Ventura,CA",
"Victorville,CA",
"Visalia,CA",
"Walnut Creek,CA",
"Watts,CA",
"West Covina,CA",
"Whittier,CA",
"Woodland,CA",
"Yorba Linda,CA",
"Yuba City,CA",
"Alamosa,CO",
"Aspen,CO",
"Aurora,CO",
"Boulder,CO",
"Breckenridge,CO",
"Brighton,CO",
"Canon City,CO",
"Central City,CO",
"Climax,CO",
"Colorado Springs,CO",
"Cortez,CO",
"Cripple Creek,CO",
"Denver,CO",
"Durango,CO",
"Englewood,CO",
"Estes Park,CO",
"Fort Collins,CO",
"Fort Morgan,CO",
"Georgetown,CO",
"Glenwood Springs,CO",
"Golden,CO",
"Grand Junction,CO",
"Greeley,CO",
"Gunnison,CO",
"La Junta,CO",
"Leadville,CO",
"Littleton,CO",
"Longmont,CO",
"Loveland,CO",
"Montrose,CO",
"Ouray,CO",
"Pagosa Springs,CO",
"Pueblo,CO",
"Silverton,CO",
"Steamboat Springs,CO",
"Sterling,CO",
"Telluride,CO",
"Trinidad,CO",
"Vail,CO",
"Walsenburg,CO",
"Westminster,CO",
"Ansonia,CT",
"Berlin,CT",
"Bloomfield,CT",
"Branford,CT",
"Bridgeport,CT",
"Bristol,CT",
"Coventry,CT",
"Danbury,CT",
"Darien,CT",
"Derby,CT",
"East Hartford,CT",
"East Haven,CT",
"Enfield,CT",
"Fairfield,CT",
"Farmington,CT",
"Greenwich,CT",
"Groton,CT",
"Guilford,CT",
"Hamden,CT",
"Hartford,CT",
"Lebanon,CT",
"Litchfield,CT",
"Manchester,CT",
"Mansfield,CT",
"Meriden,CT",
"Middletown,CT",
"Milford,CT",
"Mystic,CT",
"Naugatuck,CT",
"New Britain,CT",
"New Haven,CT",
"New London,CT",
"North Haven,CT",
"Norwalk,CT",
"Norwich,CT",
"Old Saybrook,CT",
"Orange,CT",
"Seymour,CT",
"Shelton,CT",
"Simsbury,CT",
"Southington,CT",
"Stamford,CT",
"Stonington,CT",
"Stratford,CT",
"Torrington,CT",
"Wallingford,CT",
"Waterbury,CT",
"Waterford,CT",
"Watertown,CT",
"West Hartford,CT",
"West Haven,CT",
"Westport,CT",
"Wethersfield,CT",
"Willimantic,CT",
"Windham,CT",
"Windsor,CT",
"Windsor Locks,CT",
"Winsted,CT",
"Dover,DE",
"Lewes,DE",
"Milford,DE",
"New Castle,DE",
"Newark,DE",
"Smyrna,DE",
"Wilmington,DE",
"Apalachicola,FL",
"Bartow,FL",
"Belle Glade,FL",
"Boca Raton,FL",
"Bradenton,FL",
"Cape Coral,FL",
"Clearwater,FL",
"Cocoa Beach,FL",
"Cocoa-Rockledge,FL",
"Coral Gables,FL",
"Daytona Beach,FL",
"De Land,FL",
"Deerfield Beach,FL",
"Delray Beach,FL",
"Fernandina Beach,FL",
"Fort Lauderdale,FL",
"Fort Myers,FL",
"Fort Pierce,FL",
"Fort Walton Beach,FL",
"Gainesville,FL",
"Hallandale Beach,FL",
"Hialeah,FL",
"Hollywood,FL",
"Homestead,FL",
"Jacksonville,FL",
"Key West,FL",
"Lake City,FL",
"Lake Wales,FL",
"Lakeland,FL",
"Largo,FL",
"Melbourne,FL",
"Miami,FL",
"Miami Beach,FL",
"Naples,FL",
"New Smyrna Beach,FL",
"Ocala,FL",
"Orlando,FL",
"Ormond Beach,FL",
"Palatka,FL",
"Palm Bay,FL",
"Palm Beach,FL",
"Panama City,FL",
"Pensacola,FL",
"Pompano Beach,FL",
"Saint Augustine,FL",
"Saint Petersburg,FL",
"Sanford,FL",
"Sarasota,FL",
"Sebring,FL",
"Tallahassee,FL",
"Tampa,FL",
"Tarpon Springs,FL",
"Titusville,FL",
"Venice,FL",
"West Palm Beach,FL",
"White Springs,FL",
"Winter Haven,FL",
"Winter Park,FL",
"Albany,GA",
"Americus,GA",
"Andersonville,GA",
"Athens,GA",
"Atlanta,GA",
"Augusta,GA",
"Bainbridge,GA",
"Blairsville,GA",
"Brunswick,GA",
"Calhoun,GA",
"Carrollton,GA",
"Columbus,GA",
"Dahlonega,GA",
"Dalton,GA",
"Darien,GA",
"Decatur,GA",
"Douglas,GA",
"East Point,GA",
"Fitzgerald,GA",
"Fort Valley,GA",
"Gainesville,GA",
"La Grange,GA",
"Macon,GA",
"Marietta,GA",
"Milledgeville,GA",
"Plains,GA",
"Rome,GA",
"Savannah,GA",
"Toccoa,GA",
"Valdosta,GA",
"Warm Springs,GA",
"Warner Robins,GA",
"Washington,GA",
"Waycross,GA",
"Hanalei,HI",
"Hilo,HI",
"Honaunau,HI",
"Honolulu,HI",
"Kahului,HI",
"Kaneohe,HI",
"Kapaa,HI",
"Kawaihae,HI",
"Lahaina,HI",
"Laie,HI",
"Wahiawa,HI",
"Wailuku,HI",
"Waimea,HI",
"Blackfoot,ID",
"Boise,ID",
"Bonners Ferry,ID",
"Caldwell,ID",
"Coeur d’Alene,ID",
"Idaho City,ID",
"Idaho Falls,ID",
"Kellogg,ID",
"Lewiston,ID",
"Moscow,ID",
"Nampa,ID",
"Pocatello,ID",
"Priest River,ID",
"Rexburg,ID",
"Sun Valley,ID",
"Twin Falls,ID",
"Alton,IL",
"Arlington Heights,IL",
"Arthur,IL",
"Aurora,IL",
"Belleville,IL",
"Belvidere,IL",
"Bloomington,IL",
"Brookfield,IL",
"Cahokia,IL",
"Cairo,IL",
"Calumet City,IL",
"Canton,IL",
"Carbondale,IL",
"Carlinville,IL",
"Carthage,IL",
"Centralia,IL",
"Champaign,IL",
"Charleston,IL",
"Chester,IL",
"Chicago,IL",
"Chicago Heights,IL",
"Cicero,IL",
"Collinsville,IL",
"Danville,IL",
"Decatur,IL",
"DeKalb,IL",
"Des Plaines,IL",
"Dixon,IL",
"East Moline,IL",
"East Saint Louis,IL",
"Effingham,IL",
"Elgin,IL",
"Elmhurst,IL",
"Evanston,IL",
"Freeport,IL",
"Galena,IL",
"Galesburg,IL",
"Glen Ellyn,IL",
"Glenview,IL",
"Granite City,IL",
"Harrisburg,IL",
"Herrin,IL",
"Highland Park,IL",
"Jacksonville,IL",
"Joliet,IL",
"Kankakee,IL",
"Kaskaskia,IL",
"Kewanee,IL",
"La Salle,IL",
"Lake Forest,IL",
"Libertyville,IL",
"Lincoln,IL",
"Lisle,IL",
"Lombard,IL",
"Macomb,IL",
"Mattoon,IL",
"Moline,IL",
"Monmouth,IL",
"Mount Vernon,IL",
"Mundelein,IL",
"Naperville,IL",
"Nauvoo,IL",
"Normal,IL",
"North Chicago,IL",
"Oak Park,IL",
"Oregon,IL",
"Ottawa,IL",
"Palatine,IL",
"Park Forest,IL",
"Park Ridge,IL",
"Pekin,IL",
"Peoria,IL",
"Petersburg,IL",
"Pontiac,IL",
"Quincy,IL",
"Rantoul,IL",
"River Forest,IL",
"Rock Island,IL",
"Rockford,IL",
"Salem,IL",
"Shawneetown,IL",
"Skokie,IL",
"South Holland,IL",
"Springfield,IL",
"Streator,IL",
"Summit,IL",
"Urbana,IL",
"Vandalia,IL",
"Virden,IL",
"Waukegan,IL",
"Wheaton,IL",
"Wilmette,IL",
"Winnetka,IL",
"Wood River,IL",
"Zion,IL",
"Anderson,IN",
"Bedford,IN",
"Bloomington,IN",
"Columbus,IN",
"Connersville,IN",
"Corydon,IN",
"Crawfordsville,IN",
"East Chicago,IN",
"Elkhart,IN",
"Elwood,IN",
"Evansville,IN",
"Fort Wayne,IN",
"French Lick,IN",
"Gary,IN",
"Geneva,IN",
"Goshen,IN",
"Greenfield,IN",
"Hammond,IN",
"Hobart,IN",
"Huntington,IN",
"Indianapolis,IN",
"Jeffersonville,IN",
"Kokomo,IN",
"Lafayette,IN",
"Madison,IN",
"Marion,IN",
"Michigan City,IN",
"Mishawaka,IN",
"Muncie,IN",
"Nappanee,IN",
"Nashville,IN",
"New Albany,IN",
"New Castle,IN",
"New Harmony,IN",
"Peru,IN",
"Plymouth,IN",
"Richmond,IN",
"Santa Claus,IN",
"Shelbyville,IN",
"South Bend,IN",
"Terre Haute,IN",
"Valparaiso,IN",
"Vincennes,IN",
"Wabash,IN",
"West Lafayette,IN",
"Amana Colonies,IA",
"Ames,IA",
"Boone,IA",
"Burlington,IA",
"Cedar Falls,IA",
"Cedar Rapids,IA",
"Charles City,IA",
"Cherokee,IA",
"Clinton,IA",
"Council Bluffs,IA",
"Davenport,IA",
"Des Moines,IA",
"Dubuque,IA",
"Estherville,IA",
"Fairfield,IA",
"Fort Dodge,IA",
"Grinnell,IA",
"Indianola,IA",
"Iowa City,IA",
"Keokuk,IA",
"Mason City,IA",
"Mount Pleasant,IA",
"Muscatine,IA",
"Newton,IA",
"Oskaloosa,IA",
"Ottumwa,IA",
"Sioux City,IA",
"Waterloo,IA",
"Webster City,IA",
"West Des Moines,IA",
"Abilene,KS",
"Arkansas City,KS",
"Atchison,KS",
"Chanute,KS",
"Coffeyville,KS",
"Council Grove,KS",
"Dodge City,KS",
"Emporia,KS",
"Fort Scott,KS",
"Garden City,KS",
"Great Bend,KS",
"Hays,KS",
"Hutchinson,KS",
"Independence,KS",
"Junction City,KS",
"Kansas City,KS",
"Lawrence,KS",
"Leavenworth,KS",
"Liberal,KS",
"Manhattan,KS",
"McPherson,KS",
"Medicine Lodge,KS",
"Newton,KS",
"Olathe,KS",
"Osawatomie,KS",
"Ottawa,KS",
"Overland Park,KS",
"Pittsburg,KS",
"Salina,KS",
"Shawnee,KS",
"Smith Center,KS",
"Topeka,KS",
"Wichita,KS",
"Ashland,KY",
"Barbourville,KY",
"Bardstown,KY",
"Berea,KY",
"Boonesborough,KY",
"Bowling Green,KY",
"Campbellsville,KY",
"Covington,KY",
"Danville,KY",
"Elizabethtown,KY",
"Frankfort,KY",
"Harlan,KY",
"Harrodsburg,KY",
"Hazard,KY",
"Henderson,KY",
"Hodgenville,KY",
"Hopkinsville,KY",
"Lexington,KY",
"Louisville,KY",
"Mayfield,KY",
"Maysville,KY",
"Middlesboro,KY",
"Newport,KY",
"Owensboro,KY",
"Paducah,KY",
"Paris,KY",
"Richmond,KY",
"Abbeville,LA",
"Alexandria,LA",
"Bastrop,LA",
"Baton Rouge,LA",
"Bogalusa,LA",
"Bossier City,LA",
"Gretna,LA",
"Houma,LA",
"Lafayette,LA",
"Lake Charles,LA",
"Monroe,LA",
"Morgan City,LA",
"Natchitoches,LA",
"New Iberia,LA",
"New Orleans,LA",
"Opelousas,LA",
"Ruston,LA",
"Saint Martinville,LA",
"Shreveport,LA",
"Thibodaux,LA",
"Auburn,ME",
"Augusta,ME",
"Bangor,ME",
"Bar Harbor,ME",
"Bath,ME",
"Belfast,ME",
"Biddeford,ME",
"Boothbay Harbor,ME",
"Brunswick,ME",
"Calais,ME",
"Caribou,ME",
"Castine,ME",
"Eastport,ME",
"Ellsworth,ME",
"Farmington,ME",
"Fort Kent,ME",
"Gardiner,ME",
"Houlton,ME",
"Kennebunkport,ME",
"Kittery,ME",
"Lewiston,ME",
"Lubec,ME",
"Machias,ME",
"Orono,ME",
"Portland,ME",
"Presque Isle,ME",
"Rockland,ME",
"Rumford,ME",
"Saco,ME",
"Scarborough,ME",
"Waterville,ME",
"York,ME",
"Aberdeen,MD",
"Annapolis,MD",
"Baltimore,MD",
"Bethesda-Chevy Chase,MD",
"Bowie,MD",
"Cambridge,MD",
"Catonsville,MD",
"College Park,MD",
"Columbia,MD",
"Cumberland,MD",
"Easton,MD",
"Elkton,MD",
"Emmitsburg,MD",
"Frederick,MD",
"Greenbelt,MD",
"Hagerstown,MD",
"Hyattsville,MD",
"Laurel,MD",
"Oakland,MD",
"Ocean City,MD",
"Rockville,MD",
"Saint Marys City,MD",
"Salisbury,MD",
"Silver Spring,MD",
"Takoma Park,MD",
"Towson,MD",
"Westminster,MD",
"Abington,MA",
"Adams,MA",
"Amesbury,MA",
"Amherst,MA",
"Andover,MA",
"Arlington,MA",
"Athol,MA",
"Attleboro,MA",
"Barnstable,MA",
"Bedford,MA",
"Beverly,MA",
"Boston,MA",
"Bourne,MA",
"Braintree,MA",
"Brockton,MA",
"Brookline,MA",
"Cambridge,MA",
"Canton,MA",
"Charlestown,MA",
"Chelmsford,MA",
"Chelsea,MA",
"Chicopee,MA",
"Clinton,MA",
"Cohasset,MA",
"Concord,MA",
"Danvers,MA",
"Dartmouth,MA",
"Dedham,MA",
"Dennis,MA",
"Duxbury,MA",
"Eastham,MA",
"Edgartown,MA",
"Everett,MA",
"Fairhaven,MA",
"Fall River,MA",
"Falmouth,MA",
"Fitchburg,MA",
"Framingham,MA",
"Gloucester,MA",
"Great Barrington,MA",
"Greenfield,MA",
"Groton,MA",
"Harwich,MA",
"Haverhill,MA",
"Hingham,MA",
"Holyoke,MA",
"Hyannis,MA",
"Ipswich,MA",
"Lawrence,MA",
"Lenox,MA",
"Leominster,MA",
"Lexington,MA",
"Lowell,MA",
"Ludlow,MA",
"Lynn,MA",
"Malden,MA",
"Marblehead,MA",
"Marlborough,MA",
"Medford,MA",
"Milton,MA",
"Nahant,MA",
"Natick,MA",
"New Bedford,MA",
"Newburyport,MA",
"Newton,MA",
"North Adams,MA",
"Northampton,MA",
"Norton,MA",
"Norwood,MA",
"Peabody,MA",
"Pittsfield,MA",
"Plymouth,MA",
"Provincetown,MA",
"Quincy,MA",
"Randolph,MA",
"Revere,MA",
"Salem,MA",
"Sandwich,MA",
"Saugus,MA",
"Somerville,MA",
"South Hadley,MA",
"Springfield,MA",
"Stockbridge,MA",
"Stoughton,MA",
"Sturbridge,MA",
"Sudbury,MA",
"Taunton,MA",
"Tewksbury,MA",
"Truro,MA",
"Watertown,MA",
"Webster,MA",
"Wellesley,MA",
"Wellfleet,MA",
"West Bridgewater,MA",
"West Springfield,MA",
"Westfield,MA",
"Weymouth,MA",
"Whitman,MA",
"Williamstown,MA",
"Woburn,MA",
"Woods Hole,MA",
"Worcester,MA",
"Adrian,MI",
"Alma,MI",
"Ann Arbor,MI",
"Battle Creek,MI",
"Bay City,MI",
"Benton Harbor,MI",
"Bloomfield Hills,MI",
"Cadillac,MI",
"Charlevoix,MI",
"Cheboygan,MI",
"Dearborn,MI",
"Detroit,MI",
"East Lansing,MI",
"Eastpointe,MI",
"Ecorse,MI",
"Escanaba,MI",
"Flint,MI",
"Grand Haven,MI",
"Grand Rapids,MI",
"Grayling,MI",
"Grosse Pointe,MI",
"Hancock,MI",
"Highland Park,MI",
"Holland,MI",
"Houghton,MI",
"Interlochen,MI",
"Iron Mountain,MI",
"Ironwood,MI",
"Ishpeming,MI",
"Jackson,MI",
"Kalamazoo,MI",
"Lansing,MI",
"Livonia,MI",
"Ludington,MI",
"Mackinaw City,MI",
"Manistee,MI",
"Marquette,MI",
"Menominee,MI",
"Midland,MI",
"Monroe,MI",
"Mount Clemens,MI",
"Mount Pleasant,MI",
"Muskegon,MI",
"Niles,MI",
"Petoskey,MI",
"Pontiac,MI",
"Port Huron,MI",
"Royal Oak,MI",
"Saginaw,MI",
"Saint Ignace,MI",
"Saint Joseph,MI",
"Sault Sainte Marie,MI",
"Traverse City,MI",
"Trenton,MI",
"Warren,MI",
"Wyandotte,MI",
"Ypsilanti,MI",
"Albert Lea,MN",
"Alexandria,MN",
"Austin,MN",
"Bemidji,MN",
"Bloomington,MN",
"Brainerd,MN",
"Crookston,MN",
"Duluth,MN",
"Ely,MN",
"Eveleth,MN",
"Faribault,MN",
"Fergus Falls,MN",
"Hastings,MN",
"Hibbing,MN",
"International Falls,MN",
"Little Falls,MN",
"Mankato,MN",
"Minneapolis,MN",
"Moorhead,MN",
"New Ulm,MN",
"Northfield,MN",
"Owatonna,MN",
"Pipestone,MN",
"Red Wing,MN",
"Rochester,MN",
"Saint Cloud,MN",
"Saint Paul,MN",
"Sauk Centre,MN",
"South Saint Paul,MN",
"Stillwater,MN",
"Virginia,MN",
"Willmar,MN",
"Winona,MN",
"Bay Saint Louis,MN",
"Biloxi,MN",
"Canton,MN",
"Clarksdale,MN",
"Columbia,MN",
"Columbus,MN",
"Corinth,MN",
"Greenville,MN",
"Greenwood,MN",
"Grenada,MN",
"Gulfport,MN",
"Hattiesburg,MN",
"Holly Springs,MN",
"Jackson,MN",
"Laurel,MN",
"Meridian,MN",
"Natchez,MN",
"Ocean Springs,MN",
"Oxford,MN",
"Pascagoula,MN",
"Pass Christian,MN",
"Philadelphia,MN",
"Port Gibson,MN",
"Starkville,MN",
"Tupelo,MN",
"Vicksburg,MN",
"West Point,MN",
"Yazoo City,MN",
"Boonville,MO",
"Branson,MO",
"Cape Girardeau,MO",
"Carthage,MO",
"Chillicothe,MO",
"Clayton,MO",
"Columbia,MO",
"Excelsior Springs,MO",
"Ferguson,MO",
"Florissant,MO",
"Fulton,MO",
"Hannibal,MO",
"Independence,MO",
"Jefferson City,MO",
"Joplin,MO",
"Kansas City,MO",
"Kirksville,MO",
"Lamar,MO",
"Lebanon,MO",
"Lexington,MO",
"Maryville,MO",
"Mexico,MO",
"Monett,MO",
"Neosho,MO",
"New Madrid,MO",
"Rolla,MO",
"Saint Charles,MO",
"Saint Joseph,MO",
"Saint Louis,MO",
"Sainte Genevieve,MO",
"Salem,MO",
"Sedalia,MO",
"Springfield,MO",
"Warrensburg,MO",
"West Plains,MO",
"Anaconda,MT",
"Billings,MT",
"Bozeman,MT",
"Butte,MT",
"Dillon,MT",
"Fort Benton,MT",
"Glendive,MT",
"Great Falls,MT",
"Havre,MT",
"Helena,MT",
"Kalispell,MT",
"Lewistown,MT",
"Livingston,MT",
"Miles City,MT",
"Missoula,MT",
"Virginia City,MT",
"Beatrice,NE",
"Bellevue,NE",
"Boys Town,NE",
"Chadron,NE",
"Columbus,NE",
"Fremont,NE",
"Grand Island,NE",
"Hastings,NE",
"Kearney,NE",
"Lincoln,NE",
"McCook,NE",
"Minden,NE",
"Nebraska City,NE",
"Norfolk,NE",
"North Platte,NE",
"Omaha,NE",
"Plattsmouth,NE",
"Red Cloud,NE",
"Sidney,NE",
"Boulder City,NE",
"Carson City,NE",
"Elko,NE",
"Ely,NE",
"Fallon,NE",
"Genoa,NE",
"Goldfield,NE",
"Henderson,NE",
"Las Vegas,NE",
"North Las Vegas,NE",
"Reno,NE",
"Sparks,NE",
"Virginia City,NE",
"Winnemucca,NE",
"Berlin,NH",
"Claremont,NH",
"Concord,NH",
"Derry,NH",
"Dover,NH",
"Durham,NH",
"Exeter,NH",
"Franklin,NH",
"Hanover,NH",
"Hillsborough,NH",
"Keene,NH",
"Laconia,NH",
"Lebanon,NH",
"Manchester,NH",
"Nashua,NH",
"Peterborough,NH",
"Plymouth,NH",
"Portsmouth,NH",
"Rochester,NH",
"Salem,NH",
"Somersworth,NH",
"Asbury Park,NJ",
"Atlantic City,NJ",
"Bayonne,NJ",
"Bloomfield,NJ",
"Bordentown,NJ",
"Bound Brook,NJ",
"Bridgeton,NJ",
"Burlington,NJ",
"Caldwell,NJ",
"Camden,NJ",
"Cape May,NJ",
"Clifton,NJ",
"Cranford,NJ",
"East Orange,NJ",
"Edison,NJ",
"Elizabeth,NJ",
"Englewood,NJ",
"Fort Lee,NJ",
"Glassboro,NJ",
"Hackensack,NJ",
"Haddonfield,NJ",
"Hoboken,NJ",
"Irvington,NJ",
"Jersey City,NJ",
"Lakehurst,NJ",
"Lakewood,NJ",
"Long Beach,NJ",
"Long Branch,NJ",
"Madison,NJ",
"Menlo Park,NJ",
"Millburn,NJ",
"Millville,NJ",
"Montclair,NJ",
"Morristown,NJ",
"Mount Holly,NJ",
"New Brunswick,NJ",
"New Milford,NJ",
"Newark,NJ",
"Ocean City,NJ",
"Orange,NJ",
"Parsippany–Troy Hills,NJ",
"Passaic,NJ",
"Paterson,NJ",
"Perth Amboy,NJ",
"Plainfieldv,NJ",
"Princeton,NJ",
"Ridgewood,NJ",
"Roselle,NJ",
"Rutherford,NJ",
"Salem,NJ",
"Somerville,NJ",
"South Orange Village,NJ",
"Totowa,NJ",
"Trenton,NJ",
"Union,NJ",
"Union City,NJ",
"Vineland,NJ",
"Wayne,NJ",
"Weehawken,NJ",
"West New York,NJ",
"West Orange,NJ",
"Willingboro,NJ",
"Woodbridge,NJ",
"Acoma,NM",
"Alamogordo,NM",
"Albuquerque,NM",
"Artesia,NM",
"Belen,NM",
"Carlsbad,NM",
"Clovis,NM",
"Deming,NM",
"Farmington,NM",
"Gallup,NM",
"Grants,NM",
"Hobbs,NM",
"Las Cruces,NM",
"Las Vegas,NM",
"Los Alamos,NM",
"Lovington,NM",
"Portales,NM",
"Raton,NM",
"Roswell,NM",
"Santa Fe,NM",
"Shiprock,NM",
"Silver City,NM",
"Socorro,NM",
"Taos,NM",
"Truth or Consequences,NM",
"Tucumcari,NM",
"Albany,NY",
"Amsterdam,NY",
"Auburn,NY",
"Babylon,NY",
"Batavia,NY",
"Beacon,NY",
"Bedford,NY",
"Binghamton,NY",
"Bronx,NY",
"Brooklyn,NY",
"Buffalo,NY",
"Chautauqua,NY",
"Cheektowaga,NY",
"Clinton,NY",
"Cohoes,NY",
"Coney Island,NY",
"Cooperstown,NY",
"Corning,NY",
"Cortland,NY",
"Crown Point,NY",
"Dunkirk,NY",
"East Aurora,NY",
"East Hampton,NY",
"Eastchester,NY",
"Elmira,NY",
"Flushing,NY",
"Forest Hills,NY",
"Fredonia,NY",
"Garden City,NY",
"Geneva,NY",
"Glens Falls,NY",
"Gloversville,NY",
"Great Neck,NY",
"Hammondsport,NY",
"Harlem,NY",
"Hempstead,NY",
"Herkimer,NY",
"Hudson,NY",
"Huntington,NY",
"Hyde Park,NY",
"Ilion,NY",
"Ithaca,NY",
"Jamestown,NY",
"Johnstown,NY",
"Kingston,NY",
"Lackawanna,NY",
"Lake Placid,NY",
"Levittown,NY",
"Lockport,NY",
"Mamaroneck,NY",
"Manhattan,NY",
"Massena,NY",
"Middletown,NY",
"Mineola,NY",
"Mount Vernon,NY",
"New Paltz,NY",
"New Rochelle,NY",
"New Windsor,NY",
"New York City,NY",
"Newburgh,NY",
"Niagara Falls,NY",
"North Hempstead,NY",
"Nyack,NY",
"Ogdensburg,NY",
"Olean,NY",
"Oneida,NY",
"Oneonta,NY",
"Ossining,NY",
"Oswego,NY",
"Oyster Bay,NY",
"Palmyra,NY",
"Peekskill,NY",
"Plattsburgh,NY",
"Port Washington,NY",
"Potsdam,NY",
"Poughkeepsie,NY",
"Queens,NY",
"Rensselaer,NY",
"Rochester,NY",
"Rome,NY",
"Rotterdam,NY",
"Rye,NY",
"Sag Harbor,NY",
"Saranac Lake,NY",
"Saratoga Springs,NY",
"Scarsdale,NY",
"Schenectady,NY",
"Seneca Falls,NY",
"Southampton,NY",
"Staten Island,NY",
"Stony Brookv",
"Stony Point,NY",
"Syracuse,NY",
"Tarrytown,NY",
"Ticonderoga,NY",
"Tonawanda,NY",
"Troy,NY",
"Utica,NY",
"Watertown,NY",
"Watervliet,NY",
"Watkins Glen,NY",
"West Seneca,NY",
"White Plains,NY",
"Woodstock,NY",
"Yonkers,NY",
"Asheboro,NC",
"Asheville,NC",
"Bath,NC",
"Beaufort,NC",
"Boone,NC",
"Burlington,NC",
"Chapel Hill,NC",
"Charlotte,NC",
"Concord,NC",
"Durham,NC",
"Edenton,NC",
"Elizabeth City,NC",
"Fayetteville,NC",
"Gastonia,NC",
"Goldsboro,NC",
"Greensboro,NC",
"Greenville,NC",
"Halifax,NC",
"Henderson,NC",
"Hickory,NC",
"High Point,NC",
"Hillsborough,NC",
"Jacksonville,NC",
"Kinston,NC",
"Kitty Hawk,NC",
"Lumberton,NC",
"Morehead City,NC",
"Morganton,NC",
"Nags Head,NC",
"New Bern,NC",
"Pinehurst,NC",
"Raleigh,NC",
"Rocky Mount,NC",
"Salisbury,NC",
"Shelby,NC",
"Washington,NC",
"Wilmington,NC",
"Wilson,NC",
"Winston-Salem,NC",
"Bismarck,ND",
"Devils Lake,ND",
"Dickinson,ND",
"Fargo,ND",
"Grand Forks,ND",
"Jamestown,ND",
"Mandan,ND",
"Minot,ND",
"Rugby,ND",
"Valley City,ND",
"Wahpeton,ND",
"Williston,ND",
"Akron,OH",
"Alliance,OH",
"Ashtabula,OH",
"Athens,OH",
"Barberton,OH",
"Bedford,OH",
"Bellefontaine,OH",
"Bowling Green,OH",
"Canton,OH",
"Chillicothe,OH",
"Cincinnati,OH",
"Cleveland,OH",
"Cleveland Heights,OH",
"Columbus,OH",
"Conneaut,OH",
"Cuyahoga Falls,OH",
"Dayton,OH",
"Defiance,OH",
"Delaware,OH",
"East Cleveland,OH",
"East Liverpool,OH",
"Elyria,OH",
"Euclid,OH",
"Findlay,OH",
"Gallipolis,OH",
"Greenville,OH",
"Hamilton,OH",
"Kent,OH",
"Kettering,OH",
"Lakewood,OH",
"Lancaster,OH",
"Lima,OH",
"Lorain,OH",
"Mansfield,OH",
"Marietta,OH",
"Marionv",
"Martins Ferry,OH",
"Massillon,OH",
"Mentor,OH",
"Middletown,OH",
"Milan,OH",
"Mount Vernon,OH",
"New Philadelphia,OH",
"Newark,OH",
"Niles,OH",
"North College Hill,OH",
"Norwalk,OH",
"Oberlin,OH",
"Painesville,OH",
"Parma,OH",
"Piqua,OH",
"Portsmouth,OH",
"Put-in-Bay,OH",
"Salem,OH",
"Sandusky,OH",
"Shaker Heights,OH",
"Springfield,OH",
"Steubenville,OH",
"Tiffin,OH",
"Toledo,OH",
"Urbanav",
"Warren,OH",
"Wooster,OH",
"Worthington,OH",
"Xenia,OH",
"Yellow Springs,OH",
"Youngstown,OH",
"Zanesville,OH",
"Ada,OK",
"Altus,OK",
"Alva,OK",
"Anadarko,OK",
"Ardmore,OK",
"Bartlesville,OK",
"Bethany,OK",
"Chickasha,OK",
"Claremore,OK",
"Clinton,OK",
"Cushing,OK",
"Duncan,OK",
"Durant,OK",
"Edmond,OK",
"El Reno,OK",
"Elk City,OK",
"Enid,OK",
"Eufaula,OK",
"Frederick,OK",
"Guthrie,OK",
"Guymon,OK",
"Hobart,OK",
"Holdenville,OK",
"Hugo,OK",
"Lawton,OK",
"McAlester,OK",
"Miami,OK",
"Midwest City,OK",
"Moore,OK",
"Muskogee,OK",
"Norman,OK",
"Oklahoma City,OK",
"Okmulgee,OK",
"Pauls Valley,OK",
"Pawhuska,OK",
"Perry,OK",
"Ponca City,OK",
"Pryor,OK",
"Sallisaw,OK",
"Sand Springs,OK",
"Sapulpa,OK",
"Seminole,OK",
"Shawnee,OK",
"Stillwater,OK",
"Tahlequah,OK",
"The Village,OK",
"Tulsa,OK",
"Vinita,OK",
"Wewoka,OK",
"Woodward,OK",
"Albany,OR",
"Ashland,OR",
"Astoria,OR",
"Baker City,OR",
"Beaverton,OR",
"Bend,OR",
"Brookings,OR",
"Burns,OR",
"Coos Bay,OR",
"Corvallis,OR",
"Eugene,OR",
"Grants Pass,OR",
"Hillsboro,OR",
"Hood River,OR",
"Jacksonville,OR",
"John Day,OR",
"Klamath Falls,OR",
"La Grande,OR",
"Lake Oswego,OR",
"Lakeview,OR",
"McMinnville,OR",
"Medford,OR",
"Newberg,OR",
"Newport,OR",
"Ontario,OR",
"Oregon City,OR",
"Pendleton,OR",
"Port Orford,OR",
"Portland,OR",
"Prineville,OR",
"Redmond,OR",
"Reedsport,OR",
"Roseburg,OR",
"Salem,OR",
"Seaside,OR",
"Springfield,OR",
"The Dalles,OR",
"Tillamook,OR",
"Abington,OR",
"Aliquippa,OR",
"Allentown,OR",
"Altoona,OR",
"Ambridge,OR",
"Bedford,OR",
"Bethlehem,OR",
"Bloomsburg,OR",
"Bradford,OR",
"Bristol,OR",
"Carbondale,OR",
"Carlisle,OR",
"Chambersburg,OR",
"Chester,OR",
"Columbia,OR",
"Easton,OR",
"Erie,OR",
"Franklin,OR",
"Germantown,OR",
"Gettysburg,OR",
"Greensburgv",
"Hanover,OR",
"Harmony,OR",
"Harrisburg,OR",
"Hazleton,OR",
"Hershey,OR",
"Homestead,OR",
"Honesdale,OR",
"Indiana,OR",
"Jeannette,OR",
"Jim Thorpe,OR",
"Johnstown,OR",
"Lancaster,OR",
"Lebanon,OR",
"Levittown,OR",
"Lewistown,OR",
"Lock Haven,OR",
"Lower Southampton,OR",
"McKeesport,OR",
"Meadville,OR",
"Middletown,OR",
"Monroeville,OR",
"Nanticoke,OR",
"New Castle,OR",
"New Hope,OR",
"New Kensington,OR",
"Norristown,OR",
"Oil City,OR",
"Philadelphia,OR",
"Phoenixville,OR",
"Pittsburgh,OR",
"Pottstown,OR",
"Pottsville,OR",
"Reading,OR",
"Scranton,OR",
"Shamokin,OR",
"Sharon,OR",
"State College,OR",
"Stroudsburg,OR",
"Sunbury,OR",
"Swarthmore,OR",
"Tamaqua,OR",
"Titusville,OR",
"Uniontown,OR",
"Warren,OR",
"Washington,OR",
"West Chester,OR",
"Wilkes-Barre,OR",
"Williamsport,OR",
"York,OR",
"Barrington,RI",
"Bristol,RI",
"Central Falls,RI",
"Cranston,RI",
"East Greenwich,RI",
"East Providence,RI",
"Kingston,RI",
"Middletown,RI",
"Narragansett,RI",
"Newport,RI",
"North Kingstown,RI",
"Portsmouth,RI",
"Providence,RI",
"South Kingstown,RI",
"Tiverton,RI",
"Warren,RI",
"Warwick,RI",
"Westerly,RI",
"Wickford,RI",
"Woonsocket,RI",
"Abbeville,SC",
"Aiken,SC",
"Anderson,SC",
"Beaufort,SC",
"Camden,SC",
"Charleston,SC",
"Columbia,SC",
"Darlington,SC",
"Florence,SC",
"Gaffney,SC",
"Georgetown,SC",
"Greenville,SC",
"Greenwood,SC",
"Hartsville,SC",
"Lancaster,SC",
"Mount Pleasant,SC",
"Myrtle Beach,SC",
"Orangeburg,SC",
"Rock Hill,SC",
"Spartanbur,SCg",
"Sumter,SC",
"Union,SC",
"Aberdeen,SD",
"Belle Fourche,SD",
"Brookings,SD",
"Canton,SD",
"Custer,SD",
"De Smet,SD",
"Deadwood,SD",
"Hot Springs,SD",
"Huron,SD",
"Lead,SD",
"Madison,SD",
"Milbank,SD",
"Mitchell,SD",
"Mobridge,SD",
"Pierre,SD",
"Rapid City,SD",
"Sioux Falls,SD",
"Spearfish,SD",
"Sturgis,SD",
"Vermillion,SD",
"Watertown,SD",
"Yankton,SD",
"Alcoa,TN",
"Athens,TN",
"Chattanooga,TN",
"Clarksville,TN",
"Cleveland,TN",
"Columbia,TN",
"Cookeville,TN",
"Dayton,TN",
"Elizabethton,TN",
"Franklin,TN",
"Gallatin,TN",
"Gatlinburg,TN",
"Greeneville,TN",
"Jackson,TN",
"Johnson City,TN",
"Jonesborough,TN",
"Kingsport,TN",
"Knoxville,TN",
"Lebanon,TN",
"Maryville,TN",
"Memphis,TN",
"Morristown,TN",
"Murfreesboro,TN",
"Nashville,TN",
"Norris,TN",
"Oak Ridge,TN",
"Shelbyville,TN",
"Tullahoma,TN",
"Abilene,TX",
"Alpine,TX",
"Amarillo,TX",
"Arlington,TX",
"Austin,TX",
"Baytown,TX",
"Beaumont,TX",
"Big Spring,TX",
"Borger,TX",
"Brownsville,TX",
"Bryan,TX",
"Canyon,TX",
"Cleburne,TX",
"College Station,TX",
"Corpus Christi,TX",
"Crystal City,TX",
"Dallas,TX",
"Del Rio,TX",
"Denison,TX",
"Denton,TX",
"Eagle Pass,TX",
"Edinburg,TX",
"El Paso,TX",
"Fort Worth,TX",
"Freeport,TX",
"Galveston,TX",
"Garland,TX",
"Goliad,TX",
"Greenville,TX",
"Harlingen,TX",
"Houston,TX",
"Huntsville,TX",
"Irving,TX",
"Johnson City,TX",
"Kilgore,TX",
"Killeen,TX",
"Kingsville,TX",
"Laredo,TX",
"Longview,TX",
"Lubbock,TX",
"Lufkin,TX",
"Marshall,TX",
"McAllen,TX",
"McKinney,TX",
"Mesquite,TX",
"Midland,TX",
"Mission,TX",
"Nacogdoches,TX",
"New Braunfels,TX",
"Odessa,TX",
"Orange,TX",
"Pampa,TX",
"Paris,TX",
"Pasadena,TX",
"Pecos,TX",
"Pharr,TX",
"Plainview,TX",
"Plano,TX",
"Port Arthur,TX",
"Port Lavaca,TX",
"Richardson,TX",
"San Angelo,TX",
"San Antonio,TX",
"San Felipe,TX",
"San Marcosv",
"Sherman,TX",
"Sweetwater,TX",
"Temple,TX",
"Texarkana,TX",
"Texas City,TX",
"Tyler,TX",
"Uvalde,TX",
"Victoria,TX",
"Waco,TX",
"Weatherford,TX",
"Wichita Falls,TX",
"Ysleta,TX",
"Alta,UT",
"American Fork,UT",
"Bountiful,UT",
"Brigham City,UT",
"Cedar City,UT",
"Clearfield,UT",
"Delta,UT",
"Fillmore,UT",
"Green River,UT",
"Heber City,UT",
"Kanab,UT",
"Layton,UT",
"Lehi,UT",
"Logan,UT",
"Manti,UT",
"Moab,UT",
"Monticello,UT",
"Murray,UT",
"Nephi,UT",
"Ogden,UT",
"Orderville,UT",
"Orem,UT",
"Panguitch,UT",
"Park City,UT",
"Payson,UT",
"Price,UT",
"Provo,UT",
"Saint George,UT",
"Salt Lake City,UT",
"Spanish Fork,UT",
"Springville,UT",
"Tooele,UT",
"Vernal,UT",
"Barre,VT",
"Bellows Falls,VT",
"Bennington,VT",
"Brattleboro,VT",
"Burlington,VT",
"Essex,VT",
"Manchester,VT",
"Middlebury,VT",
"Montpelier,VT",
"Newport,VT",
"Plymouth,VT",
"Rutland,VT",
"Saint Albans,VT",
"Saint Johnsbury,VT",
"Sharon,VT",
"Winooski,VT",
"Abingdon,VA",
"Alexandria,VA",
"Bristol,VA",
"Charlottesville,VA",
"Chesapeake,VA",
"Danville,VA",
"Fairfax,VA",
"Falls Church,VA",
"Fredericksburg,VA",
"Hampton,VA",
"Hanover,VA",
"Hopewell,VA",
"Lexington,VA",
"Lynchburg,VA",
"Manassas,VA",
"Martinsville,VA",
"New Market,VA",
"Newport News,VA",
"Norfolk,VA",
"Petersburg,VA",
"Portsmouth,VA",
"Reston,VA",
"Richmond,VA",
"Roanoke,VA",
"Staunton,VA",
"Suffolk,VA",
"Virginia Beach,VA",
"Waynesboro,VA",
"Williamsburg,VA",
"Winchester,VA",
"Aberdeen,WA",
"Anacortes,WA",
"Auburn,WA",
"Bellevue,WA",
"Bellingham,WA",
"Bremerton,WA",
"Centralia,WA",
"Coulee Dam,WA",
"Coupeville,WA",
"Ellensburg,WA",
"Ephrata,WA",
"Everett,WA",
"Hoquiam,WA",
"Kelso,WA",
"Kennewick,WA",
"Longview,WA",
"Moses Lake,WA",
"Oak Harbor,WA",
"Olympia,WA",
"Pasco,WA",
"Point Roberts,WA",
"Port Angeles,WA",
"Pullman,WA",
"Puyallup,WA",
"Redmond,WA",
"Renton,WA",
"Richland,WA",
"Seattle,WA",
"Spokane,WA",
"Tacoma,WA",
"Vancouver,WA",
"Walla Walla,WA",
"Wenatchee,WA",
"Yakima,WA",
"Bath,WV",
"Beckley,WV",
"Bluefield,WV",
"Buckhannon,WV",
"Charles Town,WV",
"Charleston,WV",
"Clarksburg,WV",
"Elkins,WV",
"Fairmont,WV",
"Grafton,WV",
"Harpers Ferry,WV",
"Hillsboro,WV",
"Hinton,WV",
"Huntington,WV",
"Keyser,WV",
"Lewisburg,WV",
"Logan,WV",
"Martinsburg,WV",
"Morgantown,WV",
"Moundsville,WV",
"New Martinsville,WV",
"Parkersburg,WV",
"Philippi,WV",
"Point Pleasant,WV",
"Princeton,WV",
"Romney,WV",
"Shepherdstown,WV",
"South Charleston,WV",
"Summersville,WV",
"Weirton,WV",
"Welch,WV",
"Wellsburg,WV",
"Weston,WV",
"Wheeling,WV",
"White Sulphur Springs,WV",
"Williamson,WV",
"Appleton,WI",
"Ashland,WI",
"Baraboo,WI",
"Belmont,WI",
"Beloit,WI",
"Eau Claire,WI",
"Fond du Lac,WI",
"Green Bay,WI",
"Hayward,WI",
"Janesville,WI",
"Kenosha,WI",
"La Crosse,WI",
"Lake Geneva,WI",
"Madison,WI",
"Manitowoc,WI",
"Marinette,WI",
"Menasha,WI",
"Milwaukee,WI",
"Neenah,WI",
"New Glarus,WI",
"Oconto,WI",
"Oshkosh,WI",
"Peshtigo,WI",
"Portage,WI",
"Prairie du Chien,WI",
"Racine,WI",
"Rhinelander,WI",
"Ripon,WI",
"Sheboygan,WI",
"Spring Green,WI",
"Stevens Point,WI",
"Sturgeon Bay,WI",
"Superior,WI",
"Waukesha,WI",
"Wausau,WI",
"Wauwatosa,WI",
"West Allis,WI",
"West Bend,WI",
"Wisconsin Dells,WI",
"Buffalo,WY",
"Casper,WY",
"Cheyenne,WY",
"Cody,WY",
"Douglas,WY",
"Evanston,WY",
"Gillette,WY",
"Green River,WY",
"Jackson,WY",
"Lander,WY",
"Laramie,WY",
"Newcastle,WY",
"Powellv",
"Rawlins,WY",
"Riverton,WY",
"Rock Springs,WY",
"Sheridan,WY",
"Ten Sleep,WY",
"Thermopolis,WY",
"Torrington,WY",
"Worland,WY",
] | {"/ubereats/spiders/ubereatsSpider.py": ["/ubereats/items.py"]} |
68,302 | ryanvisioncord/ubereats_scrapy_selenium_scraping | refs/heads/master | /ubereats/items.py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class UbereatsItem(scrapy.Item):
    """One scraped Uber Eats restaurant record.

    Scrapy raises KeyError when a spider assigns a key that is not declared
    as a Field, so every key the spider writes must be declared here.
    """
    restaurant_name = scrapy.Field()  # restaurant title from the detail page
    food_type = scrapy.Field()        # cuisine list sliced from the page JSON
    delivery_time = scrapy.Field()    # opening/delivery hours text
    rating = scrapy.Field()           # dict: {'rating': ..., 'popular_food': [...]}
    address = scrapy.Field()          # street address, punctuation-stripped
    popular_food = scrapy.Field()     # popular dishes shown for the city
    menu = scrapy.Field()             # list of menu entries (name/desc/price/section)
    # Fix: the spider assigns item['city'] and item['state'] (parsed from the
    # "City,ST" search string). Without these declarations the assignments
    # raise KeyError and are silently dropped by the surrounding try/except.
    city = scrapy.Field()
    state = scrapy.Field()
| {"/ubereats/spiders/ubereatsSpider.py": ["/ubereats/items.py"]} |
68,303 | ryanvisioncord/ubereats_scrapy_selenium_scraping | refs/heads/master | /ubereats/selectors.py | SELECTORS = {
    # XPath/CSS selectors for each stage of the Uber Eats scrape.
    # NOTE(review): several XPaths key off obfuscated, build-generated class
    # names ('ev au av ew', 'cd d2 bs as dl', ...) — these break whenever the
    # site redeploys; confirm against the live markup before relying on them.

    # search page
    'search_content': '//main/div[2]//h2',
    # address input box on the landing page
    'search_input': '//input[@id="location-typeahead-home-input"]',
    'search_button': '//main//button[contains(text(), "Find Food")]',
    # search result page
    # "Show more" pagination button at the bottom of the result list
    'show_more_button': '//button[text()="Show more"]',
    # any anchor whose href contains "food" is treated as a restaurant link
    'restaurant_link': '//a[contains(@href, "food")]',
    'popular_food_near_you' : "//div[@class='ev au av ew']//text()[contains(., 'Popular Near You')]//ancestor::div[@class='ev au av ew']/following-sibling::div/a/@href",
    # restaurant page
    'rest_name': '//main/div[1]/div[1]/div[1]/div[2]/div/div[2]/h1/text()',
    'food_type': "//div[@class='cd d2 bs as dl']/text()",
    'delivery_time': "//div[@aria-hidden='true' and @class='au aw'][1]/text()",
    'delivery_fee': "//div[@aria-hidden='true' and @class='au aw'][2]/text()",
    'rating': "//div[@class='as']/text()",
    # 'caption': '//main/div[1]/div[1]/div[1]/div[2]/div/div[2]/div[1]/text()',
    'address': '//main/div[1]/div[1]/div[1]/div[2]/div/div[2]/p//text()',
    # menu sections and the items within each section
    'section': '//main/div[2]/ul/li',
    'section_name': './h2/text()',
    'item': './ul/li',
    'item_name': './div/div/div/div[1]/h4//text()',
    'item_text_areas': './div/div/div/div[1]/div/div//text()'
}
68,304 | ryanvisioncord/ubereats_scrapy_selenium_scraping | refs/heads/master | /ubereats/spiders/ubereatsSpider.py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
from scrapy.http import Request
from ubereats.items import UbereatsItem
import time
import traceback
import re
import random
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from ubereats import selectors
from ubereats import cities
from ubereats import proxies
from selenium.webdriver.support.wait import WebDriverWait
class UbereatsspiderSpider(scrapy.Spider):
    """Selenium-driven spider for Uber Eats restaurant data.

    For each "City,ST" string it runs the Uber Eats address search in a
    headless, proxied Chrome, expands the full restaurant list, then visits
    every restaurant page and yields one UbereatsItem per restaurant.
    """
    name = 'ubereatsSpider'
    # allowed_domains = ['ubereats.com/']
    # start_urls = ['https://www.ubereats.com/portland/food-delivery/wow-bao-por02-1/HN5Uqz2FSOO8f_J7JoQaiA']

    def __init__(self, *args, **kwargs):
        # Fix: the original never called the base initializer, which breaks
        # spider arguments passed on the command line ("-a key=value").
        super(UbereatsspiderSpider, self).__init__(*args, **kwargs)
        self.result = dict()
        self.url = 'https://www.ubereats.com/'
        self.cities = cities.cities_list
        self.proxy_list = proxies.proxies_list

    def get_random_proxy(self):
        """Return one randomly chosen "host:port" proxy string."""
        return random.choice(self.proxy_list)

    def set_driver(self):
        """Create a headless Chrome WebDriver routed through a random proxy.

        NOTE(review): mutating webdriver.DesiredCapabilities.CHROME changes a
        module-level global and therefore every driver built afterwards;
        preserved as-is from the original implementation.
        """
        proxy_http = "http://" + self.get_random_proxy()
        webdriver.DesiredCapabilities.CHROME['proxy'] = {
            "httpProxy": proxy_http,
            "ftpProxy": proxy_http,
            "sslProxy": proxy_http,
            "proxyType": "MANUAL",
        }
        chrome_option = webdriver.ChromeOptions()
        chrome_option.add_argument('--no-sandbox')
        chrome_option.add_argument('--disable-dev-shm-usage')
        chrome_option.add_argument('--ignore-certificate-errors')
        chrome_option.add_argument("--disable-blink-features=AutomationControlled")
        chrome_option.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36')
        chrome_option.headless = True
        return webdriver.Chrome(options=chrome_option)

    def start_requests(self):
        """Issue one seed request; all real navigation happens via Selenium
        inside parse()."""
        yield Request("https://www.ubereats.com/", callback=self.parse)

    def parse(self, response):
        """Drive the search flow for each city and yield restaurant items."""
        # NOTE(review): debugging leftover — this clobbers the full city list
        # loaded in __init__ and restricts the crawl to a single city.
        self.cities = ['Anniston,AL']
        for city in self.cities:
            # Retry the search flow with a fresh proxy/driver until it works.
            while True:
                try:
                    self.driver = self.set_driver()
                    # Sanity-check which IP the proxy exposes.
                    self.driver.get("https://httpbin.org/ip")
                    print("###########################", self.driver.page_source)
                    self.driver.get(self.url)
                    sleep(10)
                    el_search_input = self.driver.find_element_by_xpath(selectors.SELECTORS['search_input'])
                    el_search_input.send_keys(city)
                    sleep(10)
                    el_search_submit = self.driver.find_element_by_xpath(selectors.SELECTORS['search_button'])
                    el_search_submit.click()
                    sleep(20)
                    break
                except Exception:
                    # Fix: guard the quit — if set_driver() itself failed on
                    # the first attempt, self.driver does not exist yet.
                    if getattr(self, 'driver', None) is not None:
                        self.driver.quit()
                    continue
            # Popular dishes for this city. Fix: initialised before the try so
            # a failure above cannot leave the name unbound when it is used
            # again for every restaurant item below.
            popular_food_lst = []
            try:
                popular_food_nodes = self.driver.find_elements_by_xpath("/html[1]/body[1]/div[1]/div[1]/main[1]/div[2]/div[2]/div[2]/section[1]/div/ul/li")
                time.sleep(5)
                for popular_food_node in popular_food_nodes:
                    food_name = popular_food_node.find_element_by_xpath(".//a").text
                    try:
                        delivery_cost = popular_food_node.find_element_by_xpath("./div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]").text
                        if 'fee' not in delivery_cost or '$' not in delivery_cost:
                            delivery_cost = ''
                    except Exception:
                        delivery_cost = ''
                    try:
                        delivery_time = popular_food_node.find_element_by_xpath("./div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/span[1]").text
                    except Exception:
                        delivery_time = ''
                    try:
                        food_type = popular_food_node.find_element_by_xpath("./div[1]/div[1]/div[1]/div[1]/div[2]/div[1]").text
                    except Exception:
                        food_type = ''
                    popular_food_lst.append({
                        'food_name': food_name,
                        'delivery_cost': delivery_cost,
                        'delivery_time': delivery_time,
                        'food_type': food_type,
                    })
            except Exception:
                pass
            # Click "Show more" until the full restaurant list has loaded.
            while True:
                try:
                    view_more_btn = self.driver.find_element_by_xpath(selectors.SELECTORS['show_more_button'])
                    if view_more_btn:
                        view_more_btn.click()
                        print('#################-- ', 'Cliced View More Btn', '--################')
                        time.sleep(10)
                    else:
                        break
                except Exception:
                    break
            # Collect every restaurant detail-page URL from the result page.
            restaurant_links = self.driver.find_elements_by_xpath(selectors.SELECTORS['restaurant_link'])
            rest_links = []
            for restaurant_link in restaurant_links:
                final_link = restaurant_link.get_attribute('href')
                if final_link:
                    rest_links.append(final_link)
            print('#################-- ', len(rest_links), '--################')
            for rest_link in rest_links:
                try:
                    print(rest_link)
                    self.driver.get(rest_link)
                    item = UbereatsItem()
                    try:
                        item['restaurant_name'] = ''.join(self.driver.find_element_by_xpath("//main/div[1]/div[1]/div[1]/div[2]/div/div[2]/h1").text).strip()
                    except Exception:
                        # No name means the page did not render; skip it.
                        continue
                    try:
                        res1 = self.driver.page_source
                        # The cuisine list only exists in the embedded JSON-LD,
                        # so it is sliced straight out of the raw HTML.
                        item['food_type'] = res1[res1.index('"servesCuisine":[') + len('"servesCuisine":['):res1.index('],"priceRange"')].replace('"', "")
                    except Exception:
                        item['food_type'] = ''
                    try:
                        item['delivery_time'] = ''.join(self.driver.find_element_by_xpath("//tr[contains(.,'Every Day')]/following::tr/td").text).strip()
                    except Exception:
                        item['delivery_time'] = ''
                    try:
                        rating = ''.join(self.driver.find_element_by_xpath("//h1//following::div[5]").text).strip()
                        if '$' in rating:
                            # A price marker means this node was not the rating.
                            rating = ''
                    except Exception:
                        rating = ''
                    try:
                        address = ''.join(self.driver.find_element_by_xpath("//p[@class][1]").text).strip()
                        item['address'] = ''.join(re.findall('[0-9A-Za-z,. ]', address.replace("More info", ""))).strip()
                    except Exception:
                        item['address'] = ''
                    try:
                        # NOTE(review): requires 'city'/'state' Fields on
                        # UbereatsItem; otherwise this raises KeyError and the
                        # values are silently dropped here.
                        item['city'] = city.split(',')[0]
                        item['state'] = city.split(',')[1]
                    except Exception:
                        pass
                    parent_nodes = self.driver.find_elements_by_xpath("//div/ul/li")
                    print('@@@@@@@@@@@@@@@@@@@@@@@@@@@', len(parent_nodes), '@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
                    food_lst = []
                    for parent_node in parent_nodes:
                        try:
                            section = ''.join(parent_node.find_element_by_xpath("./h2").text).strip()
                            nodes = parent_node.find_elements_by_xpath(".//ul/li")
                            for node in nodes:
                                if len(node.find_elements_by_xpath(".//h4")) == 1:
                                    food_name = ''.join(node.find_element_by_xpath(".//h4").text).strip()
                                    food_desc = ''.join(node.find_element_by_xpath(".//div/div").text).strip()
                                    price = ''
                                    food_description = ''
                                    if "$" in food_desc:
                                        food_description = food_desc.split("$")[0]
                                        price = food_desc.split("$")[1]
                                    # Renamed from "dict" — it shadowed the builtin.
                                    menu_entry = {'food_name': food_name, 'food_desc': food_description, 'price': price, 'section': section}
                                    food_lst.append(menu_entry)
                        except Exception:
                            continue
                    item['menu'] = food_lst
                    item['rating'] = {'rating': rating, 'popular_food': popular_food_lst}
                    yield item
                except Exception as e:
                    print(e)
                    time.sleep(5)
                    continue
            time.sleep(5)
            self.driver.quit()
            time.sleep(5)
| {"/ubereats/spiders/ubereatsSpider.py": ["/ubereats/items.py"]} |
68,305 | lospheris/steganographer | refs/heads/master | /tests/test.py | import sys
import os
sys.path.append("..")
from steganographer import *
from Crypto.Hash import SHA256
from message import CryptoHelper
# These are the values that should be expected based on explicit test cases.
pt_message = "The quick brown fox jumped over the lazy dog."
output_file_name = "text_message_output.txt"
output_image_file_name = "message_output_picture.png"
expected_hash = "6bb6c84384cffedb4529ccc23c635e7134a6bff6c0222e8806c28683cdb3559c"
key_file = "test_key.pem"
pubkey_file = "test_key_publiconly.pem"
encrypted_image_file_name = "encrypted_" + output_image_file_name
encrypted_output_file_name = "encrypted_" + output_file_name
steg = Steganographer(inputFile="test_input_picture.jpg",
outputFile=output_image_file_name)
steg.encode_message(pt_message)
decoded_message = steg.decode_message()
if decoded_message != pt_message:
print("Something went wrong either encoding or decoding the message.")
print("Should have returned: " + pt_message)
print("Did return: " + decoded_message)
exit(1)
else:
print("Unencrypted string message test successful!")
steg.encode_message_from_file("test_message.txt")
steg.decode_message_to_file(output_file_name)
my_hash = SHA256.new()
try:
my_hash.update(open(output_file_name, 'r').read())
except IOError as e:
print("Couldn't open the outputted text file. Permissions?")
exit(1)
computed_hash = my_hash.hexdigest()
if computed_hash != expected_hash:
print("The output file from the decoded image didn't not match the hash.")
print("Expected: " + expected_hash)
print("Computed: " + computed_hash)
exit(1)
else:
print("The outputed file matches the stored hash! Direct file functions work!")
print("Attempting to generate a 4096 bit key.")
try:
CryptoHelper.generate_keys(key_file, expected_hash, 4096)
except Exception as e:
print("There was a problem generating the key.")
print("The following error was encountered: ")
print(e)
print("The keys were generated successfully!")
steg = EncryptedSteganographer(inputFile="test_input_picture.jpg",
outputFile=encrypted_image_file_name,
recipientPublicKeyFileName=pubkey_file,
sendersKeyPairFileName=key_file,
passphrase=expected_hash)
print("Encode message.")
steg.encrypt_and_encode_message(pt_message)
decoded_message = steg.decrypt_and_decode_message()
if decoded_message != pt_message:
print("Something went wrong either encoding or decoding the message.")
print("Should have returned: " + pt_message )
print("Did return: " + decoded_message )
exit(1)
else:
print("Encrypted string message test successful!")
print("Encode message from file.")
steg.encrypt_and_encode_message_from_file("test_message.txt")
steg.decrypt_and_decode_message_to_file(encrypted_output_file_name)
my_hash = SHA256.new()
try:
my_hash.update(open(encrypted_output_file_name, 'r').read())
except IOError as e:
print("Couldn't open the outputted text file. Permissions?")
exit(1)
computed_hash = my_hash.hexdigest()
if computed_hash != expected_hash:
print("The output file from the decoded image didn't not match the hash.")
print("Expected: " + expected_hash)
print("Computed: " + computed_hash)
exit(1)
else:
print("The outputted file matches the stored hash! Encrypted direct file functions work!")
print("The library is functioning properly!")
print("Cleaning up!")
os.remove(output_file_name)
os.remove(key_file)
os.remove(pubkey_file)
os.remove(encrypted_output_file_name)
os.remove(encrypted_image_file_name)
os.remove(output_image_file_name)
print("Clean up complete, exiting.")
exit(0)
| {"/tests/test.py": ["/steganographer.py", "/message.py"], "/steganographer.py": ["/message.py"]} |
68,306 | lospheris/steganographer | refs/heads/master | /steganographer.py | #!/usr/bin/python
__author__ = "Dell-Ray Sackett"
__version__ = "0.6"
import argparse
from PIL import Image
import numpy
from message import Message
from message import CryptoHelper
class steganographer(object):
    """
    An object for performing Steganography on an image.

    Hides a message in the least-significant bit of each colour channel of
    an image: a 32-bit length prefix followed by 8 bits per character. The
    result is written as an uncompressed PNG so the bits survive saving.
    """
def __init__(self, **kwargs):
"""
Initialize a Steganography object
Keyword Arguments:
inputFile -- The filename of the image you wish to embed information information.
outputFile -- The filename of the image that will have your information in it.
"""
self._input_file = ""
self._output_file = ""
self.__image_data = numpy.empty((1, 1, 1))
self.__color_mode = ""
self.__color_size = 0
self.__image_size = (0, 0)
self.__max_bits_storable = 0
if kwargs:
for arg in kwargs:
if arg is "inputFile":
self._input_file = kwargs[arg]
elif arg is "outputFile":
self._output_file = kwargs[arg]
# Static Methods
@staticmethod
def int_to_bin_list(number):
"""
Return the least significant 32 bits of a number as a list.
Mandatory Arguments:
number -- The number you would like returned as a list.
"""
list_value = []
for i in reversed(range(32)):
"""
Iterate through the last 32 bits of the number passed. I single out
each bit by bit shifting the number 1 (essentially a bit mask here)
left by the current index then bitwise anding that against the
original number. That gives me the value of that position then I
shift it back by the index to make sure that the bit only occupies
the 1 bit. If you don't do that last part then python with
interpret it as whatever value that bit place holds. ie if it was
the 8 bit and it was set then you will get 8 instead of 1.
"""
list_value += [((1 << i) & number) >> i]
return list_value
@staticmethod
def bin_list_to_int(bin_list):
"""
Returns the integer value of a binary list.
Mandatory Arguments:
binList -- A list of 1s and 0s to be assembled back into an integer.
"""
int_value = 0
for i in range(32):
"""
This is pretty simple. You just get the value from the current
index. Which should only be a 1 or a 0. Then shift it left by the
index. Lastly you add the number created by the shift to the
current value.
"""
int_value += bin_list[31 - i] << i
return int_value
@staticmethod
def char_to_bin_list(char):
"""
Return a list of 1s and 0s representing the binary of a character.
Mandatory Arguments:
char -- A character to be broken down into a list.
"""
int_value = ord(char)
list_value = []
for i in reversed(range(8)):
list_value += [((1 << i) & int_value) >> i]
return list_value
@staticmethod
def bin_list_to_char(bin_list):
"""
Take a binary List and turn it back into a char.
Mandatory Arguments:
binList -- A list of 1s and 0s to be back into a char.
"""
int_value = 0
for i in range(8):
int_value += bin_list[7 - i] << i
return chr(int_value)
@staticmethod
def message_to_bin_list(message):
"""
Takes a message and turns it into a binary list
Mandatory Arguments:
message -- A string to be broken down into a list of binary values.
"""
list_value = []
for character in message:
list_value += steganographer.char_to_bin_list(character)
return list_value
@staticmethod
def bin_list_to_message(bin_list):
"""
This turns a binary list back into a message.
Mandatory Arguments:
binList -- A list of 1s and 0s to be converted back into a string.
Must be divisible by 8.
Exceptions:
ValueError -- This will be raised if the input bit_list is not evenly
divisable by 8.
"""
if (len(bin_list) % 8) is not 0:
raise ValueError("The input list is required to be evenly divisable by 8")
list_tmp = []
bit_counter = 0
message = ""
for value in range(0, len(bin_list)):
list_tmp.append(bin_list[value])
if bit_counter == 7:
message += steganographer.bin_list_to_char(list_tmp)
list_tmp = []
bit_counter = -1
bit_counter += 1
return message
# I "Borrowed" this wholesale from stack exchange
@staticmethod
def set_bit(v, index, x):
"""
Set the index:th bit of v to x, and return the new value.
Mandatory Arguments:
v -- A variable in which a bit is to be changed.
index -- The index of the bit to change.
x -- The value to change index in variable v to.
"""
mask = 1 << index
v &= ~mask
if x:
v |= mask
return v
    @staticmethod
    def compare_pixels(image_a, image_b, pixels=512):
        """
        Compare the specified amount of pixel data of the two pictures given.

        Prints each of the first *pixels* pixel values of both images side by
        side (useful for eyeballing the LSB changes made by encoding).

        Mandatory Arguments:
        image_a -- The first image filename.
        image_b -- The second image filename.

        Optional Arguments:
        pixels -- The number of pixels to compare.

        Exceptions:
        IOError -- This is raised if there is a problem opening one of the
            input image files.
        """
        print("Reading " + str(pixels) + " pixels from " + image_a + " and " + image_b + ".")
        try:
            __oim = Image.open(image_a)
            __nim = Image.open(image_b)
        except IOError as e:
            raise e
        __oimd = numpy.asarray(__oim)
        __nimd = numpy.asarray(__nim)
        if not (__nim.size[0] == __oim.size[0] and __nim.size[1] == __oim.size[1]):
            print("The images need to be the same size!")
            exit(1)
        __pixelIndex = 0
        # LoopComplete is a sentinel exception (defined elsewhere in this
        # module) used to break out of the nested loops in one jump.
        try:
            for heightIndex in range(0, __oim.size[1]):
                for widthIndex in range(0, __oim.size[0]):
                    if __pixelIndex >= pixels:
                        raise LoopComplete("Done!")
                    else:
                        # NOTE(review): PIL size is (width, height) while the
                        # numpy array is indexed [row][col]; here widthIndex
                        # (ranging over width) is used as the row index, the
                        # opposite of encode/decode — for non-square images
                        # this may read the wrong pixels. Confirm.
                        print(str(__pixelIndex) + ": " +
                              str(__oimd[widthIndex][heightIndex]) + " --> " +
                              str(__nimd[widthIndex][heightIndex]))
                        __pixelIndex += 1
        except LoopComplete:
            pass
        __oim.close()
        __nim.close()
@staticmethod
def read_message_from_file(filename):
"""
Return the contents of a file as a string.
Mandatory Arguments:
filename -- The filename to read the message from.
Exceptions:
IOError -- This is raised if the message file cannot be opened.
"""
try:
# This might not be a great idea but we are going to try to read the entire file into a string all at once.
fd = open(filename, 'r')
message = fd.read()
fd.close()
except IOError as e:
raise IOError("The following error was encountered opening the " +
"message file: " + e.message)
return message
@staticmethod
def write_message_to_file(message, filename):
"""
Write a message, as a string, to a file.
Mandatory Arguments:
message - The string to be written to the file.
filename - The name of the file to write the string to.
Exceptions:
IOError -- This is raised if the file to write the message to could not
be opened.
"""
try:
fd = open(filename, "w")
fd.write(message)
fd.close()
except IOError as e:
raise IOError("The following error was encountered opening the " +
"message file: " + e.message)
# Instance Methods
    def initialize_image_data(self):
        """
        This prepares the class for image manipulation.

        Loads the input image into a writable numpy array, records its
        colour mode and size, and computes how many bits can be hidden
        (one per colour channel per pixel).

        Exceptions:
        IOError -- This is raised if there is a problem opening the image.
        ValueError -- This is raised if the input filename is empty.
        ValueError -- This is raised if the image supplied has an unsupported
            color model.
        """
        if self._input_file == "":
            raise ValueError("You must supply an input file name to encode "
                             + "decode, or compare pixels.")
        try:
            __imageIn = Image.open(self._input_file)
        except IOError as e:
            raise e
        # Without the numpy.copy() the data would be read only
        self.__image_data = numpy.copy(numpy.asarray(__imageIn))
        self.__color_mode = __imageIn.mode
        self.__image_size = __imageIn.size
        __imageIn.close()
        # Number of channels actually used for encoding, per colour mode.
        if self.__color_mode == "RGB":
            self.__color_size = 3
        elif self.__color_mode == "RGBA":
            # Don't encode to the alpha value
            self.__color_size = 3
        elif self.__color_mode == "CMYK":
            self.__color_size = 4
        elif self.__color_mode == "YCbCr":
            # NOTE(review): Pillow's YCbCr mode has 3 bands, not 4 — confirm
            # this channel count against the Pillow docs.
            self.__color_size = 4
        else:
            raise ValueError("The input image " + self._input_file +
                             " contains an unsupported color model.")
        # Calculate the maximum number of bits we'll be able to store.
        self.__max_bits_storable = self.__image_size[0] * self.__image_size[1]
        self.__max_bits_storable *= self.__color_size
def save_output_image(self):
"""Save the stored image data to file.
Exceptions:
IOError -- Raised if the output file could not be opened.
"""
__imageOut = Image.fromarray(self.__image_data)
try:
__imageOut.save(self._output_file, 'PNG', compress_level=0)
except IOError as e:
raise IOError("The following error was encountered while attempting"
+ " to save the output image: " + e.message)
# It should be noted that I have left out the KeyError Exception that
# can be raised by the Image.save() method. Per the documentation this
# exception can be safely ignored if the format option is provided to the
# save function. I have explicitly defined the format option so I am not
# checking for the Exception.
# Close the image. I don't know if this is explicitly necessary but feels right. Ya know?
__imageOut.close()
# Sing songs of our success
print("Image encoded and saved as " + self._output_file)
    def encode_message(self, message):
        """
        Encode a message into a picture.

        Writes a 32-bit length prefix followed by 8 bits per character into
        the least-significant bit of each colour channel, walking the image
        pixel by pixel, then saves the result via save_output_image().

        Mandatory Arguments:
        message -- The message to be encoded into the image.

        Exceptions:
        IOError -- Raised from save_output_image if there was a problem saving
            the output image.
        IOError -- Raised from initialize_image_data if the input image file
            could not be opened.
        ValueError -- Raised if the outputFile name is blank.
        ValueError -- Raised if the message argument is a blank string.
        ValueError -- Raised if the message is too large for the supplied image.
        ValueError -- Raised from initialize_image_data if the input filename
            is blank.
        ValueError -- Raised from initialize_image_data if the color model of
            input picture is unsupported by steganographer.
        """
        __message = message
        # Error Handling
        if self._output_file == "":
            raise ValueError("No output filename specified. Please specify"
                             + " a filename and call encode_image() again.")
        if self.__image_data.shape == (1, 1, 1):
            """Uninitialized image or smallest image ever."""
            try:
                self.initialize_image_data()
            except ValueError as e:
                raise e
            except IOError as e:
                raise e
        if __message == "":
            raise ValueError("Message not set. Please set message and"
                             + " call encode_image() again.")
        # Payload = 32-bit length prefix + 8 bits per character.
        __bit_sequence = steganographer.int_to_bin_list(len(__message))
        __bit_sequence += steganographer.message_to_bin_list(__message)
        # Pad the bit sequence so it fills whole pixels (a multiple of the
        # channel count).
        __padSize = self.__color_size - (len(__bit_sequence) % self.__color_size)
        for i in range(0, __padSize):
            __bit_sequence += [0]
        if len(__bit_sequence) >= self.__max_bits_storable:
            raise ValueError("The message or message file provided was too "
                             + "large to be encoded onto image "
                             + self._input_file + ".")
        """
        I am pretty sure this formatting is more levels than I can count
        against PEP8. I am going to leave it like this though because I think
        it is easier to read. I feel like Clark, Johnny, and Joe would likely
        agree.
        """
        # NOTE(review): __bitList has exactly 3 slots, but __color_size is 4
        # for CMYK/YCbCr (IndexError at colorIndex 3), and assigning a
        # 3-element list to an RGBA pixel row is a shape mismatch — confirm
        # the 4-channel and alpha paths have ever been exercised.
        # LoopComplete is a sentinel exception used to exit the nested loops
        # the moment every payload bit has been written.
        __bitIndex = 0
        __bitList = [0, 0, 0]
        try:
            for heightIndex in range(0, self.__image_size[0]):
                for widthIndex in range(0, self.__image_size[1]):
                    for colorIndex in range(0, self.__color_size):
                        if __bitIndex >= len(__bit_sequence):
                            raise LoopComplete("Done!")
                        else:
                            # Overwrite bit 0 (the LSB) of this channel with
                            # the next payload bit.
                            __bitList[colorIndex] = steganographer.set_bit(
                                self.__image_data[widthIndex][heightIndex][colorIndex],
                                0,
                                __bit_sequence[__bitIndex])
                            __bitIndex += 1
                    self.__image_data[widthIndex][heightIndex] = __bitList
                    __bitList = [0, 0, 0]
        except LoopComplete:
            pass
        try:
            self.save_output_image()
        except IOError as e:
            raise e
    def decode_message(self):
        """
        This method will decode a message that is embedded in an image.

        Reads the 32-bit length prefix from the channel LSBs, then reads
        that many characters (8 bits each) and returns them as a string.

        Exceptions:
        IOError -- Raised from initialize_image_data if the input image file
            could not be opened.
        ValueError -- Raised from initialize_image_data if the input filename
            is blank.
        ValueError -- Raised from initialize_image_data if the color model of
            input picture is unsupported by steganographer.
        """
        if self.__image_data.shape == (1, 1, 1):
            try:
                self.initialize_image_data()
            except IOError as e:
                raise e
            except ValueError as e:
                raise e
        # Create a list to get the number of bits in the message
        __len_list = []
        # The length prefix is 32 bits: with 3 channels per pixel that is the
        # first 10 pixels plus two channels of the 11th. Rather than special-
        # case that boundary, walk channel by channel and bail out via the
        # LoopComplete sentinel once 32 bits have been collected.
        try:
            __bit_index = 0
            for heightIndex in range(0, self.__image_size[0]):
                for widthIndex in range(0, self.__image_size[1]):
                    for colorIndex in range(0, self.__color_size):
                        if __bit_index >= 32:
                            raise LoopComplete("Done!")
                        else:
                            # "& 1" extracts the LSB of this colour channel.
                            __len_list.append(self.__image_data[widthIndex][heightIndex][colorIndex] & 1)
                            __bit_index += 1
        except LoopComplete:
            pass
        # Now we know how many characters to expect.
        __message_length = steganographer.bin_list_to_int(__len_list)
        # It is simpler to re-read the length prefix together with the
        # message bits in one pass, then slice the message out afterwards —
        # this also drops the zero padding added during encoding without any
        # special handling in the inner loop.
        __total_list = []
        # The stored length counts characters (8 bits each); convert to bits.
        __message_bit_length = __message_length * 8
        # Collect the length prefix plus every message bit.
        try:
            __bits_processed = 0
            for heightIndex in range(0, self.__image_size[0]):
                for widthIndex in range(0, self.__image_size[1]):
                    for colorIndex in range(0, self.__color_size):
                        if __bits_processed >= (__message_bit_length + 32):
                            raise LoopComplete("Done!")
                        else:
                            __total_list.append(
                                self.__image_data[widthIndex][heightIndex][colorIndex] & 1)
                            __bits_processed += 1
        except LoopComplete as e:
            pass
        __message_list = []
        # The message starts at bit 32 (right after the length prefix) and
        # runs for exactly __message_bit_length bits. Slicing precisely here
        # keeps the padding out, which matters because bin_list_to_message
        # rejects input that is not a multiple of 8 bits.
        for index in range(32, __message_bit_length + 32):
            __message_list.append(__total_list[index])
        # Convert the message from a list of binary values to a string.
        __message = steganographer.bin_list_to_message(__message_list)
        return __message
def encode_message_from_file(self, filename):
"""
This function will open a file, read the contents, then pass the
contents as a message to encode_image.
Mandatory Arguments:
filename - The name of the file containing the message.
Exceptions:
IOError -- Raised from read_message_from_file if the message file
cannot be read.
IOError -- Raised from save_output_image through encode_message if there
was a problem saving the output image.
IOError -- Raised from initialize_image_data through encode_message if
the input image file could not be opened.
ValueError -- Raised from encode_message if the outputFile name is
blank.
ValueError -- Raised from encode_message if the message argument is a
blank string.
ValueError -- Raised from encode_message if the message is too large for
the supplied image.
ValueError -- Raised from initialize_image_data through encode_message
if the input filename is blank.
ValueError -- Raised from initialize_image_data through encode_message
if the color model of input picture is unsupported by
steganographer.
"""
try:
__message = steganographer.read_message_from_file(filename)
except IOError as e:
raise e
try:
self.encode_message(__message)
except IOError as e:
raise e
except ValueError as e:
raise e
def decode_message_to_file(self, filename):
"""
This function will decode the message in an image and dump the
message into a file.
Mandatory Arguments:
filename - The name of the file to save the message to.
Exceptions:
IOError -- Raised from initialize_image_data through decode_message if
the input image file could not be opened.
ValueError -- Raised from initialize_image_data through decode_message
if the input filename is blank.
ValueError -- Raised from initialize_image_data through decode_message
if the color model of input picture is unsupported by steganographer.
"""
try:
__message = self.decode_message()
except IOError as e:
raise e
except ValueError as e:
raise e
try:
steganographer.write_message_to_file(__message, filename)
except IOError as e:
raise e
print("Message saved to " + filename + ".")
class Encryptedsteganographer(steganographer):
"""
This subclass of the steganographer class adds encryption to the message.
It requires that the intended recipient's public key and the sender's RSA
key pair be provided. It will then generate a symmetric key to encrypt the
actual message data with. The symmetric key, the sender's public key, a
signature of the message, and the encrypted message with CBC IV attached
will be encoded into the picture. It is likewise able to decode and
decrypt messages embedded in a picture. To do so it requires the
recipient's RSA key pair.
"""
def __init__(self, **kwargs):
"""
Initialize an Encryptedsteganographer object
Keyword Arguments:
inputFile -- The filename of the image you wish to embed
information in.
outputFile -- The filename of the image that will have your
information in it.
recipientPublicKeyFileName -- The file name of the recipient's public
key.
sendersKeyPairFileName -- The file name of the sender's RSA key pair.
passphrase -- The passphrase for the senders key pair. Unprotected
key pairs will not be supported.
"""
try:
self._recipient_public_key_filename = kwargs.pop("recipientPublicKeyFileName")
except KeyError:
raise KeyError("The recipientPublicKeyFileName argument is required " +
"to initialize an Encryptedsteganographer.")
try:
self._senders_key_pair_filename = kwargs.pop("sendersKeyPairFileName")
self._passphrase = kwargs.pop("passphrase")
except KeyError:
raise KeyError("The passphrase and sendersKeyPairFileName arguments " +
"are required to initialize an Encryptedsteganographer")
super(Encryptedsteganographer, self).__init__(**kwargs)
def encrypt_and_encode_message(self, message):
"""
This function will encrypt a message and encode it onto an image.
"""
__message = CryptoHelper.encrypt_message(message, self._recipient_public_key_filename,
self._senders_key_pair_filename, self._passphrase).dump_message()
self.encode_message(__message)
def encrypt_and_encode_message_from_file(self, message_file):
"""
This function will encrypt a message and encode it onto an image.
"""
try:
__message = steganographer.read_message_from_file(message_file)
except IOError as e:
raise e
__message = CryptoHelper.encrypt_message(__message, self._recipient_public_key_filename,
self._senders_key_pair_filename, self._passphrase).dump_message()
self.encode_message(__message)
def decrypt_and_decode_message(self):
"""
This Method will decode an image with a message in it and then,
decrypt that message.
"""
__message = ""
try:
__message = self.decode_message()
except Exception as e:
print(e)
__message = CryptoHelper.decrypt_message(Message.load_message(__message), self._senders_key_pair_filename,
self._passphrase)
return __message
def decrypt_and_decode_message_to_file(self, message_file):
"""
This Method will decode an image with a message in it and then,
decrypt that message.
"""
__message = ""
try:
__message = self.decode_message()
except Exception as e:
raise e
__message = CryptoHelper.decrypt_message(Message.load_message(__message), self._senders_key_pair_filename,
self._passphrase)
try:
steganographer.write_message_to_file(__message, message_file)
except IOError as e:
raise e
if __name__ == "__main__":
description_string = "This program will embed a message into an image."
description_string += " It will also encrypt the message when called"
description_string += " with the appropriate arguments."
epilog_string = "Thank you for using steganographer!"
# If we are being executed independently then parse the necessary arguments.
parser = argparse.ArgumentParser(description=description_string,
epilog=epilog_string)
parser.add_argument("--inputimage", "-ii",
help="The to encode the message onto or the encoded" +
" image if decoding.")
parser.add_argument("--outputimage", "-oi",
help="The name of the encoded image.")
parser.add_argument("--encode", "-e", action="store_true",
help="Encode a message onto a picture.")
parser.add_argument("--decode", "-d", action="store_true",
help="Decode the input image and write message to" +
" terminal.")
parser.add_argument("--crypto", action="store_true",
help="Use ciphered steganographer instead plaintext")
parser.add_argument("--message", "-m",
help="The message to be encoded onto the picture.")
parser.add_argument("--inputfile", "-if", help="--inputfile <filename>.")
parser.add_argument("--outputfile", "-of",
help="--outputfile <filename> for decoded text.")
parser.add_argument("--generate", "-g",
help="Generate a key of of size --modulus. If no" +
" modulus is provided then 2048 will be used.")
parser.add_argument("--encryptionkey", "-ec",
help="The asymmetric key to use for encrypting.")
parser.add_argument("--signingkey", "-sk",
help="The asymmetric key to use for signing.")
parser.add_argument("--passphrase", "-p",
help="The passphrase to the singing key.")
parser.add_argument("--modulus", "-md", help="Key modulus size.")
parser.add_argument("--comparefiles", "-c",
help="Read back the first 512 pixels of an image.")
args = parser.parse_args()
steg = None
plain_text_message = ""
if args.generate:
if not args.passphrase:
print("A passphrase for the key must be provided.")
exit(1)
elif args.modulus:
CryptoHelper.generate_keys(args.generate, args.passphrase,
args.modulus)
else:
CryptoHelper.generate_keys(args.generate, args.passphrase)
elif args.encode:
if args.crypto:
if (not args.inputimage or not args.outputimage or not (args.message or args.inputfile) or not
args.encryptionkey or not args.signingkey or not args.passphrase):
try:
steg = Encryptedsteganographer(inputFile=args.inputimage,
outputfile=args.outputimage,
recipientPublicKeyFileName=args.encryptionkey,
sendersKeyPairFileName=args.signingkey,
passphrase=args.passphrase)
except KeyError as e:
print("The following error has occured: ")
print(e)
exit(1)
try:
if args.inputfile:
steg.encrypt_and_encode_message_from_file(args.inputfile)
else:
steg.encrypt_and_encode_message(args.message)
except IOError as e:
print("The following Error was encountered opening the " +
"message file.")
print(e)
exit(1)
else:
args.print_help()
else:
if (not args.inputimage or not args.outputimage
or not (args.message or args.inputfile)):
try:
steg = steganographer(inputFile=args.inputimage,
outputFile=args.outputimage)
except KeyError as e:
print("The following error occured: ")
print(e)
exit(1)
try:
if args.inputfile:
steg.encode_message_from_file(args.inputfile)
else:
steg.encode_message(args.message)
except IOError as e:
print("The following error occured: ")
print(e)
exit(1)
else:
args.print_help()
elif args.decode:
if args.crypto:
if (not args.inputimage or not args.outputimage or not args.encryptionkey or not
args.signingkey or not args.passphrase):
try:
steg = Encryptedsteganographer(inputFile=args.inputimage,
recipientPublicKeyFileName=args.encryptionkey,
sendersKeyPairFileName=args.signingkey,
passphrase=args.passphrase)
except KeyError as e:
print("The following error has occured: ")
print(e)
exit(1)
try:
if args.outputfile:
steg.decrypt_and_decode_message_to_file(args.outputfile)
print("Message successfully written to " +
args.outputfile + ".")
exit(0)
else:
print("Message:\n")
print(steg.decrypt_and_decode_message())
exit(0)
except IOError as e:
print("The following error was encountered: ")
print(e)
exit(1)
else:
args.print_help()
exit(1)
else:
if not args.inputimage or not args.outputimage:
try:
steg = steganographer(inputFile=args.inputimage)
except KeyError as e:
print("The following error has occured: ")
print(e)
exit(1)
try:
if args.outputfile:
steg.decode_message_to_file(args.outputfile)
print("Message successfully written to " +
args.outputfile + ".")
exit(0)
else:
print("Message:\n")
print(steg.decode_message())
exit(0)
except IOError as e:
print("The following error was encountered: ")
print(e)
exit(1)
else:
args.print_help()
exit(1)
else:
parser.print_help()
exit(1)
# Things went better than expected
exit(0)
"""
This Exception is used in the code to break out of nested loops easily. I don't know if this is really considered
good design but it sure simplifies the code in my opinion.
"""
class LoopComplete(Exception):
pass
| {"/tests/test.py": ["/steganographer.py", "/message.py"], "/steganographer.py": ["/message.py"]} |
68,307 | lospheris/steganographer | refs/heads/master | /message.py | __author__ = "Dell-Ray Sackett"
__version__ = "0.1"
import pickle
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Cipher import AES
from Crypto import Random
import base64
class Message:
"""
This is a class to hold an encrypted message. It is specifically
designed to be pickeled and stored. Nothing more. Although I guess it
could have some use in networking?
"""
def __init__(self, public_key, symmetric_key, signature, message):
"""
Initialize the object.
Keyword Arguments:
public_key -- The public key of the sending party. Should be a x509 DER
sequence that can be directly imported by pyCrypto.
symmetric_key -- The asymmetrically encrypted symmetric key for AES256
encryption.
signature -- Message .
message -- The message encrypted.
"""
self._publicKey = public_key
self._symmetricKey = symmetric_key
self._signature = signature
self._message = message
"""
There is no real reason to only get 1 of these values. So I am only
providing a method for returning everything.
"""
def get_message(self):
"""Return a list containing all the message information."""
return [self._publicKey, self._symmetricKey, self._signature, self._message]
# Pickle and Unpickle
def dump_message(self):
"""Pickle the message and return it."""
return pickle.dumps(self)
@staticmethod
def load_message(message):
"""
Unpickle a message string and return the object.
Mandatory Arguments:
message -- A pickled message string.
"""
return pickle.loads(message)
class CryptoHelper:
"""
This class will do the encryption and decryption of a message object.
It will be almost completely static hah!
"""
"""
I took pad and unpad from a stack exchange post.
http://stackoverflow.com/a/12525165
"""
# Define the symmetric block size as a static variable.
BS = 16
# Static Methods
@staticmethod
def pad(s):
"""
Takes string s and returns it padded to blocksize CryptoHelper.BS.
"""
return s + (CryptoHelper.BS - len(s) % CryptoHelper.BS) * chr(
CryptoHelper.BS - len(s) % CryptoHelper.BS)
@staticmethod
def unpad(s):
"""
Takes padded string s and returns it sans padding.
"""
return s[:-ord(s[len(s) - 1:])]
@staticmethod
def generate_keys(filename, passphrase, modulus=2048):
"""
Generates a RSA keypair and returns it.
Manditory Arguments:
filename -- The name of the file to save the key to.
passphrase -- The passphrase for the key. If you think your key
doesn't need a key. You are dumb.
Optional Arguments:
modulus -- The size modulus to use. (String, default=2048)
"""
key = RSA.generate(modulus)
if passphrase == "" or passphrase is None:
raise ValueError("Passphrase cannot be empty")
if filename[len(filename) - 4:] != ".pem":
filename += ".pem"
try:
keyfile = open(filename, "w")
pubkeyfile = open(filename[:len(filename) - 4] + "_publiconly.pem", "w")
except Exception as e:
raise e
keyfile.write(key.exportKey(format="PEM", passphrase=passphrase))
pubkeyfile.write(key.exportKey(format="PEM", pkcs=8))
keyfile.close()
pubkeyfile.close()
return key
@staticmethod
def import_keys(filename, passphrase):
"""
Import a key from file and return a RSA key object
Mandatory Arguments:
filename -- The filename of the key to import.
passphrase -- The passphrase for your key
"""
try:
keyfile = open(filename, "r")
except Exception as e:
raise e
return RSA.importKey(keyfile.read(), passphrase)
@staticmethod
def get_public_key(key_pair):
"""Return the PEM encoded public key"""
return key_pair.publickey().exportKey()
@staticmethod
def symmetric_encrypt(plaintext):
"""
Takes a string and encrypts it. Returning a tuple with the IV,
the symmetric key (plaintext) and the encrypted
string. The IV and the ciphertext will be concatenated together with
the IV in front.
Mandatory Arguments:
plaintext -- A string to be encrypted.
"""
paddedplaintext = CryptoHelper.pad(plaintext)
IV = Random.new().read(AES.block_size)
key = SHA256.new(Random.new().read(1024)).digest()
cryptor = AES.new(key, AES.MODE_CBC, IV)
return (key, base64.b64encode(
IV + cryptor.encrypt(paddedplaintext)))
@staticmethod
def symmetric_decrypt(key, ciphertext):
"""
Takes a key and base64 encoded ciphertext with an IV concatenated at
the beginning and returns a plaintext string.
"""
decodedciphertext = base64.b64decode(ciphertext)
IV = decodedciphertext[:16]
cryptor = AES.new(key, AES.MODE_CBC, IV)
return CryptoHelper.unpad(cryptor.decrypt(decodedciphertext[16:]))
@staticmethod
def encrypt_message(message, encryption_key_filename, signing_key_filename, signing_key_passphrase):
"""
Takes a String message and encrypts it with the publickey from
the RSA publickey in the file from encryptionKeyFilename. Also signs
the message with the RSA keypair from file signingKeyFilename. Returns
a Message object.
Mandatory Arguments:
message -- A message in the form of a string.
encryptionKeyFilename -- Filename of the publickey to use for
encryption as a String.
signingKeyFilename -- Filename of the RSA keypair to use for
signing the message as a string
signingKeyPassphrase -- The passphrase to the singing keypair.
"""
enckey = CryptoHelper.import_keys(encryption_key_filename, "")
sigkey = CryptoHelper.import_keys(signing_key_filename,
signing_key_passphrase)
myhash = SHA256.new()
myhash.update(message)
cipheredmessage = CryptoHelper.symmetric_encrypt(message)
messagesig = base64.b64encode(str(sigkey.sign(myhash.digest(), "")[0]))
symmetrickey = base64.b64encode(enckey.encrypt(cipheredmessage[0], 32)[0])
pubkey = CryptoHelper.get_public_key(sigkey)
return Message(pubkey, symmetrickey, messagesig, cipheredmessage[1])
@staticmethod
def decrypt_message(message_object, decryption_key_filename, decryption_key_passphrase):
"""
Takes a message Object and a string containing the filename of the
decryption keypair. Decrypts and verifies the message. If the message
is verified returns a string containing the plaintext message.
Mandatory Arguments:
messageObject -- A Message object containing the encrypted message.
With senders publicKey and a signature.
deryptionKeyFilename -- String containing the filename of the RSA
keypair to be used for decryption.
decryptionKeyPassphrase -- String containing the passphrase for
decrypting the decryption key.
"""
try:
decryptkey = CryptoHelper.import_keys(decryption_key_filename, decryption_key_passphrase)
except Exception as e:
raise e
# A list with [publicKey, signature, encMessage]
expandedmessage = message_object.get_message()
sigkey = RSA.importKey(expandedmessage[0])
symmetrickey = decryptkey.decrypt(base64.b64decode(expandedmessage[1]))
plaintext = CryptoHelper.symmetric_decrypt(symmetrickey, expandedmessage[3])
messagehash = SHA256.new(plaintext).digest()
signature = (long(base64.b64decode(expandedmessage[2])),)
if not sigkey.verify(messagehash, signature):
raise ValueError("The message could not be verified")
else:
return plaintext
| {"/tests/test.py": ["/steganographer.py", "/message.py"], "/steganographer.py": ["/message.py"]} |
68,310 | solariscodes/Charlie-The-Bot | refs/heads/master | /server.py | from flask import Flask
from threading import Thread
import os  # BUG FIX: `os` was referenced below but never imported (NameError at startup).

# Hosting platforms inject the listening port via the PORT environment
# variable; default to 5000 for local runs.
porta = int(os.environ.get('PORT', 5000))
app = Flask('')


@app.route('/')
def home():
    # Simple liveness endpoint, hit by an external pinger to keep the
    # bot's host awake.
    return "Hi! How are you?"


def run():
    # Bind to all interfaces so the hosting platform can route traffic in.
    app.run(host='0.0.0.0', port=porta)


def keep_alive():
    # Start the web server in a background thread so the caller (the
    # Discord bot) can continue starting up.
    t = Thread(target=run)
    t.start()
| {"/main.py": ["/server.py"]} |
68,311 | solariscodes/Charlie-The-Bot | refs/heads/master | /main.py | '''
Simple Discord bot written in Python
It returns a random Chuck Norris's quote when someone gets online
by Solariscodes
07/03/2021
'''
import discord
import os
import json
import requests
import logging
from server import keep_alive
'''
saving logs on discord.log
'''
# Capture the discord library's own DEBUG-level logging into a file so
# connection problems can be diagnosed after the fact.
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
# mode='w' truncates the previous log on every start.
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
def Norris():
    """Return a random joke string from the Chuck Norris API."""
    api = "https://api.chucknorris.io/jokes/random"
    # A timeout keeps the bot's event handler from hanging forever if
    # the API is unreachable, and raise_for_status() surfaces HTTP
    # errors instead of failing later while parsing the body.
    req = requests.get(api, timeout=10)
    req.raise_for_status()
    # Response.json() replaces the manual json.loads(req.text).
    return req.json()['value']
# Request all gateway intents; the privileged members intent must be
# explicitly enabled (here and in the Discord developer portal) for
# member-update events to be delivered.
intents = discord.Intents.all()
intents.members = True
client = discord.Client(intents=intents)
'''
connect to discord
'''
class Bot():
    """
    Groups the Discord event handlers.

    NOTE: the @client.event decorators register the handlers globally at
    class-definition time; instances of this class hold no state.
    """
    @client.event
    async def on_ready():
        print("Starting {0.user}".format(client))

    @client.event
    async def on_message(message):
        # Ignore our own messages so the bot never answers itself.
        if message.author == client.user:
            return
        if message.content.startswith('hello'):
            await message.channel.send("Hello back!")

    @client.event
    async def on_member_update(before, after):
        # Fires on any member change (status, roles, nick, ...); DMs the
        # member a random Chuck Norris quote.
        await before.send(Norris())


# BUG FIX: client.run() used to live *inside* the class body, so the
# blocking connect happened while the class was still being defined and
# the trailing Bot() call was only reached after the bot shut down.
# Start the bot at module level instead.
#keep_alive()
client.run(os.getenv('TOKEN'))
Bot()
| {"/main.py": ["/server.py"]} |
68,312 | gaohaochenghc/industry-classification | refs/heads/master | /new_combine_models.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
#from scipy import stats
#from operator import itemgetter
from functools import reduce
from sklearn.linear_model import LinearRegression
class combine_models:
    """
    Combine different cluster results; or combine different embedding
    matrices according to company name; or combine cluster results with
    an embedding matrix (mainly used by performance_analysis).
    """

    def __init__(self):
        # Inner join of all cluster DataFrames; built lazily.
        self.combine_cluster = None

    def get_combine_cluster(self, class_list):
        """Inner-join a list of cluster DataFrames on 'company'.

        Inputs:
        class_list -- list of DataFrames, each with columns
            ['company', <class column>].

        Returns the joined DataFrame (or None when class_list is None);
        also records its length in self.cluster_len.
        """
        # BUG FIX: compare against None with `is not None`; `!= None`
        # is unidiomatic and unsafe around pandas objects.
        if class_list is not None:
            self.combine_cluster = class_list[0]
            for extra in class_list[1:]:
                self.combine_cluster = self.combine_cluster.merge(
                    extra.copy(), on=["company"], how="inner"
                )
            self.cluster_len = len(self.combine_cluster)
        return self.combine_cluster

    def get_combine_embadding(self, company_list, embadding_list):
        """Align every embedding matrix to company_list's row order.

        Inputs:
        company_list -- iterable of company names.
        embadding_list -- list of DataFrames ['company', <embedding cols>].

        Returns a list of embedding DataFrames with the 'company' column
        dropped, one per input (None when embadding_list is None).
        """
        if embadding_list is not None:
            combine_embadding = []
            company_list = pd.DataFrame(company_list, columns=["company"])
            for embadding in embadding_list:
                # Left join keeps company_list's row order; iloc drops
                # the 'company' column afterwards.
                embadding = company_list.merge(
                    embadding, how="left", on=["company"]
                ).iloc[:, 1:]
                combine_embadding.append(embadding)
            return combine_embadding

    def get_merged_matrix(self, embadding, class_list):
        """Restrict embeddings to the joined cluster companies, ordered
        like the joined cluster DataFrame.

        Inputs:
        embadding -- list of DataFrames ['company', <embedding cols>].
        class_list -- cluster DataFrames used to build the join when it
            has not been built yet.
        """
        # BUG FIX: `self.combine_cluster == None` raises ValueError once
        # combine_cluster holds a DataFrame (elementwise comparison has
        # no single truth value); use `is None`.
        if self.combine_cluster is None:
            self.get_combine_cluster(class_list)
        new_embadding = []
        for i in range(len(embadding)):
            embadding[i] = embadding[i][
                embadding[i]["company"].isin(self.combine_cluster["company"])
            ]
            # NOTE(review): reorder_categories on the temporary result
            # of .astype() likely has no lasting effect -- verify.
            embadding[i]["company"].astype("category").cat.reorder_categories(
                self.combine_cluster["company"], inplace=True
            )
            new_embadding.append(
                embadding[i]
                .sort_values("company")
                .iloc[:, 1:]
                .set_index(pd.Index(range(self.cluster_len)))
            )
        return new_embadding
class performance_analysis:
    def __init__(self, class_df=None, return_df=None, embadding_list=None, market_return=None, factors=None):
        """Use stock returns to judge how good each classification is.

        Inputs:
        class_df -- DataFrame ['company', 'class1', 'class2', ...]; each
            classX column is one classification scheme to evaluate.
        return_df -- DataFrame ['company', 'date', 'return'].
        embadding_list -- accepted but not used in this constructor.
        market_return -- optional DataFrame ['date', 'market return'].
        factors -- optional DataFrame ['date', factor1, ...].
        'date' values are datetime objects.
        """
        self.class_df = class_df
        # Keep only returns of companies that actually have class labels.
        self.stock_return = return_df.merge(
            class_df, on=["company"], how="inner")
        self.company = list(set(class_df["company"]))
        # One evaluation "method" per classification column.
        self.method = list(class_df.columns[1:])
        # method name -> {company -> class label}
        self.company2int = {
            classname: dict(zip(class_df["company"], class_df[classname]))
            for classname in self.method
        }
        # Filled lazily by get_daliy_return / get_statistical_describe.
        self.return_map = None
        self.statistical_map = None
        self.ret_df = None
        self.R2 = None
        self.has_mkt_ret = False
        self.has_factors = False
        self.mkt_ret = market_return
        self.factors = factors
        if not market_return is None:
            self.has_mkt_ret = True
            # NOTE(review): renames the caller's DataFrame columns in place.
            self.mkt_ret.columns = ['date', 'market_return']
        if not factors is None:
            self.has_factors = True
    def get_daliy_return(self, threshold=5, ret_comp=60):
        """Build per-cluster average daily return series.

        threshold -- minimum number of companies a cluster needs before
            it gets a return series.
        ret_comp -- minimum number of non-NaN observations a company
            needs to stay in a cluster's panel.

        Populates:
        self.return_map[method][cluster] -- per-company return panel
            plus a 'cluster_<id>' equal-weight mean column.
        self.ret_df[method] -- all cluster mean columns merged together
            (market-adjusted / factor-joined when provided).
        Methods that collapse to a single cluster are removed from
        self.method.
        """
        self.return_map = {}
        self.ret_df = {}
        # Wide panel: one column per company, one row per date.
        pivot_df = self.stock_return[
            self.stock_return["company"].isin(self.class_df["company"])
        ].pivot(index="date", columns="company", values="return")
        # Drop dates where fewer than half of the companies have data.
        pivot_df = pivot_df.dropna(thresh=pivot_df.shape[1]/2)
        class_df_trim = self.class_df[
            self.class_df["company"].isin(self.stock_return["company"])
        ]
        self.company_num = len(set(class_df_trim['company']))
        name_to_delete = []
        for name in self.method:
            ret_list = []
            self.return_map[name] = dict()
            cluster_namelist = set(class_df_trim[name].values.tolist())
            # cluster label -> Series of member company names
            int2comp = {
                cl: class_df_trim[class_df_trim[name] == cl]["company"]
                for cl in cluster_namelist
            }
            # A classification with only one cluster carries no information.
            if len(int2comp) == 1:
                self.return_map.pop(name, None)
                name_to_delete.append(name)
                continue
            for cl, comps in int2comp.items():
                if len(comps) > threshold:
                    self.return_map[name][cl] = pivot_df[comps]
                    # NOTE(review): positional axis argument (1 == columns)
                    # is deprecated/removed in recent pandas.
                    self.return_map[name][cl].dropna(
                        1, thresh=ret_comp, inplace=True)
                    self.return_map[name][cl].fillna(0, inplace=True)
                    # Equal-weight mean return over the cluster's members.
                    self.return_map[name][cl]["cluster_" + str(cl)] = self.return_map[name][
                        cl
                    ].mean(1)
                    app_df = self.return_map[name][cl]["cluster_" + str(cl)]
                    ret_list.append(app_df)
            # Outer-join every cluster's mean series on the date index.
            self.ret_df[name] = reduce(
                lambda x, y: pd.merge(
                    x, y, how="outer", left_index=True, right_index=True
                ),
                ret_list,
            )
            self.ret_df[name].fillna(0, inplace=True)
            if self.has_mkt_ret:
                self.ret_df[name] = self.ret_df[name].merge(
                    self.mkt_ret, how='inner', on=['date'])
                # Convert cluster returns to excess-of-market returns.
                for col in self.ret_df[name].columns[1:-1]:
                    self.ret_df[name][col] = self.ret_df[name][col] - \
                        self.ret_df[name].market_return
            if self.has_factors:
                self.ret_df[name] = self.ret_df[name].merge(
                    self.factors, how='inner', on=['date'])
            #self.ret_df[name] = self.ret_df[name].set_index('date')
        for name in name_to_delete:
            self.method.remove(name)
    def get_statistical_describe(self):
        """Regress each company's return on the cluster mean returns.

        For every method and cluster, fits one linear regression per
        member company (X = all cluster-mean/market/factor columns of
        self.ret_df, y = the company's own return series) and records
        the coefficients, which cluster the company loads on most
        positively/negatively, whether its own cluster is the largest
        loading, and the regression R2.
        """
        # NOTE(review): works for the initial None but `is None` would
        # be the safe comparison here.
        if self.return_map == None:
            self.get_daliy_return()
        self.statistical_map = {}
        self.R2 = {}
        for name in self.method:
            self.statistical_map[name] = {}
            self.R2[name] = pd.DataFrame(index=["R2"])
            class_num = len(self.return_map[name].keys())
            for classes in self.return_map[name].keys():
                # One row per member company, one column per regressor.
                self.statistical_map[name][classes] = pd.DataFrame(
                    index=self.return_map[name][classes].columns[:-1], columns=self.ret_df[name].columns)
                r2 = []
                for Xi in self.return_map[name][classes].columns[:-1]:
                    # Align regressors and the company's returns by date.
                    xy = self.ret_df[name].merge(
                        self.return_map[name][classes][Xi], how='inner', left_index=True, right_index=True)
                    reg = LinearRegression().fit(
                        xy.iloc[:, :-1], xy.iloc[:, -1])
                    r2.append(reg.score(xy.iloc[:, :-1], xy.iloc[:, -1]))
                    self.statistical_map[name][classes].loc[Xi, :] = reg.coef_
                # Cluster columns with the largest +/- coefficients.
                self.statistical_map[name][classes]['most_positive_related'] = self.statistical_map[name][classes].iloc[:, :class_num].astype(
                    float).idxmax(axis=1)
                self.statistical_map[name][classes]['most_negative_related'] = (
                    -self.statistical_map[name][classes].iloc[:, :class_num].astype(float)).idxmax(axis=1)
                # True when the company's own cluster has the top loading.
                self.statistical_map[name][classes]['isBiggest'] = self.statistical_map[
                    name][classes]['most_positive_related'] == "cluster_" + str(classes)
                self.statistical_map[name][classes]['R2'] = r2
                self.statistical_map[name][classes].loc['summary',
                                                        'isBiggest'] = self.statistical_map[name][classes]['isBiggest'].sum()
                self.statistical_map[name][classes].loc['summary',
                                                        'R2'] = self.statistical_map[name][classes]['R2'].mean()
                self.R2[name][classes] = [np.mean(r2)]
    def print_table(self):
        """Summarise every method: mean R2, share of companies whose own
        cluster is their largest loading, and the number of clusters.
        Returns the summary DataFrame, sorted best-first."""
        table = pd.DataFrame(index=self.method, columns=[
                             'R2', 'proportion of right classification'])
        for i in table.index:
            # Mean R2 over this method's clusters.
            table.loc[i, 'R2'] = self.R2[i].mean(1).iloc[0]
            right_classification = [
                self.statistical_map[i][j].loc['summary', 'isBiggest'] for j in self.return_map[i].keys()]
            table.loc[i, 'proportion of right classification'] = sum(
                right_classification)/self.company_num
            table.loc[i, 'classes number'] = int(len(right_classification))
        # Best methods first.
        table.sort_values(
            by=['R2', 'proportion of right classification'], ascending=False, inplace=True)
        print('summary table:')
        return table
def plot_industry_dense(self,rows = 6):
fig, ax = plt.subplots(rows,len(self.method)//rows+1,figsize=(30,30))
j=0
k=0
for i in self.method:
sns.countplot(self.class_df[i], ax = ax[j,k])
ax[j,k].set_title(i)
j = j+1 if j<(rows-1) else 0
k = k+1 if j==0 else k | {"/utils.py": ["/combine_models.py"]} |
68,313 | gaohaochenghc/industry-classification | refs/heads/master | /fetch_data.py | # -*- coding: utf-8 -*-
"""
Created on Sun May 24 13:35:56 2020
@author: Gao haocheng
"""
import pandas as pd
import numpy as np
import time
from bert_serving.client import BertClient
from tqdm import tqdm
from tqdm.notebook import tqdm, trange
from selenium import webdriver
from lxml import etree
class fetch_data:
    '''
    Fetch data from Yahoo Finance with a headless Selenium browser.  You
    can choose to fetch the company description data or the financial
    statement (key statistics) data.
    '''
    def __init__(self, company):
        # `company` is a sequence of ticker symbols to scrape.
        self.company = company
    def init_driver(self):
        # (Re)create a headless Chrome driver; also called after a crash
        # to get a fresh browser session.
        #options = webdriver.firefox.options.Options()
        options = webdriver.ChromeOptions()
        options.add_argument("-headless")
        options.add_argument('--ignore-certificate-errors')
        #driver = webdriver.Firefox(options=options)
        self.driver = webdriver.Chrome(options=options)
    def get_single_statement(self, ticker):
        # Scrape six key statistics for one ticker.
        # NOTE(review): these absolute XPaths are brittle -- any Yahoo
        # layout change silently breaks them.
        url='https://finance.yahoo.com/quote/'+ticker+'/key-statistics?p='+ticker
        self.driver.get(url)
        # Fixed wait for the page's JavaScript to render the tables.
        time.sleep(3)
        html = self.driver.page_source
        html = etree.HTML(html)
        mkt_cap = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[1]/div[2]/div/div[1]/div[1]/table/tbody/tr[1]/td[3]//text()')[0]
        pb_ratio = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[1]/div[2]/div/div[1]/div[1]/table/tbody/tr[7]/td[3]//text()')[0]
        beta = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[2]/div/div[1]/div/div/table/tbody/tr[1]/td[2]//text()')[0]
        profit_m = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[3]/div/div[2]/div/div/table/tbody/tr[1]/td[2]//text()')[0]
        roa = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[3]/div/div[3]/div/div/table/tbody/tr[1]/td[2]//text()')[0]
        roe = html.xpath('//*[@id="Col1-0-KeyStatistics-Proxy"]/section/div[3]/div[3]/div/div[3]/div/div/table/tbody/tr[2]/td[2]//text()')[0]
        return [ticker,mkt_cap,pb_ratio, beta, profit_m, roa, roe]
    def get_statement_data(self):
        # Scrape key statistics for every ticker, restarting the browser
        # and resuming from the failing ticker when Selenium errors out.
        col=['company','mkt_cap','pb_ratio','beta','profit_m','roa','roe']
        # NOTE(review): DataFrame.append was removed in pandas 2.0.
        self.ratio_df=pd.DataFrame(columns=col)
        self.init_driver()
        try_num=0          # index of the ticker to resume from
        single_iter=0      # consecutive failures for the current ticker
        while True:
            for t in trange(try_num, len(self.company)):
                try:
                    self.ratio_df=self.ratio_df.append(pd.DataFrame([self.get_single_statement(self.company[t])],columns=col),ignore_index=True)
                    single_iter=0
                # NOTE(review): bare except swallows every error,
                # including KeyboardInterrupt.
                except:
                    self.driver.close()
                    single_iter+=1
                    time.sleep(10)
                    self.init_driver()
                    # After 5 straight failures, give up on this ticker
                    # and move on to the next one.
                    if single_iter>=5:
                        single_iter=0
                        continue
                    # Otherwise restart the for-loop from this ticker.
                    try_num=t
                    break
            else:
                # for-loop finished without a break: all tickers done.
                break
        return self.ratio_df
    def get_single_des(self, ticker):
        # Scrape the description, sector and industry for one ticker.
        url = 'https://finance.yahoo.com/quote/'+ticker+'/profile?p='+ticker
        self.driver.get(url)
        # Fixed wait for the page's JavaScript to render.
        time.sleep(2)
        html = self.driver.page_source
        html = etree.HTML(html)
        # page = requests.get(url)
        # html = etree.HTML(page.text)
        # items = html.xpath('//*[@id="Col1-0-Profile-Proxy"]/section/section[2]/p')
        intro = html.xpath(
            '//*[@id="Col1-0-Profile-Proxy"]/section/section[2]/descendant-or-self::text()')
        segment = html.xpath(
            '//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[2]//text()')
        industry = html.xpath(
            '//*[@id="Col1-0-Profile-Proxy"]/section/div[1]/div/div/p[2]/span[4]//text()')
        return [ticker,intro,segment,industry]
    def get_description_data(self):
        # Same resume-on-failure retry scheme as get_statement_data, but
        # for the company profile pages.
        col=['company','intro','segment','industry']
        self.des_df=pd.DataFrame(columns=col)
        self.init_driver()
        try_num=0
        single_iter=0
        while True:
            for t in trange(try_num, len(self.company)):
                try:
                    self.des_df=self.des_df.append(pd.DataFrame([self.get_single_des(self.company[t])],columns=col),ignore_index=True)
                    single_iter=0
                except:
                    self.driver.close()
                    single_iter+=1
                    time.sleep(10)
                    self.init_driver()
                    if single_iter>=5:
                        single_iter=0
                        continue
                    try_num=t
                    break
            else:
                break
        return self.des_df
    def get_bert(des):
        # Encode each company's description with bert-as-service,
        # averaging the embeddings of the two halves for descriptions
        # longer than 512 characters.
        # NOTE(review): defined in the class but takes no `self`; it can
        # only be called as fetch_data.get_bert(des) and should probably
        # be marked @staticmethod.
        des['len'] = des['intro'].str.len()
        des.set_index('company', inplace=True)
        short = des[des['intro'].str.len()<=512]
        # NOTE(review): `long` shadows a builtin, and descriptions of
        # 1024+ characters are silently dropped.
        long = des[(des['intro'].str.len()>512) & (des['intro'].str.len()<1024)]
        # max length of bert is 512
        long_first_part = long['intro'].str[:512]
        long_second_part = long['intro'].str[512:]
        # Drop trailing fragments too short to embed usefully.
        long_second_part = long_second_part[long_second_part.str.len()>100]
        short_intro=short['intro'].values.tolist()
        long_first_part_intro = long_first_part.values.tolist()
        long_second_part_intro = long_second_part.values.tolist()
        bc = BertClient()
        short_embadding = bc.encode(short_intro)
        long_first_part_embadding = bc.encode(long_first_part_intro)
        long_second_part_embadding = bc.encode(long_second_part_intro)
        # NOTE(review): short_intro is a plain list, so `.index` raises
        # AttributeError here -- this was likely meant to be short.index.
        short_embadding = pd.DataFrame(short_embadding, index = short_intro.index)
        long_first_part_embadding = pd.DataFrame(long_first_part_embadding, index = long_first_part.index)
        long_second_part_embadding = pd.DataFrame(long_second_part_embadding, index = long_second_part.index)
        # Average the embeddings of the two halves of long descriptions.
        temp = long_first_part_embadding.reindex(long_second_part_embadding.index)
        temp = (temp + long_second_part_embadding)/2
        long_first_part_embadding.loc[temp.index,:] = temp
        return pd.concat([short_embadding, long_first_part_embadding])
| {"/utils.py": ["/combine_models.py"]} |
68,314 | gaohaochenghc/industry-classification | refs/heads/master | /generate_word_vector.py | import jieba
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from nltk.tokenize import RegexpTokenizer
import gensim
import pandas as pd
import numpy as np
class generate_doc_vector:
    '''
    Build document vectors for a list of company descriptions.

    All inputs must be a 1-dim np.array or list; every method returns a
    DataFrame shaped ['company', <vector columns...>].
    '''
    def __init__(self,company_list,doc_list,stopwords,keep_prob=1.0,drop_num=0,language='Chinese'):
        # company_list: one name per document in doc_list (lengths must match).
        # keep_prob / drop_num map to gensim filter_extremes no_above / no_below.
        if len(company_list)!=len(doc_list):
            raise ValueError('Pls enter a matched document list')
        self.stopwords=list(stopwords)
        if language=='Chinese':
            # jieba segmentation with stopwords removed
            self.doc_list=[[word for word in jieba.cut(doc) if word not in self.stopwords] \
                for doc in doc_list]
        elif language=='English':
            tokenizer = RegexpTokenizer(r'\w+')
            # lowercase, word-tokenize, lemmatize as verbs, drop stopwords
            self.doc_list=[[WordNetLemmatizer().lemmatize(word, 'v') for word in tokenizer.tokenize(sentance.lower()) if word not in self.stopwords]\
                for sentance in doc_list]
        else:
            raise ValueError('Only support Chinese and English')
        self.lexicon=gensim.corpora.Dictionary(self.doc_list)##create dictionary in gensim
        self.lexicon.filter_extremes(no_below=drop_num,no_above=keep_prob)
        self.lexicon.compactify()
        self.bow = [self.lexicon.doc2bow(i) for i in self.doc_list]##transfer words to ids using doc2bow
        self.company_list=company_list
    def doc2vector(self,vector_size,alpha,window=5,min_alpha=0.0001,workers=3,\
        negative=10,dbow_words=0,min_count=5,dm=1):
        """Train a gensim Doc2Vec model and return one vector row per document."""
        TaggedDocument=gensim.models.doc2vec.TaggedDocument
        corpus=[]
        for i, word in enumerate(self.doc_list):
            document=TaggedDocument(word,tags=[i])
            corpus.append(document)
        model=gensim.models.doc2vec.Doc2Vec(documents=corpus,dm=dm,dbow_words=dbow_words,\
            vector_size=vector_size,epochs=6,window=window,alpha=alpha,\
            min_alpha=min_alpha,min_count=min_count,negative=negative,workers=workers)
        # NOTE(review): Doc2Vec(documents=...) already trains; this trains a
        # second pass — confirm the double training is intentional.
        model.train(corpus,total_examples=len(corpus),epochs=model.epochs)
        def _get_vectors(model,corpus):
            # one row per document, in corpus order
            vec=[np.array(model.docvecs[i.tags[0]]).reshape((1,-1)) for i in corpus]
            return np.concatenate(vec)
        doc_vector=pd.DataFrame(_get_vectors(model,corpus))
        doc_vector.insert(0,'company',self.company_list)
        return doc_vector
    def bag_of_words(self,use_tfidf=True):
        """Return dense bag-of-words vectors, optionally TF-IDF weighted."""
        if use_tfidf:
            tfidf=gensim.models.TfidfModel(self.bow)
            tfidf_bow=tfidf[self.bow]
            doc_vector=pd.DataFrame(gensim.matutils.corpus2dense(tfidf_bow,num_terms=len(self.lexicon.token2id)).T)
            doc_vector.insert(0,'company',self.company_list)
            return doc_vector
        else:
            doc_vector=pd.DataFrame(gensim.matutils.corpus2dense(self.bow,num_terms=len(self.lexicon.token2id)).T)
            doc_vector.insert(0,'company',self.company_list)
            return doc_vector
    def LSI(self,num_topics,use_tfidf=True):
        """Return LSI topic vectors (num_topics columns), optionally on TF-IDF input."""
        if use_tfidf:
            tfidf=gensim.models.TfidfModel(self.bow)
            tfidf_bow=tfidf[self.bow]
            lsi=gensim.models.LsiModel(corpus=tfidf_bow,num_topics=num_topics)
            doc_vector=pd.DataFrame(gensim.matutils.corpus2dense(lsi[tfidf_bow],num_terms=num_topics).T)
            doc_vector.insert(0,'company',self.company_list)
            return doc_vector
        else:
            lsi=gensim.models.LsiModel(corpus=self.bow,num_topics=num_topics)
            doc_vector=pd.DataFrame(gensim.matutils.corpus2dense(lsi[self.bow],num_terms=num_topics).T)
            doc_vector.insert(0,'company',self.company_list)
            return doc_vector
| {"/utils.py": ["/combine_models.py"]} |
68,315 | gaohaochenghc/industry-classification | refs/heads/master | /clustering_class.py | import pandas as pd
import numpy as np
import functools
import jieba
from tqdm import tqdm
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans, DBSCAN
from sklearn.mixture import GaussianMixture
class bag_of_words:
    """Bag-of-words embedding of company descriptions (pandas inputs required).

    Call transform() to build the embedding, then get_vector() to read it.
    """

    def __init__(self, company_list, company_disc):
        # One description per company; both are copied defensively.
        assert len(company_list) == company_disc.shape[0]
        self.company_list = company_list.copy()
        self.company_disc = company_disc.copy()
        self.word_embedding = None

    def transform(self, stopwords, use_tfidf=True):
        """Tokenize with jieba, count terms, and (optionally) TF-IDF weight them."""
        tokenized = self.company_disc.apply(lambda text: " ".join(jieba.cut(text)))
        vectorizer = CountVectorizer(stop_words=stopwords)
        counts = vectorizer.fit_transform(tokenized)
        if use_tfidf:
            counts = TfidfTransformer().fit_transform(counts)
        # Dense frame with the company column prepended.
        self.word_embedding = pd.concat(
            [self.company_list, pd.DataFrame(counts.toarray())], axis=1
        )

    def get_vector(self):
        """Return the embedding frame (None until transform() has run)."""
        return self.word_embedding
class paper_cluster:
    """Agglomerative clustering over a cosine-similarity matrix: repeatedly
    merge the most similar pair of companies/industries until n_clusters remain."""
    def __init__(self, company_list, company_vec):
        assert len(company_list) == company_vec.shape[0]
        self.company_list = company_list.copy()
        self.cosine_matrix = cosine_similarity(company_vec)
        self.cluster_dict = dict()
        self.name = "paper_class"
        self.paper_class = None
    def generate_clusters(self, n_clusters=10, adjusted_coef=1):
        """Merge companies until only n_clusters groups remain.

        adjusted_coef: exponent on the group-size product when averaging
        cross-group similarities (1 = plain mean).
        """
        cov_matrix_enlarge = self.cosine_matrix.copy()
        max_num = self.cosine_matrix.shape[0]
        enlarge_company_list = self.company_list.copy()
        # yield every original company index inside an industry (recursively)
        def query_ind(index):
            if type(enlarge_company_list[index]) == type("a"):
                yield index
            else:
                sub_index = enlarge_company_list[index]
                for i in sub_index:
                    for j in query_ind(i):
                        yield j
        # pairwise similarity lookup in the growing matrix
        def sum_corr_func(index1, index2):
            return cov_matrix_enlarge[index1, index2]
        # zero out the rows/columns of companies already merged so they are
        # never picked again (the original comment claimed "set to 1")
        def elim_corr(comp_list):
            for i in comp_list:
                cov_matrix_enlarge[i, :] = 0
                cov_matrix_enlarge[:, i] = 0
        set_num_counter = list(range(max_num))
        exempt_set = set()
        record_dummy = -1
        for i in range(max_num):
            cov_matrix_enlarge[i, i] = 0
        while len(set(set_num_counter)) > n_clusters:
            # find the MOST similar remaining pair (argmax of cosine similarity;
            # the variable name `min_pair` is historical)
            min_pair = cov_matrix_enlarge.argmax()
            row = min_pair // cov_matrix_enlarge.shape[0]
            col = min_pair % cov_matrix_enlarge.shape[0]
            enlarge_company_list.append([row, col])
            # calculate the new generated industry correlation with other companies
            ind_comp_list = [i for i in query_ind(row)] + [i for i in query_ind(col)]
            for i in ind_comp_list:
                exempt_set.add(i)
            exempt_set.add(row)
            exempt_set.add(col)
            new_corr = []
            for iter_company in range(cov_matrix_enlarge.shape[1]):
                if_exempt = iter_company in exempt_set
                if (iter_company >= max_num) and (not if_exempt):
                    # industry-to-industry: size-adjusted mean of member similarities
                    iter_comp_com = [i for i in query_ind(iter_company)]
                    sum_corr = (
                        sum(
                            [
                                self.cosine_matrix[i, j]
                                for i in iter_comp_com
                                for j in ind_comp_list
                            ]
                        )
                        / (len(iter_comp_com) * len(ind_comp_list)) ** adjusted_coef
                    )
                elif not if_exempt:
                    # industry-to-single-company: plain mean
                    sum_corr = sum(
                        [sum_corr_func(i1, iter_company) for i1 in ind_comp_list]
                    ) / len(ind_comp_list)
                else:
                    sum_corr = 0
                new_corr.append(sum_corr)
            cov_matrix_enlarge = np.vstack((cov_matrix_enlarge, np.matrix(new_corr)))
            cov_matrix_enlarge = np.hstack(
                (cov_matrix_enlarge, np.transpose(np.matrix(new_corr + [0])))
            )
            # retire merged rows/columns (zeroed, not "set to 1")
            elim_corr(ind_comp_list)
            elim_corr([row, col])
            # record the companies that have been categorized
            for i in ind_comp_list:
                set_num_counter[i] = record_dummy
            record_dummy -= 1
        # collect the resulting groups into {group_key: {company, ...}}
        for i in set(set_num_counter):
            self.cluster_dict[i] = set()
        for i in range(len(set_num_counter)):
            self.cluster_dict[set_num_counter[i]].add(self.company_list[i])
        new_sequence_num = [i for i in range(len(self.cluster_dict))]
        paper_class = [
            [(name, key) for name in item]
            for key, item in zip(new_sequence_num, self.cluster_dict.values())
        ]
        paper_class = functools.reduce(lambda x, y: x + y, paper_class)
        paper_class = pd.DataFrame(paper_class, columns=["company", "paper_class"])
        self.paper_class = paper_class
    def output_clusters_df(self):
        """Return the ['company', 'paper_class'] frame (None before a run)."""
        return self.paper_class
    def output_raw(self):
        """Return the raw {group_key: set(company)} mapping."""
        return self.cluster_dict
class ML_cluster:
    """Cluster an embedding matrix with a classic algorithm (k-means, DBSCAN or
    GMM), optionally transforming the input into a correlation or cosine
    similarity matrix first. Labels land in a ['company', <name>] frame."""

    def __init__(self, embedding, namelist):
        self.embedding = embedding
        self.namelist = namelist
        self.cluster_df = None  # filled by generate_clusters()
        self.name = None        # column name of the most recent run
        self.embedding_type = ["kmean", "dbscan", "gmm"]  # supported algorithms
        self.how = ["org", "cor", "cos"]                  # supported input transforms

    def generate_clusters(
        self,
        embedding_type,
        how="org",
        n_clusters=10,
        random_state=None,
        DBSCAN_eps=0.5,
        DBSCAN_min_samples=5,
        DBSCAN_metric="cosine"
    ):
        """Fit the chosen estimator and store labels in self.cluster_df."""
        assert how in ["org", "cor", "cos"]
        assert embedding_type in ["kmean", "dbscan", "gmm"]
        # Optionally replace the raw embedding with a pairwise similarity matrix.
        if how == "cor":
            features = np.corrcoef(self.embedding)
        elif how == "cos":
            features = cosine_similarity(self.embedding, dense_output=False)
        else:
            features = self.embedding
        if embedding_type == "kmean":
            estimator = KMeans(n_clusters=n_clusters, random_state=random_state)
            labels = estimator.fit(features).labels_
            self.name = "kmean_class"
        elif embedding_type == "dbscan":
            estimator = DBSCAN(
                eps=DBSCAN_eps, min_samples=DBSCAN_min_samples, metric=DBSCAN_metric
            )
            labels = estimator.fit(features).labels_
            self.name = "DBSCAN_class"
        else:  # "gmm" — guaranteed by the assert above
            estimator = GaussianMixture(n_components=n_clusters, random_state=random_state)
            labels = estimator.fit_predict(features)
            self.name = "gmm_class"
        self.cluster_df = pd.DataFrame(
            list(zip(self.namelist, labels)), columns=["company", self.name]
        )

    def output_clusters_df(self):
        """Return the most recent assignment frame (None before any run)."""
        return self.cluster_df
class DEC_cluster:
    """Lightweight container pairing a DEC embedding with its cluster assignments.

    Attributes are stored verbatim (no copies); `name` tags the method as 'DEC'
    so it can sit alongside the other *_cluster classes.
    """
    def __init__(self, embedding, namelist, cluster_df):
        self.embedding = embedding
        self.namelist = namelist
        self.cluster_df = cluster_df
        self.name = 'DEC'
class get_all_cluster:
    def __init__(self,company_list,embadding_list):
        '''
        embadding_list: [embadding of first method, second method, ...]
        company_list and each embadding must be a DataFrame/array of matching rank.
        Embeddings can come from combine_models.get_combine_embadding.
        '''
        self.embadding_list=embadding_list
        self.company_list=company_list
        # Accumulator frame: one column per (embedding, transform, algorithm) run.
        self.all_cluster=pd.DataFrame(company_list,columns=['company'])
    def get_all_cluster(self,
                        n_cluster,
                        random_state=None,
                        use_DBSCAN=False,
                        DBSCAN_eps=0.5,
                        DBSCAN_min_samples=5,
                        DBSCAN_metric="cosine"):
        """Run every (transform, algorithm) combination over every embedding.

        Fixes: `random_state` and the `DBSCAN_*` options were accepted but never
        forwarded to ML_cluster.generate_clusters (defaults match, so prior
        behavior is preserved); the duplicated if/elif branch bodies are merged.
        Returns the accumulated ['company', ...] frame.
        """
        i=0
        for embadding in tqdm(self.embadding_list,unit='per embadding',desc='handling...'):
            i+=1
            cluster=ML_cluster(embadding,self.company_list)
            for how in cluster.how:
                for method in cluster.embedding_type:
                    # DBSCAN runs only on explicit request.
                    if method=='dbscan' and not use_DBSCAN:
                        continue
                    cluster.generate_clusters(
                        embedding_type=method,
                        how=how,
                        n_clusters=n_cluster,
                        random_state=random_state,
                        DBSCAN_eps=DBSCAN_eps,
                        DBSCAN_min_samples=DBSCAN_min_samples,
                        DBSCAN_metric=DBSCAN_metric,
                    )
                    df_i=cluster.output_clusters_df()
                    self.all_cluster[str(i)+'th embadding with '+how+' '+method]=df_i.iloc[:,-1]
        return self.all_cluster
| {"/utils.py": ["/combine_models.py"]} |
68,316 | gaohaochenghc/industry-classification | refs/heads/master | /combine_models.py | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from operator import itemgetter
from sklearn.decomposition import PCA, TruncatedSVD
# combine the results of all clustering models
class combine_models:
    '''
    Combine different cluster results; or combine different embadding matrices
    keyed on company name; or align embadding matrices with cluster results
    (as used by performance_analysis). All joins use the 'company' column.
    '''
    def __init__(self):
        # Inner-joined cluster table; populated lazily by get_combine_cluster().
        self.combine_cluster=None
    def get_combine_cluster(self,class_list):
        '''Inner-join every class_df in `class_list` on 'company'.

        class_list: list of DataFrames shaped ['company', '<class_name>'].
        Caches the result on self.combine_cluster and its row count on
        self.cluster_len; returns the joined frame (or None for None input).
        '''
        # Bug fix: identity comparison — `!= None` becomes an ambiguous
        # elementwise comparison once the value is a DataFrame.
        if class_list is not None:
            self.combine_cluster=class_list[0]
            for i in range(1,len(class_list)):
                temp_df=class_list[i].copy()
                self.combine_cluster=self.combine_cluster.merge(temp_df,on=['company'],how='inner')
            self.cluster_len=len(self.combine_cluster)
        return self.combine_cluster
    def get_combine_embadding(self,company_list,embadding_list):
        '''Left-join each embadding onto `company_list` order.

        embadding_list: [embadding1, embadding2, ...], each shaped
        ['company', <vector columns>]. Returns the matrices with the company
        column dropped (or None for None input).
        '''
        if embadding_list is not None:
            combine_embadding=[]
            company_list=pd.DataFrame(company_list,columns=['company'])
            for embadding in embadding_list:
                embadding=company_list.merge(embadding,how='left',on=['company']).iloc[:,1:]
                combine_embadding.append(embadding)
            return combine_embadding
    def get_merged_matrix(self,embadding,class_list):
        '''Filter each embadding to the combined cluster's companies and reorder
        its rows to match self.combine_cluster's company order.
        '''
        # Bug fix: `self.combine_cluster == None` raises ValueError for a DataFrame.
        if self.combine_cluster is None:
            self.get_combine_cluster(class_list)
        new_embadding=[]
        for i in range(len(embadding)):
            embadding[i]=embadding[i][embadding[i]['company'].isin(self.combine_cluster['company'])].copy()
            # Bug fix: the original called .cat.reorder_categories(..., inplace=True)
            # on a temporary Series, so the ordering never reached the frame (and
            # `inplace` was removed in pandas 2.0). Assign a Categorical whose
            # category order is the combined cluster's company order instead.
            embadding[i]['company']=pd.Categorical(
                embadding[i]['company'], categories=self.combine_cluster['company']
            )
            new_embadding.append(embadding[i].sort_values('company').iloc[:,1:].set_index(pd.Index(range(self.cluster_len))))
        return new_embadding
class performance_analysis:
    def __init__(self,mode,class_df=None,return_df=None,embadding_list=None):
        '''Two modes for judging classification quality.

        mode='matrix_performance':
            takes class_df and embadding_list; dimension reduction visualizes
            how well each embadding separates each classification.
            class_df columns: ['company','class1','class2',...]
            embadding_list: [embadding matrix of 1st method, 2nd method, ...]
        mode='return_performance':
            takes class_df and return_df; stock returns measure wellness.
            return_df columns: ['company','date','return'] (date is datetime)
        '''
        if mode=='matrix_performance':
            self.class_df=class_df
            self.embadding_list=embadding_list
            # identity check instead of `!= None`
            self.embadding_len=len(embadding_list) if embadding_list is not None else None
            self.class_len=class_df.shape[1]-1
        elif mode=='return_performance':
            self.class_df=class_df
            self.stock_return=return_df.merge(class_df,on=['company'],how='inner')
            self.company=list(set(class_df['company']))
            self.method=class_df.columns[1:]
            self.company2int={classname:dict(zip(class_df['company'],class_df[classname])) for classname in self.method}
            self.return_map=None
            self.statistical_map=None
            self.total_return=None
            self.R2=None
            self.F=None
        else:
            raise ValueError('mode must in [matrix_performance, return_performance]')
    def plot_count_plot(self):
        """Bar-plot company counts per industry for every classification column."""
        fig, ax = plt.subplots(1,self.class_len,figsize=(20,6))
        plt.suptitle("Company number in different industry")
        for i in range(self.class_len):
            sns.countplot(self.class_df.iloc[:,i+1],ax=ax[i], order=self.class_df.iloc[:,i+1].value_counts().index)
    def get_reducted_vector(self,method='PCA'):
        """Append a 2-D PCA projection of every embadding to class_df.

        NOTE(review): the `method` argument is currently ignored — PCA is
        always used; confirm whether SVD support was intended.
        """
        pca=PCA(n_components=2)
        for matrix in self.embadding_list:
            reducted_vector=pd.DataFrame(pca.fit_transform(matrix))
            self.class_df=pd.concat([self.class_df,reducted_vector],axis=1)
    def plot_reducted_classification(self):
        """Scatter-plot each classification over each embadding's 2-D projection."""
        self.class_df.iloc[:,1:]=self.class_df.iloc[:,1:].astype('category')
        if self.class_df.shape[1]==self.class_len+1:
            self.get_reducted_vector()
        fig, ax= plt.subplots(self.class_len,self.embadding_len,figsize=(20,20))
        for i in range(self.embadding_len):
            for j in range(self.class_len):
                sns.scatterplot(x=self.class_df.iloc[:,self.class_len+1+2*i],y=self.class_df.iloc[:,self.class_len+2+2*i],ax=ax[j,i],hue=self.class_df.iloc[:,j+1])
                ax[j,i].set_title(self.class_df.columns[j+1]+' with the '+str(i)+'th embadding')
    def get_daliy_return(self,threshold=5):
        """Build per-classification, per-cluster daily return frames.

        Clusters with `threshold` companies or fewer are skipped; each kept
        frame gains a 'return' column with the cluster's equal-weight mean.
        """
        self.return_map={}
        pivot_df=self.stock_return[self.stock_return['company'].isin(self.class_df['company'])].pivot(index='date',columns='company',values='return')
        class_df_trim=self.class_df[self.class_df['company'].isin(self.stock_return['company'])]
        for name in self.method:
            self.return_map[name]=dict()
            cluster_namelist=set(class_df_trim[name].values.tolist())
            int2comp={cl:class_df_trim[class_df_trim[name]==cl]['company'] for cl in cluster_namelist}
            for cl,comps in int2comp.items():
                if len(comps)>threshold:
                    self.return_map[name][cl]=pivot_df[comps]
                    # Fix: keyword arguments — positional axis/how for dropna
                    # were removed in pandas 2.0.
                    self.return_map[name][cl].dropna(axis=1,how='all',inplace=True)
                    self.return_map[name][cl]['return']=self.return_map[name][cl].mean(1)
    def get_statistical_discribe(self):
        """Regress each company's return on its cluster mean; record |slope|,
        p-value, R^2 and a significance flag per cluster."""
        if self.return_map is None:
            self.get_daliy_return()
        self.statistical_map={}
        for name in self.method:
            self.statistical_map[name]={key:pd.DataFrame(columns=item.columns,index=['abs_slope','p','unsignificance_num','R2']) for key, item in self.return_map[name].items()}
            for classes in self.statistical_map[name]:
                reg=[itemgetter(0,2,3)(stats.linregress(self.return_map[name][classes][j][self.return_map[name][classes][j].isna()==False],self.return_map[name][classes]['return'][self.return_map[name][classes][j].isna()==False])) for j in self.return_map[name][classes].columns]
                self.statistical_map[name][classes].loc['unsignificance_num']=list(map(lambda x:x[2]<0.001,reg))
                self.statistical_map[name][classes].loc['p']=list(map(lambda x: x[2],reg))
                self.statistical_map[name][classes].loc['R2']=list(map(lambda x:x[1]**2,reg))
                self.statistical_map[name][classes].loc['abs_slope']=list(map(lambda x:abs(x[0]),reg))
                self.statistical_map[name][classes]['sum'+str(classes)]=self.statistical_map[name][classes].mean(axis=1)
                self.statistical_map[name][classes].loc['unsignificance_num','sum'+str(classes)]=sum(self.statistical_map[name][classes].loc['unsignificance_num',]==False)
                self.statistical_map[name][classes].drop(columns='return',inplace=True)
    def get_wellness(self):
        """Aggregate the per-cluster summaries into one frame per classification
        and collect (name, mean R^2) pairs into self.R2."""
        if self.statistical_map is None:
            self.get_statistical_discribe()
        self.wellness_map={name:pd.DataFrame(index=['abs_slope','p','unsignificance_num','R2']) for name in self.method}
        self.R2=[]
        for name in self.wellness_map:
            for value in self.statistical_map[name].values():
                self.wellness_map[name]=self.wellness_map[name].join(value.iloc[:,-1])
            self.wellness_map[name]['mean']=self.wellness_map[name].mean(axis=1)
            self.wellness_map[name].loc['unsignificance_num','mean']=sum(self.wellness_map[name].iloc[2,:-1])
            self.R2.append((name,self.wellness_map[name].loc['R2','mean']))
    def output(self,name='wellness'):
        """Return one of the computed artifacts by name; None for unknown names."""
        if name=='daliy_return':
            return self.return_map
        elif name=='discribe':
            return self.statistical_map
        elif name=='wellness':
            return self.wellness_map
        elif name=='F':
            return self.F
        elif name=='R2':
            return self.R2
    def get_regression_plot(self):
        """Plot slope/R^2 distributions and p-values per classification."""
        for name in self.wellness_map:
            print('Mean of R square of '+name+' classification is: ',self.wellness_map[name].loc['R2','mean'])
        methods=len(self.method)
        fig, ax=plt.subplots(3,methods,figsize=(20,20))
        i=0
        color=['grey','red','blue','purple','green','yellow','black','grey']
        for name in self.wellness_map:
            sns.distplot(self.wellness_map[name].iloc[0,:-1],ax=ax[0,i],color=color[i])
            ax[0,i].set_title('slope of '+name+' classification')
            sns.distplot(self.wellness_map[name].iloc[3,:-1],ax=ax[1,i],color=color[i])
            ax[1,i].set_title('R sqpare of '+name+' classification')
            sns.barplot(x=self.wellness_map[name].columns[:-1],y=self.wellness_map[name].iloc[1,:-1],ax=ax[2,i],color=color[i])
            ax[2,i].set_title('p value of '+name+' classification')
            i+=1
    def get_total_return(self):
        """Compute each company's cumulative total return as a
        ['company', 'total_return'] frame on self.total_return."""
        self.total_return=self.stock_return.groupby(by='company').apply(lambda df: (df['return']+1).cumprod().iloc[-1]-1)
        # Bug fix: the original computed reset_index().rename(...) but discarded
        # the result, leaving total_return a raw Series and breaking anova().
        self.total_return=self.total_return.reset_index().rename(columns={0:'total_return'})
    def anova(self):
        """One-way ANOVA of total return across clusters, per classification."""
        # Bug fix: `== None` raises ValueError once total_return is a DataFrame.
        if self.total_return is None:
            self.get_total_return()
        self.total_return=self.total_return.merge(self.class_df,on='company',how='inner')
        self.F=[]
        for method in self.method:
            f=anova_lm(ols('total_return~C('+method+')',data=self.total_return).fit()).iloc[0,3]
            self.F.append((method,f))
print('F of '+method+' is: ',f) | {"/utils.py": ["/combine_models.py"]} |
68,317 | gaohaochenghc/industry-classification | refs/heads/master | /utils.py | from sklearn.decomposition import PCA, TruncatedSVD
import pandas as pd
import copy
from combine_models import *
def compute_pca_svd(input_combine_models, cosine_matrix, type='pca'):
    """Return a deep copy of the model bundle with a 2-D PCA or SVD projection
    of `cosine_matrix` appended to its combine_class frame.

    The classification columns (bundle.namelist) are cast to 'category' so
    downstream plotting treats them as discrete labels. The input bundle is
    not mutated.
    """
    assert type in ['pca', 'svd']
    bundle = copy.deepcopy(input_combine_models)
    # Both branches only differ in the reducer and the projection column names.
    if type == 'pca':
        reducer, columns = PCA(n_components=2), ['pca1', 'pca2']
    else:
        reducer, columns = TruncatedSVD(n_components=2), ['svd1', 'svd2']
    projection = pd.DataFrame(reducer.fit_transform(cosine_matrix), columns=columns)
    bundle.combine_class = pd.concat([bundle.combine_class, projection], axis=1)
    bundle.combine_class[bundle.namelist] = bundle.combine_class[bundle.namelist].astype(
        'category')
    return bundle
def trans_ticker(price_data):
    """Normalize the tickers in the first column to exchange-suffixed codes.

    The last three characters of each ticker are replaced by '.SH' (Shanghai)
    when the code starts with '6' or '9', otherwise by '.SZ' (Shenzhen).
    Mutates and returns `price_data`.
    """
    def _to_exchange_code(ticker):
        suffix = '.SH' if ticker[0] in ('6', '9') else '.SZ'
        return ticker[:-3] + suffix
    price_data.iloc[:, 0] = price_data.iloc[:, 0].map(_to_exchange_code)
    return price_data
| {"/utils.py": ["/combine_models.py"]} |
68,345 | eduardogpg/crm_san | refs/heads/master | /crm_san/urls.py | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from .views import index
from .views import donantes
from .views import donaciones
from .views import circulos
from .views import administracion
from .views import alianzas
# Route table: one page per CRM section plus the Django admin; static assets
# are served from STATIC_ROOT (development-style static serving).
urlpatterns = [
    path('', index, name='index'),
    path('admin/', admin.site.urls),
    path('donantes/', donantes, name='donantes'),
    path('donaciones/', donaciones, name='donaciones'),
    path('circulos/', circulos, name='circulos'),
    path('administracion/', administracion, name='administracion'),
    path('alianzas/', alianzas, name='alianzas'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| {"/crm_san/urls.py": ["/crm_san/views.py"]} |
68,346 | eduardogpg/crm_san | refs/heads/master | /crm_san/views.py | from django.shortcuts import render
def index(request):
    """Render the dashboard landing page."""
    context = {}
    return render(request, 'dashboard.html', context)
def donantes(request):
    """Render the donors page."""
    context = {}
    return render(request, 'donantes.html', context)
def donaciones(request):
    """Render the donations page."""
    context = {}
    return render(request, 'donaciones.html', context)
def circulos(request):
    """Render the circles page."""
    context = {}
    return render(request, 'circulos.html', context)
def administracion(request):
    """Render the administration page."""
    context = {}
    return render(request, 'administracion.html', context)
def alianzas(request):
    """Render the alliances page."""
    return render(request, 'alianzas.html', {
}) | {"/crm_san/urls.py": ["/crm_san/views.py"]} |
68,347 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/models.py | from django.db import models
class job(models.Model):
    """A hash-cracking job: the target hash plus the backing celery task id.

    NOTE(review): class name breaks PEP 8 (should be `Job`); renaming would
    require touching migrations and callers, so it is kept.
    """
    hash = models.CharField(max_length=100)  # hash string to crack (field name shadows the builtin; kept for DB compat)
    resultid = models.CharField(max_length=100)  # celery task id linking to django_celery_results.TaskResult
    created_at = models.DateTimeField(auto_now_add=True) # set once, when the row is created
    updated_at = models.DateTimeField(auto_now=True) # refreshed on every save
    creator = models.ForeignKey('auth.User', related_name='jobs', on_delete=models.CASCADE)  # owning user
    # resultid = models.ForeignKey('django_celery_results.TaskResult', related_name='jobid', on_delete=models.CASCADE,default='0')
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,348 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/views.py | from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import RetrieveUpdateDestroyAPIView, ListCreateAPIView
from .models import job
from django.http import JsonResponse
from .permissions import IsOwnerOrReadOnly, IsAuthenticated
from .serializers import jobserializer
from .serializers import TaskResultserializer
from .pagination import CustomPagination
from jobs import joblist
from django_celery_results.models import TaskResult
from crack import celery_app
class get_delete_update_job(RetrieveUpdateDestroyAPIView):
    """Retrieve, update, cancel (POST) or delete a single crack job, addressed
    by its celery task id (`pk`)."""
    serializer_class = TaskResultserializer
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly,)

    def get_queryset(self, pk):
        """Fetch one TaskResult by task id.

        NOTE(review): returns a Response on a miss instead of raising, so
        callers must type-check the result; kept for backward compatibility.
        """
        try:
            jobins = TaskResult.objects.get(task_id=pk)
        except TaskResult.DoesNotExist:
            content = {
                'status': 'Not Found'
            }
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        return jobins

    # Get a job
    def get(self, request, pk):
        """Return the task's status/result joined with the owning job's hash
        and creator name."""
        try:
            jobins = TaskResult.objects.get(task_id=pk)
        except TaskResult.DoesNotExist:
            content = {
                'status': 'Not Found'
            }
            return Response(content, status=status.HTTP_404_NOT_FOUND)
        serializer = TaskResultserializer(jobins)
        obj = job.objects.get(resultid=pk)
        # Expose the owner's name through the serializer's user_name field.
        serializer.username = obj.creator.username
        response_data = {'status': 'True', 'msg': '任务获取成功!', 'data': {
            "meta": {},
            "result": [
                {
                    "id": jobins.task_id,
                    "status": jobins.status,
                    "user_name": obj.creator.username,
                    "result": jobins.result,
                    "hash": obj.hash,
                }
            ]
        }}
        return Response(response_data, status=status.HTTP_200_OK)

    # Update a job
    def put(self, request, pk):
        """Update the job row; only its creator may do so.

        Bug fixes: the original compared `request.user == obj.creator.username`
        (User vs str, always False, so every update was rejected) and
        serialized the model class `job` instead of the fetched instance.
        """
        obj = job.objects.get(resultid=pk)
        if request.user == obj.creator:  # If creator is who makes request
            serializer = jobserializer(obj, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            content = {
                'status': 'UNAUTHORIZED'
            }
            return Response(content, status=status.HTTP_401_UNAUTHORIZED)

    # Delete a job
    def delete(self, request, pk):
        """Revoke the celery task, then delete both the job row and its
        TaskResult; creator-only."""
        try:
            obj = job.objects.get(resultid=pk)
        except job.DoesNotExist:
            response_data = {'status': 'False', 'msg': '任务不存在!', 'data': {
                "meta": {},
                "result": [
                ]
            }}
            return Response(response_data, status=status.HTTP_200_OK)
        if request.user == obj.creator:  # If creator is who makes request
            celery_app.control.revoke(pk)
            obj.delete()
            jobresult = TaskResult.objects.get(task_id=pk)
            jobresult.delete()
            response_data = {'status': 'True', 'msg': '任务删除成功!', 'data': {
                "meta": {},
                "result": [
                ]
            }}
            return Response(response_data, status=status.HTTP_200_OK)
        else:
            content = {
                'status': 'UNAUTHORIZED'
            }
            return Response(content, status=status.HTTP_401_UNAUTHORIZED)

    # cancel a job
    def post(self, request, pk):
        """Revoke (cancel) the celery task without deleting the job record;
        creator-only."""
        try:
            obj = job.objects.get(resultid=pk)
        except job.DoesNotExist:
            response_data = {'status': 'False', 'msg': '任务不存在!', 'data': {
                "meta": {},
                "result": [
                ]
            }}
            return Response(response_data, status=status.HTTP_200_OK)
        if request.user == obj.creator:  # If creator is who makes request
            celery_app.control.revoke(pk)
            response_data = {'status': 'True', 'msg': '任务取消成功!', 'data': {
                "meta": {},
                "result": [
                ]
            }}
            return Response(response_data, status=status.HTTP_200_OK)
        else:
            content = {
                'status': 'UNAUTHORIZED'
            }
            return Response(content, status=status.HTTP_401_UNAUTHORIZED)
class get_post_jobs(ListCreateAPIView):
    """List all crack jobs or submit a new one (dispatched to celery)."""
    serializer_class = TaskResultserializer
    permission_classes = (IsAuthenticated,)
    pagination_class = CustomPagination
    def get_queryset(self):
        # All task results, regardless of owner.
        jobs = TaskResult.objects.all()
        return jobs
    # Get all jobs
    def get(self, request):
        """Return every TaskResult joined with its job row (hash + creator).

        NOTE(review): one `job.objects.get` per result row (N+1 queries);
        acceptable for small tables, consider a single join for large ones.
        """
        jobins = self.get_queryset()
        result = []
        for record in jobins:
            obj = job.objects.get(resultid=record.task_id)
            result.append({
                "id": record.task_id,
                "status": record.status,
                "user_name": obj.creator.username,
                "result": record.result,
                "hash": obj.hash,
            })
        response_data = {'status': 'True', 'msg': '任务获取成功!', 'data': {
            "meta": {},
            "result": result
        }}
        return Response(response_data, status=status.HTTP_200_OK)
    # Create a new job
    def post(self, request):
        """Validate the payload, enqueue the celery crack task, and persist the
        job with the new task id."""
        serializer = jobserializer(data=request.data)
        if serializer.is_valid():
            hashstr = request.data['hash']
            ret = joblist.pojie.delay(hashstr)  # async dispatch; returns an AsyncResult
            serializer.save(creator=request.user, resultid=ret.task_id)
            response_data = {'status': 'True', 'msg': '任务提交成功!', 'data': {
                "meta": {},
                "result": [
                    {
                        "id": ret.task_id,
                        "status": 0,
                        "result": ret.result
                    }
                ]
            }}
            return Response(response_data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,349 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/joblist.py | from crack import celery_app
# import shlex
import subprocess
import logging
def run_shell_command1(hash):
    """Run `df -h` through a shell and stream each output line to the log.

    Returns True on success, False if spawning or running the process failed.
    NOTE(review): the `hash` argument is accepted but unused — confirm intent.
    """
    try:
        proc = subprocess.Popen(
            "df -h",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        # Log stdout (stderr is merged in) line by line as it arrives.
        for raw_line in iter(proc.stdout.readline, b''):
            logging.info(raw_line.strip())
        proc.communicate()
        proc.wait()
    except (OSError, subprocess.CalledProcessError) as exception:
        logging.info('Exception occured: ' + str(exception))
        logging.info('Subprocess failed')
        return False
    else:
        # No exception was raised.
        logging.info('Subprocess finished')
        return True
def run_shell_command2(hash):
    """Fetch HTTP headers from www.google.com via `curl -I`, logging each line
    of the command's stdout.

    Returns True on success (regardless of curl's exit code, since check=False),
    False if launching the process failed.
    NOTE(review): the `hash` argument is accepted but unused — confirm intent.
    """
    try:
        completed = subprocess.run(["curl", "-I", "www.google.com"], check=False, capture_output=True, text=True)
        for header_line in completed.stdout.splitlines():
            logging.info(header_line.strip())
    except (OSError, subprocess.CalledProcessError) as exception:
        logging.info('Exception occured: ' + str(exception))
        logging.info('Subprocess failed')
        return False
    else:
        # No exception was raised.
        logging.info('Subprocess finished')
        return True
@celery_app.task
def pojie(hash):
    """Celery task: run the (placeholder) crack against `hash`.

    Currently just shells out via run_shell_command2 and returns a fixed
    string, which django_celery_results stores as the task result.
    NOTE(review): `hash` shadows the builtin name.
    """
    run_shell_command2(hash)
    print("crack with hash key:" + hash + " ok done!")
    return "this is result"
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,350 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/migrations/0005_auto_20201006_0214.py | # Generated by Django 2.2.13 on 2020-10-06 02:14
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated by Django: rename job.title to job.hash (no data change)."""

    dependencies = [
        ('jobs', '0004_auto_20201006_0203'),
    ]

    operations = [
        migrations.RenameField(
            model_name='job',
            old_name='title',
            new_name='hash',
        ),
    ]
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,351 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/migrations/0003_job_resultid.py | # Generated by Django 2.2.13 on 2020-10-06 01:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django: add job.resultid as a FK to
    django_celery_results.TaskResult (default '0'); later migrations change it."""

    dependencies = [
        ('django_celery_results', '0007_remove_taskresult_hidden'),
        ('jobs', '0002_auto_20201005_0830'),
    ]

    operations = [
        migrations.AddField(
            model_name='job',
            name='resultid',
            field=models.ForeignKey(default='0', on_delete=django.db.models.deletion.CASCADE, related_name='jobid', to='django_celery_results.TaskResult'),
        ),
    ]
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,352 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/serializers.py | from rest_framework import serializers
from .models import job
from django.contrib.auth.models import User
from django_celery_results.models import TaskResult
class jobserializer(serializers.ModelSerializer): # serializer for the job model
    """Serialize job rows; creator and resultid are read-only, filled server-side.

    NOTE(review): `source='resultid.task_id'` assumes resultid is a FK to
    TaskResult, but the current model declares it as a CharField — confirm.
    """
    creator = serializers.ReadOnlyField(source='creator.username')
    resultid=serializers.ReadOnlyField(source='resultid.task_id')
    class Meta:
        model = job
        fields = ('id', 'hash', 'creator', 'resultid')
class UserSerializer(serializers.ModelSerializer): # serializer for the auth User model
    """Expose a user together with the primary keys of the jobs they created."""
    jobs = serializers.PrimaryKeyRelatedField(many=True, queryset=job.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'jobs')
class TaskResultserializer(serializers.ModelSerializer): # serializer for celery TaskResult rows
    """Serialize a django_celery_results TaskResult, exposing the owning user's
    name via the mutable `username` attribute (set by the view before use)."""
    user_name = serializers.SerializerMethodField()
    def __init__(self, *args, **kwargs):
        # Bug fix: the original signature was __init__(self, qs) and called
        # super().__init__(self, qs) — passing the serializer itself as the
        # `instance` argument. Forward the arguments untouched instead; the
        # one-positional-argument call sites keep working.
        super().__init__(*args, **kwargs)
        self.username = None
    def get_user_name(self, obj):
        """Getter for the user_name field; returns whatever the view stored."""
        return self.username
    class Meta:
        model = TaskResult
        fields = ('task_id', 'status', 'result', 'user_name')
| {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,353 | andrewpedia/django-rest-framework-crud | refs/heads/master | /jobs/urls.py | from django.urls import include, path, re_path
from . import views
urlpatterns = [
    re_path(r'^husky/v1/hash/tasks/(?P<pk>.+)$', # retrieve / update / cancel / delete one job by task id
        views.get_delete_update_job.as_view(),
        name='get_delete_update_job'
    ),
    path('husky/v1/hash/tasks/', # list all jobs or create a new one
        views.get_post_jobs.as_view(),
        name='get_post_jobs'
    )
] | {"/jobs/views.py": ["/jobs/models.py", "/jobs/serializers.py"], "/jobs/serializers.py": ["/jobs/models.py"]} |
68,362 | SPSG/sentiment | refs/heads/master | /senta.py | from pymongo import MongoClient
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import subjectivity
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk import tokenize
sid=SentimentIntensityAnalyzer()
# NLTK sentiment training data
#subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')]
#obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')]
#training_docs = [subj_docs] + [obj_docs]
#sentim_analyzer = SentimentAnalyzer()
#all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
#unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
#sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
#training_set = sentim_analyzer.aply_features(training_docs)
#trainer = NaiveBayesCLassifier.train
#classifier = sentim_analyzer.train(trainer, training_set)
# mongodb initialization/import
def dbload(dbname, collname):
    """Connect to local MongoDB and bind module-level handles.

    Side effects: sets the globals ``client`` (MongoClient), ``db``
    (database *dbname*) and ``tweets`` (collection *collname*), which
    the ``__main__`` block then passes to score().
    """
    global client, db, tweets
    client = MongoClient()
    db = client[dbname]
    tweets = db[collname]
def score(tweets):
    """Attach a VADER polarity dict to every document in *tweets*.

    Writes each score under the ``sentiment_info`` field of its tweet.
    """
    for doc in tweets.find():
        polarity = sid.polarity_scores(doc['text'])
        tweets.update_one({'_id': doc['_id']}, {'$set': {'sentiment_info': polarity}})
if __name__ == '__main__':
dbload('data', 'tweets')
score(tweets)
| {"/graphs.py": ["/twitterreader.py", "/senta.py"]} |
68,363 | SPSG/sentiment | refs/heads/master | /graphs.py | import twitterreader, senta
import numpy as np
import matplotlib.pyplot as plt
import mpld3
# Demo plot: hard-coded sample series rendered in the browser via mpld3.
# NOTE(review): importing twitterreader/senta above runs their module-level
# side effects (Twitter fetch, DB writes) — confirm that is intentional.
plt.plot([3, 1, 4, 1, 5], 'ks-', mec='w', mew=5, ms=20)
plt.xlabel('Autocorrelation of s1', fontsize=20)
mpld3.show()
| {"/graphs.py": ["/twitterreader.py", "/senta.py"]} |
68,364 | SPSG/sentiment | refs/heads/master | /twitterreader.py | import tweepy
import json
from pymongo import MongoClient
from pprint import *
import webbrowser
# access_token = '3320969365-Epm7HFGVOuPYKScLJYOpT9sMJMJHl1NvpNEnhn0'
# access_secret = '7saqiEs5O6slpaqdXbJEZ9BAtRcI3HVceqWTUAEKslcRc'
#Make a connection to localhost
connection = MongoClient()
#initialize and assign collection to a database
db = connection.data
def get_twitter_auth():
    # Setup Twitter autentication
    """Run the interactive OAuth flow and return an authorised handler.

    Opens the Twitter authorisation URL in a browser tab, then exchanges
    the verifier for an access token and binds it to the handler.

    SECURITY NOTE(review): the consumer key/secret are hard-coded here and
    old access tokens sit in plain comments above — rotate them and load
    credentials from the environment or a config file instead.
    """
    consumer_key = 'ZZHlgZYoCR72HWvqZ4LbFMNcb'
    consumer_secret = 'Gx2g9Z5DwaBI3Bq0JyxK5pAwwPWqSdN4t1rf5mhghEhwMXGjzm'
    callback_url = "https://testing-6f08d.firebaseapp.com/__/auth/handler"
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_url)
    try:
        redirect_url = auth.get_authorization_url()
        webbrowser.open_new_tab(redirect_url)
    except tweepy.TweepError:
        print('Error! Failed to get request token.')
    # NOTE(review): tweepy's OAuthHandler exposes no GET attribute, so this
    # line looks like it raises AttributeError; the verifier is normally
    # typed in by the user or parsed from the callback request — confirm.
    verifier = auth.GET.get('oauth_verifier')
    try:
        auth.get_access_token(verifier)
    except tweepy.TweepError:
        print('Error! Failed to get access token')
    key = auth.access_token
    secret = auth.access_token_secret
    auth.set_access_token(key, secret)
    return auth
# def get_twitter_bot():
# consumer_key = 'ZZHlgZYoCR72HWvqZ4LbFMNcb'
# consumer_secret = 'Gx2g9Z5DwaBI3Bq0JyxK5pAwwPWqSdN4t1rf5mhghEhwMXGjzm'
# key = '3320969365-Epm7HFGVOuPYKScLJYOpT9sMJMJHl1NvpNEnhn0'
# secret = '7saqiEs5O6slpaqdXbJEZ9BAtRcI3HVceqWTUAEKslcRc'
# auth = OAuthHandler(consumer_key, consumer_secret)
# auth.set_access_token(key, secret)
# return auth
def get_twitter_client(auth):  # pass in the authentication testing or user specific
    """Build a tweepy API client from an authenticated handler."""
    return tweepy.API(auth)
def get_user_timeline():
    """Fetch the authenticated user's full timeline into MongoDB.

    Each status is stored as its raw JSON payload in the ``data.tweets``
    collection bound at module level.
    """
    # change the line below to reflect whether testing or not
    client = get_twitter_client(get_twitter_auth())
    for tweet in tweepy.Cursor(client.user_timeline).items():
        # NOTE(review): Collection.insert was removed in pymongo 3+; newer
        # drivers need insert_one — confirm the pinned pymongo version.
        db.tweets.insert(tweet._json)
get_user_timeline()
#sorts tweets from oldest to newest
timeline = db.tweets.find({'retweeted': False}).sort('created_at', 1)
#obtain one document where its an original tweet
user_cursor = db.tweets.find_one({'retweeted': False})
#if user_cursor:
# user_cursor = user_cursor['user']['name']
#get all tweets that aren't quoted or retweeted
tweets_cursor = db.tweets.find({'is_quote_status': False, 'retweeted': False, })
#gets all retweeted tweets
retweets_cursor = db.tweets.find({'retweeted': True})
#
#creates a dictionary of tweet id strings as keys and number of favorites as values
# Maps tweet id_str -> favourite count, plus summary keys added by favorites().
fav_dict = {}


def favorites(tweet):
    """Tally favourite counts for *tweet* (an iterable of tweet dicts).

    Populates the module-level ``fav_dict`` with one entry per tweet id
    and three summary keys: ``total_favs``, ``total_posts``, ``fav_ratio``.
    """
    for t in tweet:
        fav_dict[t["id_str"]] = t["favorite_count"]
    # summed before the summary keys exist, so only per-tweet counts are added
    fav_dict['total_favs'] = sum(fav_dict.values())
    fav_dict['total_posts'] = len(fav_dict) - 1  # minus the total_favs key
    # BUG FIX: an empty timeline used to raise ZeroDivisionError here.
    if fav_dict['total_posts']:
        fav_dict['fav_ratio'] = fav_dict['total_favs'] / fav_dict['total_posts']
    else:
        fav_dict['fav_ratio'] = 0.0
    # NOTE(review): calling this twice without clearing fav_dict lets the
    # summary keys from the first call pollute the second sum — confirm
    # single-use, or clear() between calls.
favorites(timeline)
pprint(user_cursor)
| {"/graphs.py": ["/twitterreader.py", "/senta.py"]} |
68,365 | Mozzo1000/movielst | refs/heads/master | /movielst/movielst.py | import textwrap
import argparse
import pkg_resources
import hashlib
import logging.config
import platform
import subprocess
import urllib.request
import urllib.error
import uuid
import hashlib
from .API import get_api
from .database import *
from guessit import guessit
from terminaltables import AsciiTable
from tqdm import tqdm
from colorama import init, Fore
init()
# Recognised movie file extensions. BUG FIX: the original implicit string
# concatenation had no space at the literal boundaries, fusing neighbours
# into tokens like ".bix.box", ".flv.flx", ".mpg.mpv", ".rmvb.swf" and
# ".wmx.wrap" — those extensions never matched. Each fragment now ends
# with a space so every extension survives the split().
EXT = (".3g2 .3gp .3gp2 .3gpp .60d .ajp .asf .asx .avchd .avi .bik .bix "
       ".box .cam .dat .divx .dmf .dv .dvr-ms .evo .flc .fli .flic .flv "
       ".flx .gvi .gvp .h264 .m1v .m2p .m2ts .m2v .m4e .m4v .mjp .mjpeg "
       ".mjpg .mkv .moov .mov .movhd .movie .movx .mp4 .mpe .mpeg .mpg "
       ".mpv .mpv2 .mxf .nsv .nut .ogg .ogm .omf .ps .qt .ram .rm .rmvb "
       ".swf .ts .vfw .vid .video .viv .vivo .vob .vro .wm .wmv .wmx "
       ".wrap .wvx .wx .x264 .xvid")
EXT = tuple(EXT.split())
# Set by util() when --force is passed; makes add_movie() overwrite duplicates.
FORCE_INDEX = False
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: bootstrap config/DB/logging, then parse and dispatch args."""
    create_config()
    create_movie_table()
    create_user_table()
    # Seed default web-UI credentials; INSERT OR IGNORE keeps an existing row.
    add_user('admin', 'admin')
    upgrade()
    # Rotating file logger; level and location come from the config file.
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(asctime)s - %(levelname)s - %(name)-12s/%(funcName)s():%(lineno)d - %(message)s'
            },
        },
        'handlers': {
            'rotate_file': {
                'level': get_setting('General', 'log_level'),
                'formatter': 'standard',
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': get_setting('General', 'log_location') + 'movielst.log',
                'encoding': 'utf8',
                'maxBytes': 10*1024*1024,
                'backupCount': 1,
            }
        },
        'loggers': {
            '': {
                'handlers': ['rotate_file'],
                'level': get_setting('General', 'log_level'),
                'propagate': True
            }
        }
    })
    logger.debug("TESTING!")
    parser = argparse.ArgumentParser()
    parser.add_argument('PATH', nargs='?', default='')
    parser.add_argument('-v', '--version', help='Show version.', action='version', version='%(prog)s ' + get_version())
    parser.add_argument('-i', '--imdb', help='Sort acc. to IMDB rating.(dec)', action='store_true')
    parser.add_argument('-t', '--tomato', help='Sort acc. to Tomato Rotten rating.(dec)', action='store_true')
    parser.add_argument('-g', '--genre', help='Show movie name with its genre.', action='store_true')
    parser.add_argument('-a', '--awards', help='Show movie name with awards recieved.', action='store_true')
    parser.add_argument('-c', '--cast', help='Show movie name with its cast.', action='store_true')
    parser.add_argument('-d', '--director', help='Show movie name with its director(s).', action='store_true')
    parser.add_argument('-y', '--year', help='Show movie name with its release date.', action='store_true')
    parser.add_argument('-r', '--runtime', help='Show movie name with its runtime.', action='store_true')
    parser.add_argument('-e', '--export', help='Export list to either csv or excel', nargs=2, metavar=('type', 'output'))
    parser.add_argument('-f', '--force', help='Force indexing', action='store_true')
    parser.add_argument('-I', '--imdb-rev', help='Sort acc. to IMDB rating.(inc)', action='store_true')
    parser.add_argument('-T', '--tomato-rev', help='Sort acc. to Tomato Rotten rating.(inc)', action='store_true')
    parser.add_argument('-ec', '--edit-config', help='Open the configuration file in the default editor', action='store_true')
    parser.add_argument('-ed', '--edit-index', help='Edit the index file', action='store_true')
    parser.add_argument('--download-images', help='Will download and store all poster images, will write over the location url in the database', action='store_true')
    util(parser.parse_args())
def get_version():
    """Return the installed package version, or a source checksum when
    running from a checkout that was never pip-installed."""
    try:
        return pkg_resources.get_distribution("movielst").version
    except pkg_resources.DistributionNotFound:
        with open(os.path.realpath(__file__), 'rb') as source:
            digest = hashlib.sha256(source.read()).hexdigest()
        return "NOT INSTALLED ON SYSTEM! - SHA: " + digest
def util(args):
    """Execute the single action selected by the parsed command-line *args*.

    Exactly one branch runs per invocation: indexing (PATH), one of the
    sorted column views, export (csv/excel), config or index editing,
    poster caching, or the default full table.
    """
    if args.PATH:
        if args.force:
            # --force wipes the index so every file gets re-fetched
            global FORCE_INDEX
            FORCE_INDEX = True
            con = connect_db()
            cur = con.cursor()
            cur.execute('DELETE FROM movies')
            con.commit()
            con.close()
        if os.path.isdir(args.PATH):
            print("\n\nIndexing all movies inside ",
                  args.PATH + "\n\n")
            logger.info('Started new index at: ' + args.PATH)
            dir_json = get_setting('Index', 'location') + 'movies.json'
            scan_dir(args.PATH, dir_json, True if args.download_images else False)
            if movie_name:
                if movie_not_found:
                    print(Fore.RED + "\n\nData for the following movie(s)"
                                     " could not be fetched -\n")
                    for val in movie_not_found:
                        print(Fore.RED + val)
                if not_a_movie:
                    print(Fore.RED + "\n\nThe following media in the"
                                     " folder is not movie type -\n")
                    for val in not_a_movie:
                        print(Fore.RED + val)
                print(Fore.GREEN + "\n\nRun $movielst\n\n")
            else:
                print(Fore.RED + "\n\nGiven directory does not contain movies."
                                 " Pass a directory containing movies\n\n")
                logger.warning('Could not find movies in given directory: ' + args.PATH)
        else:
            print(Fore.RED + "\n\nDirectory does not exists."
                             " Please pass a valid directory containing movies.\n\n")
            logger.warning('Directory does not exists.')
    elif args.imdb:
        table_data = [["TITLE", "IMDB RATING"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["imdb"]])
        sort_table(table_data, 1, True)
    elif args.tomato:
        table_data = [["TITLE", "TOMATO RATING"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["tomato"]])
        sort_table(table_data, 1, True)
    elif args.genre:
        table_data = [["TITLE", "GENRE"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["genre"]])
        sort_table(table_data, 0, False)
    elif args.awards:
        table_data = [["TITLE", "AWARDS"]]
        data, table = butler(table_data)
        for item in data:
            item["title"], item["awards"] = clean_table(item["title"],
                                                        item["awards"], item,
                                                        table)
            table_data.append([item["title"], item["awards"]])
        sort_table(table_data, 0, False)
    elif args.cast:
        table_data = [["TITLE", "CAST"]]
        data, table = butler(table_data)
        for item in data:
            item["title"], item["cast"] = clean_table(item["title"],
                                                      item["cast"],
                                                      item, table)
            table_data.append([item["title"], item["cast"]])
        sort_table(table_data, 0, False)
    elif args.director:
        table_data = [["TITLE", "DIRECTOR(S)"]]
        data, table = butler(table_data)
        for item in data:
            item["title"], item["director"] = clean_table(item["title"],
                                                          item["director"],
                                                          item, table)
            table_data.append([item["title"], item["director"]])
        sort_table(table_data, 0, False)
    elif args.year:
        table_data = [["TITLE", "RELEASED"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["year"]])
        sort_table(table_data, 0, False)
    elif args.runtime:  # Sort result by handling numeric sort
        table_data = [["TITLE", "RUNTIME"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["runtime"]])
        print_table(table_data)
    elif args.imdb_rev:
        table_data = [["TITLE", "IMDB RATING"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["imdb"]])
        sort_table(table_data, 1, False)
    elif args.tomato_rev:
        table_data = [["TITLE", "TOMATO RATING"]]
        data, table = butler(table_data)
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            table_data.append([item["title"], item["tomato"]])
        sort_table(table_data, 1, False)
    elif args.export:
        # args.export is [type, output]; drop the type token to get the file name.
        table_data = get_table_everything(return_item=True)
        if 'excel' in args.export:
            export_type = args.export.index('excel')
            filename = args.export[:export_type] + args.export[export_type + 1:]
            workbook = xlsxwriter.Workbook(filename[0])
            worksheet = workbook.add_worksheet()
            worksheet.set_row(0, None, workbook.add_format({'bold': True}))
            worksheet.autofilter(0, 0, len(table_data[1]), 8)
            row = 0
            col = 0
            for item in table_data[0]:
                worksheet.write_string(row, col, item[0])
                worksheet.write_string(row, col + 1, item[1])
                worksheet.write_string(row, col + 2, item[2])
                worksheet.write_string(row, col + 3, item[3])
                worksheet.write_string(row, col + 4, item[4])
                worksheet.write_string(row, col + 5, item[5])
                worksheet.write_string(row, col + 6, item[6])
                worksheet.write_string(row, col + 7, item[7])
                worksheet.write_string(row, col + 8, item[8])
                row += 1
            workbook.close()
        elif 'csv' in args.export:
            export_type = args.export.index('csv')
            filename = args.export[:export_type] + args.export[export_type + 1:]
            with open(str(filename[0]), 'w', newline='') as outputfile:
                wr = csv.writer(outputfile, quoting=csv.QUOTE_ALL)
                wr.writerows(table_data[0])
        else:
            print("Unsupported character.")
            logger.warning('Used something else than supported arguments for exporting.')
    elif args.edit_config:
        if platform.system() == 'Darwin':
            subprocess.call(('open', CONFIG_PATH + CONFIG_FILE))
        elif platform.system() == 'Linux':
            subprocess.call(('xdg-open', CONFIG_PATH + CONFIG_FILE))
        elif platform.system() == 'Windows':
            subprocess.call(('start', CONFIG_PATH + CONFIG_FILE), shell=True)
        else:
            print("Can not open the configuration file. System not supported.")
    elif args.edit_index:
        table_data = [["TITLE", "FILE NAME"]]
        data, table = butler(table_data)
        i = 0
        for item in data:
            item["title"] = clean_table(item["title"], None, item, table)
            i += 1
            table_data.append([item["title"], item["file_info"]["name"]])
        sort_table(table_data, 1, False)
        selection = input(Fore.GREEN + "Select FILE NAME to edit : " + Fore.RESET)
        print("\n\n[0] - Edit name")
        print("[1] - Edit genre")
        print("[2] - Edit IMDb rating")
        print("[3] - Edit runtime")
        print("[4] - Edit Tomato rating")
        print("[5] - Edit year")
        print("[6] - Edit awards")
        print("[7] - Edit cast")
        print("[8] - Edit director")
        print("[9] - Edit poster")
        print("[10] - Exit")
        option = input(Fore.RED + "Select option : " + Fore.RESET)
        if option == "0":
            edit('name', selection, input("New name : "))
        elif option == "1":
            edit('genre', selection, input("New genre(s) : "))
        elif option == "2":
            edit('imdb_rating', selection, input("New IMDb rating :"))
        elif option == "3":
            edit('runtime', selection, input('New runtime : '))
        elif option == "4":
            edit('tomato_rating', selection, input("New Tomato rating"))
        elif option == "5":
            edit('year', selection, input("New year : "))
        elif option == "6":
            edit('awards', selection, input("New awards : "))
        elif option == "7":
            edit('cast', selection, input("New cast : "))
        elif option == "8":
            # BUG FIX: was edit('directory', ...), which database.edit logs as
            # an unsupported type and ignores; the supported key is 'director'.
            edit('director', selection, input("New director : "))
        elif option == "9":
            edit('poster', selection, input("New poster : "))
        else:
            print("Exiting")
    elif args.download_images:
        print('DOWNLOAD IMAGES!')
        cache_images(get_all_images().fetchall())
    else:
        sort_table(get_table_everything(printout=True), 0, False)
def cache_images(urls):
    """Download poster images into CONFIG_PATH/cache and point the DB at them.

    *urls* is an iterable of (file_info_name, poster_url) pairs. Each image
    is saved under a SHA-256-derived name and the movie row's poster column
    is rewritten to the local path via edit().
    """
    if not os.path.exists(CONFIG_PATH + 'cache/'):
        os.makedirs(CONFIG_PATH + 'cache/')
    for i in urls:
        logger.debug(i[1])
        # uuid4 salt keeps repeated downloads of the same URL from colliding
        hash_name = hashlib.sha256(uuid.uuid4().hex.encode() + i[1].encode()).hexdigest()
        logger.debug("HASH : " + str(hash_name))
        try:
            urllib.request.urlretrieve(i[1], CONFIG_PATH + 'cache/' + str(hash_name) + '.jpg')
            edit('poster', i[0], CONFIG_PATH + 'cache/' + str(hash_name) + '.jpg')
        except ValueError as error:
            logger.error(error)
        except urllib.error.URLError:
            logger.error("Not a valid url")
def get_table_everything(printout=False, return_item=False):
    """Build the movie table rows from the index.

    printout=True yields the short column set used for terminal display
    (with wrapped cells); otherwise all columns are included. With
    return_item=True the last raw record is returned alongside the rows,
    which is what the export code expects.
    """
    item = None  # BUG FIX: was a NameError when the index is empty and return_item=True
    if printout:
        table_data = [
            ["TITLE", "GENRE", "IMDB", "RUNTIME", "TOMATO",
             "YEAR"]]
        data, table = butler(table_data)
        for item in data:
            item["title"], item["genre"] = clean_table(item["title"],
                                                       item["genre"], item,
                                                       table)
            table_data.append([item["title"], item["genre"],
                               item["imdb"], item["runtime"],
                               item["tomato"], item["year"]])
    else:
        table_data = [
            ["TITLE", "GENRE", "IMDB", "RUNTIME", "TOMATO",
             "YEAR", "AWARDS", "CAST", "DIRECTOR"]]
        data, table = butler(table_data)
        for item in data:
            table_data.append([item["title"], item["genre"],
                               item["imdb"], item["runtime"],
                               item["tomato"], item["year"], item["awards"], item["cast"], item["director"]])
    if return_item:
        return table_data, item
    else:
        return table_data
def sort_table(table_data, index, reverse):
    """Sort the data rows (header excluded) by column *index*, then print."""
    header, rows = table_data[:1], table_data[1:]
    rows.sort(key=lambda row: row[index], reverse=reverse)
    print_table(header + rows)
def clean_table(tag1, tag2, item, table):
    """Wrap cell text to the table's column widths.

    Returns (tag1, tag2) when both are truthy, tag1 alone when only tag1
    is truthy, and None otherwise — mirroring how callers unpack it.
    """
    def _fit(text, column):
        # wrap only when the text would overflow the column
        width = table.column_max_width(column)
        return textwrap.fill(text, width) if len(text) > width else text

    if tag1 and tag2:
        return _fit(tag1, 0), _fit(tag2, 1)
    if tag1:
        return _fit(tag1, 0)
def butler(table_data):
    """Load indexed movies and pair them with an AsciiTable for layout.

    Returns (data, table): *data* is the list of movie dicts from the DB,
    *table* is used by callers only for column-width calculations. Exits
    the process when the index cannot be read.
    """
    try:
        # NOTE(review): movie_path is never used and get_setting is unlikely
        # to raise IOError, so this guard may be dead — confirm.
        movie_path = get_setting('Index', 'location') + 'movies.db'
    except IOError:
        print(Fore.RED, "\n\nRun `$movielst PATH` to "
                        "index your movies directory.\n\n")
        logger.error('Movie index could not be found, please index before use.')
        quit()
    else:
        table = AsciiTable(table_data)
        try:
            data = db_to_json()
            return data, table
        except IOError:
            print(Fore.YELLOW, "\n\nRun `movielst PATH` to "
                               "index your movies directory.\n\n")
            logger.error('Movie index could not be found, please index before use.')
            quit()
def print_table(table_data):
    """Render rows as an ASCII table; rating tables get a centred column."""
    rendered = AsciiTable(table_data)
    rendered.inner_row_border = True
    rating_headers = ([['TITLE', 'IMDB RATING']], [['TITLE', 'TOMATO RATING']])
    if table_data[:1] in rating_headers:
        rendered.justify_columns[1] = 'center'
    print("\n")
    print(rendered.table)
# Module-level accumulators shared by scan_dir() and util(); reset only on restart.
movies = []           # NOTE(review): appears unused here — the index lives in SQLite now
movie_name = []       # file names recognised as movies during the last scan
not_a_movie = []      # media files guessit classified as non-movie
movie_not_found = []  # movies the metadata API could not resolve
def scan_dir(path, dir_json, download_images=False):  # TODO CLEANUP dir_json value not used, remove.
    """Walk *path*, collect movie files, fetch their metadata and index them.

    Files below the configured minimum size or without a recognised video
    extension are skipped. Names are accumulated in the module-level
    ``movie_name`` list; API failures land in ``movie_not_found`` and
    non-movie media in ``not_a_movie`` (via get_movie_info).
    """
    original_path = path
    min_bytes = int(get_setting('Index', 'min_size_to_index')) * 1024 * 1024
    # Preprocess the total files count
    for root, dirs, files in tqdm(os.walk(path)):
        for name in files:
            full_path = os.path.join(root, name)
            if os.path.getsize(full_path) > min_bytes:
                if os.path.splitext(name)[1] in EXT:
                    movie_name.append(name)
    with tqdm(total=len(movie_name), leave=True, unit='B',
              unit_scale=True) as pbar:
        for name in movie_name:
            data = get_movie_info(name)
            pbar.update()
            if data is not None and data['response'] == 'True':
                for key, val in data.items():
                    if val == "N/A":
                        data[key] = "-"  # normalise the API's "N/A" placeholder
                # BUG FIX: the original reused `ext` left over from the walk
                # loop, so every movie recorded the extension of the *last*
                # file seen; derive it from this movie's own name instead.
                ext = os.path.splitext(name)[1]
                data.update({"file_info": {"name": name, "location": original_path,
                                           "extension": ext}})
                add_movie(data, FORCE_INDEX)
                if download_images:
                    cache_images([(data['file_info']['name'], data['poster'])])
            else:
                if data is not None:
                    movie_not_found.append(name)
def get_movie_info(name):
    """Guess title/year from a filename and fetch metadata from the API.

    Non-movie media (and names guessit cannot type) are recorded in the
    module-level ``not_a_movie`` list and yield None.
    """
    guessed = guessit(name)
    try:
        if guessed['type'] != "movie":
            not_a_movie.append(name)
            return None
        year = guessed['year'] if 'year' in guessed else None
        return get_api(guessed['title'], year,
                       external_api=get_setting('API', 'use_external_api'))
    except KeyError:
        not_a_movie.append(name)
if __name__ == '__main__':
main()
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,366 | Mozzo1000/movielst | refs/heads/master | /web/main.py | from flask import Flask, render_template, send_from_directory, send_file, session, request, redirect, Response
import json
import subprocess
import re
from movielst import config, database
from web.forms import SettingsForm, LoginForm, AddUserForm, IndexForm, SearchForm
from web.dependency import check_for_dep
app = Flask(__name__)
app.config['SECRET_KEY'] = 'not really secret but still a really useless secret key for this use case'
app.config['DEP_FOLDER'] = config.CONFIG_PATH + 'dep/'
app.config['CACHE_FOLDER'] = config.CONFIG_PATH + 'cache/'
regex_url_valid = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def main():
    """Fetch front-end dependencies, then serve the app on the configured host/port."""
    check_for_dep()
    app.run(host=config.get_setting('Web', 'host'), port=config.get_setting('Web', 'port'), debug=False)
@app.route('/api/v1/dep/<path:filename>', methods=["GET"])
def dep_files(filename):
    """Serve a downloaded front-end dependency from the dep folder."""
    # NOTE(review): the filename= keyword was removed in Flask 2.2 (renamed
    # `path`) — confirm the pinned Flask version supports it.
    return send_from_directory(app.config['DEP_FOLDER'], filename=filename)
@app.route('/api/v1/cache/<path:filename>', methods=["GET"])
def cached_image_file(filename):
    """Serve a locally cached poster image by its hashed file name."""
    return send_from_directory(app.config['CACHE_FOLDER'], filename=filename)
@app.route('/api/v1/autocomplete', methods=['GET'])
def autocomplete():
    """Return every indexed movie title as a JSON array (feeds the search box)."""
    titles = [entry['title'] for entry in database.db_to_json()]
    return Response(json.dumps(titles), mimetype='application/json')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: movie grid, re-index form and title search."""
    if not session.get('logged_in') and config.get_setting('Web', 'require_login') == "True":
        return login()
    else:
        form = IndexForm()
        error = None
        cached = False
        data = database.db_to_json()
        if not data:
            data = None
        if form.validate_on_submit():
            if form.run_index.data:
                # SECURITY NOTE(review): shell command assembled from form
                # input — command-injection risk; prefer
                # subprocess.run(['movielst', ...]) with an argument list.
                output = subprocess.check_output('movielst ' + form.index_location.data)
                if "Directory does not exists." in str(output):
                    error = "invalid directory"
                else:
                    return redirect('/')
            form.process()
        search_form = SearchForm(request.form)
        if search_form.search.data:
            if search_form.autocomp.data:
                return redirect('/movie/' + search_form.autocomp.data)
        if data is not None:
            # cached reflects only the *last* movie's poster origin — see movie()
            for movie in data:
                if re.match(regex_url_valid, movie["poster"]):
                    # is a valid url, return cached False, ie. do nothing
                    cached = False
                else:
                    # Is not a url, return cached True to show local file instead#
                    cached = True
        return render_template('home.html', movie_list=data, form=form, error=error, cached=cached, search=search_form)
@app.route('/movie/<variable>')
def movie(variable):
    """Render the detail page for the movie titled *variable*.

    Looks the title up in the index; posters stored locally (not URLs)
    are rewritten to their cache-relative name and flagged via *cached*.
    """
    if not session.get('logged_in') and config.get_setting('Web', 'require_login') == "True":
        return login()
    else:
        data = database.db_to_json()
        details = {}  # renamed from `list`, which shadowed the builtin
        cached = None
        for record in data:
            if record["title"] == variable:
                details["title"] = record["title"]
                details["genre"] = record["genre"]
                details["imdb"] = record["imdb"]
                details["runtime"] = record["runtime"]
                details["tomato"] = record["tomato"]
                details["year"] = record["year"]
                details["awards"] = record["awards"]
                details["cast"] = record["cast"]
                details["director"] = record["director"]
                details["poster"] = record["poster"]
                details["file_info_name"] = record["file_info"]["name"]
                if re.match(regex_url_valid, details["poster"]):
                    cached = False
                else:
                    details["poster"] = details["poster"].replace(config.CONFIG_PATH + 'cache/', '')
                    cached = True
                details["description"] = record["description"]
        # the template keyword stays `list` so movie.html keeps working
        return render_template('movie.html', list=details, cached=cached)
@app.route('/settings', methods=['GET', 'POST'])
def settings():
    """Settings page: show current config values and persist submitted changes."""
    if not session.get('logged_in') and config.get_setting('Web', 'require_login') == "True":
        return login()
    else:
        form = SettingsForm()
        # pre-populate the form defaults from the current configuration file
        form.log_level_field.default = config.get_setting('General', 'log_level')
        form.log_location_field.default = config.get_setting('General', 'log_location')
        form.location_field.default = config.get_setting('Index', 'location')
        form.min_index_field.default = config.get_setting('Index', 'min_size_to_index')
        form.use_external_api_field.default = config.get_setting('API', 'use_external_api')
        form.omdb_api_key_field.default = config.get_setting('API', 'OMDb_API_key')
        form.tmdb_api_key_field.default = config.get_setting('API', 'TMdb_API_key')
        form.web_host_field.default = config.get_setting('Web', 'host')
        form.web_port_field.default = config.get_setting('Web', 'port')
        form.web_require_login_field.default = config.get_setting('Web', 'require_login')
        if form.validate_on_submit():
            # write every field back to the config file
            config.update_config('General', 'log_level', form.log_level_field.data)
            config.update_config('General', 'log_location', form.log_location_field.data)
            config.update_config('Index', 'location', form.location_field.data)
            config.update_config('Index', 'min_size_to_index', str(form.min_index_field.data))
            config.update_config('API', 'use_external_api', form.use_external_api_field.data)
            config.update_config('API', 'OMDb_API_key', form.omdb_api_key_field.data)
            config.update_config('API', 'TMdb_API_key', form.tmdb_api_key_field.data)
            config.update_config('Web', 'host', form.web_host_field.data)
            config.update_config('Web', 'port', form.web_port_field.data)
            config.update_config('Web', 'require_login', form.web_require_login_field.data)
            if form.delete_index.data:
                database.delete_all_movies()
            form.process()
        return render_template('settings/settings.html', form=form)
@app.route('/settings/users', methods=['GET', 'POST'])
def settings_user():
    """User management page: list, add and delete web-UI accounts."""
    if not session.get('logged_in') and config.get_setting('Web', 'require_login') == "True":
        return login()
    else:
        form = AddUserForm()
        # populate the select field with every existing username
        choices_list = [(i[0], i[0]) for i in database.get_users()]
        form.user_list_field.choices = choices_list
        if form.validate_on_submit():
            if form.submit.data:
                # Add user to database
                database.add_user(form.username_field.data, form.password_field.data)
            if form.delete.data:
                database.delete_user(form.user_list_field.data)
            form.process()
        return render_template('settings/users.html', form=form)
@app.route('/export/<type>/<name>')
def export(type, name):
    """Download the movie index as CSV or XLSX; other types are rejected."""
    if not session.get('logged_in') and config.get_setting('Web', 'require_login') == "True":
        return login()
    else:
        # NOTE(review): *name* comes straight from the URL — path-traversal
        # risk when joined onto CONFIG_PATH; confirm sanitisation upstream.
        exporters = {'csv': database.export_to_csv, 'xlsx': database.export_to_xlsx}
        writer = exporters.get(type)
        if writer is None:
            return "File type not supported"
        target = config.CONFIG_PATH + name
        writer(target)
        return send_file(target, as_attachment=True)
@app.route('/movie/play/<filename>', methods=["GET"])
def play(filename):
    """Stream an indexed movie file looked up by its stored file name.

    SECURITY NOTE(review): this route applies no login check and *filename*
    comes straight from the URL; send_from_directory guards the final path
    segment, but the folder is parsed from DB row reprs — review before
    exposing beyond localhost.
    """
    location = database.get_location_of_movie(filename).fetchall()
    # crude un-repr of the 1-tuple row, e.g. "('C:\\movies',)" -> "C:\\movies"
    print(str(location[0]).replace("('", '').replace("',)", ''))
    app.config['MOVIE_FOLDER'] = str(location[0]).replace("('", '').replace("',)", '')
    print(app.config['MOVIE_FOLDER'])
    # NOTE(review): Windows-only separator; os.path.join would be portable
    file = app.config['MOVIE_FOLDER'] + "\\" + filename
    print(file)
    return send_from_directory(app.config['MOVIE_FOLDER'], filename)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form; set the session flag on valid credentials."""
    form = LoginForm()
    if request.method == "POST":
        if form.validate_on_submit():
            if database.verify_user(form.username_field.data, form.password_field.data):
                session['logged_in'] = True
                return redirect('/')
            else:
                # failed attempts fall through and re-render the form
                print("login failed")
    return render_template('login.html', form=form)
@app.route('/logout')
def logout():
    """Clear the session's login flag and return to the home page."""
    session['logged_in'] = False
    return redirect('/')
if __name__ == '__main__':
main()
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,367 | Mozzo1000/movielst | refs/heads/master | /movielst/database.py | import sqlite3
import csv
import xlsxwriter
import logging
from passlib.context import CryptContext
from .config import *
pwd_context = CryptContext(schemes=['pbkdf2_sha256'], default='pbkdf2_sha256', pbkdf2_sha256__default_rounds=30000)
LATEST_VERSION = 1
def connect_db():
    """Open a connection to the movies SQLite database from the configured location."""
    return sqlite3.connect(get_setting('Index', 'location') + 'movies.db')
def upgrade():
    """Stamp a fresh database with the schema version, or apply a pending migration.

    Migration SQL lives in the ``db_upgrade`` file between
    ``[UPGRADE_START_<v>]`` / ``[UPGRADE_END_<v>]`` markers.
    """
    con = connect_db()
    cur = con.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS version (db_version INTEGER UNIQUE)')
    con.commit()
    version = cur.execute('SELECT db_version FROM version')
    try:
        current_version = version.fetchone()[0]
    except TypeError:
        # fetchone() returned None: fresh DB — stamp it with the latest version
        cur.execute('INSERT INTO version (db_version) VALUES(?)', (LATEST_VERSION,))
        con.commit()
        current_version = LATEST_VERSION
    if current_version == LATEST_VERSION:
        logging.info('Database already latest version, doing nothing.')
    elif current_version < LATEST_VERSION:
        logging.info('Upgrading database to latest version')
        # BUG FIX: the db_upgrade file handle was never closed
        with open('db_upgrade', 'r') as db_upgrade_file:
            s = db_upgrade_file.read()
        start = '[UPGRADE_START_' + str(LATEST_VERSION) + ']'
        end = '[UPGRADE_END_' + str(LATEST_VERSION) + ']'
        upgrade_sql = s[s.find(start) + len(start):s.rfind(end)]
        cur.execute(upgrade_sql)
        cur.execute('UPDATE version SET db_version=?', (LATEST_VERSION,))
        con.commit()
    cur.close()
    con.close()  # BUG FIX: the connection itself was never closed
def create_user_table():
    """Create the users table (username -> hashed password) if missing."""
    ddl = '''
    CREATE TABLE IF NOT EXISTS users
    (user TEXT PRIMARY KEY, password TEXT)
    '''
    con = connect_db()
    con.cursor().execute(ddl)
    con.commit()
    con.close()
def add_user(username, password):
    """Insert *username* with a PBKDF2-hashed password; no-op if the user exists.

    INSERT OR IGNORE keeps an existing row (user is the primary key).
    """
    sql = '''
    INSERT OR IGNORE INTO users
    (user, password) VALUES(?, ?)
    '''
    con = connect_db()
    cur = con.cursor()
    # hash() replaces passlib's deprecated encrypt() alias — same output
    cur.execute(sql, (username, pwd_context.hash(password)))
    con.commit()
    con.close()  # BUG FIX: only the cursor was closed; the connection leaked
def verify_user(username, password):
    """Check *password* against the stored hash; False for unknown users."""
    con = connect_db()
    cur = con.cursor()
    cur.execute('SELECT password FROM users WHERE user=?', (username,))
    row = cur.fetchone()
    if row is None:
        # no such user (the original reached this via TypeError on None[0])
        return False
    return pwd_context.verify(password, row[0])
def get_users():
    """Return a cursor yielding one (username,) row per account."""
    # the cursor keeps the connection alive for the caller's iteration
    return connect_db().cursor().execute('SELECT user FROM users')
def delete_user(username):
    """Remove *username* from the users table."""
    con = connect_db()
    with con:  # commits on success
        con.execute('DELETE FROM users WHERE user=?', (username,))
    con.close()
def create_movie_table():
    """Create the movies table if missing; file_info_name is the unique key."""
    sql = '''
    CREATE TABLE IF NOT EXISTS movies
    (title TEXT, genre TEXT, imdb FLOAT, runtime TEXT, tomato TEXT, year INT,
    awards TEXT, cast TEXT, director TEXT, poster TEXT, description TEXT, response BOOLEAN,
    file_info_name TEXT UNIQUE, file_info_location TEXT, file_info_ext TEXT)
    '''
    con = connect_db()
    cur = con.cursor()
    cur.execute(sql)
    con.commit()
    con.close()
def add_movie(data, force_index):
    """Insert one movie row built from the metadata dict *data*.

    force_index truthy inserts unconditionally (the caller wiped the table
    first); otherwise INSERT OR IGNORE keeps an existing row with the same
    file name (file_info_name is UNIQUE).
    """
    # BUG FIX: the original used `is True` / `is False`, leaving `sql`
    # unbound (NameError) for any truthy non-bool; the two statements
    # differed only in the conflict clause anyway.
    conflict = '' if force_index else 'OR IGNORE '
    sql = ('INSERT ' + conflict + 'INTO movies '
           '(title, genre, imdb, runtime, tomato, year, awards, cast, director, poster, '
           'description, response, file_info_name, file_info_location, file_info_ext) '
           'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)')
    con = connect_db()
    cur = con.cursor()
    cur.execute(sql, (data['title'], data['genre'], data['imdb'],
                      data['runtime'], data['tomato'], data['year'],
                      data['awards'], data['cast'], data['director'],
                      data['poster'], data['description'], data['response'],
                      data['file_info']['name'], data['file_info']['location'],
                      data['file_info']['extension']))
    con.commit()
    con.close()  # BUG FIX: the connection was never closed
def export_to_csv(output):
    """Dump every movies row to *output* as comma-separated values."""
    cur = connect_db().cursor()
    with open(output, 'w') as handle:
        csv.writer(handle, delimiter=',').writerows(cur.execute('SELECT * FROM movies'))
def export_to_xlsx(output):
    """Dump every movies row into a new XLSX workbook at *output*."""
    workbook = xlsxwriter.Workbook(output)
    worksheet = workbook.add_worksheet()
    con = connect_db()
    cur = con.cursor()
    # one spreadsheet cell per column of each row
    for i, row in enumerate(cur.execute('SELECT * FROM movies')):
        for j, value in enumerate(row):
            worksheet.write(i, j, value)
    workbook.close()
def db_to_json():
    """Return the whole movies table as a list of dicts with a nested file_info dict."""
    columns = ('title', 'genre', 'imdb', 'runtime', 'tomato', 'year',
               'awards', 'cast', 'director', 'poster', 'description', 'response')
    con = connect_db()
    cur = con.cursor()
    items = []
    for row in cur.execute('SELECT * FROM movies'):
        entry = dict(zip(columns, row))
        entry['file_info'] = {'name': row[12], 'location': row[13], 'extension': row[14]}
        items.append(entry)
    return items
def edit(type, file_name, new_info):
    """Update a single column of the movie identified by file_info_name.

    type: one of the whitelisted edit kinds below; anything else is logged
          and ignored (same behaviour as the original elif chain).
    file_name: value of file_info_name selecting the row.
    new_info: new value for the column.
    """
    # Whitelist mapping edit kind -> column name.  Interpolating only values
    # from this map keeps the SQL safe from arbitrary caller input and
    # replaces ten near-identical elif branches.
    columns = {
        'name': 'title',
        'genre': 'genre',
        'imdb_rating': 'imdb',
        'runtime': 'runtime',
        'tomato_rating': 'tomato',
        'year': 'year',
        'awards': 'awards',
        'cast': 'cast',
        'director': 'director',
        'poster': 'poster',
    }
    con = connect_db()
    cur = con.cursor()
    column = columns.get(type)
    if column is None:
        logging.error("Unsupported edit type : " + type)
    else:
        cur.execute('UPDATE movies SET %s=? WHERE file_info_name=?' % column,
                    (new_info, file_name))
    con.commit()
    con.close()
def delete_all_movies():
    """Wipe every row from the movies table."""
    connection = connect_db()
    connection.cursor().execute('DELETE FROM movies')
    connection.commit()
    connection.close()
def get_all_images():
    """Return a cursor over (file_info_name, poster) rows.

    The connection is intentionally left open so the returned cursor stays
    usable by the caller.
    """
    connection = connect_db()
    return connection.cursor().execute('SELECT file_info_name, poster from movies')
def get_location_of_movie(name):
    """Return a cursor over the file_info_location of the movie named *name*.

    The connection is intentionally left open so the returned cursor stays
    usable by the caller.
    """
    connection = connect_db()
    return connection.cursor().execute('SELECT file_info_location FROM movies WHERE file_info_name=?', (name,))
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,368 | Mozzo1000/movielst | refs/heads/master | /movielst/omdb.py | from .config import *
from .API_util import make_request
def get_omdb_movie(title, year):
    """ Fetch data from OMDB API. """
    base_url = 'http://www.omdbapi.com/?apikey=' + get_setting('API', 'OMDb_API_key') + '&'
    query = {
        't': title.encode('ascii', 'ignore'),
        'plot': 'full',
        'type': 'movie',
        'tomatoes': 'true',
    }
    if year:
        query['y'] = year
    return make_request(base_url, query, title)
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,369 | Mozzo1000/movielst | refs/heads/master | /movielst/__init__.py | from .movielst import main
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,370 | Mozzo1000/movielst | refs/heads/master | /web/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, PasswordField, SelectField, IntegerField
class SettingsForm(FlaskForm):
    """Settings page form; each field mirrors an entry in config.ini."""
    log_level_field = SelectField('Log level', choices=[('CRITICAL', 'CRITICAL'), ('ERROR', 'ERROR'), ('WARNING', 'WARNING'), ('INFO', 'INFO'), ('DEBUG', 'DEBUG')])
    log_location_field = StringField('Log location')
    location_field = StringField('Index location')
    min_index_field = IntegerField('Min size to index (in MB)')
    use_external_api_field = SelectField('External API', choices=[('omdb', 'OMDb'), ('tmdb', 'TMdb')])
    omdb_api_key_field = StringField('OMDb API key')
    tmdb_api_key_field = StringField('TMDb API key')
    web_host_field = StringField('Web host address')
    web_port_field = StringField('Web port')
    web_require_login_field = SelectField('Require login', choices=[('False', 'No'), ('True', 'Yes')])
    submit = SubmitField('Save')
    delete_index = SubmitField('Delete index')
class LoginForm(FlaskForm):
    """Simple username/password login form for the web UI."""
    username_field = StringField('Username')
    password_field = PasswordField('Password')
    login = SubmitField("Login")
class AddUserForm(FlaskForm):
    """User administration form: add a new user or delete an existing one."""
    username_field = StringField('Username')
    password_field = PasswordField('Password')
    user_list_field = SelectField('Users', choices=[('admin', 'admin')])
    submit = SubmitField('Add')
    delete = SubmitField('Delete')
class IndexForm(FlaskForm):
    """Form to kick off indexing of a chosen movie folder."""
    index_location = StringField("Folder to index")
    run_index = SubmitField("Run Indexing")
class SearchForm(FlaskForm):
    """Search box with an autocomplete-enabled text field."""
    autocomp = StringField('Search movie', id='autocomplete')
    search = SubmitField('Search')
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,371 | Mozzo1000/movielst | refs/heads/master | /movielst/tmdb.py | from .config import *
from .API_util import make_request
import json
def get_tmdb_movie(title, year):
    """ Fetch data from TMdb API. """
    base_url = 'https://api.themoviedb.org/3/search/movie?api_key=' + get_setting('API', 'TMdb_API_key') + '&'
    query = {
        'query': title.encode('ascii', 'ignore'),
        'language': 'en-US',
    }
    if year:
        query['year'] = year
    return make_request(base_url, query, title)
def get_tmdb_details(id):
    """Fetch the full TMDb detail record for one movie id."""
    detail_url = 'https://api.themoviedb.org/3/movie/' + str(id) + '?api_key=' + get_setting('API', 'TMdb_API_key') + '&'
    return make_request(detail_url, {'language': 'en-US'}, 0)
def get_tmdb_genre(ids):
    """Translate the first TMDb genre id in *ids* to its English name.

    Downloads and caches the TMDb genre list on first use.  Returns 'N/A'
    when *ids* is empty or the id is unknown (the original fell off the end
    of the loop and returned None implicitly in the unknown-id case).
    """
    cache_path = get_setting('Index', 'location') + 'tmdb_genre_list.json'
    if not os.path.exists(cache_path):
        tmdb_url = 'https://api.themoviedb.org/3/genre/movie/list?api_key=' + get_setting('API', 'TMdb_API_key')
        genre_json = make_request(tmdb_url, {}, 0)
        with open(cache_path, "w") as out:
            json.dump(genre_json, out, indent=2)
    if not ids:
        # previously handled per-iteration via `except IndexError`
        return 'N/A'
    with open(cache_path) as genre_list:
        data = json.load(genre_list)
    for entry in data['genres']:
        if ids[0] == entry['id']:
            return entry['name']
    return 'N/A'  # id not present in the cached genre list
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,372 | Mozzo1000/movielst | refs/heads/master | /movielst/API.py | from .omdb import get_omdb_movie
from .tmdb import get_tmdb_movie, get_tmdb_genre, get_tmdb_details
def get_api(title, year, external_api="omdb"):
    """Return a normalised metadata dict for *title* from OMDb or TMDb.

    item['response'] ends up as the string 'True' on success and 'False'
    otherwise (kept as strings for compatibility with existing callers).
    Fields TMDb cannot provide are filled with 'unsupported'.
    """
    item = {
        "title": None,
        "genre": None,
        "imdb": None,
        "runtime": None,
        "tomato": None,
        "year": None,
        "awards": None,
        "cast": None,
        "director": None,
        "poster": None,
        "description": None,
        "response": False
    }
    if external_api == "omdb":
        omdb = get_omdb_movie(title, year)
        if omdb is not None and omdb['Response'] == 'True':
            item["title"] = omdb["Title"]
            item["genre"] = omdb["Genre"]
            item["imdb"] = omdb["imdbRating"]
            item["runtime"] = omdb["Runtime"]
            item["tomato"] = get_rotten_score(omdb)
            item["year"] = omdb["Year"]
            item["awards"] = omdb["Awards"]
            item["cast"] = omdb["Actors"]
            item["director"] = omdb["Director"]
            item["poster"] = omdb["Poster"]
            item["description"] = omdb["Plot"]
            item['response'] = omdb["Response"]
        else:
            item['response'] = 'False'
    elif external_api == "tmdb":
        tmdb = get_tmdb_movie(title, year)
        # Guard against a failed request (None) or an empty result list before
        # dereferencing; the original indexed tmdb["results"][0] first and
        # crashed with a TypeError when the request failed.
        if tmdb is not None and tmdb["results"]:
            tmdb_details = get_tmdb_details(tmdb["results"][0]['id'])
            poster_path = tmdb["results"][0]['poster_path']
            item["title"] = tmdb["results"][0]['title']
            item["year"] = tmdb["results"][0]['release_date'].split('-', 1)[0]
            item["genre"] = get_tmdb_genre(tmdb["results"][0]['genre_ids'])
            item["imdb"] = "unsupported"
            # the details request can itself fail and return None
            item["runtime"] = tmdb_details['runtime'] if tmdb_details else None
            item["tomato"] = "unsupported"
            item["awards"] = "unsupported"
            item["cast"] = "unsupported"
            item["director"] = "unsupported"
            item["poster"] = "http://image.tmdb.org/t/p/w185" + str(poster_path)
            item['response'] = 'True'
        else:
            item['response'] = 'False'
    return item
def get_rotten_score(item):
    """Return the Rotten Tomatoes rating value from an OMDb payload, or 'N/A'.

    The original assumed the Rotten Tomatoes entry was always at index 1 of
    'Ratings' and crashed with KeyError when 'Ratings' was missing; this
    searches the whole list and also tolerates a missing/odd payload.
    """
    try:
        for rating in item['Ratings']:
            if rating['Source'] == "Rotten Tomatoes":
                return rating['Value']
    except (KeyError, TypeError):
        pass
    return "N/A"
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,373 | Mozzo1000/movielst | refs/heads/master | /movielst/API_util.py | import requests
from urllib.parse import urlencode
import json
import logging
logger = logging.getLogger(__name__)
def make_request(url, params, title):
    """GET *url* with the urlencoded *params* appended; return decoded JSON or None.

    *title* is only used in the not-found message.  Connection failures,
    non-JSON payloads, bad API keys and other HTTP errors all print, log and
    return None.
    """
    url = url + urlencode(params)
    try:
        r = requests.get(url)
    except requests.exceptions.ConnectionError:
        # The original assigned r.status_code here while r was still unbound
        # and crashed with NameError; treat a refused connection as a failure.
        print("There was some error fetching info from " + url)
        logger.error('[API] Error fetching info from: ' + url)
        return None
    if r.status_code == 200:
        if "application/json" in r.headers['content-type']:
            return json.loads(r.text)
        else:
            print("Couldn't find the movie " + title)
            logger.info('[API] Movie could not be found: ' + title)
            return None
    elif r.status_code == 401:
        print("Invalid API key, please check config file.")
        logger.error('[API] Invalid key.')
        return None
    else:
        print("There was some error fetching info from " + url)
        logger.error('[API] Error fetching info from: ' + url)
        return None
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,374 | Mozzo1000/movielst | refs/heads/master | /setup.py | from setuptools import setup
# Read the long description up front with an explicit encoding (the author
# field below is non-ASCII, so the README may be too) and close the handle
# instead of leaking an open file inside the setup() call.
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setup(name='movielst',
      version='2.5.0',
      description='Everything about your movies within the command line.',
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='https://github.com/Mozzo1000/movielst',
      author='Andreas Backström',
      author_email='andreas@simplymozzo.se',
      license='MIT',
      include_package_data=True,
      packages=['movielst', 'web'],
      entry_points={
          'console_scripts': ['movielst=movielst:main', 'movielst_web=web.main:main']
      },
      install_requires=[
          'guessit',
          'terminaltables',
          'tqdm',
          'colorama',
          'xlsxwriter',
          'flask',
          'Flask-WTF',
          'passlib'
      ],
      keywords=['movies', 'CLI', 'movies-within-CLI', 'python'],
      classifiers=[
          'Environment :: Console',
          'License :: OSI Approved :: MIT License',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Unix',
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3.4',
          'Topic :: Utilities',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'Topic :: Software Development :: User Interfaces',
          'Topic :: Software Development :: Version Control',
      ],)
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,375 | Mozzo1000/movielst | refs/heads/master | /movielst/config.py | import configparser
import os
# Directory holding all movielst state (config file, index db, logs).
CONFIG_PATH = os.path.expanduser('~/.movielst/')
# Config file name, joined onto CONFIG_PATH by the helpers below.
# expanduser is a no-op here since the name contains no '~'.
CONFIG_FILE = os.path.expanduser('config.ini')
def create_config():
    """Write a default config.ini under CONFIG_PATH unless one already exists."""
    if not os.path.exists(CONFIG_PATH):
        os.makedirs(CONFIG_PATH)
    if os.path.exists(CONFIG_PATH + CONFIG_FILE):
        return
    # Default settings, grouped by section.
    defaults = {
        'General': {'log_level': 'INFO', 'log_location': CONFIG_PATH},
        'Index': {'location': CONFIG_PATH, 'min_size_to_index': '25'},
        'API': {'use_external_api': 'omdb', 'OMDb_API_key': '37835d63', 'TMdb_API_key': ''},
        'Web': {'host': 'localhost', 'port': '5000', 'require_login': "False"},
    }
    config = configparser.ConfigParser()
    for section, settings in defaults.items():
        config.add_section(section)
        for option, value in settings.items():
            config.set(section, option, value)
    with open(CONFIG_PATH + CONFIG_FILE, 'w') as config_file:
        config.write(config_file)
def get_config():
    """Load and return the configuration currently on disk."""
    parser = configparser.ConfigParser()
    parser.read(CONFIG_PATH + CONFIG_FILE)
    return parser
def get_setting(section, setting, fallback=None):
    """Return one configuration value, or *fallback* when it is missing."""
    return get_config().get(section, setting, fallback=fallback)
def update_config(section, setting, value):
    """Set one value and persist the whole configuration back to disk."""
    parser = get_config()
    parser.set(section, setting, value)
    with open(CONFIG_PATH + CONFIG_FILE, 'w') as config_file:
        parser.write(config_file)
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,376 | Mozzo1000/movielst | refs/heads/master | /tools/create_demo_movies.py | import argparse
import os
import csv
import base64
# Base64 payload of a tiny but structurally valid .mp4 file; every generated
# demo movie file gets this same content.
demo_movie_base64 = 'AAAAHGZ0eXBpc29tAAACAGlzb21pc28ybXA0MQAAAAhmcmVlAAAAGm1kYXQAAAGzABAHAAABthBgUYI9t+8AAAMNbW9vdg' \
                    'AAAGxtdmhkAAAAAMXMvvrFzL76AAAD6AAAACoAAQAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAA' \
                    'AAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAABhpb2RzAAAAABCAgIAHAE/////+/wAAAiF0cmFrAA' \
                    'AAXHRraGQAAAAPxcy++sXMvvoAAAABAAAAAAAAACoAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAA' \
                    'AAAAAAAAAABAAAAAAAgAAAAIAAAAAAG9bWRpYQAAACBtZGhkAAAAAMXMvvrFzL76AAAAGAAAAAEVxwAAAAAALWhkbHIAAA' \
                    'AAAAAAAHZpZGUAAAAAAAAAAAAAAABWaWRlb0hhbmRsZXIAAAABaG1pbmYAAAAUdm1oZAAAAAEAAAAAAAAAAAAAACRkaW5m' \
                    'AAAAHGRyZWYAAAAAAAAAAQAAAAx1cmwgAAAAAQAAAShzdGJsAAAAxHN0c2QAAAAAAAAAAQAAALRtcDR2AAAAAAAAAAEAAA' \
                    'AAAAAAAAAAAAAAAAAAAAgACABIAAAASAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGP//AAAA' \
                    'XmVzZHMAAAAAA4CAgE0AAQAEgICAPyARAAAAAAMNQAAAAAAFgICALQAAAbABAAABtYkTAAABAAAAASAAxI2IAMUARAEUQw' \
                    'AAAbJMYXZjNTMuMzUuMAaAgIABAgAAABhzdHRzAAAAAAAAAAEAAAABAAAAAQAAABxzdHNjAAAAAAAAAAEAAAABAAAAAQAA' \
                    'AAEAAAAUc3RzegAAAAAAAAASAAAAAQAAABRzdGNvAAAAAAAAAAEAAAAsAAAAYHVkdGEAAABYbWV0YQAAAAAAAAAhaGRscg' \
                    'AAAAAAAAAAbWRpcmFwcGwAAAAAAAAAAAAAAAAraWxzdAAAACOpdG9vAAAAG2RhdGEAAAABAAAAAExhdmY1My4yMS4x'
def main():
    """Generate one dummy .mp4 per movie name listed in a csv file.

    Reads names from --input (one movie per row; cells are joined with a
    space) and writes identical placeholder video files into --output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', nargs='?', default='demo/', help='Output demo files to directory')
    parser.add_argument('-i', '--input', nargs='?', default='random-movie-list.csv', help='Input csv file with movie names')
    parser.add_argument('-d', '--delimiter', nargs='?', default=',', help='Csv file delimiter')
    args = parser.parse_args()
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    # Bug fix: the original `not a or b` bound `not` only to the first
    # endswith because of operator precedence; check both separators.
    if not args.output.endswith(('/', '\\')):
        args.output = args.output + '/'
    try:
        with open(args.input, 'r') as movielist:
            read_csv = csv.reader(movielist, delimiter=args.delimiter)
            payload = base64.b64decode(demo_movie_base64)  # decode once, not per row
            for row in read_csv:
                movie = " ".join(row)
                with open(args.output + str(movie) + '.mp4', 'wb') as fh:
                    fh.write(payload)
    except FileNotFoundError:
        print('Input file ´' + args.input + '´ not found.')
if __name__ == '__main__':
main()
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,377 | Mozzo1000/movielst | refs/heads/master | /web/dependency.py | from movielst import config
import requests
import os
# Local cache directory for the downloaded front-end assets.
dep_folder = config.CONFIG_PATH + 'dep/'
# CDN urls of the assets the web UI needs available locally.
dependencies = ['https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',
                'https://code.jquery.com/jquery-3.3.1.min.js',
                'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js',
                'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js',
                'https://cdnjs.cloudflare.com/ajax/libs/blazy/1.8.2/blazy.min.js']
def check_for_dep():
    """Download any front-end dependency that is not yet cached in dep_folder."""
    if not os.path.exists(dep_folder):
        os.makedirs(dep_folder)
    for link in dependencies:
        target = dep_folder + link.rsplit('/', 1)[-1]
        if not os.path.exists(target):
            download_dep(link)
def download_dep(url):
    """Stream *url* into dep_folder and return the bare file name."""
    local_filename = url.split('/')[-1]
    # Use the response as a context manager: with stream=True the original
    # never closed it, leaking the underlying connection.
    with requests.get(url, stream=True) as r:
        with open(dep_folder + local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
    return local_filename
| {"/movielst/movielst.py": ["/movielst/API.py", "/movielst/database.py"], "/web/main.py": ["/movielst/__init__.py", "/web/forms.py", "/web/dependency.py"], "/movielst/database.py": ["/movielst/config.py"], "/movielst/omdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/__init__.py": ["/movielst/movielst.py"], "/movielst/tmdb.py": ["/movielst/config.py", "/movielst/API_util.py"], "/movielst/API.py": ["/movielst/omdb.py", "/movielst/tmdb.py"], "/web/dependency.py": ["/movielst/__init__.py"]} |
68,397 | HerikkWang/weibo | refs/heads/master | /Login.py | import weiboLogin
import urllib.request
import re
def login():
    """Log in to weibo with the account below; prints a success message."""
    # NOTE(review): credentials are hard-coded in source — they should be
    # moved to a config file or environment variables.
    username = '13777015316'
    pwd = '595432157'
    weibologin_herik = weiboLogin.WeiboLogin(username, pwd)
    if weibologin_herik.Login():
        print('登录成功!')
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,398 | HerikkWang/weibo | refs/heads/master | /get_raw_html.py | # -*- coding: utf-8 -*-
import weiboLogin
import Login
import urllib.request
import re
import sys
#调用模拟登录的程序,从网页中抓取指定URL的数据,获取原始的HTML信息存入raw_html.txt中
def get_rawHTML(url):
    """Fetch *url*, mirror the decoded HTML into example_page.txt and return it."""
    content = urllib.request.urlopen(url).read().decode('utf-8')
    # Write with an explicit encoding and a context manager; the platform
    # default codec (e.g. cp1252 on Windows) cannot represent arbitrary page
    # content, and the original left the handle to the garbage collector.
    with open("example_page.txt", "w+", encoding='utf-8') as fp_raw:
        fp_raw.write(content)
    return content  # the raw HTML text
if __name__ == '__main__':
    Login.login()  # authenticate first; the scrape below needs a session
    #url = 'http://weibo.com/yaochen?is_search=0&visible=0&is_tag=0&profile_ftype=1&page=1#feedtop' # Yao Chen's profile
    url = 'https://weibo.cn/1961640291/info'  # set the page to scrape here
    get_rawHTML(url)
68,399 | HerikkWang/weibo | refs/heads/master | /scrapy_3.py | import time
from selenium import webdriver
import re
import pymysql
# Connect to MySQL
csv_read_path = 'weibo_new_page_url.csv'
conn = pymysql.connect(host='localhost', user='root', password='herik', database='weibodata', charset='utf8mb4')
cur = conn.cursor()
cur.execute('use weibodata')
# Collect each replier's weibo verification, follower count and tag info
chromePath = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'
wd = webdriver.Chrome(executable_path= chromePath) # build the browser
wd.set_page_load_timeout(30)
loginUrl = 'http://passport.weibo.cn/signin/login?entry=mweibo&r=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt='
wd.get(loginUrl) # open the login page
# wd.find_element_by_xpath('//*[@id="loginname"]').send_keys('13777015316') # enter the user name
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[2]/div/input').send_keys('595432157') # enter the password
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click() # click log-in
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[3]/div/input').send_keys(input("输入验证码: "))
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click()# click log-in again
time.sleep(20)  # wait 20 seconds for typing the user name/password and logging in
cur.execute('select replier_id, page_url from weibo_scrapy_1')
tup = cur.fetchall()
t = 0 # counter
while t < len(tup):
    page_url = tup[t][1][1:-1]
    replier_id = tup[t][0][1:-1]  # strings read back from the DB carry surrounding '' quotes
    fans_num_url = 'http://weibo.cn/%s' % replier_id  # url used to fetch the user's follower/following counts
    tag_url = 'http://weibo.cn/account/privacy/tags/?uid=%s' % replier_id  # url used to fetch the user's tag info
    info_url = 'http://weibo.cn/%s/info' % replier_id  # url used to fetch the user's weibo verification info
    replier_fans_num = '0'
    tags_info = ''
    replier_authentication = ''
    try: # fetch the follower count
        wd.get(fans_num_url)
        text_1 = wd.find_element_by_class_name('tip2').text
        replier_fans_num = re.findall(".*粉丝\[(.*)\]\s分组.*", text_1)[0]
    except Exception:
        pass
    try: # fetch the tag info
        wd.get(tag_url)
        try: # with only one tag present, try to collect it
            tag = wd.find_element_by_xpath('/html/body/div[6]/a').text
            tags_info += tag
        except Exception:
            pass
        # with several tags present, try to collect them; capped at 15
        n = 2
        while n <= 15:
            try:
                tags = wd.find_element_by_xpath('/html/body/div[6]/a[%d]'%n).text
                tags_info = tags_info + ' ' + tags
                n = n+1
            except Exception:
                n = n+1
    except Exception:
        pass
    try: # collect the user's weibo verification info
        wd.get(info_url)
        authentication_info = wd.find_element_by_xpath('/html/body/div[6]').text
        if authentication_info.split('\n')[1].startswith('认证'):
            replier_authentication = authentication_info.split('\n')[1][3:]
    except Exception:
        pass
    time.sleep(4)  # throttle to avoid being blocked for frequent access
    t = t+1
    print(str(t)+': '+str([replier_fans_num, replier_authentication, tags_info]))
    # NOTE(review): the escaped-quote "%s" placeholders double-quote the
    # values (pymysql already quotes parameters) — presumably why strings come
    # back wrapped in '' above; confirm before changing.
    cur.execute("insert into weibo_scrapy_3 (page_url, replier_fans_num, replier_authentication, tags_info) values"
                "(\"%s\", %s, \"%s\", \"%s\")", (page_url, replier_fans_num, replier_authentication, tags_info))
    cur.connection.commit()
cur.close()
conn.close()
wd.close()
68,400 | HerikkWang/weibo | refs/heads/master | /WeiboSearch.py | import re
import json
def sServerData(serverData):
    """Strip the JSONP wrapper from a prelogin response and return the parsed JSON."""
    payload = re.search(r'\((.*)\)', serverData).group(1)
    return json.loads(payload)
def sRedirectData(text):
    """Pull the URL out of a location.replace('...') redirect stub, print and return it."""
    match = re.search(r'location\.replace\([\'"](.*?)[\'"]\)', text)
    loginUrl = match.group(1)
    print('loginUrl: ' + loginUrl)
    return loginUrl
68,401 | HerikkWang/weibo | refs/heads/master | /weiboLogin.py | import urllib.request
import urllib.parse
import http.cookiejar
import WeiboSearch
import time
import json
import re
import cv2
import numpy as np
import rsa
import binascii
import base64
class WeiboLogin:
    """Sina weibo SSO login implementing the ssologin.js (v1.4.19) protocol."""
    def __init__(self, user, pwd, enableProxy=False):
        """Store the credentials and protocol endpoints.

        enableProxy is kept and later passed to EnableCookie, which currently
        ignores it.
        """
        print('初始化新浪微博登录...')
        self.userName = user
        self.passWord = pwd
        self.enableProxy = enableProxy
        self.postHeader = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"}
        self.loginUrl = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.19)'
        self.login_url_1 = 'https://passport.weibo.com/wbsso/login'
    def EnableCookie(self, enableProxy):
        """Install a process-wide urllib opener that keeps cookies between requests."""
        # NOTE(review): the enableProxy parameter is unused here — confirm
        # whether proxy support was meant to be wired in.
        cookiejar = http.cookiejar.CookieJar()
        cookie = urllib.request.HTTPCookieProcessor(cookiejar)
        opener = urllib.request.build_opener(cookie)
        urllib.request.install_opener(opener)
    def GetServerTime(self):
        """Fetch servertime/nonce/pubkey/rsakv from the prelogin endpoint.

        Returns the parsed dict, or None when the response cannot be parsed.
        """
        print('getting server time and nonce...')
        encodedUserName = self.GetUserName(self.userName)
        serverUrl = "http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=%s&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.19)&_=%s"%(encodedUserName,str(int(time.time() * 1000)))
        serverData = urllib.request.urlopen(serverUrl).read().decode('utf-8')
        print(serverData)
        # urlopen returns a bytes object; decode('utf-8') converts it to str
        try:
            serverdata = WeiboSearch.sServerData(serverData)
            print('Get servertime sucessfully!')
            return serverdata
        except:
            print('解析serverdata出错!')
            return None
    def getData(self,url):
        """GET *url* and return the raw response bytes."""
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        content = response.read()
        return content
    def GetUserName(self,userName):
        """Encode the user name as the SSO endpoint expects: url-quote, then base64."""
        userNameTemp = urllib.request.quote(userName)
        userNameEncoded = base64.b64encode(userNameTemp.encode('utf-8'))
        return userNameEncoded.decode('utf-8')
    def get_pwd(self,password, servertime, nonce, pubkey):
        """RSA-encrypt servertime, nonce and the password with the server's
        public key and return the ciphertext hex-encoded."""
        rsaPublickey = int(pubkey, 16)
        key = rsa.PublicKey(rsaPublickey, int('10001', 16))
        message = (str(servertime) + '\t' + str(nonce) + '\n' + str(password)).encode('utf-8')
        passwd = rsa.encrypt(message, key)
        passwd = binascii.b2a_hex(passwd)
        return passwd.decode()
    def get_raw_html(self,url):
        """Fetch *url*, mirror the decoded HTML into main_html.txt and return it."""
        content = urllib.request.urlopen(url).read().decode('utf-8')
        raw_file = open('main_html.txt', 'w+', encoding='utf-8')
        raw_file.write(content)
        raw_file.close()
        return content
    def Login(self):
        """Run the full two-step SSO login.

        Returns True on success, False when the second step reports failure;
        other failure paths fall through and return None (falsy to callers).
        """
        self.EnableCookie(self.enableProxy)
        serverdata = self.GetServerTime()
        serverTime = str(serverdata['servertime'])
        nonce = serverdata['nonce']
        pubkey = serverdata['pubkey']
        rsakv = serverdata['rsakv']
        encodedUserName = self.GetUserName(self.userName)
        encodedPassWord = self.get_pwd(self.passWord, serverTime, nonce, pubkey)
        # Form fields mirror what weibo.com's own login form posts.
        postData = {
            "entry": "weibo",
            "gateway": "1",
            "from": "",
            "savestate": "7",
            "userticket": "1",
            "vsnf": "1",
            "service": "miniblog",
            "encoding": "UTF-8",
            "pwencode": "rsa2",
            "sr": "1280*800",
            "prelt": "529",
            "url": "http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack",
            "rsakv": rsakv,
            "servertime": serverTime,
            "nonce": nonce,
            "su": encodedUserName,
            "sp": encodedPassWord,
            "returntype": "TEXT",
        }
        try:
            # showpin == 1 means the server demands a captcha: display it via
            # OpenCV and ask the operator to type it in.
            if serverdata['showpin'] == 1:
                url_pincheck = 'http://login.sina.com.cn/cgi/pin.php?r=%d&s=0&p=%s'%(int(time.time()),serverdata['pcid'])
                resp = urllib.request.urlopen(url_pincheck)
                image = np.asarray(bytearray(resp.read()), dtype="uint8")
                image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                cv2.imshow("Image", image)
                cv2.waitKey(0)
                code = input('请输入验证码:')
                postData['pcid'] = serverdata['pcid']
                postData['door'] = code
        except KeyError:
            print('无需输入验证码')
        postData = urllib.parse.urlencode(postData)
        postData = postData.encode('utf-8')
        print('Get postdata successfully!')
        req = urllib.request.Request(self.loginUrl, postData, self.postHeader)
        result = urllib.request.urlopen(req)
        text = result.read()
        print(text)
        json_data_1 = json.loads(text.decode('utf-8'))
        if json_data_1['retcode'] == '0':
            print('预登录成功!')
            # Step 2: redeem the ticket against the wbsso endpoint.
            params = {
                'callback': 'sinaSSOController.callbackLoginStatus',
                'client': 'ssologin.js(v1.4.19)',
                'ticket': json_data_1['ticket'],
                'ssosavestate': int(time.time()),
                '_': int(time.time()*1000),
            }
            params = urllib.parse.urlencode(params).encode('utf-8')
            response = urllib.request.Request(self.login_url_1, params, self.postHeader)
            result_2 = urllib.request.urlopen(response)
            text_2 = result_2.read().decode('utf-8')
            json_data_2 = json.loads(re.search(r'\((?P<result>.*)\)',text_2).group('result'))
            if json_data_2['result'] is True:
                user_uniqueid = json_data_2['userinfo']['uniqueid']
                user_nick = json_data_2['userinfo']['displayname']
                if user_nick and user_uniqueid:
                    print('user id: '+user_uniqueid, 'user name: '+user_nick)
                    return True
            else:
                return False
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,402 | HerikkWang/weibo | refs/heads/master | /scrapy_1.py | from bs4 import BeautifulSoup
import lxml
import html5lib
import Login
from urllib.request import urlopen
import csv
import pymysql
# scrape the data, whose attributes include question_content, question_value, questioner_name,
# questioner_id, replier_name, replier_id, question_time
csv_read_path = 'weibo_new_page_url.csv'
# NOTE(review): DB credentials are hard-coded — move to a config file.
conn = pymysql.connect(host='localhost', user='root', password='herik', database='weibodata', charset='utf8mb4')
cur = conn.cursor()
cur.execute('use weibodata')
def get_data_1(url):
    """Scrape one weibo Q&A page.

    Returns [question_content, question_value, questioner_name,
    questioner_id, replier_name, replier_id, question_time]; every field is
    '' when the page cannot be fetched or parsed (best-effort, as before).
    """
    question_content = ''
    question_value = ''
    questioner_name = ''
    questioner_id = ''
    replier_name = ''
    replier_id = ''
    question_time = ''
    try:
        html = urlopen(url)
        bsObj = BeautifulSoup(html.read(), 'lxml')
        links = bsObj.find_all('a', {'class': 'S_txt1'})  # hoisted: was queried four times
        question_content = bsObj.find('div', {'class': 'ask_con'}).get_text().strip()
        question_value = bsObj.find('em', {'class': 'S_spetxt'}).get_text().strip()[1:]
        questioner_name = links[1].get_text().strip()
        questioner_id = links[1].attrs['href'][3:]
        replier_name = links[3].get_text().strip()
        replier_id = links[3].attrs['href'][3:]
        question_time = bsObj.find('div', {'class': 'S_txt2'}).get_text()[:-2]
    except Exception:
        # Narrowed from a bare except (no longer swallows KeyboardInterrupt /
        # SystemExit).  Reset everything so a partial parse yields all-empty
        # fields, matching the original behaviour.
        question_content = question_value = questioner_name = questioner_id = ''
        replier_name = replier_id = question_time = ''
    data = [question_content, question_value, questioner_name, questioner_id, replier_name, replier_id, question_time]
    print(question_content + '\n' + question_value + '\n' + questioner_name + '\n' + questioner_id + '\n'
          + replier_name + '\n' + replier_id + '\n' + question_time)
    return data
Login.login()  # authenticate first; the scrapes below need a session
# Walk every collected page url, scrape it and store the result row.
with open(csv_read_path, 'r', encoding='utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        page_url = row['page_url']
        data = get_data_1(page_url)
        # NOTE(review): the escaped-quote "%s" placeholders double-quote the
        # values (pymysql already quotes parameters), so stored strings end up
        # wrapped in '' — confirm before changing.
        cur.execute('''insert into weibo_scrapy_1 (page_url, question_content, question_value, questioner_name,
        questioner_id, replier_name, replier_id, question_time) values (\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\")
        ''', (page_url, data[0], data[1], data[2], data[3], data[4], data[5], data[6]))
        cur.connection.commit()
cur.close()
conn.close()
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,403 | HerikkWang/weibo | refs/heads/master | /get_raw_pageurls.py | import time
from selenium import webdriver
import csv
import random
# Harvest "围观了的回答" search-result page URLs from Weibo search and append
# the previously unseen ones to weibo_new_page_url.csv.
# NOTE(review): hard-coded account credentials below — move them out of source.
chromePath = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'
wd = webdriver.Chrome(executable_path=chromePath)  # launch the browser
wd.set_page_load_timeout(30)
loginUrl = 'http://www.weibo.com/login.php'
wd.get(loginUrl)  # open the login page
time.sleep(5)
wd.find_element_by_xpath('//*[@id="loginname"]').send_keys('13777015316')  # username
wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[2]/div/input').send_keys('595432157')  # password
wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click()  # click "log in"
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[3]/div/input').send_keys(input("captcha: "))
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click()  # click "log in" again
time.sleep(20)  # allow 20s for entering username/password and logging in manually
url_part_1 = 'https://s.weibo.com/weibo/%25E5%259B%25B4%25E8%25A7%2582%25E4%25BA%2586%25E7%259A%2584%25E5%259B%259E%25E7%25AD%2594&nodup=1'
# search results for the phrase '围观了的回答' ("watched the answer")
# url_part_1 = 'http://s.weibo.com/weibo/%25E6%2588%2591%25E5%2585%258D%25E8%25B4%25B9%25E5%259B%25B4%25E8%25A7%2582%25E4%25BA%2586&b=1&nodup=1'
# search results for the phrase '我免费围观了' ("I watched for free")
wd.get(url_part_1)
set_2 = set()
try:
    for element in wd.find_elements_by_class_name('W_btn_c6'):
        set_2.add(element.get_attribute('href'))
        print(element.get_attribute('href'))
except Exception:  # narrowed from bare except: the first page is best-effort
    pass
n = 1
t = 1
while t <= 48:  # crawl result pages 2..49
    t = t + 1
    url_part_2 = '&page=%d' % t
    try:
        wd.get(url_part_1 + url_part_2)
        time.sleep(9)  # throttle requests to avoid anti-crawler blocking
        for element in wd.find_elements_by_class_name('W_btn_c6'):
            url = element.get_attribute('href')
            set_2.add(url)
            print(str(n) + ': ' + url)
            n = n + 1
    except Exception:  # narrowed from bare except: skip pages that fail to load
        pass
# Drop URLs that were already collected in earlier runs.
s1 = set()
with open('weibo_1_utf-8.csv', 'r', encoding='utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        s1.add(row['page_url'])
s2 = set()
with open('weibo_new_page_url.csv', 'r', encoding='utf-8') as f1:
    reader = csv.DictReader(f1)
    for row in reader:
        s2.add(row['page_url'])
s3 = set_2 - s1 - s2
group = [[part] for part in s3]  # one-column rows for csv.writer
with open('weibo_new_page_url.csv', 'a+', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(group)
wd.close()
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,404 | HerikkWang/weibo | refs/heads/master | /scrapy_2.py | import time
from selenium import webdriver
import csv
import re
from selenium.common.exceptions import TimeoutException
import pymysql
csv_read_path = 'weibo_new_page_url.csv'
# MySQL connection for persisting per-question onlooker/reward counts.
# NOTE(review): hard-coded DB credentials — move to config/env.
conn = pymysql.connect(host='localhost', user='root', password='herik', database='weibodata', charset='utf8mb4')
cur = conn.cursor()
cur.execute('use weibodata')  # redundant (database already selected), kept harmless
# Scrape the onlooker count and the reward count of every question page.
chromePath = r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe'
wd = webdriver.Chrome(executable_path=chromePath)  # launch the browser
wd.set_page_load_timeout(30)
loginUrl = 'http://www.weibo.com/login.php'
wd.get(loginUrl)  # open the login page
time.sleep(5)
wd.find_element_by_xpath('//*[@id="loginname"]').send_keys('13777015316')  # username
wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[2]/div/input').send_keys('595432157')  # password
wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click()  # click "log in"
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[3]/div/input').send_keys(input("captcha: "))
# wd.find_element_by_xpath('//*[@id="pl_login_form"]/div/div[3]/div[6]/a').click()  # click "log in" again
time.sleep(20)  # allow 20s for entering a captcha manually
n = 0  # processed-row counter, for progress output only
with open(csv_read_path, 'r', encoding='utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        page_url = row['page_url']
        onlooker_num = '0'
        reward_num = '0'
        n = n + 1
        try:
            wd.get(page_url)
            # The counts appear in one of two alternative page layouts; try both.
            try:
                onlooker_description = wd.find_element_by_xpath(
                    '//*[@id="plc_main"]/div/div/div/div/div[3]/div[3]/div/div/p').text
                onlooker_num = re.findall(".*...等(.*)人.*", onlooker_description.split(',')[0])[0]
                if ',' in onlooker_description:
                    reward_num = onlooker_description.split(',')[1][4:-1]
                else:
                    reward_num = '0'
            except Exception:
                pass
            try:
                onlooker_description = wd.find_element_by_xpath(
                    '//*[@id="plc_main"]/div/div/div/div/div[2]/div[3]/div/div/p').text
                onlooker_num = re.findall(".*...等(.*)人.*", onlooker_description.split(',')[0])[0]
                if ',' in onlooker_description:
                    reward_num = onlooker_description.split(',')[1][4:-1]
                else:
                    reward_num = '0'
            except Exception:  # narrowed from bare except: keep best-effort behaviour
                pass
        except TimeoutException:
            wd.execute_script('window.stop()')  # stop loading after a page timeout
        print(str(n) + ': ' + str([reward_num, onlooker_num]))
        # BUG FIX: placeholders must not be quoted — pymysql quotes parameters
        # itself; the original "\"%s\"" stored the URL wrapped in stray quotes.
        # The numeric columns receive string values; MySQL coerces them.
        cur.execute("insert into weibo_scrapy_2(page_url, reward_num, onlooker_num) values(%s, %s, %s)", (page_url, reward_num, onlooker_num))
        cur.connection.commit()
cur.close()
conn.close()
wd.close()
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,405 | HerikkWang/weibo | refs/heads/master | /data_processing_1.py | import csv
path1 = 'weibo_data_2_1.csv'
path2 = 'weibo_data_2_1_processed.csv'


def filter_nonempty_rows(src_path, dst_path):
    """Copy CSV rows whose first field is non-empty from src_path to dst_path.

    :param src_path: input CSV path (UTF-8).
    :param dst_path: output CSV path; overwritten if it exists.
    """
    # newline='' is required by the csv module on both read and write;
    # `row and` guards against blank lines (the original raised IndexError on them).
    with open(src_path, 'r', encoding='utf-8', newline='') as src:
        kept = [row for row in csv.reader(src) if row and row[0] != '']
    with open(dst_path, 'w', encoding='utf-8', newline='') as dst:
        csv.writer(dst).writerows(kept)


if __name__ == '__main__':
    # Main guard added so the module can be imported without touching the disk.
    filter_nonempty_rows(path1, path2)
| {"/Login.py": ["/weiboLogin.py"], "/get_raw_html.py": ["/weiboLogin.py", "/Login.py"], "/weiboLogin.py": ["/WeiboSearch.py"], "/scrapy_1.py": ["/Login.py"]} |
68,406 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/df_outliers.py | from pyspark.sql.session import SparkSession
from pyspark.sql.functions import *
from pyspark.mllib.clustering import KMeans
from sparkclean.df_transformer import DataFrameTransformer
from pyspark.sql.types import *
from pyspark.sql.functions import udf
import math
import numpy as np
class OutlierDetector:
    """Detect (and optionally delete) outlier rows in numeric DataFrame columns.

    Two strategies are offered:
      * k-means distance ratio: ``kmeans_check`` / ``delete_outlier_kmeans``
      * interquartile-range fences: ``outlier`` / ``delete_outlier_one``
    """

    def __init__(self, df, column, tolerance=1.5):
        """
        :param df: pyspark DataFrame to analyse.
        :param column: column name (str) or list of names to be checked.
        :param tolerance: default IQR multiplier (kept for backward
            compatibility; the per-call ``tolerance`` of ``outlier`` is used).
        """
        self._df = df
        self._column = column
        self.rownum = self._df.count()
        self.transformer = DataFrameTransformer(self._df)
        # Becomes True once column_datatype() validated the columns,
        # normalised them and appended the 'id' primary-key column.
        self.transform = False

    def normalize(self, col):
        """Min-max scale column *col* into [0, 1] in place."""
        max_column = self._df.agg({col: 'max'}).collect()[0][0]
        min_column = self._df.agg({col: 'min'}).collect()[0][0]

        def change(value):
            if max_column == min_column:
                # constant column: avoid division by zero, map everything to 0
                return 0
            return (value - min_column) / (max_column - min_column)
        df = self.transformer.df
        udfValue = udf(change, DoubleType())
        df = df.withColumn('new' + col, udfValue(col))
        df = df.drop(col).withColumnRenamed('new' + col, col)
        self.transformer = DataFrameTransformer(df)
        return

    def column_datatype(self, normalize=True):
        '''
        Check that the requested columns exist and are numeric, then
        optionally normalize them and append an 'id' primary-key column.

        NOTE(review): on invalid input this prints and returns instead of
        raising, so callers continue with unvalidated state — confirm intended.
        '''
        if isinstance(self._column, str):
            self._column = [self._column]
        number_type = ["DoubleType", "FloatType", "IntegerType", "LongType", "ShortType"]
        for c in self._column:
            if c not in self._df.columns:
                print("This column does not exit.")
                return
            else:
                if str(self._df.schema[c].dataType) not in number_type:
                    print('The type of ' + c + " is " + str(self._df.schema[c].dataType))
                    print("But we only accept numeric types here.")
                    return
        if normalize:
            for c in self._column:
                self.normalize(c)
        self.transformer.addPrimaryKey()
        self._column.append('id')
        self._df = self.transformer.df
        self.transform = True
        return

    def kmeans_check(self, T, k=3, normalize=True):
        '''
        :param T: in-cluster distance-ratio threshold: a row is flagged when
            dist(row, centre) / max-dist-in-its-cluster exceeds T.
            If T < 1, at least one row per cluster is flagged.
        :param k: number of clusters.
        :param normalize: min-max scale the columns before clustering.
        :return: list of 'id' values of outlier rows.
        '''
        if not self.transform:
            # BUG FIX: the column list used to be passed as the *normalize*
            # argument here, silently ignoring the caller's flag.
            self.column_datatype(normalize)
        trans_df = self._df.select(self._column).rdd.map(lambda x: np.array(x))
        # x[:-1] strips the trailing 'id' column appended by column_datatype()
        # NOTE(review): `runs` is deprecated/removed in newer pyspark — confirm version.
        clusters = KMeans.train(trans_df.map(lambda x: x[:-1]), k, maxIterations=10, runs=1, initializationMode='random')
        maxIngroup = trans_df.map(lambda x: (clusters.predict(x[:-1]), \
            np.linalg.norm(clusters.centers[clusters.predict(x[:-1])] - x[:-1]))).reduceByKey(lambda x, y: x if x > y else y).collect()
        maxIngroup = sorted(maxIngroup)
        distForAll = trans_df.map(lambda x: (x[-1], np.linalg.norm(clusters.centers[clusters.predict(x[:-1])] - x[:-1]) / \
            maxIngroup[clusters.predict(x[:-1])][1]))
        outlier_index = distForAll.filter(lambda x: x[1] > T).map(lambda x: int(x[0])).collect()
        print('Around %.2f of rows are outliers.' % (len(outlier_index) / self.rownum))
        self.transform = False
        return outlier_index

    def delete_outlier_kmeans(self, T, k=3, normalize=True):
        # :param T: indegree threshold in kmeans
        # Deletes the flagged rows in place; see kmeans_check for parameters.
        res = self.kmeans_check(T, k, normalize)
        self.transformer.delete_row(self.transformer.df['id'].isin(res) == False)
        self._df = self.transformer.df
        return

    def delete_outlier_one(self, tolerance=1.5, normalize=True):
        """
        :param tolerance: IQR multiplier used for the fences.
        Deletes the flagged rows in place; see ``outlier`` for details.
        """
        res = self.outlier(tolerance, normalize)
        self.transformer.delete_row(self.transformer.df['id'].isin(res) == False)
        self._df = self.transformer.df
        return

    def outlier(self, tolerance=1.5, normalize=True):
        """
        :param tolerance: IQR multiplier for the Tukey fences
            [q1 - tol*IQR, q3 + tol*IQR].
        :param normalize: min-max scale the column first.
        :return: list of 'id' values of rows outside the fences.
        """
        if isinstance(self._column, str):
            self._column = [self._column]
        else:
            print('This function only works for one column.')
            return
        if not self.transform:
            # BUG FIX: the column list used to be passed as *normalize* here.
            self.column_datatype(normalize)

        def quantile(n):
            # n-th quantile (n in (0, 1]) of the target column, approximated
            return self._df.approxQuantile(self._column[0], [n], 0.01)[0]
        median_val = quantile(0.50)
        q1 = quantile(0.25)
        q3 = quantile(0.75)
        iqrange = q3 - q1
        distance_range = tolerance * (q3 - q1)
        lower_bound, upper_bound = q1 - distance_range, q3 + distance_range
        # x[0] is the column value, x[1] the appended 'id'
        outlier_list = self._df.select(self._column).rdd.map(lambda x: np.array(x)).filter \
            (lambda x: x[0] < lower_bound or x[0] > upper_bound).map(lambda x: int(x[1])).collect()
        outlier_num = len(outlier_list)
        print('Around %.2f of rows are outliers.' % (outlier_num / self.rownum))
        self.transform = False
        return outlier_list

    def show(self, n=10, truncate=True, withId=False):
        """Show the current dataframe; the synthetic 'id' column is hidden
        unless *withId* is True."""
        if withId:
            return self.transformer.df.show(n, truncate)
        else:
            return self.transformer.df.drop("id").show(n, truncate)
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,407 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/df_transformer.py | """
Copied From Optimus/df_transformer.py
Fixed some errors.
Added useful functions for sparkclean.
"""
# Importing sql types
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, ArrayType
# Importing sql functions
from pyspark.sql.functions import col, udf, trim, lit, format_number, months_between, date_format, unix_timestamp, \
current_date, abs as mag, monotonically_increasing_id
from pyspark.ml.feature import MinMaxScaler, VectorAssembler
import re
import string
import unicodedata
import pyspark.sql.dataframe
from pyspark.ml.feature import Imputer
class DataFrameTransformer:
"""DataFrameTransformer is a class to make transformations in dataFrames"""
    def __init__(self, df):
        """Class constructor.
        :param df DataFrame to be transformed.
        :raises AssertionError: if *df* is not a pyspark DataFrame.
        """
        assert (isinstance(df, pyspark.sql.dataframe.DataFrame)), \
            "Error, df argument must be a pyspark.sql.dataframe.DataFrame instance"
        # Dataframe
        self._df = df
        # SparkContext:
        self._sql_context = self._df.sql_ctx
        # counts transformations since the last checkpoint; see _add_transformation()
        self._number_of_transformations = 0
@classmethod
def _assert_type_str_or_list(cls, variable, name_arg):
"""This function asserts if variable is a string or a list dataType."""
assert isinstance(variable, (str, list)), \
"Error: %s argument must be a string or a list." % name_arg
    @classmethod
    def _assert_type_int_or_float(cls, variable, name_arg):
        """This function asserts if variable is an int or a float dataType."""
        # (docstring above corrects a copy-paste that said "string or a list")
        assert isinstance(variable, (int, float)), \
            "Error: %s argument must be a int or a float." % name_arg
    @classmethod
    def _assert_type_str(cls, variable, name_arg):
        """This function asserts if variable is a string dataType."""
        # (docstring above corrects a copy-paste that said "string or a list")
        assert isinstance(variable, str), \
            "Error: %s argument must be a string." % name_arg
@classmethod
def _assert_cols_in_df(cls, columns_provided, columns_df):
"""This function asserts if columns_provided exists in dataFrame.
Inputs:
columns_provided: the list of columns to be process.
columns_df: list of columns's dataFrames
"""
col_not_valids = (
set([column for column in columns_provided]).difference(set([column for column in columns_df])))
assert (col_not_valids == set()), 'Error: The following columns do not exits in dataFrame: %s' % col_not_valids
def _add_transformation(self):
self._number_of_transformations += 1
if self._number_of_transformations > 50:
self.check_point()
self._number_of_transformations = 0
    def set_data_frame(self, df):
        """This function set a dataframe into the class for subsequent actions.
        :param df: pyspark DataFrame that replaces the wrapped one.
        :raises AssertionError: if *df* is not a pyspark DataFrame.
        """
        # NOTE(review): the message below has a typo ("must a") — runtime string left untouched
        assert isinstance(df, pyspark.sql.dataframe.DataFrame), "Error: df argument must a sql.dataframe type"
        self._df = df
    @property
    def df(self):
        """This function returns the dataframe currently held by the class.
        :rtype: pyspark.sql.dataframe.DataFrame
        """
        return self._df
    def show(self, n=10, truncate=True):
        """This function shows the dataframe of the class
        :param n: number or rows to show
        :param truncate: If set to True, truncate strings longer than 20 chars by default.
        :rtype: pyspark.sql.dataframe.DataFrame.show()
        """
        return self._df.show(n, truncate)
def lower_case(self, columns):
"""This function set all strings in columns of dataframe specified to lowercase.
Columns argument must be a string or a list of string. In order to apply this function to all
dataframe, columns must be equal to '*'"""
func = lambda cell: cell.lower() if cell is not None else cell
self.set_col(columns, func, 'string')
return self
def upper_case(self, columns):
"""This function set all strings in columns of dataframe specified to uppercase.
Columns argument must be a string or a list of string. In order to apply this function to all
dataframe, columns must be equal to '*'"""
func = lambda cell: cell.upper() if cell is not None else cell
self.set_col(columns, func, 'string')
return self
    def impute_missing(self, columns, out_cols, strategy):
        """
        Imputes missing data from specified columns using the mean or median.
        :param columns: List of columns to be analyze.
        :param out_cols: List of output columns with missing values imputed.
        :param strategy: String that specifies the way of computing missing data. Can be "mean" or "median"
        :return: Transformer object (DF with columns that has the imputed values).
        """
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        assert isinstance(columns, list), "Error: columns argument must be a list"
        assert isinstance(out_cols, list), "Error: out_cols argument must be a list"
        # Check if columns argument a string datatype:
        self._assert_type_str(strategy, "strategy")
        assert (strategy == "mean" or strategy == "median"), "Error: strategy has to be 'mean' or 'median'."

        # Delegates to pyspark.ml.feature.Imputer; the fitted model adds the
        # out_cols to the wrapped dataframe.
        def impute(cols):
            imputer = Imputer(inputCols=cols, outputCols=out_cols)
            model = imputer.setStrategy(strategy).fit(self._df)
            self._df = model.transform(self._df)

        impute(columns)
        return self
def replace_na(self, value, columns=None):
"""
Replace nulls with specified value.
:param columns: optional list of column names to consider. Columns specified in subset that do not have
matching data type are ignored. For example, if value is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
:param value: Value to replace null values with. If the value is a dict, then subset is ignored and value
must be a mapping from column name (string) to replacement value. The replacement
value must be an int, long, float, or string.
:return: Transformer object (DF with columns with replaced null values).
"""
if columns == "*":
columns = None
# Columns to list
if isinstance(columns, str):
columns = [columns]
if columns is not None:
assert isinstance(columns, list), "Error: columns argument must be a list"
assert isinstance(value, (int, float, str, dict)), "Error: value argument must be an " \
"int, long, float, string, or dict"
def replace_it(val, col):
self._df = self._df.fillna(val, subset=col)
replace_it(val=value, col=columns)
self._add_transformation()
return self
    def check_point(self):
        """This method is a very useful function to break lineage of transformations. By default Spark uses the lazy
        evaluation approach in processing data: transformation functions are not computed into an action is called.
        Sometimes when transformations are numerous, the computations are very extensive because the high number of
        operations that spark needs to run in order to get the results.
        Other important thing is that apache spark usually save task but not result of dataFrame, so tasks are
        accumulated and the same situation happens.
        The problem can be deal it with the checkPoint method. This method save the resulting dataFrame in disk, so
        the lineage is cut.
        """
        # Checkpointing of dataFrame. One question can be thought. Why not use cache() or persist() instead of
        # checkpoint. This is because cache() and persis() apparently do not break the lineage of operations,
        print("Saving changes at disk by checkpoint...")
        self._df = self._df.checkpoint()
        #self._df.count()
        #self._df = self._sql_context.createDataFrame(self._df, self._df.schema)
        print("Done.")

    # public alias kept for backward compatibility with callers using execute()
    execute = check_point
    def trim_col(self, columns):
        """This methods cut left and right extra spaces in column strings provided by user.
        :param columns list of column names of dataFrame.
                       If a string "*" is provided, the method will do the trimming operation in whole dataFrame.
        :return transformer object
        """
        # Function to trim spaces in columns with strings datatype
        # (closes over valid_cols, which is computed below before the call)
        def col_trim(columns):
            exprs = [trim(col(c)).alias(c)
                     if (c in columns) and (c in valid_cols)
                     else c
                     for (c, t) in self._df.dtypes]
            self._df = self._df.select(*exprs)

        # Check if columns argument must be a string or list datatype:
        self._assert_type_str_or_list(columns, "columns")
        # Filters all string columns in dataFrame
        valid_cols = [c for (c, t) in filter(lambda t: t[1] == 'string', self._df.dtypes)]
        # If None or [] is provided with column parameter:
        if columns == "*": columns = valid_cols
        # Columns
        if isinstance(columns, str): columns = [columns]
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        # Trimming spaces in columns:
        col_trim(columns)
        self._add_transformation()
        # Returning the transformer object for able chaining operations
        return self
def drop_col(self, columns):
"""This method eliminate the list of columns provided by user.
:param columns list of columns names or a string (a column name).
:return transformer object
"""
def col_drop(columns):
exprs = filter(lambda c: c not in columns, self._df.columns)
self._df = self._df.select(*exprs)
# Check if columns argument must be a string or list datatype:
self._assert_type_str_or_list(columns, "columns")
# Columns
if isinstance(columns, str): columns = [columns]
# Check if columns to be process are in dataframe
self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
# Calling colDrop function
col_drop(columns)
self._add_transformation()
# Returning the transformer object for able chaining operations
return self
    def replace_col(self, search, change_to, columns):
        """This method search the 'search' value in DataFrame columns specified in 'columns' in order to replace it
        for 'change_to' value.
        :param search value to search in dataFrame.
        :param change_to value used to replace the old one in dataFrame.
        :param columns list of string column names or a string (column name). If columns = '*' is provided,
                       searching and replacing action is made in all columns of DataFrame that have same
                       dataType of search and change_to.
        search and change_to arguments are expected to be numbers and same dataType ('integer', 'string', etc) each other.
        columns argument is expected to be a string or list of string column names.
        :return transformer object
        """
        def col_replace(columns):
            self._df = self._df.replace(search, change_to, subset=columns)

        # Check if columns argument must be a string or list datatype:
        self._assert_type_str_or_list(columns, "columns")
        # Asserting search parameter is a string or a number
        assert isinstance(search, str) or isinstance(search, float) or isinstance(search,
                                                                                  int), \
            "Error: Search parameter must be a number or string"
        # Asserting change_to parameter is a string or a number
        assert isinstance(change_to, str) or isinstance(change_to, float) or isinstance(change_to,
                                                                                        int), \
            "Error: change_to parameter must be a number or string"
        # Asserting search and change_to have same type
        assert isinstance(search, type(change_to)), \
            'Error: Search and ChangeTo must have same datatype: Integer, String, Float'
        # Change
        # NOTE(review): type(float(1.2)) and type(1.2) are the same dict key,
        # so the 'float' entry is overwritten by 'double' — confirm intended.
        types = {type(''): 'string', type(int(1)): 'int', type(float(1.2)): 'float', type(1.2): 'double'}
        valid_cols = [c for (c, t) in filter(lambda t: t[1] == types[type(search)], self._df.dtypes)]
        # If None or [] is provided with column parameter:
        if columns == "*": columns = valid_cols[:]
        # If columns is string, make a list:
        if isinstance(columns, str): columns = [columns]
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        col_not_valids = (set([column for column in columns]).difference(set([column for column in valid_cols])))
        assert (
            col_not_valids == set()), 'Error: The following columns do not have same datatype argument provided: %s' % \
                                      col_not_valids
        col_replace(columns)
        self._add_transformation()
        # Returning the transformer object for able chaining operations
        return self
    def delete_row(self, func):
        """This function is an alias of filter and where spark functions.
        Rows for which *func* evaluates to False are removed.
        :param func func must be an expression with the following form:
                    func = col('col_name') > value.
                    func is an expression where col is a pyspark.sql.function.
        :return transformer object
        """
        self._df = self._df.filter(func)
        self._add_transformation()  # checkpoint in case
        # Returning the transformer object for able chaining operations
        return self
    def set_col(self, columns, func, data_type):
        """This method can be used to make math operations or string manipulations in row of dataFrame columns.
        :param columns list of columns (or a single column) of dataFrame.
        :param func function or string type which describe the data_type that func function should return.
        :param data_type string indicating one of the following options: 'integer', 'string', 'double','float'.
        'columns' argument is expected to be a string or a list of columns names.
        It is a requirement for this method that the data_type provided must be the same to data_type of columns.
        On the other hand, if user writes columns == '*' the method makes operations in func if only if columns
        have same data_type that data_type argument.
        :return transformer object
        """
        # maps user-facing type names to Spark SQL types (udf return type) ...
        dict_types = {'string': StringType(), 'str': StringType(), 'integer': IntegerType(),
                      'int': IntegerType(), 'float': FloatType(), 'double': DoubleType(), 'Double': DoubleType()}
        # ... and to the strings reported by DataFrame.dtypes
        types = {'string': 'string', 'str': 'string', 'String': 'string', 'integer': 'int',
                 'int': 'int', 'float': 'float', 'double': 'double', 'Double': 'double'}

        try:
            function = udf(func, dict_types[data_type])
        except KeyError:
            assert False, "Error, data_type not recognized"

        # Applies the udf to the selected columns, leaving the rest untouched.
        def col_set(columns, function):
            exprs = [function(col(c)).alias(c) if c in columns else c for (c, t) in self._df.dtypes]
            try:
                self._df = self._df.select(*exprs)
            except Exception as e:
                print(e)
                assert False, "Error: Make sure if operation is compatible with row datatype."

        # Check if columns argument must be a string or list datatype:
        self._assert_type_str_or_list(columns, "columns")
        # Filters all string columns in dataFrame
        valid_cols = [c for (c, t) in filter(lambda t: t[1] == types[data_type], self._df.dtypes)]
        # If None or [] is provided with column parameter:
        if columns == "*": columns = valid_cols[:]
        # If columns is string, make a list:
        if isinstance(columns, str): columns = [columns]
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        col_not_valids = (set([column for column in columns]).difference(set([column for column in valid_cols])))
        assert (
            col_not_valids == set()), 'Error: The following columns do not have same datatype argument provided: %s' \
                                      % col_not_valids
        col_set(columns, function)
        self._add_transformation()  # checkpoint in case
        # Returning the transformer object for able chaining operations
        return self
# Drop
def keep_col(self, columns):
"""This method keep only columns specified by user with columns argument in DataFrame.
:param columns list of columns or a string (column name).
:return transformer object
"""
def col_keep(columns):
exprs = filter(lambda c: c in columns, self._df.columns)
self._df = self._df.select(*exprs)
# Check if columns argument must be a string or list datatype:
self._assert_type_str_or_list(columns, "columns")
# Check is column if a string.
if isinstance(columns, str): columns = [columns]
# Check if columns to be process are in dataframe
self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
# Calling colDrop function
col_keep(columns)
self._add_transformation() # checkpoint in case
# Returning the transformer object for able chaining operations
return self
    def clear_accents(self, columns):
        """This function deletes accents in strings column dataFrames, it does not eliminate main characters,
        but only deletes special tildes.
        :param columns String or a list of column names.
        :return transformer object
        """
        # Check if columns argument must be a string or list datatype:
        self._assert_type_str_or_list(columns, "columns")
        # Filters all string columns in dataFrame
        valid_cols = [c for (c, t) in filter(lambda t: t[1] == 'string', self._df.dtypes)]
        # If None or [] is provided with column parameter:
        if columns == "*": columns = valid_cols[:]
        # If columns is string, make a list:
        if isinstance(columns, str): columns = [columns]
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        col_not_valids = (set([column for column in columns]).difference(set([column for column in valid_cols])))
        assert (
            col_not_valids == set()), 'Error: The following columns do not have same datatype argument provided: %s' \
                                      % col_not_valids

        # Receives a string as an argument
        def remove_accents(input_str):
            # first, normalize strings:
            nfkd_str = unicodedata.normalize('NFKD', input_str)
            # Keep chars that has no other char combined (i.e. accents chars)
            with_out_accents = u"".join([c for c in nfkd_str if not unicodedata.combining(c)])
            return with_out_accents

        function = udf(lambda x: remove_accents(x) if x is not None else x, StringType())
        exprs = [function(col(c)).alias(c) if (c in columns) and (c in valid_cols) else c for c in self._df.columns]
        self._df = self._df.select(*exprs)
        self._add_transformation()  # checkpoint in case
        # Returning the transformer object for able chaining operations
        return self
    def remove_special_chars(self, columns):
        """This function remove special chars in string columns, such as: .!"#$%&/()
        :param columns list of names columns to be processed.
        columns argument can be a string or a list of strings.
        :return transformer object
        """
        # Check if columns argument must be a string or list datatype:
        self._assert_type_str_or_list(columns, "columns")
        # Filters all string columns in dataFrame
        valid_cols = [c for (c, t) in filter(lambda t: t[1] == 'string', self._df.dtypes)]
        # If None or [] is provided with column parameter:
        if columns == "*": columns = valid_cols[:]
        # If columns is string, make a list:
        if isinstance(columns, str):
            columns = [columns]
        # Check if columns to be process are in dataframe
        self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
        col_not_valids = (set([column for column in columns]).difference(set([column for column in valid_cols])))
        assert (
            col_not_valids == set()), 'Error: The following columns do not have same datatype argument provided: %s' \
                                      % col_not_valids

        # Strips every char of string.punctuation that occurs in the cell.
        def rm_spec_chars(input_str):
            # Remove all punctuation and control characters
            for punct in (set(input_str) & set(string.punctuation)):
                input_str = input_str.replace(punct, "")
            return input_str

        # User define function that does operation in cells
        function = udf(lambda cell: rm_spec_chars(cell) if cell is not None else cell, StringType())
        exprs = [function(c).alias(c) if (c in columns) and (c in valid_cols) else c for c in self._df.columns]
        self._df = self._df.select(*exprs)
        self._add_transformation()  # checkpoint in case
        # Returning the transformer object for able chaining operations
        return self
def remove_special_chars_regex(self, columns, regex):
"""This function remove special chars in string columns using a regex, such as: .!"#$%&/()
:param columns list of names columns to be processed.
:param regex string that contains the regular expression
columns argument can be a string or a list of strings."""
# Check if columns argument must be a string or list datatype:
self._assert_type_str_or_list(columns, "columns")
# Filters all string columns in dataFrame
valid_cols = [c for (c, t) in filter(lambda t: t[1] == 'string', self._df.dtypes)]
# If None or [] is provided with column parameter:
if columns == "*": columns = valid_cols[:]
# If columns is string, make a list:
if isinstance(columns, str):
columns = [columns]
# Check if columns to be process are in dataframe
self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
col_not_valids = (set([column for column in columns]).difference(set([column for column in valid_cols])))
assert (
col_not_valids == set()), 'Error: The following columns do not have same datatype argument provided: %s' \
% col_not_valids
def rm_spec_chars_regex(input_str, regex):
for _ in set(input_str):
input_str = re.sub(regex, '', input_str)
return input_str
# User define function that does operation in cells
function = udf(lambda cell: rm_spec_chars_regex(cell, regex) if cell is not None else cell, StringType())
exprs = [function(c).alias(c) if (c in columns) and (c in valid_cols) else c for c in self._df.columns]
self._df = self._df.select(*exprs)
self._add_transformation() # checkpoint in case
# Returning the transformer object for able chaining operations
return self
    def rename_col(self, columns):
        """"This functions change the name of columns datraFrame.
        :param columns List of tuples. Each tuple has de following form: (oldColumnName, newColumnName).
        :return transformer object
        """
        # Asserting columns is string or list:
        assert isinstance(columns, list) and isinstance(columns[0], tuple), \
            "Error: Column argument must be a list of tuples"
        col_not_valids = (
            set([column[0] for column in columns]).difference(set([column for column in self._df.columns])))
        assert (col_not_valids == set()), 'Error: The following columns do not exits in dataFrame: %s' % col_not_valids
        old_names = [column[0] for column in columns]
        # columns that are not being renamed are passed through unchanged
        not_in_type = filter(lambda c: c not in old_names, self._df.columns)
        exprs = [col(column[0]).alias(column[1]) for column in columns] + [col(column) for column in not_in_type]
        # NOTE(review): checkpoint counter is bumped before the select, unlike
        # most sibling methods which bump it after — confirm intended ordering.
        self._add_transformation()  # checkpoint in case
        self._df = self._df.select(*exprs)
        return self
def lookup(self, column, str_to_replace, list_str=None):
    """This method search a list of strings specified in `list_str` argument among rows
    in column dataFrame and replace them for `str_to_replace`.

    :param column Column name, this variable must be string dataType.
    :param str_to_replace string that going to replace all others present in list_str argument,
           or a dict mapping each replacement value to the list of values it replaces.
    :param list_str List of strings to be replaced (must be None when a dict is given).

    `lookup` can only be runned in StringType columns.
    """
    # Check if columns argument a string datatype:
    self._assert_type_str(column, "column")

    # str_to_replace is either a plain replacement string (used together with
    # list_str) or a {new_value: [old values...]} dictionary:
    assert isinstance(str_to_replace, (str, dict)), "Error: str_to_replace argument must be a string or a dict"

    if isinstance(str_to_replace, dict):
        assert (str_to_replace != {}), "Error, str_to_replace must be a string or a non empty python dictionary"
        assert (
            list_str is None), "Error, If a python dictionary if specified, list_str argument must be None: list_str=None"

    # list_str must be a non-empty list, or None (dictionary mode):
    assert isinstance(list_str, list) and list_str != [] or (
        list_str is None), "Error: Column argument must be a non empty list"

    if isinstance(str_to_replace, str):
        assert list_str is not None, "Error: list_str cannot be None if str_to_replace is a String, please you need to specify \
the list_str string"

    # Filters all string columns in dataFrame
    valid_cols = [c for (c, t) in filter(lambda t: t[1] == 'string', self._df.dtypes)]

    if isinstance(column, str):
        column = [column]

    # Check if columns to be process are in dataframe
    self._assert_cols_in_df(columns_provided=column, columns_df=self._df.columns)

    # Asserting if selected column datatype and search and changeTo parameters are the same:
    col_not_valids = (set(column).difference(set([column for column in valid_cols])))
    assert (col_not_valids == set()), 'Error: The column provided is not a column string: %s' % col_not_valids

    # User defined function to search cell value in list provide by user:
    if isinstance(str_to_replace, str) and list_str is not None:

        def check(cell):
            # Replace the cell only when it matches one of the listed values:
            if cell is not None and (cell in list_str):
                return str_to_replace
            else:
                return cell

        func = udf(lambda cell: check(cell), StringType())
    else:

        def replace_from_dic(str_test):
            # Dictionary mode: each key is a replacement value mapped to the
            # list of old values it should replace.
            for key in str_to_replace.keys():
                if str_test in str_to_replace[key]:
                    str_test = key
            return str_test

        func = udf(lambda cell: replace_from_dic(cell) if cell is not None else cell, StringType())

    # Calling udf for each row of column provided by user. The rest of dataFrame is
    # maintained the same.
    # NOTE(review): only the first listed column (column[0]) is transformed.
    exprs = [func(col(c)).alias(c) if c == column[0] else c for c in self._df.columns]
    self._df = self._df.select(*exprs)
    self._add_transformation()  # checkpoint in case

    return self
def move_col(self, column, ref_col, position):
    """Relocate ``column`` immediately before or after ``ref_col``.

    :param column: name of the column to move (string).
    :param ref_col: name of the reference column used as the anchor.
    :param position: 'after' or 'before', relative to ``ref_col``.
    :return: self, to allow chaining.
    """
    names = self._df.columns
    # Validate argument types and presence in the dataframe (same order of
    # checks as before, so failure modes are unchanged):
    self._assert_type_str(column, "column")
    self._assert_cols_in_df(columns_provided=[column], columns_df=self._df.columns)
    self._assert_type_str(ref_col, "ref_col")
    assert (
        (column != '') and (ref_col != '') and (position != '')), "Error: Input parameters can't be empty strings"
    self._assert_cols_in_df(columns_provided=[ref_col], columns_df=self._df.columns)
    self._assert_type_str(position, "position")
    assert (position == 'after') or (
        position == 'before'), "Error: Position parameter only can be 'after' or 'before'"

    def locate(seq, name):
        # First index at which ``name`` occurs in ``seq``.
        return [i for i, c in enumerate(seq) if c == name][0]

    ref_idx = locate(names, ref_col)
    src_idx = locate(names, column)
    # The insertion index is computed before popping, exactly as the previous
    # insert(..., pop(...)) evaluation order did:
    if position == 'after':
        target = ref_idx if ref_idx >= src_idx else ref_idx + 1
    else:
        target = ref_idx - 1 if ref_idx >= src_idx else ref_idx
    names.insert(target, names.pop(src_idx))
    self._df = self._df[names]
    self._add_transformation()  # checkpoint in case
    return self
def count_items(self, col_id, col_search, new_col_feature, search_string):
    """
    This function can be used to create Spark DataFrames with frequencies for picked values of
    selected columns.

    :param col_id column name of the columnId of dataFrame
    :param col_search column name of the column to be split.
    :param new_col_feature Name of the new column.
    :param search_string string of value to be counted.

    :returns Spark Dataframe with one row per distinct col_id value, holding in
             new_col_feature the count of rows whose col_search equals search_string.

    Please, see documentation for more explanations about this method.
    """
    # Asserting if position is string or list:
    assert isinstance(search_string, str), "Error: search_string argument must be a string"

    # Asserting parameters are not empty strings:
    assert (
        (col_id != '') and (col_search != '') and (new_col_feature != '')), \
        "Error: Input parameters can't be empty strings"

    # Check if col_search argument is string datatype:
    self._assert_type_str(col_search, "col_search")

    # Check if new_col_feature argument is a string datatype:
    self._assert_type_str(new_col_feature, "new_col_feature")

    # Check if col_id argument is a string datatype:
    self._assert_type_str(col_id, "col_id")

    # Check if col_id to be process are in dataframe
    self._assert_cols_in_df(columns_provided=[col_id], columns_df=self._df.columns)

    # Check if col_search to be process are in dataframe
    self._assert_cols_in_df(columns_provided=[col_search], columns_df=self._df.columns)

    # subset, only PAQ and Tipo_Unidad:
    subdf = self._df.select(col_id, col_search)

    # Count the matching rows per id value:
    new_column = subdf.where(subdf[col_search] == search_string).groupBy(col_id).count()

    # Left join; the id column of the counts is renamed first so the join
    # condition is unambiguous:
    new_column = new_column.withColumnRenamed(col_id, col_id + '_other')

    exprs = (subdf[col_id] == new_column[col_id + '_other']) & (subdf[col_search] == search_string)

    df_mod = subdf.join(new_column, exprs, 'left_outer')

    # Cleaning dataframe: drop the helper columns, rename 'count' to the
    # requested feature name, and discard the unmatched (null) rows.
    df_mod = df_mod.drop(col_id + '_other').drop(col_search).withColumnRenamed('count', new_col_feature) \
        .dropna("any")

    print("Counting existing " + search_string + " in " + col_search)
    return df_mod.sort(col_id).drop_duplicates([col_id])
def date_transform(self, columns, current_format, output_format):
    """Re-format string date columns.

    :param columns: name (or list of names) of the date columns to convert;
           every listed column must share ``current_format``, otherwise the
           failed conversions come back as nulls.
    :param current_format: date format the columns are currently written in.
    :param output_format: desired output date string format.
    :return: self, to allow chaining.
    """
    self._assert_type_str(current_format, "current_format")
    self._assert_type_str(output_format, "output_format")
    self._assert_type_str_or_list(columns, "columns")
    if isinstance(columns, str):
        columns = [columns]
    self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)

    def convert(name):
        # Parse with the old format, then render with the new one.
        return date_format(
            unix_timestamp(name, current_format).cast("timestamp"), output_format).alias(name)

    exprs = [convert(c) if c in columns else c for c in self._df.columns]
    self._df = self._df.select(*exprs)
    self._add_transformation()  # checkpoint in case
    return self
def age_calculate(self, column, dates_format, name_col_age):
    """Compute ages (in years) from a born-date column.

    :param column: name of the born-dates column.
    :param dates_format: string date format of ``column``.
    :param name_col_age: name of the new column holding the computed ages.
    :return: self, to allow chaining.
    """
    # Check if column argument a string datatype:
    self._assert_type_str(column, "column")
    # Check if dates_format argument a string datatype:
    self._assert_type_str(dates_format, "dates_format")
    # Asserting if column if in dataFrame:
    assert column in self._df.columns, "Error: Column assigned in column argument does not exist in dataFrame"
    # Output format date
    format_dt = "yyyy-MM-dd"  # Some SimpleDateFormat string
    # BUG FIX: the original called an undefined name `mag`; the intended
    # function is `abs` (pyspark.sql.functions.abs, in scope via the module's
    # functions import), since months_between can be negative.
    exprs = format_number(
        abs(
            months_between(date_format(
                unix_timestamp(column, dates_format).cast("timestamp"), format_dt), current_date()) / 12), 4).alias(
        name_col_age)
    self._df = self._df.withColumn(name_col_age, exprs)
    self._add_transformation()  # checkpoint in case
    return self
def cast_func(self, cols_and_types):
    """Cast columns to new Spark SQL types.

    :param cols_and_types: list of ``(column_name, type_name)`` tuples, e.g.
           ``[('columnName1', 'integer'), ('columnName2', 'float'), ('columnName3', 'string')]``.
           The second element is the target datatype after the cast.
    :return: self, to allow chaining.
    """
    dict_types = {'string': StringType(), 'str': StringType(), 'integer': IntegerType(),
                  'int': IntegerType(), 'float': FloatType(), 'double': DoubleType(), 'Double': DoubleType()}

    types = {'string': 'string', 'str': 'string', 'String': 'string', 'integer': 'int',
             'int': 'int', 'float': 'float', 'double': 'double', 'Double': 'double'}

    assert isinstance(cols_and_types, (str, list)), "Error: Column argument must be a string or a list."
    if isinstance(cols_and_types, str):
        cols_and_types = [cols_and_types]

    targets = [pair[0] for pair in cols_and_types]
    # Check if columnNames to be process are in dataframe
    self._assert_cols_in_df(columns_provided=targets, columns_df=self._df.columns)

    # Casted columns first, untouched ones after — same ordering as before.
    casted = [col(pair[0]).cast(dict_types[types[pair[1]]]).alias(pair[0]) for pair in cols_and_types]
    kept = [col(name) for name in self._df.columns if name not in targets]
    self._df = self._df.select(*(casted + kept))
    self._add_transformation()  # checkpoint in case
    return self
def empty_str_to_str(self, columns, custom_str):
    """Replace empty strings in the given string columns with ``custom_str``.

    :param columns: column name, list of names, or "*" for every string column.
    :param custom_str: replacement value for empty cells.
    :return: self, to allow chaining.
    """
    self._assert_type_str(custom_str, "custom_str")
    self._assert_type_str_or_list(columns, "columns")
    # Only StringType columns are eligible for the replacement:
    string_cols = [name for name, dtype in self._df.dtypes if dtype == 'string']
    if columns == "*":
        columns = string_cols[:]
    if isinstance(columns, str):
        columns = [columns]
    self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)

    def fill_empty(name):
        # Keep non-empty values, substitute custom_str otherwise.
        return when(col(name) != "", col(name)).otherwise(custom_str)

    exprs = [fill_empty(c).alias(c) if (c in columns) and (c in string_cols) else c for c in self._df.columns]
    self._df = self._df.select(*exprs)
    self._add_transformation()  # checkpoint in case
    return self
def row_filter_by_type(self, column_name, type_to_delete):
    """Drop every row whose value in ``column_name`` parses as ``type_to_delete``.

    :param column_name: column to inspect (must exist in the dataframe).
    :param type_to_delete: one of 'integer', 'float', 'string', 'null'.
    :return: self, to allow chaining.
    """
    # Check if column_name argument a string datatype:
    self._assert_type_str(column_name, "column_name")

    # Asserting if column_name exits in dataframe:
    assert column_name in self._df.columns, \
        "Error: Column specified as column_name argument does not exist in dataframe"

    # Check if type_to_delete argument a string datatype:
    self._assert_type_str(type_to_delete, "type_to_delete")

    # Asserting if dataType argument has a valid type:
    assert (type_to_delete in ['integer', 'float', 'string',
                               'null']), \
        "Error: dataType only can be one of the followings options: integer, float, string, null."

    # Function for determine if register value is float or int or string
    # (EAFP: classify each cell by attempting the numeric parses):
    def data_type(value):
        try:  # Try to parse (to int) register value
            int(value)
            # Add 1 if suceed:
            return 'integer'
        except ValueError:
            try:
                # Try to parse (to float) register value
                float(value)
                # Add 1 if suceed:
                return 'float'
            except ValueError:
                # Then, it is a string
                return 'string'
        except TypeError:
            # int(None) raises TypeError, so null cells land here.
            return 'null'

    func = udf(data_type, StringType())
    # Tag each row with its inferred type, keep the rows that do NOT match,
    # then drop the helper column again:
    self._df = self._df.withColumn('types', func(col(column_name))).where((col('types') != type_to_delete)).drop(
        'types')
    self._add_transformation()  # checkpoint in case

    return self
def scale_vec_col(self, columns, name_output_col):
    """Assemble ``columns`` into one vector column and min-max scale it.

    The listed columns are packed into a single vector column and rescaled
    with Spark's default MinMaxScaler (each feature mapped into [0, 1]);
    the original columns are replaced by ``name_output_col``.

    :param columns: list of numeric columns to assemble and scale.
    :param name_output_col: name of the resulting scaled vector column.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(columns, "columns")
    self._assert_cols_in_df(columns_provided=columns, columns_df=self._df.columns)
    self._assert_type_str(name_output_col, "nameOutpuCol")
    # Pack the input columns into one vector column...
    assembler = VectorAssembler(inputCols=columns, outputCol="features_assembler")
    assembled = assembler.transform(self._df)
    # ...then fit a min-max scaler on the assembled vectors.
    scaler = MinMaxScaler(inputCol="features_assembler", outputCol=name_output_col)
    fitted = scaler.fit(assembled)
    keep = [c for c in self._df.columns if c not in columns]
    keep.append(name_output_col)
    self._df = fitted.transform(assembled).select(*keep)
    self._add_transformation()  # checkpoint in case
    return self
def split_str_col(self, column, feature_names, mark):
    """Split a delimited string column into several new columns.

    A column holding values like ``'word,foo'`` is split on ``mark`` and the
    pieces become the columns named in ``feature_names``; the original
    column is replaced.

    :param column: name of the target string column.
    :param feature_names: list of names for the columns created by the split.
    :param mark: separator character, frequently ',' or ';'.
    :return: self, to allow chaining.
    """
    self._assert_type_str(column, "column")
    self._assert_type_str(mark, "mark")
    assert (column in self._df.columns), "Error: column specified does not exist in dataFrame."
    assert (isinstance(feature_names, list)), "Error: feature_names must be a list of strings."
    # "word, foo" ----> ["word", "foo"]
    splitter = udf(lambda value: value.split(mark), ArrayType(StringType()))
    self._df = self._df.withColumn(column, splitter(col(column)))
    self.undo_vec_assembler(column=column, feature_names=feature_names)
    self._add_transformation()  # checkpoint in case
    return self
def remove_empty_rows(self, how="all"):
    """Drop rows containing null values.

    :param how: 'any' drops a row containing at least one null; 'all' (the
           default) drops only rows where every value is null.
    :return: self, to allow chaining.
    """
    assert isinstance(how, str), "Error, how argument provided must be a string."
    assert how in ('all', 'any'), "Error, how only can be 'all' or 'any'."
    self._df = self._df.dropna(how)
    return self
def remove_duplicates(self, cols=None):
    """Drop duplicate rows.

    :param cols: optional list of columns used for the comparison; when None
           (the default) only fully identical rows are dropped.
    :return: self, to allow chaining.
    """
    # BUG FIX: the documented default cols=None previously failed the
    # isinstance(cols, list) assert, making the default behaviour unusable.
    assert cols is None or isinstance(cols, list), "Error, cols argument provided must be a list."
    self._df = self._df.drop_duplicates(cols)
    return self
def write_df_as_json(self, path):
    """Write the dataframe to *path* as a JSON array of row objects.

    :param path: destination file path on the driver's filesystem.
    """
    # BUG FIX: the previous str()+regex rewrite of toJSON() output produced
    # invalid JSON whenever a value contained quotes or braces; parse each
    # row and serialise the list properly instead.
    import json  # local import: module header does not import json
    rows = [json.loads(row) for row in self._df.toJSON().collect()]
    with open(path, 'w') as outfile:
        json.dump(rows, outfile)
def to_csv(self, path_name, header="true", mode="overwrite", sep=",", *args, **kargs):
    """Write the dataframe out as CSV.

    :param path_name: path (and name) of the output CSV file.
    :param header: "true" to include a header row, "false" otherwise.
    :param mode: save-mode when data already exists — "append", "overwrite"
           (default), "ignore" or "error".
    :param sep: single character used as field separator.
    :return: result of the underlying Spark CSV writer.
    """
    self._assert_type_str(path_name, "path_name")
    assert header in ("true", "false"), "Error header must be 'true' or 'false'"
    include_header = header == "true"
    writer = self._df.write.options(header=include_header).mode(mode)
    return writer.csv(path_name, sep=sep, *args, **kargs)
def string_to_index(self, input_cols):
    """Index string label columns into numeric ``<name>_index`` columns.

    Numeric inputs are cast to string first by Spark's StringIndexer.

    :param input_cols: column name or list of names to index.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(input_cols, "input_cols")
    if isinstance(input_cols, str):
        input_cols = [input_cols]
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import StringIndexer
    stages = [StringIndexer(inputCol=name, outputCol=name + "_index").fit(self._df)
              for name in list(set(input_cols))]
    self._df = Pipeline(stages=stages).fit(self._df).transform(self._df)
    return self
def index_to_string(self, input_cols):
    """Map indexed columns back to ``<name>_string`` label columns.

    The index-to-label mapping comes either from the ML attributes of the
    input column or from user-supplied labels (which take precedence).

    :param input_cols: column name or list of names to convert back.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(input_cols, "input_cols")
    if isinstance(input_cols, str):
        input_cols = [input_cols]
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import IndexToString
    stages = [IndexToString(inputCol=name, outputCol=name + "_string")
              for name in list(set(input_cols))]
    self._df = Pipeline(stages=stages).fit(self._df).transform(self._df)
    return self
def one_hot_encoder(self, input_cols):
    """One-hot encode label-index columns into ``<name>_encoded`` vectors.

    Each encoded column is a binary vector with at most a single one-value.

    :param input_cols: column name or list of names to encode.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(input_cols, "input_cols")
    if isinstance(input_cols, str):
        input_cols = [input_cols]
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import OneHotEncoder
    stages = [OneHotEncoder(inputCol=name, outputCol=name + "_encoded")
              for name in list(set(input_cols))]
    self._df = Pipeline(stages=stages).fit(self._df).transform(self._df)
    return self
def sql(self, sql_expression):
    """Apply a SQLTransformer statement to the dataframe.

    Only statements of the form "SELECT ... FROM __THIS__ ..." are
    supported, where "__THIS__" represents the underlying table of the
    input dataframe.

    :param sql_expression: SQL statement to run.
    :return: self, to allow chaining.
    """
    self._assert_type_str(sql_expression, "sql_expression")
    from pyspark.ml.feature import SQLTransformer
    self._df = SQLTransformer(statement=sql_expression).transform(self._df)
    return self
def vector_assembler(self, input_cols):
    """Combine the given columns into a single "features" vector column.

    :param input_cols: column name or list of names to assemble.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(input_cols, "input_cols")
    if isinstance(input_cols, str):
        input_cols = [input_cols]
    from pyspark.ml import Pipeline
    stages = [VectorAssembler(inputCols=input_cols, outputCol="features")]
    self._df = Pipeline(stages=stages).fit(self._df).transform(self._df)
    return self
def normalizer(self, input_cols, p=2.0):
    """p-normalize vector columns into ``<name>_normalized`` columns.

    Each row vector is scaled to unit p-norm.

    :param input_cols: column name or list of names holding vectors.
    :param p: norm order used for normalization (default 2.0).
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(input_cols, "input_cols")
    if isinstance(input_cols, str):
        input_cols = [input_cols]
    assert isinstance(p, (float, int)), "Error: p argument must be a numeric value."
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import Normalizer
    stages = [Normalizer(inputCol=name, outputCol=name + "_normalized", p=p)
              for name in list(set(input_cols))]
    self._df = Pipeline(stages=stages).fit(self._df).transform(self._df)
    return self
def select(self, columns):
    """Project the dataframe onto the named columns.

    :param columns: column name (string) or list of names to keep.
    :return: self, to allow chaining.
    """
    self._assert_type_str_or_list(columns, "columns")
    self._df = self._df.select(columns)
    return self
def select_idx(self, indices):
    """Project the dataframe onto columns chosen by position.

    :param indices: column index (int) or list of indices to keep.
    :return: self, to allow chaining.
    """
    # BUG FIX: the int-normalisation below was unreachable because the
    # assert rejected anything that was not already a list.
    if isinstance(indices, int):
        indices = [indices]
    assert isinstance(indices, list), "Error: indices must a list"
    self._df = self._df.select(*(self._df.columns[i] for i in indices))
    return self

# Alias for select_idx (pandas-style positional selection)
iloc = select_idx
def collect(self):
    """Materialise the dataframe on the driver.

    :return: whatever the underlying DataFrame.collect() call returns
             (a list of rows).
    """
    return self._df.collect()
def addPrimaryKey(self, keyname="id", func=monotonically_increasing_id):
    """Generate an id column for the dataframe.

    :param keyname: name of the id column to add (default "id").
    :param func: zero-argument Spark function producing the ids
           (default monotonically_increasing_id).
    :return: self, to allow chaining.
    """
    # BUG FIX: the original wrote `self._assert_type_str(...), "Error: ..."`,
    # a tuple expression whose message string did nothing; the check is now
    # a plain call.
    self._assert_type_str(keyname, "keyname")
    self._df = self._df.withColumn(keyname, func())
    self._add_transformation()
    return self
def replace_by_id(self, new_value, update_col, id_list, id_col="id"):
    """Overwrite ``update_col`` with ``new_value`` on selected rows.

    Rows are selected by their ``id_col`` value appearing in ``id_list``;
    every other row keeps its current value.

    :param new_value: value written into ``update_col`` for matching rows.
    :param update_col: name of the column being updated.
    :param id_list: list of ids selecting the rows to update.
    :param id_col: name of the id column (default "id").
    :return: self, to allow chaining.
    """
    assert isinstance(id_list, list), "Error: indices must a list"
    wanted = set(id_list)
    col_type = self._df.schema[update_col].dataType
    scratch = "_" + update_col

    def pick(row_id, current):
        # Matching ids take the new value; everything else is untouched.
        return new_value if row_id in wanted else current

    pick_udf = udf(pick, col_type)
    # Write into a scratch column, then swap it in under the original name.
    self._df = self._df.withColumn(scratch, pick_udf(id_col, update_col))
    self._df = self._df.drop(update_col).withColumnRenamed(scratch, update_col)
    self._add_transformation()
    return self
def get_dataframe(self):
    """Return the underlying (cleaned) dataframe."""
    return self._df
def addCount(self, col_name="counts", initial=0):
    """Append a constant counter column to the dataframe.

    :param col_name: name of the new column (default "counts").
    :param initial: numeric value every row starts with (default 0).
    :return: self, to allow chaining.
    """
    self._assert_type_str(col_name, "col_name")
    self._assert_type_int_or_float(initial, "initial")
    self._df = self._df.withColumn(col_name, lit(initial))
    self._add_transformation()
    return self
def replace_sub_df(self, df_to_replace, df, lhs_attrs):
    """Replace a sub-dataframe of self._df with replacement values.

    Rows of self._df matching ``df_to_replace`` on the ``lhs_attrs`` key
    columns are rewritten with the corresponding values from ``df``; all
    other rows are kept untouched. self._df is updated in place; nothing
    is returned.

    :param df_to_replace: dataframe holding the (old) rows to be replaced.
    :param df: dataframe holding the replacement rows (same schema).
    :param lhs_attrs: list of key column names used to locate the rows.
    """
    assert isinstance(df_to_replace, pyspark.sql.dataframe.DataFrame), "Error: the first input must be a dataframe"
    assert isinstance(df, pyspark.sql.dataframe.DataFrame), "Error: the second input must be a dataframe"
    assert df_to_replace.schema.names == df.schema.names, "Error: the two input dataframes don't match on schema"
    to_change = self._df
    # Narrow to_change to the rows whose key columns match df_to_replace.
    # NOTE(review): successive .where calls AND the conditions together, so
    # with more than one distinct key this keeps only rows matching every
    # key — presumably a single distinct key is the intended use; confirm
    # with callers.
    keys = df_to_replace.select(lhs_attrs).distinct().collect()
    for i in range(0, len(keys)):
        for j, c in enumerate(lhs_attrs):
            to_change = to_change.where(col(c) == keys[i][j])
    # Rows that are NOT being rewritten:
    no_change = self._df.subtract(to_change)
    df_to_replace = df_to_replace.collect()
    df = df.collect()
    # Substitute, column by column, the old value with the replacement.
    # NOTE(review): only the first row (index 0) of each collected dataframe
    # is consulted, and the lambda closes over the loop variables late —
    # Spark serialises the function at action time; verify intended behaviour.
    for k in range(0, len(df[0])):
        udf = UserDefinedFunction(lambda x: df[0][k] if x == df_to_replace[0][k] else x)
        col_replace = df[0].__fields__[k]
        to_change = to_change.select(*[udf(column).alias(col_replace) if column == col_replace \
                                           else column for column in to_change.columns])
    self._add_transformation()
    self._df = to_change.union(no_change)
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,408 | NYUBigDataProject/SparkClean | refs/heads/master | /test/__init__.py | from test.load import loadDemo | {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,409 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/utilities.py | """
Some codes are copied From Optimus/utilities.py
"""
# -*- coding: utf-8 -*-
# Importing os module for system operative utilities
import os
# Importing SparkSession:
from pyspark.sql.session import SparkSession
# Importing module to delete folders
from shutil import rmtree
# Importing module to work with urls
import urllib.request
# Importing module for regex
import re
# Importing SparkContext
import pyspark
# URL reading
import tempfile
from urllib.request import Request, urlopen
class Utilities:
def __init__(self):
    """Create (or reuse) a Hive-enabled SparkSession and keep handles on it."""
    # Session shared by every helper on this class:
    self.spark = SparkSession.builder.enableHiveSupport().getOrCreate()
    # Underlying SparkContext (used e.g. for checkpoint configuration):
    self.__sc = self.spark.sparkContext
    # Last URL loaded through read_url():
    self.url = ""
def create_data_frame(self, data, names):
    """Build a Spark dataframe from a list of tuples, inferring column types.

    :param data: list of tuples holding the rows.
    :param names: list of column-name strings.
    :return: Spark dataframe.
    """
    assert isinstance(data, list) and isinstance(data[0], tuple), \
        "data should be a list of tuples"
    assert isinstance(names, list) and isinstance(names[0], str), \
        "names should be a list of strings"
    return self.spark.createDataFrame(data, names)
def read_csv(self, path, sep=',', header='true', infer_schema='true', *args, **kargs):
    """Read a dataset from a CSV file.

    :param path: path or location of the file.
    :param sep: field delimiter, usually ',' or ';'.
    :param header: 'true' when the dataset has a header row (default 'true').
    :param infer_schema: 'true' to infer the schema from the data — this
           costs one extra pass over the file (default 'true').
    :return: Spark dataframe.
    """
    assert ((header == 'true') or (header == 'false')), "Error, header argument must be 'true' or 'false'. " \
                                                        "header must be string dataType, i.e: header='true'"
    assert isinstance(sep, str), "Error, delimiter mark argumen must be string dataType."
    assert isinstance(path, str), "Error, path argument must be string datatype."
    reader = self.spark.read \
        .options(header=header) \
        .options(delimiter=sep) \
        .options(inferSchema=infer_schema)
    return reader.csv(path, *args, **kargs)
def read_url(self, path=None, ty="csv"):
    """Read a dataset from a URL.

    :param path: URL string to read (http://, https:// or file://).
    :param ty: backend type of the URL, 'csv' (default) or 'json'.
    :return: pyspark dataframe loaded from the URL, or None when the path
             is not recognised as a URL.
    """
    if "https://" in str(path) or "http://" in str(path) or "file://" in str(path):
        # BUG FIX: the original compared with `ty is 'json'`, an identity
        # check on a string literal that only works by CPython interning;
        # use equality instead.
        if ty == 'json':
            self.url = str(path)
            return self.json_load_spark_data_frame_from_url(str(path))
        else:
            return self.load_spark_data_frame_from_url(str(path))
    else:
        print("Unknown sample data identifier. Please choose an id from the list below")
def json_data_loader(self, path):
    """Load a local JSON file into a Spark dataframe.

    :param path: path of the JSON file on the driver's filesystem.
    :return: Spark dataframe parsed from the file contents.
    """
    # FIX: close the file handle (the original leaked an open file object).
    with open(path, 'r') as handle:
        res = handle.read()
    print("Loading file using a pyspark dataframe for spark 2")
    data_rdd = self.__sc.parallelize([res])
    return self.spark.read.json(data_rdd)
def data_loader(self, path):
    """Load a CSV file through a freshly obtained SparkSession.

    :param path: location of the CSV file.
    :return: Spark dataframe loaded with header handling, DROPMALFORMED
             mode and schema inference enabled.
    """
    print("Loading file using 'SparkSession'")
    reader = self.spark.builder.getOrCreate() \
        .read \
        .format("csv") \
        .options(header=True) \
        .options(mode="DROPMALFORMED")
    return reader.option("inferSchema", "true").load(path)
def load_spark_data_frame_from_url(self, data_url):
    """Download a CSV dataset from a URL and load it as a dataframe.

    :param data_url: URL of the dataset; its last path segment becomes the
           display name.
    :return: Spark dataframe produced by the CSV data loader.
    """
    display_name = data_url[data_url.rfind('/') + 1:]
    data_def = {
        "displayName": display_name,
        "url": data_url
    }
    return Downloader(data_def).download(self.data_loader)
def json_load_spark_data_frame_from_url(self, data_url):
    """Download a JSON dataset from a URL and load it as a dataframe.

    :param data_url: URL of the dataset; its last path segment becomes the
           display name.
    :return: Spark dataframe produced by the JSON data loader.
    """
    display_name = data_url[data_url.rfind('/') + 1:]
    data_def = {
        "displayName": display_name,
        "url": data_url
    }
    return Downloader(data_def).download(self.json_data_loader)
def read_dataset_parquet(self, path):
    """Read a parquet dataset (thin wrapper over spark.read.parquet).

    :param path: file location; must start with 'file:///' (local disk) or
           'hdfs:///' (Hadoop file system).
    :return: Spark dataframe.
    """
    assert isinstance(path, str), "Error: path argument must be string dataType."
    assert path.startswith(("file:///", "hdfs:///")), "Error: path must be with a 'file://' prefix \
        if the file is in the local disk or a 'path://' prefix if the file is in the Hadood file system"
    return self.spark.read.parquet(path)
def csv_to_parquet(self, input_path, output_path, sep_csv, header_csv, num_partitions=None):
    """Convert a CSV dataset file into a parquet dataset.

    :param input_path: location of the source CSV file.
    :param output_path: destination where the parquet output is stored.
    :param sep_csv: CSV delimiter, usually ',' or ';'.
    :param header_csv: whether the CSV file has a header ('true'/'false').
    :param num_partitions: optional number of partitions to write.
    """
    df = self.read_csv(input_path, sep_csv, header=header_csv)
    if num_partitions is None:
        df.write.parquet(output_path)
    else:
        # Coalescing can only reduce the partition count, never raise it:
        assert (num_partitions <= df.rdd.getNumPartitions()), "Error: num_partitions specified is greater that the" \
                                                              "partitions in file store in memory."
        df.coalesce(num_partitions).write.parquet(output_path)
def set_check_point_folder(self, path, file_system):
    """Create (or recreate) the checkpoint folder used by Spark.

    The folder stores temporary dataframes written when the user calls
    ``DataFrameTransformer.checkPoint()``; the Spark context is pointed at it
    via ``setCheckpointDir``. Users are advised to delete the folder (see
    ``delete_check_point_folder``) once all transformations are completed and
    the final dataframe has been saved.

    :param path: workspace location under which 'checkPointFolder' is created.
    :param file_system: either 'local' or 'hadoop'.
    """
    assert isinstance(file_system, str), \
        "Error: file_system argument must be a string."
    assert (file_system == "hadoop") or (file_system == "local"), \
        "Error, file_system argument only can be 'local' or 'hadoop'"
    folder_path = path + "/" + "checkPointFolder"
    if file_system == "hadoop":
        # Remove any previous checkpoint folder, then create a fresh one in HDFS
        # by shelling out to the hadoop CLI.
        self.delete_check_point_folder(path=path, file_system=file_system)
        print("Creation of hadoop folder...")
        command = "hadoop fs -mkdir " + folder_path
        print("$" + command)
        os.system(command)
        print("Hadoop folder created. \n")
        print("Setting created folder as checkpoint folder...")
        self.__sc.setCheckpointDir(folder_path)
        print("Done.")
    else:
        # Local file system: drop a stale folder if present, then recreate it.
        # (The previous version duplicated the creation code in both branches
        # of the isdir check.)
        print("Deleting previous folder if exists...")
        if os.path.isdir(folder_path):
            rmtree(folder_path)
        print("Creation of checkpoint directory...")
        os.mkdir(folder_path)
        print("Done.")
        self.__sc.setCheckpointDir(dirName="file:///" + folder_path)
@classmethod
def delete_check_point_folder(cls, path, file_system):
    """Delete the temporary checkpoint folder where temp files were stored.

    :param path: the same workspace path that was provided to
        ``set_check_point_folder``.
    :param file_system: either 'local' or 'hadoop'.
    """
    assert isinstance(file_system, str), "Error: file_system argument must be a string."
    assert (file_system == "hadoop") or (file_system == "local"), \
        "Error, file_system argument only can be 'local' or 'hadoop'"
    folder_path = path + "/" + "checkPointFolder"
    print("Deleting checkpoint folder...")
    if file_system == "hadoop":
        command = "hadoop fs -rm -r " + folder_path
        os.system(command)
        print("$" + command)
    elif os.path.isdir(folder_path):
        # Only the local branch can check existence cheaply before removing.
        rmtree(folder_path)
    # The confirmation is printed even when the folder did not exist, matching
    # the original best-effort behaviour (its dead trailing `pass` is removed).
    print("Folder deleted. \n")
@classmethod
def get_column_names_by_type(cls, df, data_type):
    """Return the names of the dataFrame columns whose type matches *data_type*.

    Column types are inspected through the ``df.dtypes`` method.

    :param df: dataFrame to analyse.
    :param data_type: one of 'string', 'integer', 'float', 'date', 'double'.
    :return: list of column names of the type specified.
    """
    assert data_type in ['string', 'integer', 'float', 'date', 'double'], \
        "Error, data_type only can be one of the following values: 'string', 'integer', 'float', 'date', 'double'"
    # Map user-facing type names onto the names Spark reports in df.dtypes.
    # 'date' was previously missing here, so an allowed argument raised KeyError.
    dicc = {'string': 'string', 'integer': 'int', 'float': 'float', 'double': 'double', 'date': 'date'}
    return [name for name, dtype in df.dtypes if dtype == dicc[data_type]]
class Downloader(object):
    """Fetch a (possibly remote) resource and hand it to a DataFrame loader.

    ``data_def`` is a dict with a ``displayName`` plus either a local ``path``
    or a ``url`` to download; downloads are cached back into
    ``data_def["path"]`` so repeated calls skip the network.
    """

    def __init__(self, data_def):
        self.data_def = data_def
        self.headers = {"User-Agent": "PixieDust Sample Data Downloader/1.0"}

    def download(self, data_loader):
        """Materialise the resource locally and pass its path to *data_loader*.

        :param data_loader: callable taking a local file path and returning a
            DataFrame.
        :return: whatever *data_loader* returns, or None when no path could be
            determined.
        """
        display_name = self.data_def["displayName"]
        bytes_downloaded = 0
        if "path" in self.data_def:
            path = self.data_def["path"]
        else:
            url = self.data_def["url"]
            req = Request(url, None, self.headers)
            print("Downloading '{0}' from {1}".format(display_name, url))
            with tempfile.NamedTemporaryFile(delete=False) as f:
                bytes_downloaded = self.write(urlopen(req), f)
            # Cache the temp-file path so later calls reuse the download.
            self.data_def["path"] = path = f.name
        if path:
            if bytes_downloaded > 0:
                print("Downloaded {} bytes".format(bytes_downloaded))
            print("Creating {1} DataFrame for '{0}'. Please wait...".format(display_name, 'pySpark'))
            # Report success only after the loader actually succeeded; the
            # previous try/finally printed the success line even on failure.
            result = data_loader(path)
            print("Successfully created {1} DataFrame for '{0}'".format(display_name, 'pySpark'))
            return result

    @staticmethod
    def write(response, file, chunk_size=8192):
        """Stream *response* into *file* in chunks and return the byte count.

        Falls back to a nominal total of 100 when the server sends no
        Content-Length header.
        """
        total_size = response.headers['Content-Length'].strip() if 'Content-Length' in response.headers else 100
        total_size = int(total_size)
        bytes_so_far = 0
        while 1:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)
            if not chunk:
                break
            file.write(chunk)
            # Grow the (possibly wrong) advertised total as we pass it.
            total_size = bytes_so_far if bytes_so_far > total_size else total_size
        return bytes_so_far
class Airtable:
    """Load and query the 'airtable' csv describing a dataset's columns.

    The airtable maps original column names ('Feature Name') to new code names
    ('Code Name') and records each column's 'DataType'.
    """

    def __init__(self, path):
        # Airtable dataframe, filled in by read_air_table_csv below.
        self._air_table = None
        # Fix: the original called SparkSession() directly, which fails because
        # SparkSession.__init__ requires a SparkContext; use the builder as the
        # rest of the project does.
        self.spark = SparkSession.builder.enableHiveSupport().getOrCreate()
        self.sc = self.spark.sparkContext
        # Read the dataset immediately.
        self.read_air_table_csv(path)

    def read_air_table_csv(self, path):
        """Read the airtable (csv) the user fills in when the original dataframe is analyzed.

        *path* may be a download link or a local directory path. Local paths
        need the corresponding prefix, i.e. ``file://...`` or ``hdfs://...``.
        """
        if re.match("https", path):
            print("Reading file from web...")
            # Request the url:
            response = urllib.request.urlopen(path)
            # Read obtained data:
            data = response.read()  # a `bytes` object
            # Decode it:
            text = data.decode('utf-8')
            # Split rows on newlines and fields on ',', dropping quote marks.
            values = [line.replace("\"", "").split(",") for line in text.split("\n")]
            # First row is the header; the rest are data rows.
            self._air_table = self.sc.parallelize(values[1:]).toDF(values[0])
            print("Done")
        else:
            print("Reading local file...")
            self._air_table = self.spark.read \
                .format('csv') \
                .options(header='true') \
                .options(delimiter=",") \
                .options(inferSchema='true') \
                .load(path)
            print("Done")

    def set_air_table_df(self, df):
        """Set a dataframe into the class for subsequent actions."""
        assert isinstance(df, pyspark.sql.dataframe.DataFrame), "Error: df argument must a sql.dataframe type"
        self._air_table = df

    def get_air_table_df(self):
        """Return the airtable dataframe held by the class."""
        return self._air_table

    @classmethod
    def extract_col_from_df(cls, df, column):
        """Extract one column from *df* as a plain Python list.

        :param df: input dataframe.
        :param column: column to extract from the dataframe provided.
        :return: list of the elements of the extracted column.
        """
        return [x[0] for x in df.select(column).collect()]

    def read_col_names(self):
        """Return [(oldNameCol1, newNameCol1), (oldNameCol2, newNameCol2), ...].

        Old names come from the airtable column 'Feature Name'; new names come
        from 'Code Name'.
        """
        return list(zip(self.extract_col_from_df(self._air_table, 'Feature Name'),
                        self.extract_col_from_df(self._air_table, 'Code Name')))

    def read_new_col_names(self):
        """Return the new column names (airtable column 'Code Name')."""
        return self.extract_col_from_df(self._air_table, 'Code Name')

    def read_old_col_names(self):
        """Return the old column names (airtable column 'Feature Name')."""
        return self.extract_col_from_df(self._air_table, 'Feature Name')

    def read_col_types(self, names='newNames'):
        """Return column names paired with the data types recorded in the airtable.

        :param names: 'newNames' to pair types with the 'Code Name' column,
            'oldNames' to pair them with 'Feature Name'.
        :return: list like [(colName1, 'string'), (colName2, 'integer'), ...].
        """
        # The old message concatenated its pieces without spaces; fixed here.
        assert (names == 'newNames') or (names == 'oldNames'), \
            "Error, names argument only can be the following values: 'newNames' or 'oldNames'"
        col_names = {'newNames': 'Code Name', 'oldNames': 'Feature Name'}
        return list(zip(self.extract_col_from_df(self._air_table, col_names[names]),
                        self.extract_col_from_df(self._air_table, 'DataType')))
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,410 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/df_ic.py | from pyspark.sql.functions import countDistinct, array, lit, col, min, max, UserDefinedFunction
from pyspark.sql.session import SparkSession
import json
from tqdm import tqdm
class ICViolationCorrector:
    """Detect and repair integrity-constraint (functional-dependency) violations
    in the dataframe held by a DataFrameTransformer.
    """
    def __init__(self, transformer):
        self.spark = SparkSession.builder.enableHiveSupport().getOrCreate()
        self.transformer = transformer
        # Tag every row with a synthetic primary key so violations can be
        # traced back to concrete rows.
        self.transformer.addPrimaryKey("t_identifier")
        self.df = transformer._df
        self.rules = {}                 # rule type -> list of raw rule strings
        self.violation = {}             # rule index -> dataframe of violating LHS keys
        self.violation_counts = {}      # rule index -> per-value violation counts
        self.lhs_attrs = list()         # per rule: left-hand-side attribute lists
        self.rhs_attrs = list()         # per rule: right-hand-side attribute lists
        self.attrs = list()             # per rule: t_identifier + lhs + rhs
        self.vio_dict = {}              # row id -> list of violated rule indices
        self.value_min_changes = None
        self.value_max_changes = None
    def parse_ic(self, path):
        """Parse the JSON rule file at *path* into lhs/rhs attribute lists.

        Only 'fd' (functional dependency) rules are materialised; each rule
        value has the form "lhs1,lhs2|rhs1,rhs2".
        """
        # Restrict to rules that are defined in only one table
        ic_dict = json.load(open(path))
        # Check input format
        assert(len(ic_dict.keys()) == 1 and list(ic_dict.keys())[0] == 'rules') \
            ,'Invalid ic input'
        self.number = len(ic_dict['rules'])
        try:
            for i in range(0, self.number):
                # NOTE: `type` shadows the builtin here.
                type = ic_dict['rules'][i]['type']
                value = ic_dict['rules'][i]['value'][0]
                if type not in self.rules:
                    self.rules[type] = [value]
                else:
                    self.rules[type].append(value)
        except:
            # NOTE(review): bare except silently swallows any malformed-rule
            # error (including KeyError/IndexError); consider narrowing.
            print("Parse error?!")
        for type, valueList in self.rules.items():
            if type == 'fd':
                for value in valueList:
                    # Split "lhs|rhs" and strip whitespace around each attribute.
                    LHS, RHS = value.split('|')
                    lhs_attrs = LHS.split(',')
                    for index, attr in enumerate(lhs_attrs):
                        lhs_attrs[index] = attr.strip()
                    rhs_attrs = RHS.split(',')
                    for index, attr in enumerate(rhs_attrs):
                        rhs_attrs[index] = attr.strip()
                    attrs = ['t_identifier'] + lhs_attrs + rhs_attrs
                    # De-duplicate rule components while preserving order.
                    if lhs_attrs not in self.lhs_attrs:
                        self.lhs_attrs.append(lhs_attrs)
                    if rhs_attrs not in self.rhs_attrs:
                        self.rhs_attrs.append(rhs_attrs)
                    if attrs not in self.attrs:
                        self.attrs.append(attrs)
        return self
    def check_violations(self):
        """Recompute, for every parsed FD rule, which rows violate it."""
        # refresh df, in case it has been changed
        self.df = self.transformer._df
        num_index = len(self.lhs_attrs)
        self.vio_dict = {}
        for i in range(0, num_index):
            if 't_identifier' not in self.attrs[i]:
                self.attrs[i].append('t_identifier')
            # An FD is violated when one LHS key maps to >1 distinct RHS tuple.
            groupedData = self.df.select(self.attrs[i]).groupBy(self.lhs_attrs[i])
            tmp = groupedData.agg(countDistinct(array(self.rhs_attrs[i])).alias('num'))
            self.violation[i] = tmp.where(tmp.num > 1)
            violation_rows = self.df.join(self.violation[i], self.lhs_attrs[i],
                                          'inner').select(self.attrs[i]).withColumn('redundant', lit(0))
            identifier_collection = violation_rows.select('t_identifier').collect()
            # Record which rules each individual row violates.
            for k in identifier_collection:
                x = k['t_identifier']
                if x in self.vio_dict:
                    self.vio_dict[x].append(i)
                else:
                    self.vio_dict[x] = [i]
            if 't_identifier' in self.attrs[i]:
                self.attrs[i].remove('t_identifier')
            self.violation_counts[i] = violation_rows.drop('t_identifier').groupBy(self.attrs[i]).count().orderBy(self.lhs_attrs[i])
        return self
    def display_violation_rows(self):
        """Print every rule's violating rows together with their counts."""
        num_index = len(self.lhs_attrs)
        for i in range(0, num_index):
            print(self.lhs_attrs[i],' | ', self.rhs_attrs[i])
            self.violation_counts[i].show(self.violation_counts[i].count())
    # def roll_back(self):
    #     self.transformer.replace_sub_df(self.value_max_changes, self.value_min_changes)
    def correct_violations(self, fix_rule):
        """Repair violations using *fix_rule*: 'single_fd_greedy' or 'holistic'."""
        if fix_rule == 'single_fd_greedy':
            # Greedy per-rule repair: for each violating LHS key, rewrite the
            # minority RHS value(s) to the majority one.
            num_index = len(self.lhs_attrs)
            for index in range(0, num_index):
                keys = self.violation_counts[index].select(self.lhs_attrs[index]).distinct().collect()
                for i in range(0, len(keys)):
                    rhs_values = self.violation_counts[index]
                    for j, c in enumerate(self.lhs_attrs[index]):
                        rhs_values = rhs_values.where(col(c) == keys[i][j])
                    # NOTE: min/max here are the pyspark column functions
                    # imported at module level, not the builtins.
                    min_changes = rhs_values.select(min('count')).collect()[0][0]
                    max_changes = rhs_values.select(max('count')).collect()[0][0]
                    if min_changes == max_changes:
                        # A tie means there is no majority value to keep.
                        print('This violation cannot be fixed by single_fd_greedy rule')
                        rhs_values.show()
                        continue
                    # Values more likely to be flawed
                    self.value_min_changes = rhs_values.where(col('count') == min_changes). \
                        drop(col('count'))
                    # Values less likely to be flawed
                    self.value_max_changes = rhs_values.where(col('count') == max_changes). \
                        drop(col('count'))
                    print("Modify:")
                    self.value_min_changes.show(self.value_min_changes.count())
                    print("To:")
                    self.value_max_changes.show(self.value_max_changes.count())
                    self.transformer.replace_sub_df(self.value_min_changes,self.value_max_changes, self.lhs_attrs[index])
        if fix_rule == 'holistic':
            # Holistic repair: only rows violating >1 rule are considered, and
            # only the RHS fields shared between two violated rules are fixed.
            fields = []
            # print(self.vio_dict)
            for id in tqdm(self.vio_dict):
                # print(id)
                vio_fds = self.vio_dict[id]
                if len(vio_fds) > 1:
                    len_vio_fds = len(vio_fds)
                    set_val = []
                    for i in range(0, len_vio_fds-1):
                        for j in range(i+1, len_vio_fds):
                            base = set(self.rhs_attrs[vio_fds[i]])
                            # Find intersected fields of rules
                            intersection = list(base.intersection(set(self.rhs_attrs[vio_fds[j]])))
                            if len(intersection) > 0:
                                row = self.df.where(col('t_identifier') == id)
                                rhs_values = row.select(intersection).collect()
                                lhs_values = row.select(self.lhs_attrs[vio_fds[i]]).collect()
                                # NOTE(review): set_val is overwritten on every
                                # pass of this double loop, so only the last
                                # LHS attribute's candidates survive — verify
                                # this is intended.
                                for k in range(0,len(lhs_values)):
                                    for r, c in enumerate(self.lhs_attrs[vio_fds[i]]):
                                        to_select = intersection + ['count']
                                        set_val = self.violation_counts[vio_fds[i]].where(col(c) == lhs_values[0][k]).select(array(to_select)).collect()
                                # Pick the candidate with the highest count as
                                # the suggested (most trusted) value.
                                count = 0
                                less_likely = None
                                item_count = 0
                                for k,item in enumerate(set_val):
                                    item_count = int(item[0][len(intersection)])
                                    # print(item_count)
                                    if item_count > count:
                                        count = item_count
                                        less_likely = k
                                if less_likely == None:
                                    continue
                                to_compare = set_val[less_likely][0][:-1]
                                if [rhs_values[0][0]] == to_compare:
                                    continue
                                else:
                                    print(set_val)
                                    print("Field", ','.join(intersection), "with value", ','.join(rhs_values[0]),
                                          "of row {0:d} may be wrong".format(id))
                                    print("Suggested values are:", ','.join(to_compare))
                                    for n,field in enumerate(intersection):
                                        self.transformer.replace_by_id(to_compare[n], field, [id], 't_identifier')
                                    fields = fields + intersection
            if len(fields) == 0:
                print('holistic data repairing might not be a good choice')
        return self
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,411 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/df_deduplicator.py | # Importing sql types
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, ArrayType, StructType, StructField
# Importing sql functions
from pyspark.sql.functions import col, udf, trim, lit, format_number, months_between, date_format, unix_timestamp, \
current_date, abs as mag
import pyspark.sql.dataframe
import textdistance
from sparkclean import DataFrameTransformer
import re, string
from unidecode import unidecode
from collections import Counter
from collections import defaultdict
class DataFrameDeduplicator:
    """DataFrameDeduplicater is a class to deduplicate in dataFrames
    It will generate "id" to identify every Row, so if a DataFrame already include "id" column,
    remove it before using this class
    """
    def __init__(self, df):
        # Wrap the dataframe in a transformer and tag every row with an "id"
        # primary key so clusters can be traced back to concrete rows.
        self._tf = DataFrameTransformer(df)
        self._tf.addPrimaryKey()
    def keyCollisionClustering(self, colName):
        """colName: the column to be clustered"""
        self._tf._assert_cols_in_df(columns_provided=[colName], columns_df=self._tf._df.columns)
        rdd = self._tf._df.select(["id", colName]).rdd.map(list)
        def getFingerPrint(s):
            # Normalise a value into a "fingerprint": strip punctuation,
            # lowercase, drop repeated characters (order preserved), latinize.
            s = str(s)
            PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))
            # preprocess
            preprocessed = PUNCTUATION.sub('', s.strip().lower())
            # unique_preserving_order
            seen = set()
            seen_add = seen.add
            unique = [x for x in preprocessed if not (x in seen or seen_add(x))]
            # latinize
            latinized = unidecode(''.join(unique))
            return latinized
        def fingerPrintMapper(x):
            _id, _col = x
            _s = str(_col)
            fingerPrint = getFingerPrint(_s)
            return (fingerPrint, [_id, _col])
        def previewMapper(l):
            # NOTE(review): this local name shadows fingerPrintMapper defined
            # above; it actually holds the fingerprint key of the cluster.
            fingerPrintMapper, raws = l
            words = list(map(lambda x:x[1], raws))
            ids = list(map(lambda x:x[0], raws))
            d = Counter(words)
            # (most common spelling, its count, cluster size)
            info = (d.most_common(1)[0][0],d.most_common(1)[0][1],len(raws))
            return (fingerPrintMapper, info, d, ids)
        # Keep only fingerprints shared by >1 row and with >1 distinct spelling.
        clusters = rdd.map(fingerPrintMapper).groupByKey().mapValues(list).filter(lambda x:len(x[1])>1)
        objects = clusters.map(previewMapper).filter(lambda x:len(x[2].keys())>1)
        return colName, objects
    def preview(self, colName, objects, num=-1):
        """Preview the obejects pending to be changed
        objects: "clusters" by fingerprint, returned from keyCollisionClustering
        num: the number of objects you want to preview.
        """
        if num > 0:
            samples = objects.take(num)
        else:
            samples = objects.collect()
        for i,obj in enumerate(samples):
            fingerPrintMapper, info, d, ids = obj
            print("------------ Cluster %d -------------------" % i)
            for key,count in d.most_common():
                print("colName: %s| Item: %s, Count:%d" %(colName,key,count))
            print("colName: %s|"%colName," Will be changed to \"%s\", takes %d/%d" % info)
            print("")
    def resolve(self, colName, objects, optimized = False):
        """Resolve the changes
        colName: the column to apply this change
        objects: "clusters" by fingerprint, returned from keyCollisionClustering
        """
        objList = objects.collect()
        if not optimized:
            # One lookup() call per cluster: replace every spelling in the
            # cluster by the most common one.
            def applyToTransformer(x):
                fingerPrintMapper, info, d, ids = x
                str_to_replace = info[0]
                list_str = list(d.keys())
                self._tf.lookup(colName, str_to_replace, list_str)
            for i,obj in enumerate(objList):
                if i % 100 == 0:
                    print("Resolving cluster %d/%d" %(i,len(objList)))
                applyToTransformer(obj)
        else:
            def merge(objList):
                # NOTE(review): this maps change_to -> s and overwrites the
                # entry on every inner iteration, so only the last source
                # spelling per cluster survives; confirm against
                # DataFrameTransformer.lookup's expected dict shape.
                str_to_replace = dict()
                for x in objList:
                    fingerPrintMapper, info, d, ids = x
                    change_to = info[0]
                    list_str = list(d.keys())
                    for s in list_str:
                        str_to_replace[change_to] = s
                return str_to_replace
            str_to_replace = merge(objList)
            self._tf.lookup(colName, str_to_replace, None)
        # info[2] (cluster size) summed over all clusters.
        totalRowsAffected = objects.map(lambda x:x[1][2]).reduce(lambda x,y:x+y)
        print("Total rows affected: %d rows" % totalRowsAffected)
    def localitySensitiveHashing(self, colName, blockSize=6, method = "levenshtein", threshold = 0.81):
        """
        colName: the column to be clustered
        blockSize: size of blocking
        method: methods to calculate the similarity
        threshold: controls how similar two items should be thought to be the same
        """
        self._tf._assert_cols_in_df(columns_provided=[colName], columns_df=self._tf._df.columns)
        rdd = self._tf._df.select(["id", colName]).rdd.map(list)
        # Supported similarity measures, all normalised to [0, 1].
        methodDict = {
            # Edit Based
            "hamming": textdistance.hamming.normalized_similarity,
            "mlipns": textdistance.mlipns.normalized_similarity,
            "levenshtein": textdistance.levenshtein.normalized_similarity,
            # Token based
            "jaccard": textdistance.jaccard.normalized_similarity,
            "overlap": textdistance.overlap.normalized_similarity,
            "cosine": textdistance.cosine.normalized_similarity,
            # Sequence based
            "lcsseq": textdistance.lcsseq.normalized_similarity,
            "lcsstr": textdistance.lcsstr.normalized_similarity,
            # Phonetic based
            "mra": textdistance.mra.normalized_similarity,
        }
        try:
            sim = methodDict[method]
        except:
            # NOTE(review): this warning contains a %s placeholder but is
            # printed without format arguments, so the literal "%s" is shown.
            print("Waring: %s is not a valid method\n, changes into levenshtein by default.")
            sim = textdistance.levenshtein.normalized_similarity
        def getFingerPrint(s):
            PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))
            # preprocess
            preprocessed = PUNCTUATION.sub('', s.strip().lower())
            # unique_preserving_order
            seen = set()
            seen_add = seen.add
            unique = [x for x in preprocessed if not (x in seen or seen_add(x))]
            # latinize
            latinized = unidecode(''.join(unique))
            return latinized
        def fingerPrintMapper(x):
            _id, _col = x
            _s = str(_col)
            fingerPrint = getFingerPrint(_s)
            return (fingerPrint, [_id, _col])
        def LSHflatMapper(x):
            # Emit fixed-size substrings ("blocks") of the fingerprint so that
            # similar strings share at least one bucket.
            fingerPrint, l = x
            n = len(fingerPrint)
            res = []
            if n > blockSize:
                for i in range(0,n,blockSize):
                    if i+blockSize>n:break
                    res.append((fingerPrint[i:i+blockSize],l))
            else:
                res.append((fingerPrint,l))
            return res
        def previewMapper(l):
            fingerPrintMapper, raws = l
            words = list(map(lambda x:x[1], raws))
            ids = list(map(lambda x:x[0], raws))
            d = Counter(words)
            info = (d.most_common(1)[0][0],d.most_common(1)[0][1],len(raws))
            return (fingerPrintMapper, info, d, ids)
        def thresholdFlatMapper(x):
            # Keep only spelling pairs whose similarity beats the threshold.
            fingerPrintMapper, info, d, ids = x
            keys = list(d.keys())
            res = []
            n = len(keys)
            for i in range(n):
                for j in range(i+1,n):
                    if sim(str(keys[i]),str(keys[j]))>threshold:
                        newCounter = Counter()
                        newCounter[keys[i]] = d[keys[i]]
                        newCounter[keys[j]] = d[keys[j]]
                        cand, freq = newCounter.most_common(1)[0]
                        newInfo = (cand, freq, sum(newCounter.values()))
                        res.append((fingerPrintMapper, newInfo, newCounter, ids))
            return res
        clusters = rdd.map(fingerPrintMapper).flatMap(LSHflatMapper).groupByKey().mapValues(list).filter(lambda x:len(x[1])>1)
        objects = clusters.map(previewMapper).flatMap(thresholdFlatMapper).filter(lambda x:len(x[2].keys())>1)
        return colName, objects
    def buildPairs(self,colNames):
        """
        :return a dataframe of pairs for compairing similarity
        Example.
        df: city| country|population
        =>
        res: city|country|population|id|_city|_country|_population|_id
        """
        self._tf._assert_cols_in_df(columns_provided=colNames, columns_df=self._tf._df.columns)
        tf = self._tf
        schema = tf._df.schema
        tf_copy = DataFrameTransformer(tf._df)
        tf_copy.rename_col(self.colNameMapper(schema))
        # id < _id keeps each unordered pair exactly once and drops self-pairs.
        res = tf._df.join(tf_copy._df, tf._df.id < tf_copy._df._id)
        colNames += ["id"]
        pick = colNames + list(map(lambda x:"_"+x, colNames))
        return res.select(pick)
    def colNameMapper(self, schema):
        """
        :return [(oldColumnName, newColumnName)] from dataframe schema for rename_cols in df_transformer
        """
        return list(map(lambda x:(x.name, "_"+x.name), schema))
    def recordMatching(self, matchColNames, fixColNames):
        """
        matchColNames: colNames used for keyCollision clustering
        fixColNames: colNames we try to fix
        """
        self._tf._assert_cols_in_df(columns_provided=matchColNames, columns_df=self._tf._df.columns)
        self._tf._assert_cols_in_df(columns_provided=fixColNames, columns_df=self._tf._df.columns)
        colNames = list(set(matchColNames + fixColNames))
        # Column name -> position in the selected row (index 0 is "id").
        colNameIndex = dict(zip(colNames,range(1,1+len(colNames))))
        rdd = self._tf._df.select(["id"]+colNames).rdd.map(list)
        def getFingerPrint(s):
            s = str(s)
            PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))
            # preprocess
            preprocessed = PUNCTUATION.sub('', s.strip().lower())
            # unique_preserving_order
            seen = set()
            seen_add = seen.add
            unique = [x for x in preprocessed if not (x in seen or seen_add(x))]
            # latinize
            latinized = unidecode(''.join(unique))
            return latinized
        def multiFingerPrinterMapper(x):
            # Key each row by the tuple of fingerprints of the match columns.
            _id = x[0]
            multiFingerPrinter = []
            for s in matchColNames:
                index = colNameIndex[s]
                multiFingerPrinter.append(getFingerPrint(x[index]))
            multiFingerPrinter = tuple(multiFingerPrinter)
            return (multiFingerPrinter, x)
        def previewMapper(l):
            multiFingerPrinter, raws = l
            ids = list(map(lambda x:x[0], raws))
            fixs = dict()
            for s in fixColNames:
                index = colNameIndex[s]
                words = list(map(lambda x:x[index], raws))
                d = Counter(words)
                info = (d.most_common(1)[0][0],d.most_common(1)[0][1],len(raws),d)
                # Only record a fix when not all values already agree.
                if info[1]!=info[2]:
                    fixs[s] = info
            return (multiFingerPrinter, fixs, ids)
        clusters = rdd.map(multiFingerPrinterMapper).groupByKey().mapValues(list).filter(lambda x:len(x[1])>1)
        objects = clusters.map(previewMapper).filter(lambda x:len(x[1].keys())>0)
        return fixColNames,objects
    def previewReords(self, fixColNames, objects, num=-1):
        """Preview the obejects pending to be changed
        fixColNames: the columns to be fixed, actually not needed in this function, but just want to be symetry with one col case
        objects: "clusters" by multi fingerprint, returned from recordMatching
        num: the number of objects you want to preview.
        """
        if num > 0:
            samples = objects.take(num)
        else:
            samples = objects.collect()
        for i in range(len(samples)):
            multiFingerPrinter, fixs, ids = samples[i]
            print("------------ Cluster %d -------------------" % i)
            print("Record id:",ids)
            for col in fixs.keys():
                Item, Count, Total, d = fixs[col]
                for key,count in d.most_common():
                    print("colName: %s| Item: %s, Count:%d" %(col,key,count))
                print("colName: %s| Change: Will be changed to \"%s\", takes %d/%d" % (col, Item, Count, Total))
            print("")
    def resolveRecords(self, fixColNames, objects):
        """
        fixColNames, objects returned by recordMatching
        """
        totalRowsAffected = 0
        samples = objects.collect()
        for i in range(len(samples)):
            if i % 100 == 0:
                print("Resolving cluster %d/%d" %(i,len(samples)))
            multiFingerPrinter, fixs, ids = samples[i]
            totalRowsAffected += len(ids)
            fixs = list(fixs.items())
            for fix in fixs:
                # Rewrite every row of the cluster to the majority value.
                update_col = fix[0]
                new_value = fix[1][0]
                id_list = ids
                id_col = "id"
                self._tf.replace_by_id(new_value, update_col, id_list, id_col)
        print("Total rows affected: %d rows" % totalRowsAffected)
    def show(self, n=10, truncate=True, withId = False):
        # Display the working dataframe; hide the synthetic "id" column unless
        # explicitly requested.
        if withId:
            return self._tf._df.show(n, truncate)
        else:
            return self._tf._df.drop("id").show(n, truncate)
    def remove_duplicates(self, cols=None):
        """
        :param cols: List of columns to make the comparison, this only will consider this subset of columns,
        for dropping duplicates. The default behavior will only drop the identical rows.
        :return: Return a new DataFrame with duplicate rows removed
        """
        assert isinstance(cols, list), "Error, cols argument provided must be a list."
        self._tf.remove_duplicates(cols)
        return self
    def get_dataframe(self):
        """
        return the dataframe you have cleaned.
        """
        return self._tf._df.drop("id")
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,412 | NYUBigDataProject/SparkClean | refs/heads/master | /test/load.py | from pyspark.sql.types import StringType, IntegerType, StructType, StructField
from pyspark.sql.session import SparkSession
spark = SparkSession.builder.enableHiveSupport().getOrCreate()
sc = spark.sparkContext
def loadDemo():
    """Build the small demo DataFrame of deliberately messy city records."""
    demo_schema = StructType([
        StructField("city", StringType(), True),
        StructField("country", StringType(), True),
        StructField("population", IntegerType(), True)])
    # Rows contain intentional near-duplicates and typos for dedup demos.
    rows = [
        ('Bogota', 'Colombia', 37800000),
        ('New York', 'America', 9795791),
        (' São Paulo', 'Brazil', 12341418),
        ('~Madrid', 'Spain', 6489162),
        ('New York', 'America', 19795791),
        ('New York ', 'Amarica', 19795791),
        ('Bogotá', 'Colombia', 37800000),
    ]
    return spark.createDataFrame(rows, schema=demo_schema)
| {"/sparkclean/df_outliers.py": ["/sparkclean/df_transformer.py"], "/test/__init__.py": ["/test/load.py"], "/sparkclean/df_deduplicator.py": ["/sparkclean/__init__.py"], "/sparkclean/__init__.py": ["/sparkclean/utilities.py", "/sparkclean/df_transformer.py", "/sparkclean/df_deduplicator.py", "/sparkclean/df_outliers.py"]} |
68,413 | NYUBigDataProject/SparkClean | refs/heads/master | /sparkclean/__init__.py | from IPython.display import display, HTML
from pyspark.sql.session import SparkSession
from sparkclean.utilities import *
from sparkclean.df_transformer import DataFrameTransformer
from sparkclean.df_deduplicator import DataFrameDeduplicator
from sparkclean.df_outliers import OutlierDetector
import os
# -*- coding: utf-8 -*-
try:
    # Probe for IPython: this bare name raises NameError outside a notebook,
    # which drops us into the plain-shell branch below.
    get_ipython
    def print_html(html):
        # Render rich HTML output inside the notebook.
        display(HTML(html))
    print_html("<div>Starting or getting SparkSession and SparkContext.</div>")
    spark = SparkSession.builder.enableHiveSupport().getOrCreate()
    sc = spark.sparkContext
    # Checkpoint temp files go under the current working directory.
    Utilities().set_check_point_folder(os.getcwd(), "local")
    message = "<b><h2>SparkClean successfully imported. Have fun :).</h2></b>"
    print_html(
        message
    )
except Exception:
    # Plain Python shell: same Spark setup, text banner instead of HTML.
    print("Shell detected")
    print("Starting or getting SparkSession and SparkContext.")
    spark = SparkSession.builder.enableHiveSupport().getOrCreate()
    sc = spark.sparkContext
    Utilities().set_check_point_folder(os.getcwd(), "local")
    print("SparkClean successfully imported. Have fun :).")
    print("---------------------------------------------------------------")
    print("| |")
    print("| |")
    print("| |")
    print("| SparkClean |")
    print("| |")
    print("| V 0.1 |")
    print("| |")
    print("---------------------------------------------------------------")
# module level doc-string
__doc__ = """
SparkClean, data cleaning library for pyspark.
"""
68,447 | chenjh16/Submitter | refs/heads/master | /submitter.py |
import os
from splinter import Browser
class Submitter:
    """Automate grade/comment/file submission on Web Learning via a splinter Browser.

    ``submit_list`` items are (student_id, grade, comment, extra_file) tuples.
    """

    def __init__(self, url, username, password, course_id, homework_id, submit_list):
        self._callback = None
        self._browser = Browser()
        self._url = url
        self._username = username
        self._password = password
        self._course_id = course_id
        self._homework_id = homework_id
        self._submit_list = submit_list

    def _login(self):
        """Log in to the learning platform with the stored credentials."""
        self._browser.visit(self._url)
        self._browser.fill("i_user", self._username)
        self._browser.fill("i_pass", self._password)
        self._browser.find_by_id("loginButtonId").click()

    def _nvi2course(self):
        """Navigate to the course page; the previous window is closed afterwards."""
        self._browser.find_link_by_partial_text(self._course_id).first.click()
        self._browser.windows.current.close()

    def _nvi2homework(self):
        """Navigate from the course page to the homework listing."""
        self._browser.find_link_by_partial_text("课程作业").first.click()
        self._browser.find_link_by_partial_text(self._homework_id).first.click()

    def _submit(self, stu_id, grade, comment, ex_file):
        """Fill in and submit one student's grade, comment and optional PDF."""
        # Locate the row whose third cell is the student id; the last cell
        # holds the grading link.
        xpath_str = '//tbody/tr[td[3]=' + stu_id + ']/td[last()]/a'
        self._browser.find_by_xpath(xpath_str).last.click()
        self._browser.fill('cj', grade)
        self._browser.fill('pynr', comment)
        # Only PDF attachments are uploaded.
        if os.path.splitext(ex_file)[1] == '.pdf':
            self._browser.driver.find_element_by_name('fileupload').send_keys(ex_file)
        submit_btn_css = 'div[class="sub-back sub-back-3 absolute"] > input[class="btn"]'
        self._browser.find_by_css(submit_btn_css).first.click()
        # Wait for the confirmation dialog, dismiss it, then go back two pages.
        while not self._browser.is_text_present('关闭', wait_time=1):
            pass
        self._browser.find_by_text('关闭').click()
        self._browser.back()
        self._browser.back()

    def add_single_task_callback(self, callback):
        """Register a callback invoked after each successful submission."""
        self._callback = callback

    def start(self):
        """Run the whole submission flow, then quit the browser."""
        self._login()
        self._nvi2course()
        self._nvi2homework()
        for stu_id, grade, comment, ex_file in self._submit_list:
            self._submit(stu_id, grade, comment, ex_file)
            self._callback([stu_id, grade, comment, ex_file])
        self._browser.quit()

    @staticmethod
    def clean():
        """Best-effort removal of the geckodriver log from the working directory.

        Previously this crashed with FileNotFoundError when the log was absent
        and built the path with string concatenation; it now joins the path
        portably and skips a missing file.
        """
        log_path = os.path.join(os.getcwd(), "geckodriver.log")
        if os.path.exists(log_path):
            os.remove(log_path)
| {"/main.py": ["/submitter.py"]} |
68,448 | chenjh16/Submitter | refs/heads/master | /main.py | #!/usr/bin/env python3
###############################################################################
#
# A submitter for web learning 2018, Tsinghua University.
#
# Features:
# 1. Submit the grades, the comments, and the files.
# 2. Extract the grades from the annotations in the PDF file.
#
# Usage:
# 1. Install `PyPDF3` and `splinter` packages, and make splinter functional.
# Firefox is recommended.
# 2. Run `main.py homework_dir`, e.g.,
# `main.py ./离散数学\(1\)_2020-10-13\ 09_00_第四周作业/`.
#
# Notice:
# 1. The `course_id` and the `homework_id` are automatically parsed from the
# folder name, e.g., for `离散数学(1)_2020-09-22 09_00_第一周作业`, the
# `course_id` is `离散数学(1)` and the `homework_id` is `第一周作业`.
# 2. Note that the `homework_id` should be **fully showed** in the browser.
# 3. The `grade` is automatically parsed from the **annotations** of the PDF
# file, e.g., `10.0` or `X.X`.
#
# Customization:
# Override `get_tutor_info`, `get_submit_list`, `get_submit_info` methods.
#
###############################################################################
import os
import re
import sys
import PyPDF3
from submitter import Submitter
def get_tutor_info():
    """
    This method is used to collect the login information of the Web Learning 2018.
    Return an url of login page, an user name, and a password.
    :return: login_url, username, password
    """
    login_url = "http://learn.tsinghua.edu.cn"
    # For example, the user name and the password are saved in a file.
    # ~/.learn.pw layout: line 1 = user name, line 2 = password.
    lines = open(os.path.expanduser('~') + '/.learn.pw', 'r').readlines()
    username = lines[0].strip()
    password = lines[1].strip()
    return login_url, username, password
def get_submit_info():
    """
    Derive the course ID and homework ID from the folder path in sys.argv[1].

    The unzipped homework folder is named `courseId_XXXX_homeworkId`, so the
    first and last '_'-separated fields are the two identifiers.
    :return: course_id : string, homework_id : string
    """
    folder_name = os.path.basename(sys.argv[1])
    fields = folder_name.split('_')
    return fields[0], fields[-1]
def get_submit_list():
    """
    The submit list is a tuple (studentId, grade, comment, extraFile)
    and the fail list is arbitrary list.

    Scans the homework folder given in sys.argv[1]; items whose grade can be
    determined (PDF annotation or filename) go to submit_list, PDFs without a
    grade go to fail_list, and non-PDF files are submitted with grade "0.0".
    :return: submit_list : (4) list, fail_list : a list
    """
    submit_list = []  # a list of tuples: [(studentId, grade, comment, extraFile)]
    fail_list = []
    file_dir = os.path.abspath(sys.argv[1])
    print(file_dir)
    for item in os.listdir(file_dir):  # like: 2020010111_NAME_XXXX.pdf
        if 'Submitted' in item:  # skip submitted files.
            continue
        if 'log' in item:  # skip other file like logs.
            continue
        stu_id = item[:10]  # todo: use a robuster method.
        if stu_id.isalnum():
            exfile = os.path.join(file_dir, item)
            if os.path.splitext(item)[-1] == '.pdf':
                comment = '详见附件!'
                # Prefer the grade from PDF annotations, fall back to the filename.
                grade = get_grade_from_pdf_file(exfile)
                if grade == "":
                    grade = get_grade_from_filename(item)
                if grade == "":
                    # No grade found anywhere: needs manual handling.
                    fail_list.append((stu_id, grade, comment, exfile))
                else:
                    # Embed the grade into the filename so it is visible on disk.
                    exfile = rename_file_with_grade(exfile, grade)
                    submit_list.append((stu_id, grade, comment, exfile))
            else:
                comment = '非PDF文件不予批阅,请补交PDF文件作业!'
                submit_list.append((stu_id, "0.0", comment, exfile))
    return submit_list, fail_list
def single_task_finished(task):
    """
    Callback run after one submission: mark the file as submitted on disk.

    Inserts 'Submitted' as the third '_'-separated field of the file name so
    the file is skipped on subsequent runs of get_submit_list().
    :param task: [stu_id, grade, comment, file_path]
    """
    print("Submitted!", task)
    path = task[-1]
    if not os.path.isfile(path):
        return
    parent, name = os.path.split(path)
    parts = name.split('_')
    # Equivalent to parts.insert(2, 'Submitted').
    marked = parts[:2] + ['Submitted'] + parts[2:]
    os.rename(path, os.path.join(parent, '_'.join(marked)))
def get_grade_from_pdf_file(pdf_file):
    """
    Return a grade extracted from the PDF file, such as "10.0" or "9.5".

    Only annotations on the FIRST page are scanned; the first annotation whose
    content looks like a grade (see is_grade) wins. Returns "" when none match.
    :param pdf_file: file_path : string
    :return: grade : string
    """
    # strict=False tolerates slightly malformed PDFs.
    # NOTE(review): the file object is never closed explicitly.
    input1 = PyPDF3.PdfFileReader(open(pdf_file, "rb"), strict=False)
    page0 = input1.getPage(0)
    if '/Annots' in page0:
        for annot in page0['/Annots']:
            annot_obj = annot.getObject()
            if '/Contents' in annot_obj:
                content = str(annot_obj['/Contents'])
                if is_grade(content):
                    return content
    return ""
def get_grade_from_filename(filename):
    """
    Extract the grade embedded in a file name.

    File names begin with the student ID (e.g. 2015012065) and may carry the
    grade as the second '_'-separated field, e.g. '2015012065_9.5_hw.pdf'.
    Returns "" when no valid grade field is present.
    """
    fields = filename.split('_')
    if len(fields) < 2:
        return ""
    return fields[1] if is_grade(fields[1]) else ""
def rename_file_with_grade(file, grade):
    """
    Insert (or replace) the grade field in the PDF file name and rename it on disk.

    :param file: a string of the PDF filename
    :param grade: a string of the grade
    :return: a string of the new filename
    """
    parent, name = os.path.split(file)
    fields = name.split('_')
    # Overwrite an existing grade field; otherwise insert one after the student ID.
    if len(fields) >= 2 and is_grade(fields[1]):
        fields[1] = grade
    else:
        fields.insert(1, grade)
    renamed = os.path.join(parent, '_'.join(fields))
    os.rename(file, renamed)
    return renamed
def is_grade(num):
    """
    Judge if a string is a grade like `X.X` or `10.0`.

    A grade is one or more digits, a dot, and exactly one decimal digit —
    '9.5' and '10.0' qualify; '9', '9.55' and 'X.X' do not.
    :param num: a string.
    :return: True or False
    """
    # re.fullmatch replaces the old `True if re.compile(...).match(...) else
    # False` construction; it also drops the '^...$' anchoring, whose '$'
    # quietly accepted a trailing newline.
    return re.fullmatch(r'\d+\.\d', num) is not None
def print_list(plist):
    """
    Print a list, one element per line.
    :param plist: a list.
    :return: None.
    """
    # Single print call; an empty list prints nothing (no blank line).
    if plist:
        print('\n'.join(str(entry) for entry in plist))
def main():
    """Collect the submit list, show it, and submit after explicit confirmation."""
    print('Process the files in: ')
    submit_list, fail_list = get_submit_list()
    print('\nSubmit list: ' + str(len(submit_list)))
    print_list(submit_list)
    print('\nFail list: ' + str(len(fail_list)))
    print_list(fail_list)
    # Require an explicit capital 'Y' before touching the web platform.
    run = input('\nInput Y to begin submit now: ')
    if run == 'Y':
        print('\nBegin submit: ')
        url, username, password = get_tutor_info()
        course_id, homework_id = get_submit_info()
        submitter = Submitter(url, username, password, course_id, homework_id, submit_list)
        # Mark each file as submitted right after its upload succeeds.
        submitter.add_single_task_callback(single_task_finished)
        submitter.start()
        submitter.clean()
if __name__ == "__main__":
    main()
| {"/main.py": ["/submitter.py"]} |
68,450 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/feature_extractor.py | from gensim.models import KeyedVectors
import numpy as np
import pandas as pd
from mediarecommender.itemdatabase.parameters import *
def calculate_item_vector(word_list, d, word_vectors):
    """Average the word vectors of *word_list* into one d-dimensional item vector.

    Words missing from *word_vectors* contribute nothing but still count in
    the divisor (preserving the original averaging scheme; the KeyError is
    printed). Returns a zero vector for an empty *word_list* instead of
    raising ZeroDivisionError.

    :param word_list: list of token strings
    :param d: embedding dimensionality
    :param word_vectors: mapping word -> d-dim vector (e.g. gensim KeyedVectors)
    :return: numpy array of shape (d,)
    """
    item_vector = np.zeros(shape=(d,))
    if not word_list:
        # Guard: items whose keyword list is empty previously crashed here.
        return item_vector
    for word in word_list:
        try:
            item_vector += word_vectors[word]
        except KeyError as e:
            print(e)  # out-of-vocabulary word; skipped
    return np.divide(item_vector, len(word_list))
def vectorize_items(in_file_str, out_file_str, word_vectors, dup_check_col):
    """Replace each item's keyword string with its mean word vector and write a deduplicated CSV.

    :param in_file_str: input CSV with 'id' and 'words' columns
    :param out_file_str: output CSV; 'words' is renamed to 'vector'
    :param word_vectors: trained gensim KeyedVectors
    :param dup_check_col: column used to drop duplicate items (keep='first')
    """
    item_df = pd.read_csv(in_file_str)
    item_df.set_index('id', inplace=True)
    for i, row in item_df.iterrows():
        print(f'Vectorizing {in_file_str}: item_id = {str(i)}')
        word_list = row['words'].split(' ')
        # Store the vector as a plain space-separated string (brackets stripped).
        # NOTE(review): np.array2string uses default precision/threshold —
        # confirm vectors are short enough not to be elided with '...'.
        item_df.at[i, 'words'] = np.array2string(calculate_item_vector(word_list, word_vectors.vector_size, word_vectors)).strip(' []')
    item_df.rename(columns = {'words': 'vector'}, inplace=True)
    item_df.drop_duplicates(subset=dup_check_col, keep='first', inplace=True)
    item_df.to_csv(out_file_str, sep=',', encoding='utf-8')
def main():
    """Load the trained word vectors and vectorize all three media-type CSVs."""
    # Load trained word vectors
    print('Loading trained word vectors...')
    # mmap='r' maps the vector matrix read-only instead of copying it into RAM.
    word_vectors = KeyedVectors.load(word_vectors_kv, mmap='r')
    print('Word vectors loaded')
    # Vectorize items by their corresponding keywords
    vectorize_items(preprocessed_movie_csv, vectorized_movie_csv, word_vectors, 'imdb_id')
    vectorize_items(preprocessed_game_csv, vectorized_game_csv, word_vectors, 'steam_id')
    vectorize_items(preprocessed_book_csv, vectorized_book_csv, word_vectors, 'goodreads_id')
if __name__ == '__main__':
main() | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,451 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/data_collection.py | from imdb import IMDb
from requests.exceptions import RequestException
from xml.sax._exceptions import SAXException
from bs4 import BeautifulSoup
import requests
import pandas as pd
import xml.sax
import untangle
import time
from mediarecommender.itemdatabase.parameters import *
def extract_movies(num_of_samples):
    """Scrape metadata and text documents for the first *num_of_samples* sample movies.

    For each IMDb id the synopsis, plot summaries and scraped user reviews are
    concatenated ('::'-separated) into a 'documents' column; the result is
    written to raw_movie_csv. Movies that fail to download or parse are skipped.
    """
    imdb_ids = read_sample_movies_imdb_id()[0:num_of_samples]
    ia = IMDb()
    movie_df = pd.DataFrame(columns=['id', 'imdb_id', 'title', 'genres', 'rating', 'url', 'documents'])
    for i, imdb_id in enumerate(imdb_ids):
        try:
            movie = ia.get_movie(imdb_id, info=['main', 'synopsis', 'plot'])
            title = f"{movie['title']} ({str(movie['year'])})"
            print(f'Extracting movies, title = {title}, imdb_id = {imdb_id}, iter = {i}')
            genres = '::'.join(movie['genre'])
            url = f'https://www.imdb.com/title/tt{imdb_id}'
            rating = 0.0
            synopsis = []
            plot_list = []
            reviews = scrap_movie_reviews(imdb_id)
            if 'rating' in movie:
                rating = movie['rating']
            if 'synopsis' in movie:
                synopsis = movie['synopsis']
            if 'plot' in movie:
                # Keep only the summary text, dropping the '::author' suffix.
                plot_list = [summary.split('::')[0] for summary in movie['plot']]
            document_list = synopsis + plot_list + reviews
            movie_df.loc[len(movie_df)] = [len(movie_df), imdb_id, title, genres, rating, url, '::'.join(document_list)]
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit, making the long scrape
            # un-interruptible. Skip the failing movie and continue.
            continue
    movie_df.to_csv(raw_movie_csv, index=False, sep=',', encoding='utf-8')
def extract_games(num_of_samples):
    """Build raw_game_csv from the sample games, pairing each Steam description with an IGN review.

    For each game the first IGN search hit is scraped for review paragraphs;
    games with no IGN search result (or unexpected page markup) are skipped.
    Request failures are retried up to request_error_retry_limit times.
    """
    games = read_sample_games()[0:num_of_samples]
    game_df = pd.DataFrame(columns=['id', 'steam_id', 'title', 'rating', 'url', 'documents'])
    for i, g in enumerate(games):
        # Row layout comes from read_sample_games():
        # [ResponseID, ResponseName, ReleaseDate, Metacritic, DetailedDescrip]
        steam_id = g[0]
        release_date = g[2]
        title = g[1]
        year = release_date[len(release_date)-4:]  # last 4 chars of the date
        rating = g[3]
        description = g[4]
        site_detail_url = f'https://store.steampowered.com/app/{str(steam_id)}'
        retry_count = 0
        while True:  # retry loop; 'break' exits on success, skip, or give-up
            try:
                # Search IGN review
                review_p_list = []
                headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}
                # NOTE(review): title is interpolated without URL-encoding —
                # confirm titles with '&'/'#' search correctly.
                url = f'https://uk.ign.com/search?q={title}&page=0&count=1&type=article&filter=articles'
                html = requests.get(url, headers=headers).text
                soup = BeautifulSoup(html, 'html.parser')
                search_item_title = soup.find('div', class_='search-item-title')
                if search_item_title == None:
                    break  # no IGN result: game is skipped entirely
                review_url = search_item_title.find('a')['href']
                response = requests.get(review_url, headers=headers)
                print(f"Extracting games, title = {title}, steam_id = {steam_id}, response = {response}, iter = {i}")
                html = response.text
                soup = BeautifulSoup(html, 'html.parser')
                body = soup.find('body', attrs={'layout': 'default-html5'})
                if body == None:
                    print('body == None')
                    break
                article_content = body.find('div', attrs={'id': 'article-content'})
                if article_content == None:
                    print('article_content == None')
                    break
                review_ps = article_content.find_all('p')
                for p in review_ps:
                    review_p_list.append(p.getText())
                # Description and review joined with the '::' document separator.
                document = description + '::' + ' '.join(review_p_list)
                game_df.loc[len(game_df)] = [len(game_df), steam_id, f'{title} ({year})', rating, site_detail_url, document]
            except RequestException:
                print(f'Requests error when extracting {title}, steam_id = {steam_id}, retrying, count = {str(retry_count)}')
                time.sleep(request_error_sleep_seconds)
                retry_count += 1
                if retry_count > request_error_retry_limit:
                    break
                continue
            break  # success: leave the retry loop
    game_df.to_csv(raw_game_csv, index=False, sep=',', encoding='utf-8')
def extract_books(num_of_samples):
    """Fetch title/rating/description for the sample books via the Goodreads XML API.

    Request failures retry indefinitely; non-XML responses skip the book.
    """
    goodreads_books_ids = read_sample_books_goodreads_ids()[0:num_of_samples]
    book_df = pd.DataFrame(columns=['id', 'goodreads_id', 'title', 'rating', 'url', 'documents'])
    for i, goodreads_book_id in goodreads_books_ids.iteritems():
        while True:  # retry loop; final 'break' exits on success
            try:
                print(f'Extracting books, id = {str(goodreads_book_id)}, iter = {i}')
                # NOTE(review): the Goodreads API key is hard-coded here;
                # consider moving it to configuration.
                url = 'https://www.goodreads.com/book/show/?id=' + str(goodreads_book_id) + '&format=xml&key=CoBtO9PVTZqNZ5tDLr9yGQ'
                detail_url = 'https://www.goodreads.com/book/show/?id=' + str(goodreads_book_id)
                parsed_xml = untangle.parse(url)
                year = parsed_xml.GoodreadsResponse.book.publication_year.cdata
                title = f'{parsed_xml.GoodreadsResponse.book.title.cdata} ({year})'
                print(f'Book title: {title}')
                rating = parsed_xml.GoodreadsResponse.book.average_rating.cdata
                description = parsed_xml.GoodreadsResponse.book.description.cdata
                book_df.loc[len(book_df)] = [len(book_df), goodreads_book_id, title, rating, detail_url, description]
            except RequestException:
                print('Requests error, retrying...')
                continue
            except SAXException:
                # The API returned something that is not XML: skip this book.
                print('Non-xml response, skipping')
                break
            break
    book_df.to_csv(raw_book_csv, index=False, sep=',', encoding='utf-8')
def scrap_movie_reviews(id):
    """Scrape the user reviews shown on a movie's IMDb reviews page.

    :param id: IMDb title id without the 'tt' prefix
    :return: list of review body strings (first page only; no pagination)
    """
    url = f'https://www.imdb.com/title/tt{id}/reviews'
    # NOTE(review): desktop-browser User-Agent — presumably needed to avoid
    # bot blocking / alternate markup; confirm if scraping breaks.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'}
    html = requests.get(url, headers=headers).text
    soup = BeautifulSoup(html, 'html.parser')
    main = soup.find('div', attrs={'id': 'main'})
    review_divs = main.find_all('div', class_='text show-more__control')
    reviews = [rd.text for rd in review_divs]
    return reviews
def read_sample_movies_imdb_id():
    """Return the imdbId column of the movie samples CSV as an array of strings."""
    # NOTE(review): imdbId is read as str — presumably to preserve leading
    # zeros in the ids; confirm against the source file.
    dtypes = {'movieId': 'int', 'imdbId': 'str', 'tmdbId': 'str'}
    movie_imdb_ids_df = pd.read_csv(movie_samples_csv, sep=',', dtype=dtypes, encoding='utf-8')
    return movie_imdb_ids_df['imdbId'].values
def read_sample_games():
    """Return rows of [steam id, name, release date, Metacritic score, description] from the game samples CSV."""
    game = pd.read_csv(game_samples_csv, sep=',', encoding='utf-8')
    return game[['ResponseID', 'ResponseName', 'ReleaseDate', 'Metacritic', 'DetailedDescrip']].values
def read_sample_books_goodreads_ids():
    """Return the 'book_id' Series (indexed by sample 'id') from the book samples CSV."""
    books_df = pd.read_csv(book_samples_csv, encoding='utf-8')
    books_df.set_index('id', inplace=True)
    return books_df['book_id']
def main():
    """Run all three scrapers with the sample counts configured in parameters."""
    extract_movies(num_of_movie_samples)
    extract_games(num_of_game_samples)
    extract_books(num_of_book_samples)
if __name__ == '__main__':
main() | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,452 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/data_preprocessing.py | import nltk
import pandas as pd
import re
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from unidecode import unidecode
from mediarecommender.itemdatabase.parameters import *
def extract_nouns_and_adjs(tokenized_text):
    """Return the nouns (NN) and adjectives (JJ) found in *tokenized_text*.

    Tokens chunked together by the grammar are re-joined with spaces.
    """
    tagged_text = nltk.pos_tag(tokenized_text)
    # Chunk grammar keeping NN or JJ tags.
    # NOTE(review): the double '||' looks like a typo for '|'; confirm the
    # intended grammar is {<NN>|<JJ>}.
    grammar = 'NOUN_OR_ADJ:{<NN>||<JJ>}'
    parse_result = nltk.RegexpParser(grammar).parse(tagged_text)
    nouns_and_adjs = []
    for elem in parse_result:
        # Matched chunks come back as Tree nodes; everything else is dropped.
        if type(elem) == nltk.tree.Tree:
            nouns_and_adjs.append(' '.join([pair[0] for pair in elem.leaves()]))
    return nouns_and_adjs
def lemmatize_words(tokenized_text):
    """Lemmatize the NN and JJ tokens of *tokenized_text*; all other tags are dropped."""
    lemmatizer = WordNetLemmatizer()
    lemmatized_text = []
    tagged_text = nltk.pos_tag(tokenized_text)
    for pair in tagged_text:
        if pair[1] == 'NN':
            # Noun lemmatization (e.g. plural -> singular).
            lemmatized_text.append(lemmatizer.lemmatize(pair[0], pos='n'))
        elif pair[1] == 'JJ':
            # Adjective lemmatization.
            lemmatized_text.append(lemmatizer.lemmatize(pair[0], pos='a'))
    return lemmatized_text
def remove_html_tags(text):
    """Strip HTML markup from *text*, returning only the visible text."""
    soup = BeautifulSoup(text, 'lxml')
    return soup.get_text()
def replace_accents(text):
    """Transliterate accented/Unicode characters to their closest ASCII form."""
    return unidecode(text)
def replace_simple_apostrophe(text):
    """Collapse each run of typographic apostrophes (’) into one ASCII apostrophe."""
    pattern = re.compile('’+')
    return pattern.sub("'", text)
def remove_non_alphabet_and_useless_symbols(text):
    """Replace every run of characters other than ASCII letters, '-', "'" and '’' with a single space."""
    return re.sub(r"[^a-zA-Z\-'’]+", ' ', text)
def remove_stop_words(word_list):
    """Return *word_list* with English stop words removed, preserving order."""
    # Build the stop-word set once; membership tests are O(1). The list
    # comprehension replaces the previous manual append loop.
    stop_words = set(stopwords.words('english'))
    return [w for w in word_list if w not in stop_words]
def preprocess_text(text):
    """Reduce raw document text to a space-joined string of lemmatized nouns and adjectives."""
    text_no_html_tags = remove_html_tags(text)
    text_simple_apostrophe = replace_simple_apostrophe(text_no_html_tags)
    text_accents_replaced = replace_accents(text_simple_apostrophe)
    text_alphabets_dash_apostrophe = remove_non_alphabet_and_useless_symbols(text_accents_replaced)
    # Second transliteration pass: the symbol filter keeps '’' characters, so
    # run unidecode again to fold any remaining typographic apostrophes to ASCII.
    text_accents_replaced = replace_accents(text_alphabets_dash_apostrophe)
    tokenized_text = nltk.word_tokenize(text_accents_replaced)
    nouns_and_adjs = extract_nouns_and_adjs(tokenized_text)
    lemmatized_text = lemmatize_words(nouns_and_adjs)
    return ' '.join(lemmatized_text)
def preprocess_text_for_word2vec(text):
    """Tokenize *text* into a lemmatized, stop-word-free word list for Word2Vec training.

    Unlike preprocess_text, stop-word removal replaces POS chunking and a
    token LIST (not a joined string) is returned.
    """
    text_no_html_tags = remove_html_tags(text)
    text_simple_apostrophe = replace_simple_apostrophe(text_no_html_tags)
    text_accents_replaced = replace_accents(text_simple_apostrophe)
    text_alphabets_dash_apostrophe = remove_non_alphabet_and_useless_symbols(text_accents_replaced)
    # Second transliteration pass: folds '’' kept by the symbol filter to ASCII.
    text_accents_replaced = replace_accents(text_alphabets_dash_apostrophe)
    tokenized_text = nltk.word_tokenize(text_accents_replaced)
    text_no_stop_words = remove_stop_words(tokenized_text)
    lemmatized_text = lemmatize_words(text_no_stop_words)
    return lemmatized_text
def preprocess_item_documents(in_file_str, out_file_str):
    """Convert each item's '::'-separated raw documents into a keyword string CSV.

    Rows with missing documents, or whose documents reduce to an empty
    keyword string, are dropped; the 'documents' column is renamed 'words'.
    """
    item_df = pd.read_csv(in_file_str)
    item_df.set_index('id', inplace=True)
    item_remove_id = []
    for i, row in item_df.iterrows():
        print(f'Preprocessing {in_file_str}, item id = {str(i)}')
        if pd.isnull(item_df.at[i, 'documents']):
            item_remove_id.append(i)
        else:
            # Commas in titles would corrupt the comma-separated output file.
            item_df.at[i, 'title'] = row['title'].replace(',', ' ')
            documents = row['documents'].split('::')
            keywords = ' '.join([preprocess_text(d) for d in documents])
            if keywords != '':
                item_df.at[i, 'documents'] = keywords
            else:
                item_remove_id.append(i)
    print(f'Id of items to be removed: {item_remove_id}')
    item_df.drop(item_remove_id, inplace=True)
    item_df.rename(columns = {'documents': 'words'}, inplace=True)
    item_df.to_csv(out_file_str, sep=',', encoding='utf-8')
def main():
    """Preprocess the raw documents of all three media types into keyword CSVs."""
    preprocess_item_documents(raw_movie_csv, preprocessed_movie_csv)
    preprocess_item_documents(raw_game_csv, preprocessed_game_csv)
    preprocess_item_documents(raw_book_csv, preprocessed_book_csv)
if __name__ == '__main__':
main() | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,453 | ccy1997/media-recommender | refs/heads/master | /setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='media-recommender',
version='0.0.3',
author='Chung Yin Cheung',
author_email='cxc574@student.bham.ac.uk',
description='Media Recommender provides recommendation for movies, videos games and books',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://git-teaching.cs.bham.ac.uk/mod-ug-proj-2018/cxc574',
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'beautifulsoup4',
'Flask',
'Flask-SQLAlchemy',
'gensim',
'IMDbPY',
'nltk',
'numpy',
'pandas',
'requests',
'scikit-learn',
'SQLAlchemy',
'untangle'
],
classifiers=[
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
],
) | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,454 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/recommender/models.py | from mediarecommender.recommender import db
class Movie(db.Model):
    """SQLAlchemy model for a movie item, including its precomputed item vector."""
    id = db.Column(db.Integer, primary_key=True)
    imdb_id = db.Column(db.Integer, nullable=False)
    title = db.Column(db.String(), nullable=False)
    genres = db.Column(db.String(), nullable=False)  # '::'-separated genre names
    rating = db.Column(db.Float(), nullable=False)
    url = db.Column(db.String(), nullable=False)
    vector = db.Column(db.String(), nullable=False)  # space-separated floats
    def __repr__(self):
        # Bug fix: previously referenced self.kind, which is not a column of
        # this model, so repr() raised AttributeError. Show genres instead.
        return f"Movie({self.id}, '{self.title}', '{self.genres}')"
class Game(db.Model):
    """SQLAlchemy model for a game item, including its precomputed item vector."""
    id = db.Column(db.Integer, primary_key=True)
    steam_id = db.Column(db.Integer, nullable=False)
    title = db.Column(db.String(), nullable=False)
    rating = db.Column(db.Float(), nullable=False)
    url = db.Column(db.String(), nullable=False)
    vector = db.Column(db.String(), nullable=False)  # space-separated floats
    def __repr__(self):
        return f"Game({self.id}, '{self.title}')"
class Book(db.Model):
    """SQLAlchemy model for a book item, including its precomputed item vector."""
    id = db.Column(db.Integer, primary_key=True)
    goodreads_id = db.Column(db.Integer, nullable=False)
    title = db.Column(db.String(), nullable=False)
    rating = db.Column(db.Float(), nullable=False)
    url = db.Column(db.String(), nullable=False)
    vector = db.Column(db.String(), nullable=False)  # space-separated floats
    def __repr__(self):
        return f"Book({self.id}, '{self.title}')"
68,455 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/recommender/recommender.py | from enum import Enum
import numpy as np
from sklearn.neighbors import NearestNeighbors
from mediarecommender.recommender.models import Movie, Game, Book
class Media(Enum):
    """Media types; the values match the 'type' strings sent by the front end."""
    MOVIE = 'Movie'
    GAME = 'Game'
    BOOK = 'Book'
class Recommender:
    """Content-based recommender built from a user's favorite items.

    The user profile is the mean of the stored item vectors of all favorites;
    recommendations are the k nearest item vectors per media type (brute-force
    k-NN), excluding the favorites themselves and low-rated items.
    """
    def __init__(self, user_favorites_dict):
        # user_favorites_dict['favorites'] is a list of {'id': ..., 'type': ...}.
        self.user_favorites_dict = user_favorites_dict
        self.user_vector = self.__user_favorites_to_user_vector()
    def generate_k_recommendations(self, media_type, k):
        """Return k dicts {'id', 'title', 'url'} of recommended items of *media_type*.

        Items already in the favorites and items below a per-type rating
        threshold (movies 6.5, games 65.0, books 3.0) are excluded.
        Returns None for an unknown media type.
        """
        item_vectors = []
        all_items_without_favorites = []
        # NOTE(review): np.fromstring(text, sep=' ') is deprecated in newer
        # NumPy; consider np.fromiter/np.array on split() when upgrading.
        if media_type == Media.MOVIE:
            favorite_movies_id = self.__get_favorites_id('Movie')
            all_items_without_favorites = Movie.query.filter(~Movie.id.in_(favorite_movies_id), Movie.rating > 6.5).all()
            item_vectors = np.vstack([np.fromstring(movie.vector, dtype=float, sep=' ') for movie in all_items_without_favorites])
        elif media_type == Media.GAME:
            favorite_games_id = self.__get_favorites_id('Game')
            all_items_without_favorites = Game.query.filter(~Game.id.in_(favorite_games_id), Game.rating > 65.0).all()
            item_vectors = np.vstack([np.fromstring(game.vector, dtype=float, sep=' ') for game in all_items_without_favorites])
        elif media_type == Media.BOOK:
            favorite_books_id = self.__get_favorites_id('Book')
            all_items_without_favorites = Book.query.filter(~Book.id.in_(favorite_books_id), Book.rating > 3.0).all()
            item_vectors = np.vstack([np.fromstring(book.vector, dtype=float, sep=' ') for book in all_items_without_favorites])
        else:
            print('Unknown media type: generate_k_recommendations')
            return
        # Brute-force search is fine at this catalog size; indices[0] are the
        # positions of the k nearest items to the user profile vector.
        nbrs = NearestNeighbors(n_neighbors=k, algorithm='brute').fit(item_vectors)
        _, indices = nbrs.kneighbors(self.user_vector)
        recommended_movies = []
        for i in indices[0]:
            recommended_movie_dict = {
                'id': all_items_without_favorites[i].id,
                'title': all_items_without_favorites[i].title,
                'url': all_items_without_favorites[i].url
            }
            recommended_movies.append(recommended_movie_dict)
        return recommended_movies
    def __user_favorites_to_user_vector(self):
        """Average the stored vectors of all favorites into a (1, d) profile vector."""
        favorite_vectors = []
        for f in self.user_favorites_dict['favorites']:
            item_id = int(f['id'])
            media_type = f['type']
            if Media(media_type) == Media.MOVIE:
                movie = Movie.query.filter(Movie.id == item_id).first()
                favorite_vectors.append(np.fromstring(movie.vector, dtype=float, sep=' '))
            elif Media(media_type) == Media.GAME:
                game = Game.query.filter(Game.id == item_id).first()
                favorite_vectors.append(np.fromstring(game.vector, dtype=float, sep=' '))
            elif Media(media_type) == Media.BOOK:
                book = Book.query.filter(Book.id == item_id).first()
                favorite_vectors.append(np.fromstring(book.vector, dtype=float, sep=' '))
            else:
                print('Unknown media type: __user_favorites_to_user_vector')
                return
        # reshape(1, -1): kneighbors expects a 2-D query array.
        return np.mean(np.asarray(favorite_vectors), axis=0).reshape(1, -1)
    def __get_favorites_id(self, media_type):
        """Return the ids of favorites whose type matches *media_type* (a value string)."""
        favorites_id = []
        for f in self.user_favorites_dict['favorites']:
            item_id = f['id']
            f_media_type = f['type']
            if Media(f_media_type) == Media(media_type):
                favorites_id.append(item_id)
        return favorites_id
68,456 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/main.py | from mediarecommender import MediaRecommender
# Entry point: instantiate the application wrapper and start serving.
mr = MediaRecommender()
mr.run()
68,457 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/recommender/routes.py | from flask import render_template, url_for, request, jsonify
import pandas as pd
import numpy as np
import json
from mediarecommender.recommender import app
from mediarecommender.recommender import db
from mediarecommender.recommender.models import Movie, Game, Book
from mediarecommender.recommender.recommender import Recommender, Media
@app.route("/")
def home():
return render_template('home.html', title='Media Recommender')
@app.route("/search", methods=['GET'])
def search():
query = request.args.get('query')
movie_results = Movie.query.filter(Movie.title.like('%' + query + '%')).all()
game_results = Game.query.filter(Game.title.like('%' + query + '%')).all()
book_results = Book.query.filter(Book.title.like('%' + query + '%')).all()
results = movie_results + game_results + book_results
results_list_of_dict = [{'id': r.id, 'title': r.title, 'type': type(r).__name__} for r in results][0:10]
return jsonify({'results': results_list_of_dict})
@app.route("/submit", methods=['GET'])
def submit():
favorites_json = request.args.get('favorites')
favorites_dict = json.loads(favorites_json)
r = Recommender(favorites_dict)
# Generate recommendation
movie_recommendation = r.generate_k_recommendations(Media.MOVIE, 5)
game_recommendation = r.generate_k_recommendations(Media.GAME, 5)
book_recommendation = r.generate_k_recommendations(Media.BOOK, 5)
return jsonify({
'movie': movie_recommendation,
'game': game_recommendation,
'book': book_recommendation
})
| {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,458 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/word2vec_trainer.py | from gensim.utils import simple_preprocess
from gensim.test.utils import get_tmpfile
from gensim.models import Word2Vec
import pandas as pd
import csv
from mediarecommender.itemdatabase.data_preprocessing import lemmatize_words
from mediarecommender.itemdatabase.data_preprocessing import remove_html_tags
from mediarecommender.itemdatabase.data_preprocessing import preprocess_text_for_word2vec
from mediarecommender.itemdatabase.parameters import *
def prepare_word2vec_training_data():
    """Collect tokenized training sentences from all movie, game and book sources."""
    processed_book_documents = read_book_documents()
    processed_game_documents = read_game_documents()
    processed_movie_documents = read_movie_documents()
    return processed_movie_documents + processed_game_documents + processed_book_documents
def read_movie_documents():
    """Tokenize movie texts from both the samples metadata CSV and the scraped raw CSV."""
    movies_df = pd.read_csv(raw_movie_csv)
    documents = movies_df['documents'].values
    processed_documents = []
    with open(movie_samples_csv, encoding='utf-8') as movie_metadata_csv:
        csv_reader = csv.reader(movie_metadata_csv, delimiter=',')
        for i, row in enumerate(csv_reader):
            print(f'Reading movie documents, iter = {i}, file = movies_metadata.csv')
            if i != 0:  # skip the header row
                # Column index 9 — NOTE(review): presumably the overview/
                # description field of the metadata file; confirm.
                d = row[9]
                processed_d = preprocess_text_for_word2vec(d)
                processed_documents.append(processed_d)
    for i, d in enumerate(documents):
        print(f'Reading movie documents, iter = {i}, file = raw_movies.csv')
        if pd.notna(d):
            # Each scraped row packs several documents separated by '::'.
            d_list = d.split('::')
            for el in d_list:
                processed_documents.append(preprocess_text_for_word2vec(el))
    return processed_documents
def read_game_documents():
    """Tokenize the '::'-separated game documents from the scraped raw CSV."""
    games_df = pd.read_csv(raw_game_csv)
    documents = games_df['documents'].values
    processed_documents = []
    for i, d in enumerate(documents):
        print(f'Reading game documents, iter = {i}, file = games_metadata.csv')
        if pd.notna(d):
            # Each row packs description + review separated by '::'.
            d_list = d.split('::')
            for el in d_list:
                processed_documents.append(preprocess_text_for_word2vec(el))
    return processed_documents
def read_book_documents():
    """Collect preprocessed book documents.

    Combines column 6 of the tab-separated book metadata sample (skipping
    its header) with the '::'-joined `documents` column of raw_books.csv.
    """
    frame = pd.read_csv(raw_book_csv)
    raw_docs = frame['documents'].values
    collected = []
    with open(book_samples_csv, encoding='utf-8') as metadata_file:
        reader = csv.reader(metadata_file, delimiter='\t')
        for row_num, row in enumerate(reader):
            print(f'Reading book documents, iter = {row_num}, file = books_metadata.csv')
            if row_num == 0:
                continue  # header row
            collected.append(preprocess_text_for_word2vec(row[6]))
    for doc_num, doc in enumerate(raw_docs):
        print(f'Reading book documents, iter = {doc_num}, file = raw_books.csv')
        if pd.notna(doc):
            collected.extend(preprocess_text_for_word2vec(piece) for piece in doc.split('::'))
    return collected
def word2vec_train(training_documents):
    """Fit a Word2Vec model on the tokenized documents and return it.

    50-dimensional vectors; words seen fewer than 2 times are ignored.
    """
    print(f'Total number of training examples: {str(len(training_documents))}')
    print('Training model...')
    w2v = Word2Vec(training_documents, size=50, min_count=2, workers=4)
    w2v.train(training_documents, total_examples=len(training_documents), epochs=10)
    print('Model trained')
    return w2v
def save_model_vectors(model):
    """Persist only the trained keyed vectors (not the whole model) to disk."""
    model.wv.save(word_vectors_kv)
def main():
    """End-to-end pipeline: gather documents, train, save the vectors."""
    documents = prepare_word2vec_training_data()
    save_model_vectors(word2vec_train(documents))
if __name__ == '__main__':
main()
| {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,459 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/__init__.py | from mediarecommender.recommender import app
class MediaRecommender:
    '''Thin facade over the package-level Flask app.'''
    def __init__(self):
        # No per-instance state; the Flask app object lives at module level.
        pass
    # run() below starts the Flask development server (debug disabled).
    def run(self):
app.run(debug=False) | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,460 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/reset_database.py | # A script for resetting server's database
import pandas as pd
from mediarecommender.recommender import db
from mediarecommender.recommender.models import Movie, Game, Book
from mediarecommender.itemdatabase.parameters import *
# Drop every existing table so the import starts from a clean schema
db.drop_all()
# Recreate tables for all declared models (Movie, Game, Book)
db.create_all()
# Load the vectorized item CSVs into DataFrames keyed by their 'id' column
movie_df = pd.read_csv(vectorized_movie_csv)
movie_df.set_index('id', inplace=True)
game_df = pd.read_csv(vectorized_game_csv)
game_df.set_index('id', inplace=True)
book_df = pd.read_csv(vectorized_book_csv)
book_df.set_index('id', inplace=True)
# Populate movie table (i is the DataFrame index, i.e. the item id;
# one commit per row — slow but simple for a one-off reset script)
for i, row in movie_df.iterrows():
    movie = Movie(
        id=i,
        imdb_id=row['imdb_id'],
        title=row['title'],
        genres=row['genres'],
        rating=row['rating'],
        url=row['url'],
        vector=row['vector']
    )
    db.session.add(movie)
    db.session.commit()
# Populate game table
for i, row in game_df.iterrows():
    game = Game(
        id=i,
        steam_id=row['steam_id'],
        title=row['title'],
        rating=row['rating'],
        url=row['url'],
        vector=row['vector']
    )
    db.session.add(game)
    db.session.commit()
# Populate book table
for i, row in book_df.iterrows():
    book = Book(
        id=i,
        goodreads_id=row['goodreads_id'],
        title=row['title'],
        rating=row['rating'],
        url=row['url'],
        vector=row['vector']
    )
    db.session.add(book)
db.session.commit() | {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,461 | ccy1997/media-recommender | refs/heads/master | /mediarecommender/itemdatabase/parameters.py | # Module for holding constant parameters
# --- Raw scraped item data ---
raw_movie_csv = './data/generated/raw_movies.csv'
raw_game_csv = './data/generated/raw_games.csv'
raw_book_csv = './data/generated/raw_books.csv'
# --- Cleaned/preprocessed text ---
preprocessed_movie_csv = './data/generated/preprocessed_movies.csv'
preprocessed_game_csv = './data/generated/preprocessed_games.csv'
preprocessed_book_csv = './data/generated/preprocessed_books.csv'
# --- Items with computed feature vectors ---
vectorized_movie_csv = './data/generated/vectorized_movies.csv'
vectorized_game_csv = './data/generated/vectorized_games.csv'
vectorized_book_csv = './data/generated/vectorized_books.csv'
# --- Source sample datasets shipped with the project ---
movie_samples_csv = './data/item_source/movie_links.csv'
game_samples_csv = './data/item_source/games.csv'
book_samples_csv = './data/item_source/books.csv'
# --- word2vec training corpora ---
train_movie_csv = './data/word2vec_train/movies_metadata.csv'
train_games_csv = './data/word2vec_train/games_metadata.csv'
train_books_csv = './data/word2vec_train/books_metadata.csv'
# Saved gensim KeyedVectors file produced by word2vec_trainer
word_vectors_kv = './data/generated/word_vectors.kv'
# HTTP error retry/backoff settings
request_error_sleep_seconds = 10
request_error_retry_limit = 10
# Number of items sampled from each source dataset
num_of_movie_samples = 9742
num_of_game_samples = 13357
num_of_book_samples = 10000
| {"/mediarecommender/itemdatabase/feature_extractor.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_collection.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/data_preprocessing.py": ["/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/recommender/recommender.py": ["/mediarecommender/recommender/models.py"], "/mediarecommender/main.py": ["/mediarecommender/__init__.py"], "/mediarecommender/recommender/routes.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/recommender/recommender.py"], "/mediarecommender/itemdatabase/word2vec_trainer.py": ["/mediarecommender/itemdatabase/data_preprocessing.py", "/mediarecommender/itemdatabase/parameters.py"], "/mediarecommender/itemdatabase/reset_database.py": ["/mediarecommender/recommender/models.py", "/mediarecommender/itemdatabase/parameters.py"]} |
68,479 | BradKentAllen/schedule_machine | refs/heads/master | /tests/sched_test.py |
from schedule_machine.chrono import Chronograph, Timers, get_time_stamp, job_function_tester
from time import sleep
global poll_count
poll_count = 0
def poll_test():
    """Print a dash marker and bump the module-level poll counter."""
    global poll_count
    print('-', end='')
    poll_count += 1
def poll_test2():
    """Print a plus marker each poll (second every-poll job)."""
    print('+', end='')
def second_function():
    """Once per second: print a blank line and the US/Pacific timestamp."""
    print('\n')
    print(get_time_stamp('US/Pacific'))
def five_second_function():
    """Report the poll count since the last report, then reset it."""
    global poll_count
    print(f'\n--{poll_count}--')
    poll_count = 0
def fifteen_second_function():
    """Deliberately slow job (4s sleep) for testing scheduler blocking."""
    print('start 15 second function')
    sleep(4)
    print('end 15 second function')
def minute_function():
    """Marker printed by the every-minute timer."""
    print('minute function runs')
def test_function():
    """Marker printed by the (commented-out) schedule timer."""
    print('this is the test function')
print('test run')
#### Create Timers
maker = Timers()
maker.create_timer('every poll', poll_test)
maker.create_timer('every poll', poll_test2)
maker.create_timer('every second', second_function)
maker.create_timer('on the 5 second', five_second_function)
maker.create_timer('on the 15 second', fifteen_second_function)
#maker.create_timer('every minute', minute_function)
#maker.create_timer('schedule', test_function, '17:32')
#### helper method to check function times
#job_function_tester(maker.timer_jobs)
#exit()
#### Run Chronograph
# wait_to_run=True defers the (infinite) timer loop until run_timers()
#Chronograph(maker.timer_jobs, 'US/Pacific')
chrono = Chronograph(maker.timer_jobs, 'US/Pacific', wait_to_run=True)
# other accepted zone names:
# 'US/Aleutian', 'US/Hawaii', 'US/Alaska', 'US/Arizona', 'US/Michigan'
# 'US/Pacific', 'US/Mountain', 'US/Central', 'US/Eastern'
chrono.run_timers(debug=True)
| {"/tests/sched_test.py": ["/schedule_machine/chrono.py"]} |
68,480 | BradKentAllen/schedule_machine | refs/heads/master | /schedule_machine/chrono.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# chron.py
'''primary code for schedule_machine
Simple schedule package for managing timed functions in a python
based machine such as Raspberry Pi. Target is to get reasonably
consistent timing to .1 seconds.
AditNW LLC, Redmond , WA
www.AditNW.com
'''
__author__ = 'Brad Allen, AditNW LLC'
__all__ = ['chronograph',]
# Rev 0.1.0 - first production release
__version__ = '0.1.0'
from datetime import datetime
from time import time, sleep
import pytz
import threading
from queue import Queue
import types
import timeit
class Timers:
    '''Builds the dictionary of timer jobs consumed by Chronograph.'''
    def __init__(self):
        '''self.timer_jobs is the primary resource in Timers
        This is filled by Timers
        It is then accessed by the source
        and served to Chronograph
        '''
        #### timer job lists
        self.timer_jobs = {
            'every poll': [],
            'every second': [],
            'on the 5 second': [],
            'on the 15 second': [],
            'on the 30 second': [],
            'every minute': [],
            'on the 5 minute': [],
            'on the 15 minute': [],
            'on the 30 minute': [],
            'every hour': [],
            'schedule': [],  # (function, 'HH:MM')
            'thread_jobs': [],  # these must also be in a timer
            }

    def create_timer(self, T_mode, func, mark=None, use_thread=False):
        '''Add a timer to self.timer_jobs
        'on' and 'every' timers require a function
        'schedule' timers require function and a time
        Time must be a string in 24 hr format
        Two types of timers (T-mode):
        1) 'on' and 'every' set up regular timers
        2) 'schedule' timers occur at a specific, local time
        Raises ValueError on any invalid mode, function, or schedule time.
        '''
        #### validate timer
        # BUG FIX: type-check T_mode *before* calling .lower(); the original
        # lowered first, so a non-string raised AttributeError and the
        # intended ValueError below was unreachable.
        if not isinstance(T_mode, str):
            raise ValueError(f'Timer mode must be in quotes (a string). e.g. "on the 5 seconds"')
        # allow capitalization in timers
        timer_mode = T_mode.lower()
        # check if timer is in timer_jobs
        if timer_mode not in list(self.timer_jobs.keys()):
            raise ValueError(f'Attempted to use non-timer: "{T_mode}", available timers are: {list(self.timer_jobs.keys())}')
        #### validate function
        # hasattr(__call__) accepts builtins and bound methods, not just
        # plain functions
        if not hasattr(func, '__call__'):
            raise ValueError(f'Timer\'s function must be a function object, it should not have () on the end. e.g. myfunction, not myfunction()')
        if timer_mode[:2] == 'on' or timer_mode[:5] == 'every':
            #### on and every can be directly placed in timer_jobs
            self.timer_jobs[timer_mode].append(func)
            if use_thread == True:
                self.timer_jobs['thread_jobs'].append(func)
        elif timer_mode == 'schedule':
            #### check format of the schedule time
            # is 24 hour format string
            if not isinstance(mark, str) or len(mark) != 5:
                raise ValueError(f'Schedule time ({mark}) must be a string in 24 hour format. e.g. "07:02"')
            # validate timer hours and minutes are formatted correctly
            try:
                # validate hours
                int(mark[:2])
            except ValueError:
                raise ValueError(f'Schedule time format issue, are hours in 24 hour format? e.g. "07:02"')
            try:
                # validate minutes
                int(mark[-2:])
            except ValueError:
                raise ValueError(f'Schedule time ({mark}) format issue, are minutes two digits? e.g. 17:02')
            #### add schedule timer to timer_jobs
            if 0 <= int(mark[:2]) < 24 and 0 <= int(mark[-2:]) < 60:
                self.timer_jobs['schedule'].append((func, mark))
                if use_thread == True:
                    self.timer_jobs['thread_jobs'].append(func)
            else:
                # error caused by hours or minutes not within range
                raise ValueError(f'Scheduled time ({mark}) not in 24 hour format HH:MM')
        else:
            # error for not being on, every, or schedule (this should never happen)
            raise ValueError(f'Attempted to use non-timer: {T_mode}')
class Chronograph:
    def __init__(self, jobs, local_time_zone='UTC', poll_millis=100, wait_to_run=False):
        '''Chronograph object runs timers
        Standard Polling is .1 seconds
        every poll timers run in primary thread
        every second timers run in thread1
        A separate thread (chrono_thread) is created for all other timers
        chrono_thread has a lock so only one can run at a time
        If chrono_thread is locked, the next chrono_thread will be skipped
        This effectively gives every_poll timers priority
        '''
        # self.jobs are all of the timers
        # it is a dictionary created by the Timers class
        self.jobs = jobs
        # NOTE(review): poll_queue appears unused within this class — confirm
        # before removing.
        self.poll_queue = Queue(maxsize = 10)
        self.seconds_queue = Queue(maxsize = 10)
        self.general_queue = Queue(maxsize = 10)
        # polling time in milliseconds
        self.POLL_MILLIS = poll_millis
        self.local_time_zone = local_time_zone
        if wait_to_run == False:
            self.run_timers()
    def run_timers(self, poll_adjuster = .99, debug=False):
        '''runs timers as follows:
        Step 1: run every poll jobs
        Step 2: load timer queues for next poll
        Step 3: delay function which runs previous poll queues
        poll_adjuster allows time for other timing
        Runs forever; never returns.
        '''
        if debug == True: print('\n\n run_timer with debug=True')
        #### set up "last seen" hour/minute/second markers
        (last_hour, last_minute, last_second) = get_time(self.local_time_zone)
        last_milli = 0
        start_milli = time() * 1000
        while True:
            milli = (time() * 1000) - start_milli
            #### deal with millis rolling
            # this should never happen
            if milli < 0:
                milli = (time() * 1000)
                last_milli = 0
            if (milli - last_milli) >= self.POLL_MILLIS:
                # HHMMSS is ('HH', 'MM', 'SS') string tuple from get_time()
                HHMMSS = get_time(self.local_time_zone)
                #### polling marker
                last_milli = milli
                #### Run Every poll jobs ####
                # every-poll jobs run inline (blocking), giving them priority
                if self.jobs['every poll'] != []:
                    for job in self.jobs['every poll']:
                        #print(f'poll: {job.__name__}')
                        job()
                #### Second ####
                if last_second != HHMMSS[2]:
                    #### Every second jobs ####
                    for job in self.jobs['every second']:
                        self.seconds_queue.put(job)
                    last_second = HHMMSS[2]
                    #### On second jobs ####
                    if int(HHMMSS[2])%5 == 0 or int(HHMMSS[2]) == 0:
                        for job in self.jobs['on the 5 second']:
                            self.general_queue.put(job)
                    if int(HHMMSS[2])%15 == 0 or int(HHMMSS[2]) == 0:
                        for job in self.jobs['on the 15 second']:
                            self.general_queue.put(job)
                    if int(HHMMSS[2])%30 == 0 or int(HHMMSS[2]) == 0:
                        for job in self.jobs['on the 30 second']:
                            self.general_queue.put(job)
                #### Minute ####
                if last_minute != HHMMSS[1]:
                    #### Every minute jobs ####
                    for job in self.jobs['every minute']:
                        self.general_queue.put(job)
                    last_minute = HHMMSS[1]
                    #### On minute jobs ####
                    if int(HHMMSS[1])%5 == 0 or int(HHMMSS[1]) == 0:
                        for job in self.jobs['on the 5 minute']:
                            self.general_queue.put(job)
                    if int(HHMMSS[1])%15 == 0 or int(HHMMSS[1]) == 0:
                        for job in self.jobs['on the 15 minute']:
                            self.general_queue.put(job)
                    if int(HHMMSS[1])%30 == 0 or int(HHMMSS[1]) == 0:
                        for job in self.jobs['on the 30 minute']:
                            self.general_queue.put(job)
                    #### schedule jobs
                    # fires once, on the minute change matching 'HH:MM'
                    if self.jobs['schedule'] != []:
                        for details in self.jobs['schedule']:
                            if details[1][:2] == HHMMSS[0] and details[1][-2:] == HHMMSS[1]:
                                self.general_queue.put(details[0])
                #### Hour ####
                if last_hour != HHMMSS[0]:
                    #### Every hour jobs ####
                    for job in self.jobs['every hour']:
                        self.general_queue.put(job)
                    last_hour = HHMMSS[0]
            #### Delay function
            # runs queue jobs while waiting for poll time
            # poll_adjuster must take into account longest poll job
            while (milli - last_milli) < (poll_adjuster * self.POLL_MILLIS):
                #### run queues
                # seconds queue is drained before the general queue
                if self.seconds_queue.empty() == False:
                    job = self.seconds_queue.get()
                    if job in self.jobs['thread_jobs']:
                        # run job as thread
                        self.run_thread_job(job)
                    else:
                        # run job normal as blocker
                        #print(f'second: {job.__name__}')
                        job()
                else:
                    if self.general_queue.empty() == False:
                        job = self.general_queue.get()
                        if job in self.jobs['thread_jobs']:
                            # run job as thread
                            self.run_thread_job(job)
                        else:
                            # run job normal as blocker
                            #print(f'general: {job.__name__}')
                            job()
                    else:
                        pass # all queues empty
                #### update milli
                milli = (time() * 1000) - start_milli
    def run_thread_job(self, job):
        # Fire-and-forget: daemon thread so a hung job cannot block shutdown.
        this_thread_job= threading.Thread(target=job, daemon=True)
        this_thread_job.start()
def job_function_tester(jobs):
    '''Run every function registered in a timer_jobs dictionary once and
    print how long each takes.  Useful for sizing poll_adjuster.'''
    print('\n\n Evaluate each functions time to run:')
    print(f'function name run time')

    def elapsed_time(millis):
        # Pretty-print a duration given in milliseconds.
        if millis < 1000:
            return f'{millis:.3f} milliseconds'
        else:
            return f'{(millis/1000):.2f} seconds'

    for key, details in jobs.items():
        for job in details:
            # check for function to find on and every timers
            if isinstance(job, types.FunctionType):
                print('\n')
                start_milli = (time() * 1000)
                job()
                total_milli = ((time() * 1000) - start_milli)
                print(f'{job.__name__}: {elapsed_time(total_milli)}')
            # check for tuple to find schedule timers: (function, 'HH:MM')
            elif isinstance(job, tuple):
                print('\n')
                # BUG FIX: the original called timeit.timeit() (which times a
                # no-op statement, not the job) and then printed total_milli —
                # a name that is undefined unless a plain function happened to
                # run earlier, raising NameError.
                start_milli = (time() * 1000)
                job[0]()
                total_milli = ((time() * 1000) - start_milli)
                print(f'{job[0].__name__}: {elapsed_time(total_milli)}')
            else:
                print('\nimproper timer')
def get_time_stamp(local_time_zone='UTC', time_format='HMS'):
    """Return the current local time as a string.

    time_format 'YMD:HM' yields 'YYYY-MM-DD-HH:MM'; anything else
    yields 'HH:MM:SS'.
    """
    now = datetime.now(pytz.timezone(local_time_zone))
    fmt = '%Y-%m-%d' + '-' + '%H:%M' if time_format == 'YMD:HM' else '%H:%M:%S'
    return now.strftime(fmt)
def get_time(local_time_zone='UTC'):
    """Return the current local time as a ('HH', 'MM', 'SS') string tuple."""
    now = datetime.now(pytz.timezone(local_time_zone))
    return tuple(now.strftime(code) for code in ('%H', '%M', '%S'))
| {"/tests/sched_test.py": ["/schedule_machine/chrono.py"]} |
68,481 | BradKentAllen/schedule_machine | refs/heads/master | /setup.py | #setup.py
'''
setup file for schedule_machine
'''
from setuptools import setup
with open("README.md") as file:
read_me_description = file.read()
# This call to setup() does all the work
setup(
    name="schedule_machine",
    # BUG FIX: the original repeated the `classifiers` keyword, which is a
    # SyntaxError (keyword argument repeated), and passed an invalid
    # REQUIRES_PYTHON kwarg; python_requires below covers that.
    version="0.1.0",
    description="A simple python scheduler",
    long_description=read_me_description,
    long_description_content_type="text/markdown",
    # NOTE(review): this URL points at the realpython/reader template repo —
    # confirm the intended project URL.
    url="https://github.com/realpython/reader",
    author="Brad Allen - AditNW LLC",
    author_email="brad.allen@aditnw.com",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Operating System :: OS Independent",
    ],
    packages=["schedule_machine"],
    include_package_data=True,
    install_requires=["pytz"],
    python_requires='>=3.6',
)
68,507 | zmanhcong/Flask_webapp | refs/heads/master | /market/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask(__name__)
# SQLite file that SQLAlchemy creates/uses for the market data
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///market.db'
# Session-signing secret; originally generated with os.urandom(12).hex()
app.config['SECRET_KEY'] = 'fb8565bbd6c7c6dd5eaa6592'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager(app)
# Anonymous visitors hitting @login_required views get sent to this endpoint
login_manager.login_view = "login_page"
# Bootstrap alert category for the "Please log in to access this page." flash
login_manager.login_message_category = "info"
# Imported last so the routes module can import `app`/`db` from this package
# without a circular-import failure.
from market import routes
| {"/market/models.py": ["/market/__init__.py"]} |
68,508 | zmanhcong/Flask_webapp | refs/heads/master | /market/models.py | from market import db, login_manager #installed in __init__.py
from market import bcrypt #installed in __init__.py
from flask_login import UserMixin #use for login: it contain: is_authenticated(login_required use in routes.py), is_active,is_anonymous, get_id()
@login_manager.user_loader
def load_user(user_id):
    # flask-login callback: given the id stored in the session, return the
    # matching User row, or None if that user no longer exists.
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Account model; UserMixin supplies is_authenticated/is_active/get_id
    for flask-login."""
    id = db.Column(db.Integer(), primary_key=True)
    username = db.Column(db.String(length=30), nullable=False, unique=True)
    email_address = db.Column(db.String(length=50), nullable=False, unique=True)
    password_hash = db.Column(db.String(length=60), nullable=False)
    budget = db.Column(db.Integer(), nullable=False, default=1000)
    # backref lets an Item reach its owner via item.owned_user
    items = db.relationship('Item', backref='owned_user', lazy=True)

    @property
    def prettier_budget(self):
        # Insert a thousands separator for display: 1000 -> "1,000$".
        if len(str(self.budget)) >= 4:
            return f'{str(self.budget)[:-3]},{str(self.budget)[-3:]}$'
        else:
            return f"{self.budget}$"

    @property
    def password(self):
        # BUG FIX: the original returned self.password, which re-enters this
        # property and recurses until RecursionError.  The plaintext password
        # is never stored, so reading it is an error.
        raise AttributeError('password is write-only')

    @password.setter
    def password(self, plain_text_password):
        # Store only the bcrypt hash of the supplied plaintext.
        self.password_hash = bcrypt.generate_password_hash(plain_text_password).decode('utf-8')

    def check_password_correction(self, attempted_password):
        # True if the attempt matches the stored bcrypt hash.
        return bcrypt.check_password_hash(self.password_hash, attempted_password)

    def can_purchase(self, item_obj):
        # A user can only buy an item they can afford.
        return self.budget >= item_obj.price

    def can_sell(self, item_obj):
        # A user may only sell items they currently own.
        return item_obj in self.items
class Item(db.Model):
    """A purchasable market item; `owner` links to the owning User row."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(length=30),nullable=False, unique=True)
    price = db.Column(db.Integer(), nullable=False)
    barcode = db.Column(db.String(length=12),nullable=False, unique=True)
    description = db.Column(db.String(length=1024), nullable=False, unique=True)
    owner = db.Column(db.Integer(), db.ForeignKey('user.id')) # FK to the owning user's primary key
    # nullable=False adds a NOT NULL constraint; unique=True builds a unique index
    def __repr__(self):
        # Show the item name instead of the default object repr in shells/logs.
        return f'Item {self.name}'
    def buy(self, user):
        # Transfer ownership to the buyer and deduct the price from their budget.
        self.owner = user.id
        user.budget -= self.price
        db.session.commit()
    def sell(self, user):
        # Reverse of buy(): release ownership and refund the price.
        self.owner = None
        user.budget += self.price
db.session.commit() | {"/market/models.py": ["/market/__init__.py"]} |
68,529 | n00j/externalsort | refs/heads/master | /frequencyCount.py | #!/usr/bin/python
import argparse
import os
import sys
class WordFrequencyCount:
    """Count runs of identical words in a sorted one-word-per-line file and
    write '<word> <count>' lines, buffering at most ~maxMemoryUsage bytes."""

    def __init__(self, memoryUsageInMegabytes, inputfilename, outputfilename):
        self.inputFileName = inputfilename
        self.outputFileName = outputfilename
        self.maxMemoryUsage = memoryUsageInMegabytes * 1024 * 1024
        self.wordArray = []

    def count(self):
        """Stream the (sorted) input and emit counts to the output file."""
        # BUG FIX: files are now closed via `with`; the original leaked both
        # handles, so output could stay unflushed.
        with open(self.inputFileName, 'r') as inFile, \
                open(self.outputFileName, 'w') as outFile:
            memoryUsed = 0
            currWord = None
            currCount = 0
            while True:
                line = inFile.readline().rstrip()
                if line == "":
                    # EOF (also stops on a blank line, matching the original)
                    break
                if line != currWord:
                    if currWord is not None:
                        entry = (currWord, currCount)
                        self.wordArray.append(entry)
                        memoryUsed += sys.getsizeof(entry) + sys.getsizeof(currWord) + sys.getsizeof(currCount)
                        if memoryUsed > self.maxMemoryUsage:
                            self._flush(outFile)
                            memoryUsed = 0
                    currWord = line
                    currCount = 1
                else:
                    currCount += 1
                # progress indicator: current buffered byte estimate
                sys.stdout.write("\r" + str(memoryUsed))
                sys.stdout.flush()
            # BUG FIX: emit the trailing word and any buffered entries; the
            # original dropped everything still in memory at EOF, producing an
            # empty file whenever the memory cap was never exceeded.
            if currWord is not None:
                self.wordArray.append((currWord, currCount))
            self._flush(outFile)

    def _flush(self, outFile):
        # Write and clear the buffered (word, count) entries.
        for word, count in self.wordArray:
            outFile.write(word + ' ' + str(count) + '\n')
        self.wordArray = []
def main():
    """Parse CLI args and run the word frequency count.

    usage: frequencyCount.py <memory_mb> <input_file> <output_file>
    """
    parser = argparse.ArgumentParser()
    # BUG FIX: the original passed two positional name strings
    # ('memory', '<memory>') to add_argument, which argparse rejects;
    # '<memory>' belongs in metavar.
    parser.add_argument('memory',
                        metavar='<memory>',
                        nargs=1,
                        help='amount of memory to use')
    parser.add_argument('inputfilename',
                        metavar='<input_filename>',
                        nargs=1,
                        help='name of input file')
    parser.add_argument('outputfilename',
                        metavar='<outputfilename>',
                        nargs=1,
                        help='name of output file')
    args = parser.parse_args()
    # BUG FIX: Python 2 `print args` -> Python 3 function call.
    print(args)
    freqCount = WordFrequencyCount(int(args.memory[0]), args.inputfilename[0], args.outputfilename[0])
    freqCount.count()
if __name__ == '__main__':
main() | {"/main.py": ["/externalSort.py", "/frequencyCount.py"]} |
68,530 | n00j/externalsort | refs/heads/master | /externalSort.py | #!/usr/bin/python
import argparse
import os
class SplitFile:
    """Split a text file into individually sorted tmpfile_<n>.txt chunks of
    roughly blockSize bytes each (phase 1 of the external sort)."""

    def __init__(self, fileName, blockSize):
        self.fileName = fileName
        self.blockSize = blockSize
        self.blockFileNames = []

    def split(self):
        """Read the input blockSize bytes at a time, sort each block's lines,
        and write each block to its own temp file."""
        fileIndex = 0
        # BUG FIX: close the input handle (the original leaked it).
        with open(self.fileName, 'r') as infile:
            while True:
                # readlines(hint) stops after ~blockSize bytes of lines
                lines = infile.readlines(self.blockSize)
                if lines == []:
                    break
                lines.sort()
                self.writeFile(lines, fileIndex)
                fileIndex = fileIndex + 1

    def writeFile(self, data, fileIndex):
        """Write one sorted block to tmpfile_<n>.txt and record its name."""
        filename = 'tmpfile_' + str(fileIndex) + '.txt'
        # BUG FIX: close the output handle so the block is flushed to disk.
        with open(filename, 'w') as out:
            out.write(''.join(data))
        self.blockFileNames.append(filename)

    def getFileNames(self):
        # BUG FIX: Python 2 print statement -> Python 3 function call.
        print(self.blockFileNames)
        return self.blockFileNames

    def cleanUp(self):
        """Remove all temp block files created by split()."""
        [os.remove(f) for f in self.blockFileNames]
class MergeFiles:
    """N-way merge of pre-sorted files into <fileName>.out (phase 2 of the
    external sort)."""

    def __init__(self, fileName, fileList):
        self.fileList = fileList
        self.fileName = fileName
        self.numFiles = len(fileList)
        self.numBuffers = self.numFiles

    def merge(self):
        """Keep one buffered line per input file and repeatedly emit the
        lexicographically smallest until every file is exhausted."""
        outputfile = open(self.fileName + '.out', 'w')
        buffers = [None for x in range(self.numBuffers)]
        bufferFileHandles = [None for x in range(self.numFiles)]
        for i in range(self.numFiles):
            bufferFileHandles[i] = open(self.fileList[i], 'r')
            buffers[i] = bufferFileHandles[i].readline()
        while True:
            idx = self.selectMinBuffer(buffers)
            if idx == -1:
                break
            outputfile.write(buffers[idx])
            # refill the emitted slot; None marks an exhausted file
            line = bufferFileHandles[idx].readline()
            buffers[idx] = line if line != "" else None
        # BUG FIX: the original loop closed bufferFileHandles[index]
        # numFiles times (stale loop variable) and leaked the others;
        # the output file was also never closed/flushed.
        for handle in bufferFileHandles:
            handle.close()
        outputfile.close()

    def selectMinBuffer(self, buffers):
        """Return the index of the smallest non-None buffered line, or -1
        when all buffers are exhausted."""
        minBufferIndex = -1
        minStr = None
        for i in range(len(buffers)):
            if buffers[i] is not None and (minStr is None or buffers[i] < minStr):
                minBufferIndex = i
                minStr = buffers[i]
        return minBufferIndex
class ExternalSort:
    """Sort a file larger than memory: split into sorted blocks, then merge."""

    def __init__(self, memoryInMegabytes, fileName):
        self.blockSize = memoryInMegabytes * 1024 * 1024
        self.fileName = fileName

    def sort(self):
        """Split the input into sorted tmp files, merge them into
        <fileName>.out, and remove the tmp files."""
        # (the original computed getNumberOfBlocks() here and discarded it)
        fileSplitter = SplitFile(self.fileName, self.blockSize)
        fileSplitter.split()
        merger = MergeFiles(self.fileName, fileSplitter.getFileNames())
        merger.merge()
        fileSplitter.cleanUp()

    def getNumberOfBlocks(self):
        # BUG FIX: use floor division; under Python 3 the original '/'
        # returned a float (e.g. 1.0000095) instead of an integer block count.
        return os.stat(self.fileName).st_size // self.blockSize + 1
def main():
    """Parse CLI args and externally sort the given file.

    usage: externalSort.py <memory_mb> <filename>
    """
    parser = argparse.ArgumentParser()
    # BUG FIX: the original passed two positional name strings
    # ('memory', '<memory>') to add_argument, which argparse rejects;
    # '<memory>' belongs in metavar.
    parser.add_argument('memory',
                        metavar='<memory>',
                        nargs=1,
                        help='amount of memory to use')
    parser.add_argument('filename',
                        metavar='<filename>',
                        nargs=1,
                        help='name of file to sort')
    args = parser.parse_args()
    sorter = ExternalSort(int(args.memory[0]), args.filename[0])
    sorter.sort()
if __name__ == '__main__':
main()
| {"/main.py": ["/externalSort.py", "/frequencyCount.py"]} |
68,531 | n00j/externalsort | refs/heads/master | /generateRandomFile.py | #!/usr/bin/python
import random
import string
import argparse
import math
import sys
def main():
    """Generate N random 30-letter words, one per line, into a file.

    usage: generateRandomFile.py <words> <filename>
    """
    BATCH_SIZE = 500000
    parser = argparse.ArgumentParser()
    parser.add_argument('words',
                        metavar='<words>',
                        nargs=1,
                        help='number of words to generate, one word per line')
    parser.add_argument('filename',
                        metavar='<filename>',
                        nargs=1,
                        help='name of output file')
    args = parser.parse_args()
    wordCount = int(args.words[0])
    # renamed from `file` to avoid shadowing the builtin
    out_file = open(args.filename[0], 'w')
    progress = 0
    index = 0
    while index < wordCount:
        # BUG FIX: string.letters is Python 2 only; string.ascii_letters is
        # the Python 3 equivalent (same 52 ASCII letters).
        # min() collapses the original's duplicated full-batch/last-batch
        # branches into one expression.
        wordsToGenerate = min(BATCH_SIZE, wordCount - index)
        wordList = [''.join(random.sample(string.ascii_letters, 30)) + '\n'
                    for x in range(wordsToGenerate)]
        out_file.write(''.join(wordList))
        index = index + BATCH_SIZE
        new_progress = float(index) / float(wordCount) * 100.0
        if new_progress > progress:
            progress = new_progress
        sys.stdout.write("\r" + str(progress))
        sys.stdout.flush()
    sys.stdout.write("\n")
    out_file.close()
if __name__ == '__main__':
main()
| {"/main.py": ["/externalSort.py", "/frequencyCount.py"]} |
68,532 | n00j/externalsort | refs/heads/master | /main.py | #!/usr/bin/python
import argparse
import os
import sys
import externalSort
import frequencyCount
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--memory',
help='amount of memory to use in megabytes',
default='100')
parser.add_argument('inputfilename',
metavar='<input_filename>',
nargs=1,
help='name of input file')
parser.add_argument('outputfilename',
metavar='<outputfilename>',
nargs=1,
help='name of input file')
args = parser.parse_args()
sorter = externalSort.ExternalSort(int(args.memory), args.inputfilename[0])
sorter.sort()
freqCount = frequencyCount.WordFrequencyCount(int(args.memory), args.inputfilename[0] + '.out', args.outputfilename[0])
freqCount.count()
if __name__ == '__main__':
main() | {"/main.py": ["/externalSort.py", "/frequencyCount.py"]} |
68,535 | hind-hb/my-first-blog | refs/heads/main | /company/views.py |
from django.views.decorators.csrf import csrf_exempt
from company.models import Company , Department ,Employee , User
from django.http import HttpResponseRedirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
from django.contrib import messages
from operator import itemgetter
from django.db import connection
# Create your views here.
def home(request):
    """Render the authenticated landing page."""
    return render(request, "base.html")
def index(request):
    """Render the site index page."""
    return render(request, "index.html")
def pagehome(request):
    """Render the public home page."""
    return render(request, "pagehome.html")
def signup(request):
    """Register a new user from the signup form; re-renders the form on GET
    or on validation failure."""
    if request.method == 'POST':
        user = User()
        # BUG FIX: the original used attribute access
        # (user.fname.request.POST['fname']) instead of assignment, so every
        # POST raised AttributeError and nothing was ever saved.
        user.fname = request.POST['fname']
        user.lname = request.POST['lname']
        user.email = request.POST['email']
        user.password = request.POST['password']
        user.repassword = request.POST['repassword']
        if user.password != user.repassword:
            messages.info(request, 'passwords do not match')
            return redirect('signup')
        elif user.fname == "" or user.password == "":
            messages.info(request, 'some fields are empty')
            return redirect('signup')
        else:
            # NOTE(review): the password is stored as submitted (plaintext);
            # it should be hashed before saving.
            user.save()
    return render(request, 'signup.html')
def signin(request):
    """Authenticate a user by email/password; show base.html on success,
    otherwise flash a message and re-render the signin form."""
    if request.method == 'POST':
        email = request.POST['email']
        # BUG FIX: the original read request.POST['password '] (trailing
        # space), which always raised KeyError.
        password = request.POST['password']
        # BUG FIX: the original drove raw cursors through a misused
        # django.db.connection API (connection.connect(...) does not exist)
        # and its comparison loop started at index 1, skipping the first
        # credential pair.  Query through the ORM instead.
        # SECURITY NOTE: passwords are compared in plaintext here; they
        # should be hashed (e.g. django.contrib.auth.hashers).
        user = User.objects.filter(email=email, password=password).first()
        if user is not None:
            return render(request, 'base.html', {'email': email})
        messages.info(request, 'check username or password')
        return redirect('signin')
    return render(request, 'signin.html')
def add_employee(request):
    """Create an Employee row from the posted 'content' field, then
    redirect to the site root."""
    posted_text = request.POST['content']
    Employee.objects.create(text=posted_text)
    return HttpResponseRedirect("/")
| {"/company/views.py": ["/company/models.py"], "/company/admin.py": ["/company/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.