| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
cfg = Config.instance()
default = cfg.get_expanded(section, option, **kwargs)
return cfg.get_expanded(section, "{}_{}".format(self.workflow_type, option),
default=default, **kwargs) | def get_prefixed_config(self, section, option, **kwargs) | TODO. | 5.379511 | 5.336412 | 1.008076 |
tol = abs(tol)
im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
for i, (x, y, z_) in enumerate(p.coordinates):
if z_ == 0:
UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)")
if z_ ==... | def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum) | Get an image representation of the intensity distribution
of the ion with specified m/z value.
By default, the intensity values within the tolerance region are summed.
:param p:
the ImzMLParser (or anything else with similar attributes) for the desired dataset
:param mz_value:
m/z valu... | 4.948709 | 5.223704 | 0.947356 |
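The `getionimage` helper above builds a 2-D intensity map from an imzML dataset. A minimal usage sketch, assuming pyimzml is installed and an illustrative file name `example.imzML` (with its companion `.ibd`) exists:

```python
from pyimzml.ImzMLParser import ImzMLParser, getionimage

p = ImzMLParser("example.imzML")
# Sum intensities within +/- 0.25 m/z of 885.55 at every pixel position.
img = getionimage(p, 885.55, tol=0.25)
print(img.shape)  # (max count of pixels y, max count of pixels x)
```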
mz_group = int_group = None
slist = None
elem_iterator = self.iterparse(self.filename, events=("start", "end"))
if sys.version_info > (3,):
_, self.root = next(elem_iterator)
else:
_, self.root = elem_iterator.next()
for event, elem in e... | def __iter_read_spectrum_meta(self) | This method should only be called by __init__. Reads the data formats, coordinates and offsets from
the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum
metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.
Supported ... | 3.121592 | 2.955619 | 1.056155 |
d = {}
scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)
supportedparams1 = [("max count of pixels x", int), ("max count of pixels y", int),
... | def __readimzmlmeta(self) | This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
the .imzML file.
This method reads only a subset of the available meta information and may be extended in the future. The keys
are named similarly to the imzML names. Currently supported ... | 2.245338 | 1.921898 | 1.168292 |
try:
pixel_size_x = self.imzmldict["pixel size x"]
pixel_size_y = self.imzmldict["pixel size y"]
except KeyError:
raise KeyError("Could not find all pixel size attributes in imzML file")
image_x, image_y = self.coordinates[i][:2]
return image_... | def get_physical_coordinates(self, i) | For a pixel index i, return the real-world coordinates in nanometers.
This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.
:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the... | 3.14895 | 2.340383 | 1.345485 |
mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
return mz_array, intensity_array | def getspectrum(self, index) | Reads the spectrum at specified index from the .ibd file.
:param index:
Index of the desired spectrum in the .imzML file
Output:
mz_array: numpy.ndarray
Sequence of m/z values representing the horizontal axis of the desired mass
spectrum
intensity_a... | 2.810451 | 2.797519 | 1.004623 |
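Continuing the parser `p` from the sketch above, `getspectrum` returns the paired arrays for a single spectrum:

```python
# Read the first spectrum; both arrays have one entry per recorded peak.
mzs, intensities = p.getspectrum(0)
assert len(mzs) == len(intensities)
```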
offsets = [self.mzOffsets[index], self.intensityOffsets[index]]
lengths = [self.mzLengths[index], self.intensityLengths[index]]
lengths[0] *= self.sizeDict[self.mzPrecision]
lengths[1] *= self.sizeDict[self.intensityPrecision]
self.m.seek(offsets[0])
mz_string = ... | def get_spectrum_as_string(self, index) | Reads m/z array and intensity array of the spectrum at specified location
from the binary file as a byte string. The string can be unpacked by the struct
module. To get the arrays as numbers, use getspectrum
:param index:
Index of the desired spectrum in the .imzML file
:rty... | 2.494165 | 2.358719 | 1.057424 |
'''reads a mz array from the currently open ibd file'''
self.ibd.seek(mz_offset)
data = self.ibd.read(mz_enc_len)
self.ibd.seek(0, 2)
data = self.mz_compression.decompress(data)
return tuple(np.frombuffer(data, dtype=self.mz_dtype))  # np.fromstring is deprecated for binary input
'''given an mz array, return the mz_data (disk location)
if the mz array was not previously written, write to disk first'''
mzs = tuple(mzs) # must be hashable
if mzs in self.lru_cache:
return self.lru_cache[mzs]
# mz not recognized ... check hash
mz_hash = ... | def _get_previous_mz(self, mzs) | given an mz array, return the mz_data (disk location)
if the mz array was not previously written, write to disk first | 3.759005 | 2.78922 | 1.347691 |
# must be rounded now to allow comparisons to later data
# but don't waste CPU time in continuous mode since the data will not be used anyway
if self.mode != "continuous" or self.first_mz is None:
mzs = self.mz_compression.rounding(mzs)
intensities = self.intensity_c... | def addSpectrum(self, mzs, intensities, coords, userParams=[]) | Add a mass spectrum to the file.
:param mzs:
mz array
:param intensities:
intensity array
:param coords:
* 2-tuple of x and y position OR
* 3-tuple of x, y, and z position
note some applications want coords to be 1-indexed | 2.912286 | 2.966696 | 0.98166 |
'''alias of close()'''
self.ibd.close()
self._write_xml()
self.xml.close() | def finish(self) | alias of close() | 20.66239 | 14.284315 | 1.446509 |
global settings, project_settings
# Global changes settings
settings = Changes.load()
# Project specific settings
project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token)) | def initialise() | Detects, prompts and initialises the project.
Stores project and tool configuration in the `changes` module. | 14.724542 | 12.665328 | 1.162587 |
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
packages = Path('dist').files() if not context.dry_run else "nothing"
if not result:
raise Exception('Error building pack... | def build_distributions(context) | Builds package distributions | 4.634826 | 4.43005 | 1.046224 |
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Succe... | def install_package(context) | Attempts to install the sdist and wheel. | 4.551575 | 4.267712 | 1.066514 |
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not c... | def upload_package(context) | Uploads your project packages to pypi with twine. | 4.163539 | 3.958171 | 1.051885 |
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += ' -i %s' % context.pypi  # leading space separates the flag from the package name
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
... | def install_from_pypi(context) | Attempts to install your package from pypi. | 2.647385 | 2.59316 | 1.020911 |
log.info('%s? %s' % (probe_name, probe_result))
if not probe_result:
raise exceptions.ProbeException(failure_msg)
else:
return True | def report_and_raise(probe_name, probe_result, failure_msg) | Logs the probe result and raises on failure | 3.410565 | 3.182794 | 1.071563 |
init_path = '{}/__init__.py'.format(python_module)
has_metadata = (
exists(init_path)
and attributes.has_attribute(python_module, '__version__')
and attributes.has_attribute(python_module, '__url__')
)
return report_and_raise(
'Has module metadata',
has_metad... | def has_metadata(python_module) | `<module_name>/__init__.py` with `__version__` and `__url__` | 4.048614 | 3.536479 | 1.144815 |
log.info('Checking project for changes requirements.')
return (
has_tools()
and has_setup()
and has_metadata(python_module)
and has_test_runner()
and has_readme()
and has_changelog()
) | def probe_project(python_module) | Check if the project meets `changes` requirements.
Complain and exit otherwise. | 7.210746 | 5.473756 | 1.317331 |
commit_version_change(context)
if context.github:
# github token
project_settings = project_config(context.module_name)
if not project_settings['gh_token']:
click.echo('You need a GitHub token for changes to create a release.')
click.pause(
'... | def publish(context) | Publishes the project | 4.631872 | 4.660793 | 0.993795 |
try:
run_tests()
if not context.skip_changelog:
generate_changelog(context)
increment_version(context)
build_distributions(context)
install_package(context)
upload_package(context)
install_from_pypi(context)
publish(context)
... | def perform_release(context) | Executes the release process. | 5.368446 | 5.264467 | 1.019751 |
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
if line.startswith(attribute_name):
return ast.literal_eval(line.split('=')[1].strip()) | def extract_attribute(module_name, attribute_name) | Extract metadata property from a module | 2.54507 | 2.438196 | 1.043833 |
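A hedged example of `extract_attribute`, assuming a hypothetical package `mypkg` whose `__init__.py` contains the line `__version__ = '1.2.3'`:

```python
# ast.literal_eval parses the right-hand side "'1.2.3'" into the str '1.2.3'.
version = extract_attribute('mypkg', '__version__')
assert version == '1.2.3'
```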
init_file = '%s/__init__.py' % module_name
_, tmp_file = tempfile.mkstemp()
with open(init_file) as input_file:
with open(tmp_file, 'w') as output_file:
for line in input_file:
if line.startswith(attribute_name):
line = "%s = '%s'\n" % (attribute... | def replace_attribute(module_name, attribute_name, new_value, dry_run=True) | Update a metadata attribute | 2.463835 | 2.611035 | 0.943624 |
init_file = '%s/__init__.py' % module_name
with open(init_file) as init_lines:
    return any(
        attribute_name in init_line for init_line in init_lines
    ) | def has_attribute(module_name, attribute_name) | Is this attribute present? | 3.870974 | 3.664319 | 1.056397 |
if not alternatives:
raise ValueError
if not isinstance(alternatives, list):
raise TypeError
choice_map = OrderedDict(
('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
)
# prepend a termination option
input_terminator = '0'
choice_map.update({... | def choose_labels(alternatives) | Prompt the user to select several labels from the provided alternatives.
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels | 3.222221 | 3.184933 | 1.011708 |
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
requests_cache.configure(expire_after=60 * 10 * 10)
changes.initialise()
yield
finally:
os.chdir(curdir) | def work_in(dirname=None) | Context manager version of os.chdir. When exited, returns to the working
directory prior to entering. | 4.860586 | 4.761619 | 1.020784 |
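Typical use of the `work_in` context manager (note it also configures `requests_cache` and calls `changes.initialise()` on entry, so this sketch assumes those side effects succeed):

```python
import os

with work_in('/tmp'):
    print(os.getcwd())  # inside /tmp (or its resolved path)
print(os.getcwd())      # restored to the original working directory
```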
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
stage_command.stage(draft, release_name, release_description) | def stage(draft, discard, repo_directory, release_name, release_description) | Stages a release | 3.020448 | 3.139434 | 0.9621 |
changelog_content = [
'\n## [%s](%s/compare/%s...%s)\n\n'
% (
context.new_version,
context.repo_url,
context.current_version,
context.new_version,
)
]
git_log_content = None
git_log = 'log --oneline --no-merges --no-color'.sp... | def generate_changelog(context) | Generates an automatic changelog from your commit messages. | 3.631119 | 3.564735 | 1.018623 |
return dict((k, dictionary[k]) for k in keys if k in dictionary) | def extract(dictionary, keys) | Extract only the specified keys from a dict
:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict: extracted dictionary | 2.9793 | 6.869225 | 0.433717 |
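A quick illustration of `extract` (keys absent from the source dictionary are silently skipped):

```python
d = {'a': 1, 'b': 2, 'c': 3}
extract(d, ['a', 'c', 'z'])  # -> {'a': 1, 'c': 3}
```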
long_arguments = extract(arguments, long_keys)
return dict(
[(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()]
) | def extract_arguments(arguments, long_keys, key_prefix='--') | :param arguments: dict of command line arguments | 3.033424 | 3.002808 | 1.010196 |
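`extract_arguments` builds on `extract`, stripping the option prefix from each key. An illustrative call:

```python
arguments = {'--major': True, '--minor': False, 'target': 'pypi'}
extract_arguments(arguments, ['--major', '--minor'])
# -> {'major': True, 'minor': False}
```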
tag_option = '--annotate'
if probe.has_signing_key(context):
tag_option = '--sign'
shell.dry_run(
TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
context.dry_run,
)
shell.dry_run('git push --tags', context.dry_run) | def tag_and_push(context) | Tags your git repo with the new version number | 6.109601 | 5.272678 | 1.158728 |
if not dry_run:
cmd_parts = command.split(' ')
# http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
return local[cmd_parts[0]](cmd_parts[1:])
else:
log.info('Dry run of %s, skipping' % command)
return True | def dry_run(command, dry_run) | Executes a shell command unless the dry run option is set | 3.849571 | 4.033144 | 0.954484 |
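Usage sketch for `shell.dry_run` (the real-run branch dispatches through plumbum's `local`, so plumbum must be importable):

```python
dry_run('git push --tags', dry_run=True)  # only logs; returns True
dry_run('echo hello', dry_run=False)      # actually executes the command
```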
project_name = curdir
config_path = Path(join(project_name, PROJECT_CONFIG_FILE))
if not exists(config_path):
store_settings(DEFAULTS.copy())
return DEFAULTS
return toml.load(io.open(config_path)) or {} | def project_config() | Deprecated | 6.483994 | 5.976521 | 1.084911 |
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version) | def increment(version, major=False, minor=False, patch=True) | Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version | 1.642908 | 1.900236 | 0.864581 |
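Worked examples of `increment` (note that `major`/`minor` take precedence over the default `patch=True`):

```python
increment('1.2.3')              # -> '1.2.4' (patch bump is the default)
increment('1.2.3', minor=True)  # -> '1.3.0' (patch resets to 0)
increment('1.2.3', major=True)  # -> '2.0.0' (minor and patch reset to 0)
```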
attributes.replace_attribute(
context.module_name, '__version__', context.new_version, dry_run=context.dry_run
)
log.info(
'Bumped version from %s to %s' % (context.current_version, context.new_version)
) | def increment_version(context) | Increments the __version__ attribute of your module's __init__. | 4.927061 | 4.221978 | 1.167003 |
response = _request('GET', href)
response.raise_for_status()
items.extend(response.json())
if "link" not in response.headers:
return
links = link_header.parse(response.headers["link"])
rels = {link.rel: link.href for link in links.links}
if "next" in rels:
_recursive_gh_... | def _recursive_gh_get(href, items) | Recursively get list of GitHub objects.
See https://developer.github.com/v3/guides/traversing-with-pagination/ | 2.242004 | 2.321362 | 0.965814 |
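A hypothetical call to `_recursive_gh_get`, which mutates the `items` list in place while following `Link` headers (the endpoint URL is illustrative, and the module-level `_request` helper is assumed to be configured):

```python
releases = []
_recursive_gh_get('https://api.github.com/repos/someorg/somerepo/releases', releases)
print(len(releases))  # all releases, across every page
```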
global progress_reporter_cls
progress_reporter_cls.reportProgress = sys.stdout.isatty() and progress
if progress_reporter_cls.reportProgress:
progress_reporter_cls = _progress_bar
global _github_token_cli_arg
_github_token_cli_arg = github_token
global _github_api_url
_github_ap... | def main(github_token, github_api_url, progress) | A CLI to easily manage GitHub releases, assets and references. | 3.687137 | 3.80503 | 0.969017 |
if new_release_sha is None:
return
refs = get_refs(repo_name, tags=True, pattern="refs/tags/%s" % tag_name)
if not refs:
return
assert len(refs) == 1
# If sha associated with "<tag_name>" is up-to-date, we are done.
previous_release_sha = refs[0]["object"]["sha"]
if pre... | def _update_release_sha(repo_name, tag_name, new_release_sha, dry_run) | Update the commit associated with a given release tag.
Since updating a tag commit is not directly possible, this function
does the following steps:
* set the release tag to ``<tag_name>-tmp`` and associate it
with ``new_release_sha``.
* delete tag ``refs/tags/<tag_name>``.
* update the relea... | 2.17288 | 2.214266 | 0.98131 |
funds_df = data.approved_funds_by_projects
project = (
funds_df
.loc[funds_df['PRONAC'] == pronac]
)
project = project.to_dict('records')[0]
info = (
data
.approved_funds_agg.to_dict(orient="index")
[project['idSegmento']]
)
mean, std = info.val... | def approved_funds(pronac, dt) | Checks whether a project's total value is an
outlier relative to the projects
in the same cultural segment
Dataframes: planilha_orcamentaria | 5.202016 | 5.171587 | 1.005884 |
indicators = obj.indicator_set.all()
if not indicators:
value = 0.0
else:
value = indicators.first().value
return value | def complexidade(obj) | Returns a value that indicates project health, currently FinancialIndicator
is used as this value, but it can be a result of calculation with other
indicators in future | 4.65029 | 4.192709 | 1.109137 |
indicators = project.indicator_set.all()
indicators_detail = [indicator_details(i)
                     for i in indicators]
if not indicators:
indicators_detail = [
{'FinancialIndicator':
{'valor': 0.0,
'... | def details(project) | Project detail endpoint,
Returns project pronac, name,
and indicators with details | 8.09786 | 6.441978 | 1.257046 |
metrics = format_metrics_json(indicator)
metrics_list = set(indicator.metrics
.filter(name__in=metrics_name_map.keys())
.values_list('name', flat=True))
null_metrics = default_metrics
for keys in metrics_list:
null_metrics.pop(metrics_name_map[... | def indicator_details(indicator) | Return a dictionary with all metrics in FinancialIndicator,
if there aren't values for that Indicator, it is filled with default values | 5.650019 | 5.344788 | 1.057108 |
assert isinstance(metric, str)
assert '.' in metric, 'metric must declare a namespace'
try:
func = self._metrics[metric]
return func(pronac, self._data)
except KeyError:
raise InvalidMetricError('metric does not exist') | def get_metric(self, pronac, metric) | Get metric for the project with the given pronac number.
Usage:
>>> metrics.get_metric(pronac_id, 'finance.approved_funds') | 5.467054 | 6.412118 | 0.852613 |
def decorator(func):
name = func.__name__
key = f'{category}.{name}'
self._metrics[key] = func
return func
return decorator | def register(self, category) | Usage:
@metrics.register('finance')
def approved_funds(pronac, data):
return metric_from_data_and_pronac_number(data, pronac) | 4.563823 | 3.205209 | 1.423877 |
# TODO: Remove except and use ignore_conflicts
# on bulk_create when django 2.2 is released
with open(MODEL_FILE, "r") as file_content:
query = file_content.read()
db = db_connector()
query_result = db.execute_pandas_sql_query(query)
db.close()
try:
... | def execute_project_models_sql_scripts(force_update=False) | Used to get project information from MinC database
and convert to this application Project models.
Uses bulk_create if database is clean | 3.942353 | 3.798009 | 1.038005 |
missing = missing_metrics(metrics, pronacs)
print(f"There are {len(missing)} missing metrics!")
processors = mp.cpu_count()
print(f"Using {processors} processors to calculate metrics!")
indicators_qs = FinancialIndicator.objects.filter(
project_id__in=[p for p, _ in missing]
)
... | def create_finance_metrics(metrics: list, pronacs: list) | Creates metrics, creating an Indicator if it doesn't already exists
Metrics are created for projects that are in pronacs and saved in
database.
args:
metrics: list of names of metrics that will be calculated
pronacs: pronacs in dataset that is used to calculate those metrics | 3.92227 | 3.861784 | 1.015663 |
dataframe = data.planilha_comprovacao
project = dataframe.loc[dataframe['PRONAC'] == pronac]
segment_id = project.iloc[0]["idSegmento"]
segments_cache = data.segment_projects_agg
segments_cache = segments_cache.to_dict(orient="index")
mean = segments_cache[segment_id]["mean"]
std = se... | def total_receipts(pronac, dt) | This metric calculates the project total of receipts
and compare it to projects in the same segment
output:
is_outlier: True if projects receipts is not compatible
to others projects in the same segment
total_receipts: absolute number of receipts
maxim... | 5.000904 | 4.16001 | 1.202138 |
all_metrics = FinancialIndicator.METRICS
for key in all_metrics:
df = getattr(data, key)
pronac = 'PRONAC'
if key == 'planilha_captacao':
pronac = 'Pronac'
pronacs = df[pronac].unique().tolist()
create_finance_metrics(all_metrics[key], pronacs) | def load_project_metrics() | Create project metrics for financial indicator
Updates them if they already exist | 9.193935 | 7.995182 | 1.149934 |
info = data.providers_info
df = info[info['PRONAC'] == pronac]
providers_count = data.providers_count.to_dict()[0]
new_providers = []
segment_id = None
for _, row in df.iterrows():
cnpj = row['nrCNPJCPF']
cnpj_count = providers_count.get(cnpj, 0)
segment_id = row['... | def new_providers(pronac, dt) | Return the percentage of providers of a project
that are new to the providers database. | 3.247592 | 3.20025 | 1.014793 |
segments_percentages = {}
all_projects_percentages = []
providers_count = providers_count.to_dict()[0]
for _, items in providers_info.groupby('PRONAC'):
cnpj_array = items['nrCNPJCPF'].unique()
new_providers = 0
for cnpj in cnpj_array:
cnpj_count = providers_cou... | def average_percentage_of_new_providers(providers_info, providers_count) | Return the average percentage of new providers
per segment and the average percentage of all projects. | 2.640097 | 2.441161 | 1.081493 |
providers_count = {}
cnpj_array = df.values
for a in cnpj_array:
cnpj = a[0]
occurrences = providers_count.get(cnpj, 0)
providers_count[cnpj] = occurrences + 1
return pd.DataFrame.from_dict(providers_count, orient='index') | def providers_count(df) | Returns total occurrences of each provider
in the database. | 2.834273 | 2.810792 | 1.008354 |
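A small illustration of `providers_count` with made-up identifiers; the result is a one-column DataFrame indexed by CNPJ/CPF:

```python
import pandas as pd

df = pd.DataFrame({'nrCNPJCPF': ['111', '222', '111']})
counts = providers_count(df)
# counts.loc['111'][0] == 2 and counts.loc['222'][0] == 1
```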
cnpj_list = []
for _, items in df.groupby('PRONAC'):
unique_cnpjs = items['nrCNPJCPF'].unique()
cnpj_list += list(unique_cnpjs)
return pd.DataFrame(cnpj_list) | def all_providers_cnpj(df) | Return CPF/CNPJ of all providers
in database. | 6.302386 | 6.378539 | 0.988061 |
df = data.providers_info
grouped = df.groupby('PRONAC')
return grouped.get_group(pronac) | def get_providers_info(pronac) | Return all info about providers of a
project with the given pronac. | 8.903428 | 8.299947 | 1.072709 |
agg = df.groupby(group).agg(info)
agg.columns = agg.columns.droplevel(0)
return agg | def get_info(df, group, info=['mean', 'std']) | Aggregate mean and std with the given group. | 2.689953 | 2.483105 | 1.083302 |
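Sketch of `get_info` on a toy frame; the multi-level column index produced by `.agg` is flattened to plain `mean`/`std` columns:

```python
import pandas as pd

df = pd.DataFrame({'idSegmento': ['A', 'A', 'B'],
                   'price': [10.0, 20.0, 30.0]})
get_info(df, 'idSegmento')  # one row per segment, columns 'mean' and 'std'
```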
url_keys = {
'pronac': 'idPronac',
'uf': 'uf',
'product': 'produto',
'county': 'idmunicipio',
'item_id': 'idPlanilhaItem',
'stage': 'etapa',
}
if df_values:
values = [item[v] for v in df_values]
url_values = dict(
zip(url_keys... | def get_salic_url(item, prefix, df_values=None) | Mount a salic url for the given item. | 3.264235 | 3.221401 | 1.013297 |
df = data.planilha_projetos
cpf_cnpj = None
row_df = df[df['PRONAC'].astype(str) == str(pronac)]
if not row_df.empty:
cpf_cnpj = row_df.iloc[0]['CgcCpf']
return str(cpf_cnpj) | def get_cpf_cnpj_by_pronac(pronac) | Return the CNPF/CNPJ of the proponent
of the project with the given pronac. | 4.282752 | 3.966133 | 1.079831 |
pronac_id = str(item['idPronac'])
item_id = str(item["idPlanilhaItens"])
combined_id = f'{pronac_id}/{item_id}'
return combined_id in data.receipt.index | def has_receipt(item) | Verify if a item has a receipt. | 9.23429 | 8.773776 | 1.052488 |
df = data.all_items
return (
df[df['idSegmento'] == str(segment_id)]
.drop_duplicates(["PRONAC"])
.values
) | def get_segment_projects(segment_id) | Returns all projects from a segment. | 17.715309 | 16.727488 | 1.059054 |
mutated_df = df[['IdPRONAC', 'idPlanilhaItem']].astype(str)
mutated_df['pronac_planilha_itens'] = (
    mutated_df['IdPRONAC'] + '/' + mutated_df['idPlanilhaItem']  # element-wise join of the two id columns
)
return (
mutated_df
.set_index(['pronac_planilha_itens'])
) | def receipt(df) | Return a dataframe to verify if a item has a receipt. | 5.685514 | 5.426241 | 1.047781 |
if pickles:
save_sql_to_files(f)
if models:
if f:
manage(ctx, 'create_models_from_sql --force True', env={})
else:
manage(ctx, 'create_models_from_sql', env={}) | def update_data(ctx, models=True, pickles=False, f=False) | Updates local django db projects and pickle files using salic database from
MinC
Pickles are saved in /data/raw/ from sql queries in /data/scripts/
Models are created from /data/scripts/models/ | 6.51684 | 5.857473 | 1.112568 |
if f:
manage(ctx, 'create_models_from_sql --force True', env={})
else:
manage(ctx, 'create_models_from_sql', env={}) | def update_models(ctx, f=False) | Updates local django db projects models using salic database from
MinC | 6.66271 | 6.313838 | 1.055255 |
project = Project.objects.get(pronac=project)
indicator, _ = (FinancialIndicator
.objects.update_or_create(project=project))
indicator.is_valid = is_valid
if indicator.is_valid:
p_metrics = metrics_calc.get_project(project.pronac)
... | def create_indicator(self, project, is_valid, metrics_list) | Creates FinancialIndicator object for a project, calculating
metrics and indicator value | 7.11271 | 6.260557 | 1.136115 |
ext_size = len(SQL_EXTENSION)
path = DATA_PATH / 'scripts'
save_dir = DATA_PATH / "raw"
for file in os.listdir(path):
if file.endswith(SQL_EXTENSION):
file_path = os.path.join(save_dir,
file[:-ext_size] + '.' + FILE_EXTENSION)
if... | def save_sql_to_files(overwrite=False) | Executes every .sql files in /data/scripts/ using salic db vpn and
then saves pickle files into /data/raw/ | 3.956542 | 3.806432 | 1.039436 |
# TODO: implement metrics recalculation
max_total = sum(
[self.metrics_weights[metric_name] for metric_name in self.metrics_weights]
)
total = 0
if recalculate_metrics:
self.calculate_indicator_metrics()
for metric in self.metrics.all():
... | def fetch_weighted_complexity(self, recalculate_metrics=False) | Calculates indicator value according to metrics weights
Uses metrics in database
args:
recalculate_metrics: If true metrics values are updated before
using weights | 3.067878 | 2.792649 | 1.098555 |
threshold = 0.1
outlier_info = get_outliers_percentage(pronac)
outlier_info['is_outlier'] = outlier_info['percentage'] > threshold
outlier_info['maximum_expected'] = threshold * outlier_info['total_items']
return outlier_info | def item_prices(pronac, data) | Verify if a project is an outlier compared
to the other projects in his segment, based
on the price of bought items. | 4.38345 | 4.071906 | 1.076511 |
if (segment_id, item_id) not in df.index:
return False
mean = df.loc[(segment_id, item_id)]['mean']
std = df.loc[(segment_id, item_id)]['std']
return gaussian_outlier.is_outlier(
x=price, mean=mean, standard_deviation=std
) | def is_outlier(df, item_id, segment_id, price) | Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado | 2.653478 | 3.059657 | 0.867247 |
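`gaussian_outlier.is_outlier` itself is not shown in this table; a plausible stand-in (an assumption, not the project's verified threshold) is a k-sigma rule:

```python
# Assumed stand-in for gaussian_outlier.is_outlier; the actual constant k
# used by the project may differ.
def gaussian_is_outlier(x, mean, standard_deviation, k=3):
    return x > mean + k * standard_deviation
```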
df = (
raw_df[['idSegmento', 'idPlanilhaItens', 'VlUnitarioAprovado']]
.groupby(by=['idSegmento', 'idPlanilhaItens'])
.agg([np.mean, lambda x: np.std(x, ddof=0)])
)
df.columns = df.columns.droplevel(0)
return (
df
.rename(columns={'<lambda>': 'std'})
) | def aggregated_relevant_items(raw_df) | Aggregation to calculate mean and std. | 4.346748 | 4.046689 | 1.074149 |
start_date = datetime(2013, 1, 1)
df['DataProjeto'] = pd.to_datetime(df['DataProjeto'])
# get only projects newer than start_date
# and items with price > 0
df = df[df.DataProjeto >= start_date]
df = df[df.VlUnitarioAprovado > 0.0]
return df | def relevant_items(df) | Dataframe with items used by cultural projects,
filtered by date and price. | 5.270566 | 4.712513 | 1.118419 |
df = (
raw_df
[['PRONAC', 'idPlanilhaAprovacao', 'Item',
'idPlanilhaItens', 'VlUnitarioAprovado',
'idSegmento', 'DataProjeto', 'idPronac',
'UfItem', 'idProduto', 'cdCidade', 'cdEtapa']]
).copy()
df['VlUnitarioAprovado'] = df['VlUnitarioAprovado'].app... | def items_with_price(raw_df) | Dataframe with price as number. | 9.322672 | 9.317584 | 1.000546 |
items = (
data.items_with_price
.groupby(['PRONAC'])
.get_group(pronac)
)
df = data.aggregated_relevant_items
outlier_items = {}
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in items.iterrows():
item_id = item['idPlanilhaItens']
... | def get_outliers_percentage(pronac) | Returns the percentage of items
of the project that are outliers. | 4.225052 | 4.214628 | 1.002473 |
df = data.items_by_project
project = df.loc[df['PRONAC'] == pronac]
seg = project.iloc[0]["idSegmento"]
info = data.items_by_project_agg.to_dict(orient="index")[seg]
mean, std = info.values()
threshold = mean + 1.5 * std
project_items_count = project.shape[0]
is_outlier = project_it... | def number_of_items(pronac, dt) | This metric calculates the project number of declared number of items
and compare it to projects in the same segment
output:
is_outlier: True if projects number of items is not compatible
to others projects in the same segment
valor: absolute number of items
... | 4.795186 | 3.288674 | 1.458091 |
percentage = 0.1
return (
df
.groupby(['idSegmento', 'idPlanilhaItens'])
.count()
.rename(columns={'PRONAC': 'itemOccurrences'})
.sort_values('itemOccurrences', ascending=False)
.reset_index(['idSegmento', 'idPlanilhaItens'])
.groupby('idSegmento')
... | def common_items(df) | Returns the items that are common in all the segments,
in the format | idSegmento | id planilhaItens |. | 4.431274 | 3.245601 | 1.365317 |
if len(seg_common_items) == 0:
return 0
project_items = get_project_items(pronac).values[:, 0]
project_items_amount = len(project_items)
if project_items_amount == 0:
return 1
common_found_items = sum(
seg_common_items.isin(project_items)['idPlanilhaItens']
)
... | def common_items_percentage(pronac, seg_common_items) | Returns the percentage of items in a project that are
common in the cultural segment. | 4.137002 | 3.856675 | 1.072686 |
segments = common_items.index.unique()
metrics = {}
for seg in segments:
seg_common_items = segment_common_items(seg)
projects = get_segment_projects(seg)
metric_values = []
for proj in projects:
pronac = proj[0]
percentage = common_items_percen... | def common_items_metrics(all_items, common_items) | Calculates the percentage of common items for each project
in each segment and calculates the mean and std of this percentage
for each segment. | 3.276222 | 2.917948 | 1.122783 |
df = data.all_items
return (
df[df['PRONAC'] == pronac]
.drop(columns=['PRONAC', 'idSegmento'])
) | def get_project_items(pronac) | Returns all items from a project. | 8.785918 | 8.124227 | 1.081447 |
df = data.common_items
return (
df
.loc[str(segment_id)]
.reset_index(drop=1)
.drop(columns=["itemOccurrences"])
) | def segment_common_items(segment_id) | Returns all the common items in a segment. | 7.082325 | 7.042107 | 1.005711 |
segment_id = get_segment_id(str(pronac))
seg_common_items = (
segment_common_items(segment_id)
.set_index('idPlanilhaItens')
.index
)
project_items = (
get_project_items(pronac)
.set_index('idPlanilhaItens')
.index
)
diff = list(project_items... | def get_uncommon_items(pronac) | Return all uncommon items of a project
(related to segment common items). | 4.576333 | 3.964573 | 1.154307 |
result = uncommon_items
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in filtered_items.iterrows():
item_id = item['idPlanilhaItens']
item_name = uncommon_items[item_id]
result[item_id] = {
'name': item_name,
'salic_url': get_salic_... | def add_info_to_uncommon_items(filtered_items, uncommon_items) | Add extra info to the uncommon items. | 5.647473 | 5.70158 | 0.99051 |
segment_id = get_segment_id(str(pronac))
metrics = data.common_items_metrics.to_dict(orient='index')[segment_id]
ratio = common_items_percentage(pronac, segment_common_items(segment_id))
# constant that defines the threshold to verify if a project
# is an outlier.
k = 1.5
threshold = ... | def common_items_ratio(pronac, dt) | Calculates the common items on projects in a cultural segment,
calculates the uncommon items on projects in a cultural segment and
verify if a project is an outlier compared to the other projects
in his segment. | 4.281772 | 3.943729 | 1.085717 |
dataframe = data.planilha_comprovacao
project = dataframe.loc[dataframe['PRONAC'] == pronac]
segment_id = project.iloc[0]["idSegmento"]
pronac_funds = project[
["idPlanilhaAprovacao", "PRONAC", "vlComprovacao", "idSegmento"]
]
funds_grp = pronac_funds.drop(columns=["idPlanilhaAprova... | def verified_funds(pronac, dt) | Responsable for detecting anomalies in projects total verified funds. | 4.268046 | 4.162892 | 1.02526 |
df['CaptacaoReal'] = df['CaptacaoReal'].apply(
pd.to_numeric
)
return (
df[['Pronac', 'CaptacaoReal']]
.groupby(['Pronac'])
.sum()
) | def raised_funds_by_project(df) | Raised funds organized by project. | 8.513004 | 8.019538 | 1.061533 |
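Behavior sketch for `raised_funds_by_project` with fabricated values; string amounts are coerced to numbers before the per-project sum:

```python
import pandas as pd

df = pd.DataFrame({'Pronac': ['100001', '100001', '200002'],
                   'CaptacaoReal': ['10.5', '2.5', '7.0']})
raised_funds_by_project(df)
# index '100001' -> 13.0, index '200002' -> 7.0
```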
project_raised_funds = data.raised_funds_by_project.loc[pronac]['CaptacaoReal']
dataframe = data.planilha_comprovacao
project_verified = dataframe.loc[dataframe['PRONAC'] == str(pronac)]
if project_verified.empty:
project_verified_funds = 0
else:
pronac_funds = project_verified... | def to_verify_funds(pronac, dt) | Checks how much money is left for the project to verify,
using raised_funds - verified_funds
This value can be negative (a project can verify more money than
the value approved) | 4.086668 | 3.948051 | 1.03511 |
cpf_cnpj = get_cpf_cnpj_by_pronac(pronac)
proponent_submitted_projects = {}
proponent_analyzed_projects = {}
if cpf_cnpj:
submitted_projects = get_proponent_submitted_projects(cpf_cnpj)
analyzed_projects = get_proponent_analyzed_projects(cpf_cnpj)
try:
propone... | def proponent_projects(pronac, data) | Checks the CNPJ/CPF of the proponent of project
with the given pronac and returns all the projects
that have been submitted by this proponent and all
projects that have already been analyzed. | 2.024149 | 1.915527 | 1.056706 |
df = raw_df[['PRONAC', 'proponenteCgcCpf']]
analyzed_projects = df.groupby('proponenteCgcCpf')[
'PRONAC'
].agg(['unique', 'nunique'])
analyzed_projects.columns = ['pronac_list', 'num_pronacs']
return analyzed_projects | def analyzed_projects(raw_df) | Return all projects that was analyzed. | 7.180153 | 6.907331 | 1.039497 |
df = raw_df.astype({'PRONAC': str, 'CgcCpf': str})
submitted_projects = df.groupby('CgcCpf')[
'PRONAC'
].agg(['unique', 'nunique'])
submitted_projects.columns = ['pronac_list', 'num_pronacs']
return submitted_projects | def submitted_projects(raw_df) | Return all submitted projects. | 6.900997 | 6.640459 | 1.039235 |
csv_path = os.path.join(DATA_FOLDER, csv_name)
csv = pd.read_csv(csv_path, low_memory=False,
usecols=usecols, encoding="utf-8")
return csv | def read_csv(csv_name, usecols=None) | Returns a DataFrame from a .csv file stored in /data/raw/ | 2.529575 | 2.569948 | 0.98429 |
csv_path = os.path.join(DATA_FOLDER, csv_name)
csv = pd.read_csv(
csv_path,
usecols=usecols,
encoding="utf-8",
dtype=column_types_dict,
engine="python",
)
for key_column, val_type in column_types_dict.items():
if val_type == str:
csv[key_... | def read_csv_with_different_type(csv_name, column_types_dict, usecols=None) | Returns a DataFrame from a .csv file stored in /data/raw/.
Reads the CSV as string. | 2.188866 | 2.147208 | 1.019401 |
csv_path = os.path.join(DATA_FOLDER, csv_name)
csv = pd.read_csv(csv_path, low_memory=False, usecols=usecols)
for column in integer_columns:
csv = csv[pd.to_numeric(csv[column], errors="coerce").notnull()]
csv[integer_columns] = csv[integer_columns].apply(pd.to_numeric)
return csv | def read_csv_as_integer(csv_name, integer_columns, usecols=None) | Returns a DataFrame from a .csv file stored in /data/raw/.
Converts columns specified by 'integer_columns' to integer. | 2.069399 | 2.006403 | 1.031397 |
df = verified_repeated_receipts_for_pronac(pronac)
comprovantes_cheque = df[df['tpFormaDePagamento'] == 1.0]
return metric_return(comprovantes_cheque) | def check_receipts(pronac, dt) | Checks how many items are in the same receipt when payment type is check
- is_outlier: True if there are any receipts that have more than one
- itens_que_compartilham_comprovantes: List of items that share receipt | 14.824362 | 13.341099 | 1.11118 |
df = verified_repeated_receipts_for_pronac(pronac)
comprovantes_transferencia = df[df['tpFormaDePagamento'] == 2.0]
return metric_return(comprovantes_transferencia) | def transfer_receipts(pronac, dt) | Checks how many items are in the same receipt when payment type is bank
transfer
- is_outlier: True if there are any receipts that have more than one
- itens_que_compartilham_comprovantes: List of items that share receipt | 13.299489 | 12.094478 | 1.099633 |
df = verified_repeated_receipts_for_pronac(pronac)
comprovantes_saque = df[df['tpFormaDePagamento'] == 3.0]
return metric_return(comprovantes_saque) | def money_receipts(pronac, dt) | Checks how many items are in the same receipt when payment type is
withdraw/money
- is_outlier: True if there are any receipts that have more than one
- itens_que_compartilham_comprovantes: List of items that share receipt | 14.961937 | 15.003139 | 0.997254 |
is_outlier, mean, std, total_raised_funds = get_outlier_info(pronac)
maximum_expected_funds = gaussian_outlier.maximum_expected_value(mean, std)
return {
'is_outlier': is_outlier,
'total_raised_funds': total_raised_funds,
'maximum_expected_funds': maximum_expected_funds
} | def raised_funds(pronac, data) | Returns the total raised funds of a project
with the given pronac and if this project is an
outlier based on this value. | 3.432835 | 3.329156 | 1.031143 |
grouped = df.groupby('Segmento')
aggregated = grouped.agg(['mean', 'std'])
aggregated.columns = aggregated.columns.droplevel(0)
return aggregated | def segment_raised_funds_average(df) | Return some info about raised funds. | 4.372418 | 4.396574 | 0.994506 |
df = data.planilha_captacao
raised_funds_averages = data.segment_raised_funds_average.to_dict('index')
segment_id = df[df['Pronac'] == pronac]['Segmento'].iloc[0]
mean = raised_funds_averages[segment_id]['mean']
std = raised_funds_averages[segment_id]['std']
project_raised_funds = get_pr... | def get_outlier_info(pronac) | Return if a project with the given
pronac is an outlier based on raised funds. | 4.141648 | 3.614941 | 1.145703 |
items_df = data.approved_verified_items
items_df = items_df.loc[items_df['PRONAC'] == pronac]
items_df[[APPROVED_COLUMN, VERIFIED_COLUMN]] = items_df[
[APPROVED_COLUMN, VERIFIED_COLUMN]
].astype(float)
items_df["Item"] = items_df["Item"].str.replace("\r", "")
items_df["Item"] = item... | def verified_approved(pronac, dt) | This metric compare budgetary items of SALIC projects in terms of
verified versus approved value
Items that have vlComprovacao > vlAprovacao * 1.5 are considered outliers
output:
is_outlier: True if any item is outlier
valor: Absolute number of items that are outliers
out... | 3.436749 | 2.958867 | 1.161509 |
for file in os.listdir(path):
base, ext = os.path.splitext(file)
if ext != ".csv":
continue
LOG(f"converting {file} to pickle")
df = pd.read_csv(path / file, low_memory=True)
WRITE_DF(df, path / (base + "." + FILE_EXTENSION), **WRITE_DF_OPTS)
if cle... | def csv_to_pickle(path=ROOT / "raw", clean=False) | Convert all CSV files in path to pickle. | 3.251387 | 3.140841 | 1.035196 |
path = "%s.%s" % (self._root / "processed" / loc, FILE_EXTENSION)
WRITE_DF(df, path, **WRITE_DF_OPTS)
self._cache[loc] = df | def store(self, loc, df) | Store dataframe in the given location.
Store some arbitrary dataframe:
>>> data.store('my_data', df)
Now recover it from the global store.
>>> data.my_data
... | 8.826602 | 12.440313 | 0.709516 |
if self.gc_book:
self.gc_book.close()
if self.pricedb_session:
self.pricedb_session.close() | def close_databases(self) | Close all database sessions | 5.578297 | 4.984272 | 1.11918 |
book = self.get_gc_book()
collection = SecuritiesAggregate(book)
sec = collection.get_aggregate_for_symbol(symbol)
quantity = sec.get_quantity()
return quantity | def load_stock_quantity(self, symbol: str) -> Decimal | retrieves stock quantity | 8.911856 | 8.548979 | 1.042447 |
if not self.gc_book:
gc_db = self.config.get(ConfigKeys.gnucash_book_path)
if not gc_db:
raise AttributeError("GnuCash book path not configured.")
# check if this is the abs file exists
if not os.path.isabs(gc_db):
gc_db = ... | def get_gc_book(self) | Returns the GnuCash db session | 4.072523 | 3.781544 | 1.076947 |