# aux-search/import-scripts/import_scripts/channel.py

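"""Import NixOS channel packages and options into an Elasticsearch index.

This module provides two click commands: run_import, which indexes the latest
evaluation of a channel, and run_diff, which compares the latest evaluations
of two channels.
"""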

import backoff  # type: ignore
import boto3  # type: ignore
import botocore  # type: ignore
import botocore.client  # type: ignore
import botocore.exceptions  # type: ignore
import click
import click_log  # type: ignore
import dictdiffer  # type: ignore
import elasticsearch  # type: ignore
import elasticsearch.helpers  # type: ignore
import import_scripts.nix  # type: ignore
import json
import logging
import os
import os.path
import pypandoc  # type: ignore
import re
import requests
import requests.exceptions
import shlex
import subprocess
import sys
import tqdm  # type: ignore
import xml.etree.ElementTree

logger = logging.getLogger("import-channel")
click_log.basic_config(logger)

S3_BUCKET = "nix-releases"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
INDEX_SCHEMA_VERSION = os.environ.get("INDEX_SCHEMA_VERSION", 0)
DIFF_OUTPUT = ["json", "stats"]
CHANNELS = {
    "unstable": "nixos/unstable/nixos-21.11pre",
    "21.05": "nixos/21.05/nixos-21.05.",
    "20.09": "nixos/20.09/nixos-20.09.",
}
ALLOWED_PLATFORMS = ["x86_64-linux", "aarch64-linux", "x86_64-darwin", "i686-linux"]
ANALYSIS = {
"normalizer": {
"lowercase": {"type": "custom", "char_filter": [], "filter": ["lowercase"]}
},
"tokenizer": {
"edge": {
"type": "edge_ngram",
"min_gram": 2,
"max_gram": 50,
"token_chars": [
"letter",
"digit",
# Either we use them or we would need to strip them before that.
"punctuation",
"symbol",
],
},
},
"analyzer": {
"edge": {"tokenizer": "edge", "filter": ["lowercase"]},
"lowercase": {
"type": "custom",
"tokenizer": "keyword",
"filter": ["lowercase"],
},
},
}

MAPPING = {
"properties": {
"type": {"type": "keyword"},
# Package fields
"package_hydra_build": {
"type": "nested",
"properties": {
"build_id": {"type": "keyword"},
"build_status": {"type": "keyword"},
"platform": {"type": "keyword"},
"project": {"type": "keyword"},
"jobset": {"type": "keyword"},
"job": {"type": "keyword"},
"path": {
"type": "nested",
"properties": {
"output": {"type": "keyword"},
"path": {"type": "keyword"},
},
},
"drv_path": {"type": "keyword"},
},
},
"package_attr_name": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_attr_name_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_attr_name_query": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_attr_name_query_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_attr_set": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_attr_set_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_pname": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_pname_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_pversion": {"type": "keyword"},
"package_description": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_description_reverse": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_longDescription": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_longDescription_reverse": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"package_license": {
"type": "nested",
"properties": {"fullName": {"type": "text"}, "url": {"type": "text"}},
},
"package_license_set": {"type": "keyword"},
"package_maintainers": {
"type": "nested",
"properties": {
"name": {"type": "text"},
"email": {"type": "text"},
"github": {"type": "text"},
},
},
"package_maintainers_set": {"type": "keyword"},
"package_platforms": {"type": "keyword"},
"package_position": {"type": "text"},
"package_homepage": {"type": "keyword"},
"package_system": {"type": "keyword"},
# Options fields
"option_name": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_name_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_name_query": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_name_query_reverse": {
"type": "keyword",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_description": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_description_reverse": {
"type": "text",
"analyzer": "english",
"fields": {"edge": {"type": "text", "analyzer": "edge"}},
},
"option_type": {"type": "keyword"},
"option_default": {"type": "text"},
"option_example": {"type": "text"},
"option_source": {"type": "keyword"},
},
}


def string_reverse(text):
return text[::-1]
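

# The MAPPING above defines a *_reverse twin for most searchable fields: the
# edge-ngram analyzer only matches prefixes of a token, so indexing the
# reversed string lets the same analyzer serve suffix matches as well.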
def field_reverse(field):
if isinstance(field, str):
if " " in field:
field = " ".join(map(field_reverse, field.split(" ")))
else:
field = string_reverse(field)
elif isinstance(field, list):
field = list(map(field_reverse, field))
elif isinstance(field, tuple):
field = tuple(map(field_reverse, field))
elif field is None:
pass
else:
raise NotImplementedError(f"Don't know how to reverse {field}")
return field


def parse_query(text):
    """Tokenize a package attr_name into query tokens.

    Example package: python37Packages.test1_name-test2

        = index: 0
          - python37Packages.test1_name-test2
          - python37Packages.test1_name
          - python37Packages.test1
          - python37
          - python
        = index: 1
          - test1_name-test2
          - test1_name
          - test1
        = index: 2
          - name-test2
          - name
        = index: 3
          - test2
    """
tokens = []
regex = re.compile(
".+?(?:(?<=[a-z])(?=[1-9A-Z])|(?<=[1-9A-Z])(?=[A-Z][a-z])|[._-]|$)"
)
parts = [m.group(0) for m in regex.finditer(text)]
for index in range(len(parts)):
prev_parts = ""
for part in parts[index:]:
tokens.append((prev_parts + part).rstrip("_.-"))
prev_parts += part
return tokens


@backoff.on_exception(backoff.expo, botocore.exceptions.ClientError)
def get_last_evaluation(prefix):
logger.debug(f"Retrieving last evaluation for {prefix} prefix.")
s3 = boto3.client(
"s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED)
)
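    # evaluation directories are named
    # "<channel prefix><revisions_since_start>.<git_revision>/", e.g.
    # "nixos/unstable/nixos-21.11pre123456.0f1e2d3/" (illustrative name)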
    s3_result = s3.list_objects(Bucket=S3_BUCKET, Prefix=prefix, Delimiter="/")
evaluations = []
    for item in s3_result.get("CommonPrefixes", []):
        if not item:
            continue
logger.debug(f"get_last_evaluation: evaluation in raw {item}")
revisions_since_start, git_revision = (
item["Prefix"][len(prefix) :].rstrip("/").split(".")
)
evaluation = {
"revisions_since_start": int(revisions_since_start),
"git_revision": git_revision,
"prefix": item["Prefix"].rstrip("/"),
}
logger.debug(f"get_last_evaluation: evaluation {evaluation}")
evaluations.append(evaluation)
logger.debug(
f"get_last_evaluation: {len(evaluations)} evaluations found for {prefix} prefix"
)
evaluations = sorted(evaluations, key=lambda i: i["revisions_since_start"])
evaluation = evaluations[-1]
result = s3.get_object(Bucket=S3_BUCKET, Key=f"{evaluation['prefix']}/src-url")
evaluation["id"] = (
result.get("Body").read().decode()[len("https://hydra.nixos.org/eval/") :]
)
logger.debug(f"get_last_evaluation: last evaluation is: {evaluation}")
return evaluation


@backoff.on_exception(backoff.expo, requests.exceptions.RequestException)
def get_evaluation_builds(evaluation_id):
logger.debug(
f"get_evaluation_builds: Retrieving list of builds for {evaluation_id} evaluation id"
)
filename = f"eval-{evaluation_id}.json"
if not os.path.exists(filename):
url = f"https://hydra.nixos.org/eval/{evaluation_id}/builds"
logger.debug(f"get_evaluation_builds: Fetching builds from {url} url.")
headers = {"Content-Type": "application/json"}
r = requests.get(url, headers=headers, stream=True)
with tqdm.tqdm.wrapattr(
open(filename, "wb"),
"write",
miniters=1,
total=int(r.headers.get("content-length", 0)),
desc=filename,
) as f:
for chunk in r.iter_content(chunk_size=4096):
f.write(chunk)
with open(filename) as f:
builds = json.loads(f.read())
result = {}
for build in builds:
result.setdefault(build["nixname"], {})
result[build["nixname"]][build["system"]] = build
return result


def get_maintainer(maintainer):
    maintainers = []
    if isinstance(maintainer, str):
        maintainers.append(dict(name=maintainer, email=None, github=None))
    elif isinstance(maintainer, dict):
        maintainers.append(
            dict(
                name=maintainer.get("name"),
                email=maintainer.get("email"),
                github=maintainer.get("github"),
            )
        )
    elif isinstance(maintainer, list):
        for item in maintainer:
            maintainers += get_maintainer(item)
    else:
        logger.error(f"maintainer cannot be recognized from: {maintainer}")
        sys.exit(1)
    return maintainers
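

# e.g. remove_attr_set("python3.8-requests") -> "requests"
#      remove_attr_set("node_lodash") -> "lodash"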
def remove_attr_set(name):
    # for some package sets the set name is also included in the pname as a prefix
    sets = [
        # Packages
        "emscripten",
        "lua",
        "php",
        "pure",
        "python",
        "lisp",
        "perl",
        "ruby",
        # Plugins
        "elasticsearch",
        "graylog",
        "tmuxplugin",
        "vimplugin",
    ]
    # TODO: is this correct?
    if any(name.startswith(i) for i in sets):
        name = "-".join(name.split("-")[1:])
    # node packages do things a bit differently
    elif name.startswith("node_"):
        name = name[len("node_") :]
    return name


@backoff.on_exception(backoff.expo, subprocess.CalledProcessError)
def get_packages_raw(evaluation):
logger.debug(
f"get_packages_raw: Retrieving list of packages for '{evaluation['git_revision']}' revision"
)
result = subprocess.run(
shlex.split(
f"nix-env -f '<nixpkgs>' -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/{evaluation['git_revision']}.tar.gz --arg config 'import {CURRENT_DIR}/packages-config.nix' -qa --json"
),
stdout=subprocess.PIPE,
check=True,
)
packages = json.loads(result.stdout).items()
return list(packages)


def get_packages(evaluation, evaluation_builds):
packages = list(get_packages_raw(evaluation))

    def gen():
for attr_name, data in packages:
licenses = data["meta"].get("license")
if licenses:
                if isinstance(licenses, str):
licenses = [dict(fullName=licenses, url=None)]
                elif isinstance(licenses, dict):
licenses = [licenses]
                licenses = [
                    dict(fullName=license, url=None)
                    if isinstance(license, str)
                    else dict(
                        fullName=license.get("fullName"), url=license.get("url")
                    )
                    for license in licenses
                ]
else:
licenses = [dict(fullName="No license", url=None)]
maintainers = get_maintainer(data["meta"].get("maintainers", []))
if len(maintainers) == 0:
maintainers = [dict(name="No maintainers", email=None, github=None)]
platforms = [
platform
for platform in data["meta"].get("platforms", [])
                if isinstance(platform, str) and platform in ALLOWED_PLATFORMS
]
attr_set = "No package set"
if "." in attr_name:
maybe_attr_set = attr_name.split(".")[0]
if (
maybe_attr_set.endswith("Packages")
or maybe_attr_set.endswith("Plugins")
or maybe_attr_set.endswith("Extensions")
):
attr_set = maybe_attr_set
hydra = None
if data["name"] in evaluation_builds:
hydra = []
for platform, build in evaluation_builds[data["name"]].items():
hydra.append(
{
"build_id": build["id"],
"build_status": build["buildstatus"],
"platform": build["system"],
"project": build["project"],
"jobset": build["jobset"],
"job": build["job"],
"path": [
{"output": output, "path": item["path"]}
for output, item in build["buildoutputs"].items()
],
"drv_path": build["drvpath"],
}
)
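
            # meta.position points into the /nix/store copy of nixpkgs; strip
            # the store prefix so only the path inside nixpkgs remains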
position = data["meta"].get("position")
if position and position.startswith("/nix/store"):
position = position.split("/", 4)[-1]
package_attr_name_query = list(parse_query(attr_name))
package_pname = remove_attr_set(data["pname"])
package_description = data["meta"].get("description")
package_longDescription = data["meta"].get("longDescription", "")
yield dict(
type="package",
package_hydra=hydra,
package_attr_name=attr_name,
package_attr_name_reverse=field_reverse(attr_name),
package_attr_name_query=package_attr_name_query,
package_attr_name_query_reverse=field_reverse(package_attr_name_query),
package_attr_set=attr_set,
package_attr_set_reverse=field_reverse(attr_set),
package_pname=package_pname,
package_pname_reverse=field_reverse(package_pname),
package_pversion=data["version"],
package_description=package_description,
package_description_reverse=field_reverse(package_description),
package_longDescription=package_longDescription,
package_longDescription_reverse=field_reverse(package_longDescription),
package_license=licenses,
package_license_set=[i["fullName"] for i in licenses],
package_maintainers=maintainers,
package_maintainers_set=[i["name"] for i in maintainers if i["name"]],
package_platforms=platforms,
package_position=position,
package_homepage=data["meta"].get("homepage"),
package_system=data["system"],
)
logger.debug(f"get_packages: Found {len(packages)} packages")
2020-03-28 01:34:38 +00:00
return len(packages), gen


@backoff.on_exception(backoff.expo, subprocess.CalledProcessError)
def get_options_raw(evaluation):
logger.debug(
f"get_options: Retrieving list of options for '{evaluation['git_revision']}' revision"
)
result = subprocess.run(
shlex.split(
f"nix-build <nixpkgs/nixos/release.nix> --no-out-link -A options -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/{evaluation['git_revision']}.tar.gz"
),
stdout=subprocess.PIPE,
check=True,
)
options = []
options_file = result.stdout.strip().decode()
options_file = f"{options_file}/share/doc/nixos/options.json"
if os.path.exists(options_file):
with open(options_file) as f:
options = json.load(f).items()
return list(options)


def get_options(evaluation):
options = get_options_raw(evaluation)

    def gen():
for name, option in options:
if "default" in option:
default = import_scripts.nix.prettyPrint(option.get("default"))
else:
default = None
if "example" in option:
example = import_scripts.nix.prettyPrint(option.get("example"))
else:
example = None
description = option.get("description")
if description is not None:
xml_description = (
f'<xml xmlns:xlink="http://www.w3.org/1999/xlink">'
f"<para>{description}</para>"
f"</xml>"
)
                # check whether the description contains any XML elements
                # before converting, since pypandoc calls are quite slow
root = xml.etree.ElementTree.fromstring(xml_description)
if len(list(root.find("para"))) > 0:
description = pypandoc.convert_text(
xml_description, "html", format="docbook",
)
option_name_query = parse_query(name)
declarations = option.get("declarations", [])
option_source = declarations[0] if declarations else None
yield dict(
type="option",
option_name=name,
option_name_reverse=field_reverse(name),
option_name_query=option_name_query,
option_name_query_reverse=field_reverse(option_name_query),
option_description=description,
option_description_reverse=field_reverse(description),
option_type=option.get("type"),
option_default=default,
option_example=example,
option_source=option_source,
)

    return len(options), gen


def ensure_index(es, index, mapping, force=False):
if es.indices.exists(index):
logger.debug(f"ensure_index: index '{index}' already exists")
if not force:
return False
logger.debug(f"ensure_index: Deleting index '{index}'")
es.indices.delete(index)
es.indices.create(
index=index,
body={
"settings": {"number_of_shards": 1, "analysis": ANALYSIS},
"mappings": mapping,
},
)
logger.debug(f"ensure_index: index '{index}' was created")
return True


def create_index_name(channel, evaluation):
    evaluation_name = "-".join(
        [
            evaluation["id"],
            str(evaluation["revisions_since_start"]),
            evaluation["git_revision"],
        ]
    )
    return (
        f"latest-{INDEX_SCHEMA_VERSION}-{channel}",
        f"evaluation-{INDEX_SCHEMA_VERSION}-{channel}-{evaluation_name}",
    )


def update_alias(es, name, index):
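    # Swap the alias in a single update_aliases call so searches never observe
    # an alias that points at no index.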
if es.indices.exists_alias(name=name):
indexes = set(es.indices.get_alias(name=name).keys())
# indexes to remove from alias
actions = [
{"remove": {"index": item, "alias": name}}
for item in indexes.difference(set([index]))
]
        # add the index if it does not yet exist in the alias
if index not in indexes:
actions.append({"add": {"index": index, "alias": name}})
if actions:
es.indices.update_aliases({"actions": actions})
else:
es.indices.put_alias(index=index, name=name)
indexes = ", ".join(es.indices.get_alias(name=name).keys())
logger.debug(f"'{name}' alias now points to '{indexes}' index")


def write(unit, es, index_name, number_of_items, item_generator):
if number_of_items:
click.echo(f"Indexing {unit}...")
progress = tqdm.tqdm(unit=unit, total=number_of_items)
successes = 0
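        # streaming_bulk lazily consumes the generator, so documents are
        # indexed without materializing the whole dataset in memory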
for ok, action in elasticsearch.helpers.streaming_bulk(
client=es, index=index_name, actions=item_generator()
):
progress.update(1)
            successes += ok
        progress.close()
        click.echo(f"Indexed {successes}/{number_of_items} {unit}")


def setup_logging(verbose):
logging_level = "CRITICAL"
if verbose == 1:
logging_level = "WARNING"
elif verbose >= 2:
logging_level = "DEBUG"
logger.setLevel(getattr(logging, logging_level))
logger.debug(f"Verbosity is {verbose}")
logger.debug(f"Logging set to {logging_level}")


@click.command()
@click.option("-u", "--es-url", help="Elasticsearch connection url.")
@click.option("-c", "--channel", type=click.Choice(CHANNELS.keys()), help="Channel.")
@click.option("-f", "--force", is_flag=True, help="Force channel recreation.")
@click.option("-v", "--verbose", count=True)
def run_import(es_url, channel, force, verbose):
setup_logging(verbose)
evaluation = get_last_evaluation(CHANNELS[channel])
evaluation_builds = dict()
# evaluation_builds = get_evaluation_builds(evaluation["id"])
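    # NOTE: with evaluation_builds empty, package_hydra stays None for every
    # package; re-enable the call above to import Hydra build results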
es = elasticsearch.Elasticsearch([es_url])
alias_name, index_name = create_index_name(channel, evaluation)
index_created = ensure_index(es, index_name, MAPPING, force)
if index_created:
write(
"packages", es, index_name, *get_packages(evaluation, evaluation_builds),
)
write("options", es, index_name, *get_options(evaluation))
update_alias(es, alias_name, index_name)


def prepare_items(key, total, func):
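    # NOTE: currently unused in this module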
logger.info(f"Preparing items ({key})...")
return {item[key]: item for item in func()}


def get_packages_diff(evaluation):
for attr_name, data in get_packages_raw(evaluation):
        data_cmp = dict(attr_name=attr_name, version=data.get("version"))
yield attr_name, data_cmp, data


def get_options_diff(evaluation):
for name, data in get_options_raw(evaluation):
        data_cmp = dict(name=name, type=data.get("type"), default=data.get("default"))
yield name, data_cmp, data


def create_diff(type_, items_from, items_to):
logger.debug(f"Starting to diff {type_}...")
return dict(
added=[item for key, item in items_to.items() if key not in items_from.keys()],
removed=[
item for key, item in items_from.items() if key not in items_to.keys()
],
updated=[
(
list(dictdiffer.diff(items_from[key][0], items_to[key][0])),
items_from[key],
items_to[key],
)
for key in set(items_from.keys()).intersection(set(items_to.keys()))
if items_from[key][0] != items_to[key][0]
],
)


@click.command()
@click.option("-v", "--verbose", count=True)
@click.option("-o", "--output", default="stats", type=click.Choice(DIFF_OUTPUT))
@click.argument("channel_from", type=click.Choice(CHANNELS.keys()))
@click.argument("channel_to", type=click.Choice(CHANNELS.keys()))
def run_diff(channel_from, channel_to, output, verbose):
setup_logging(verbose)
# TODO: channel_from and channel_to should not be the same
evaluation_from = get_last_evaluation(CHANNELS[channel_from])
evaluation_to = get_last_evaluation(CHANNELS[channel_to])
packages_from = {
key: (item, item_raw)
for key, item, item_raw in get_packages_diff(evaluation_from)
}
packages_to = {
key: (item, item_raw)
for key, item, item_raw in get_packages_diff(evaluation_to)
}
options_from = {
key: (item, item_raw)
for key, item, item_raw in get_options_diff(evaluation_from)
}
options_to = {
key: (item, item_raw) for key, item, item_raw in get_options_diff(evaluation_to)
}
packages_diff = create_diff("packages", packages_from, packages_to)
options_diff = create_diff("options", options_from, options_to)
if output == "stats":
click.echo("Packages:")
click.echo(f" All in {channel_from}: {len(packages_from)}")
click.echo(f" All in {channel_to}: {len(packages_to)}")
click.echo(f" Added: {len(packages_diff['added'])}")
click.echo(f" Removed: {len(packages_diff['removed'])}")
click.echo(f" Updated: {len(packages_diff['updated'])}")
click.echo("Options:")
click.echo(f" All in {channel_from}: {len(options_from)}")
click.echo(f" All in {channel_to}: {len(options_to)}")
click.echo(f" Added: {len(options_diff['added'])}")
click.echo(f" Removed: {len(options_diff['removed'])}")
click.echo(f" Updated: {len(options_diff['updated'])}")
elif output == "json":
        click.echo(json.dumps(dict(packages=packages_diff, options=options_diff)))
else:
click.echo(f"ERROR: unknown output {output}")


if __name__ == "__main__":
run_import()