Source code for swh.web.browse.utils

# Copyright (C) 2017-2024  The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information

import base64
import stat
import textwrap
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union, cast

import chardet
import charset_normalizer
import magic

from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe

from swh.web.config import get_config
from swh.web.utils import (
    archive,
    browsers_supported_image_mimes,
    django_cache,
    format_utc_iso_date,
    highlightjs,
    reverse,
    rst_to_html,
)
from swh.web.utils.exc import NotFoundExc, sentry_capture_exception
from swh.web.utils.typing import SnapshotContext


@django_cache()
def get_directory_entries(
    sha1_git: str,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
    """Function that retrieves the content of a directory from the archive.

    The directory entries are first sorted in lexicographical order.
    Sub-directories and regular files are then extracted.

    Args:
        sha1_git: sha1_git identifier of the directory

    Returns:
        A tuple whose first member corresponds to the sub-directories list
        and second member the regular files list

    Raises:
        NotFoundExc if the directory is not found
    """
    entries: List[Dict[str, Any]] = list(archive.lookup_directory(sha1_git))
    for e in entries:
        e["perms"] = stat.filemode(e["perms"])
        if e["type"] == "rev":
            # modify dir entry name to explicitly show it points to a revision
            e["name"] = "%s @ %s" % (e["name"], e["target"][:7])
        elif e["type"] == "file":
            # remove unused checksums dict to reduce cache size
            e.pop("checksums", None)

    dirs = [e for e in entries if e["type"] in ("dir", "rev")]
    files = [e for e in entries if e["type"] == "file"]

    dirs = sorted(dirs, key=lambda d: d["name"])
    files = sorted(files, key=lambda f: f["name"])

    return dirs, files

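# Illustrative usage sketch (not part of the original module): given the hex
# sha1_git of an archived directory, the helper returns the already sorted
# sub-directory and file entry lists. The hash below is a hypothetical
# placeholder and must reference an archived directory.
def _example_list_directory(dir_sha1_git: str = "0" * 40) -> None:
    dirs, files = get_directory_entries(dir_sha1_git)
    for entry in dirs + files:
        # each entry carries symbolic permissions, e.g. "drwxr-xr-x"
        print(entry["perms"], entry["name"])
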
def get_mimetype_and_encoding_for_content(content: bytes) -> Tuple[str, str]:
    """Function that returns the mime type and the encoding associated with
    a content buffer, using the magic module under the hood.

    Args:
        content: a content buffer

    Returns:
        A tuple (mimetype, encoding), for instance ('text/plain', 'us-ascii'),
        associated to the provided content.
    """
    m = magic.Magic(mime=True, mime_encoding=True)
    mime_encoding = m.from_buffer(content)
    mime_type, encoding = mime_encoding.split(";")
    encoding = encoding.replace(" charset=", "")
    return mime_type, encoding

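# Illustrative sketch (not part of the original module): detecting the
# mimetype and encoding of an in-memory buffer.
def _example_detect_mimetype() -> None:
    mime_type, encoding = get_mimetype_and_encoding_for_content(b"hello world\n")
    # typically ('text/plain', 'us-ascii'), although exact results may
    # vary across libmagic versions
    print(mime_type, encoding)
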
# maximum authorized content size in bytes for HTML display
# with code highlighting
content_display_max_size = get_config()["content_display_max_size"]

def re_encode_content(
    mimetype: str, encoding: str, content_data: bytes
) -> Tuple[str, str, bytes]:
    """Try to re-encode textual content if it is not encoded to UTF-8
    for proper display in the browse Web UI.

    Args:
        mimetype: content mimetype as detected by python-magic
        encoding: content encoding as detected by python-magic
        content_data: raw content bytes

    Returns:
        A tuple with 3 members: content mimetype, content encoding (possibly
        updated after processing), content raw bytes (possibly reencoded to
        UTF-8)
    """
    if mimetype.startswith("text/") and encoding not in ("us-ascii", "utf-8"):
        # first check if charset_normalizer detects an encoding with confidence
        result = charset_normalizer.detect(content_data)
        if result.get("confidence") and cast(float, result["confidence"]) >= 0.9:
            encoding = cast(str, result["encoding"])
            content_data = content_data.decode(encoding, "replace").encode("utf-8")
        # then try to detect encoding with chardet if the above failed
        elif (cresult := chardet.detect(content_data)).get("confidence", 0) >= 0.9:
            encoding = cast(str, cresult["encoding"])
            content_data = content_data.decode(encoding, "replace").encode("utf-8")
        elif encoding == "unknown-8bit":
            # probably a malformed UTF-8 content, re-encode it
            # by replacing invalid chars with a substitution one
            content_data = content_data.decode("utf-8", "replace").encode("utf-8")
        elif encoding not in ["utf-8", "binary"]:
            content_data = content_data.decode(encoding, "replace").encode("utf-8")
    elif mimetype.startswith("application/octet-stream"):
        # file may detect a text content as binary
        # so try to decode it for display
        encodings = ["us-ascii", "utf-8"]
        encodings += ["iso-8859-%s" % i for i in range(1, 17)]
        for enc in encodings:
            try:
                content_data = content_data.decode(enc).encode("utf-8")
            except Exception:
                pass
            else:
                # ensure display in content view
                encoding = enc
                mimetype = "text/plain"
                break

    return mimetype, encoding, content_data

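# Illustrative sketch (not part of the original module): a latin-1 buffer
# detected as text gets transcoded to UTF-8 so it can be displayed.
def _example_re_encode() -> None:
    data = "déjà vu, café au lait".encode("iso-8859-1")
    mime, enc, utf8_data = re_encode_content("text/plain", "iso-8859-1", data)
    # the returned bytes are UTF-8 encoded; the reported encoding may be a
    # compatible superset (e.g. windows-1252) picked by detection
    print(mime, enc, utf8_data.decode("utf-8"))
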
def request_content(
    query_string: str,
    max_size: Optional[int] = content_display_max_size,
    re_encode: bool = True,
) -> Dict[str, Any]:
    """Function that retrieves a content from the archive.

    Raw bytes content is first retrieved, then the content mime type.
    If the mime type is not stored in the archive, it will be computed
    using the Python magic module.

    Args:
        query_string: a string of the form "[ALGO_HASH:]HASH" where
            optional ALGO_HASH can be either ``sha1``, ``sha1_git``,
            ``sha256``, or ``blake2s256`` (default to ``sha1``) and HASH
            the hexadecimal representation of the hash value
        max_size: the maximum size for a content to retrieve (default
            to 1MB, no size limit if None)
        re_encode: whether to re-encode textual content to UTF-8 for
            proper display (see :func:`re_encode_content`)

    Returns:
        A dict filled with content info.

    Raises:
        NotFoundExc if the content is not found
    """
    content_data = archive.lookup_content(query_string)
    filetype = None
    language = None
    # requests to the indexer db may fail so properly handle
    # those cases in order to avoid content display errors
    try:
        filetype = archive.lookup_content_filetype(query_string)
        language = archive.lookup_content_language(query_string)
    except Exception as exc:
        sentry_capture_exception(exc)
    mimetype = "unknown"
    encoding = "unknown"
    if filetype:
        mimetype = filetype["mimetype"]
        encoding = filetype["encoding"]

    if not max_size or content_data["length"] < max_size:
        try:
            content_raw = archive.lookup_content_raw(query_string)
        except Exception as exc:
            sentry_capture_exception(exc)
            raise NotFoundExc(
                "The bytes of the content are currently not available "
                "in the archive."
            )
        else:
            content_data["raw_data"] = content_raw["data"]

            if not filetype:
                mimetype, encoding = get_mimetype_and_encoding_for_content(
                    content_data["raw_data"]
                )

            if re_encode:
                mimetype, encoding, raw_data = re_encode_content(
                    mimetype, encoding, content_data["raw_data"]
                )
                content_data["raw_data"] = raw_data

    else:
        content_data["raw_data"] = None

    content_data["mimetype"] = mimetype
    content_data["encoding"] = encoding

    if language:
        content_data["language"] = language["lang"]
    else:
        content_data["language"] = "not detected"

    return content_data

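# Illustrative sketch (not part of the original module): fetching a content
# by one of its hashes. The hash below is a hypothetical placeholder and
# must reference an archived content, otherwise NotFoundExc is raised.
def _example_request_content() -> None:
    content = request_content("sha1_git:" + "0" * 40)
    print(content["mimetype"], content["encoding"], content["language"])
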
def prepare_content_for_display(
    content_data: bytes, mime_type: str, path: Optional[str]
) -> Dict[str, Any]:
    """Function that prepares a content for HTML display.

    The function tries to associate a programming language to a content
    in order to perform syntax highlighting client-side using highlightjs.
    The language is determined using either the content filename or its
    mime type. If the mime type corresponds to an image format supported
    by web browsers, the content will be encoded in base64 for displaying
    the image.

    Args:
        content_data: raw bytes of the content
        mime_type: mime type of the content
        path: path of the content including filename

    Returns:
        A dict containing the content bytes (possibly different from the
        one provided as parameter if it is an image) under the key
        'content_data' and the corresponding highlightjs language class
        under the key 'language'.
    """
    language = None
    if path:
        language = highlightjs.get_hljs_language_from_filename(path.split("/")[-1])

    if language is None:
        language = highlightjs.get_hljs_language_from_mime_type(mime_type)

    if language is None:
        language = "plaintext"

    processed_content: Union[bytes, str] = content_data

    if mime_type.startswith("image/"):
        if mime_type in browsers_supported_image_mimes:
            processed_content = base64.b64encode(content_data).decode("ascii")

    if mime_type.startswith("image/svg"):
        mime_type = "image/svg+xml"

    if mime_type.startswith("text/") or mime_type.startswith("application/"):
        processed_content = content_data.decode("utf-8", errors="replace")

    return {
        "content_data": processed_content,
        "language": language,
        "mimetype": mime_type,
    }

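# Illustrative sketch (not part of the original module): preparing textual
# bytes for display; the filename part of the path drives highlightjs
# language detection.
def _example_prepare_display() -> None:
    prepared = prepare_content_for_display(
        b"def main():\n    pass\n", "text/x-python", "project/app.py"
    )
    # 'language' should resolve to the highlightjs class for Python
    print(prepared["language"], prepared["mimetype"])
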
def _snapshot_context_query_params(
    snapshot_context: Optional[SnapshotContext],
) -> Dict[str, str]:
    query_params: Dict[str, str] = {}
    if not snapshot_context:
        return query_params
    if snapshot_context and snapshot_context["origin_info"]:
        origin_info = snapshot_context["origin_info"]
        snp_query_params = snapshot_context["query_params"]
        query_params = {"origin_url": origin_info["url"]}
        if "timestamp" in snp_query_params:
            query_params["timestamp"] = str(snp_query_params["timestamp"])
        if "visit_id" in snp_query_params:
            query_params["visit_id"] = str(snp_query_params["visit_id"])
        if "snapshot" in snp_query_params and "visit_id" not in query_params:
            query_params["snapshot"] = str(snp_query_params["snapshot"])
    elif snapshot_context:
        query_params = {"snapshot": snapshot_context["snapshot_id"]}

    if snapshot_context["release"]:
        query_params["release"] = snapshot_context["release"]
    elif snapshot_context["branch"] and snapshot_context["branch"] not in (
        "HEAD",
        snapshot_context["revision_id"],
    ):
        query_params["branch"] = snapshot_context["branch"]
    elif snapshot_context["revision_id"]:
        query_params["revision"] = snapshot_context["revision_id"]
    return query_params

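# Illustrative sketch (not part of the original module): a hypothetical
# minimal context dict; a real SnapshotContext carries more keys, but only
# the ones below are read by the helper for a snapshot without origin info.
def _example_snapshot_query_params() -> None:
    ctx = {
        "origin_info": None,
        "query_params": {},
        "snapshot_id": "c" * 40,  # hypothetical placeholder
        "release": None,
        "branch": "main",
        "revision_id": "d" * 40,  # hypothetical placeholder
    }
    print(_snapshot_context_query_params(cast(SnapshotContext, ctx)))
    # -> {'snapshot': 'ccc...', 'branch': 'main'}
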
def gen_revision_url(
    revision_id: str, snapshot_context: Optional[SnapshotContext] = None
) -> str:
    """
    Utility function for generating a URL to a revision.

    Args:
        revision_id: a revision id
        snapshot_context: if provided, generate a snapshot-dependent
            browsing URL

    Returns:
        str: The URL to browse the revision
    """
    query_params = _snapshot_context_query_params(snapshot_context)
    # remove query parameters not needed for a revision view
    query_params.pop("revision", None)
    query_params.pop("release", None)

    return reverse(
        "browse-revision",
        url_args={"sha1_git": revision_id},
        query_params=query_params,
    )

def get_revision_log_url(
    revision_id: str, snapshot_context: Optional[SnapshotContext] = None
) -> str:
    """
    Utility function for getting the URL for a revision log HTML view
    (possibly in the context of an origin).

    Args:
        revision_id: revision identifier the history heads to
        snapshot_context: if provided, generate a snapshot-dependent
            browsing link

    Returns:
        The revision log view URL
    """
    query_params = {}
    if snapshot_context:
        query_params = _snapshot_context_query_params(snapshot_context)

    query_params["revision"] = revision_id
    if snapshot_context and snapshot_context["origin_info"]:
        revision_log_url = reverse("browse-origin-log", query_params=query_params)
    elif snapshot_context:
        url_args = {"snapshot_id": snapshot_context["snapshot_id"]}
        del query_params["snapshot"]
        revision_log_url = reverse(
            "browse-snapshot-log", url_args=url_args, query_params=query_params
        )
    else:
        revision_log_url = reverse(
            "browse-revision-log", url_args={"sha1_git": revision_id}
        )
    return revision_log_url

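# Illustrative sketch (not part of the original module): outside of any
# snapshot context both URL helpers only need the revision id (hypothetical
# placeholder below); exact URL prefixes depend on the Django URL config.
def _example_revision_urls() -> None:
    rev_id = "a" * 40
    print(gen_revision_url(rev_id))  # e.g. /browse/revision/aaa.../
    print(get_revision_log_url(rev_id))  # e.g. /browse/revision/aaa.../log/
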
def format_log_entries(
    revision_log: Iterator[Optional[Dict[str, Any]]],
    per_page: int,
    snapshot_context: Optional[SnapshotContext] = None,
) -> List[Dict[str, str]]:
    """
    Utility function that processes raw revision log data for HTML display.
    Its purpose is to:

        * add links to relevant browse views
        * format dates in human readable format
        * truncate the log message

    Args:
        revision_log: raw revision log as returned by the swh-web api
        per_page: number of log entries per page
        snapshot_context: if provided, generate snapshot-dependent
            browsing links
    """
    revision_log_data = []
    for i, rev in enumerate(revision_log):
        if rev is None:
            continue
        if i == per_page:
            break
        author_name = "None"
        author_fullname = "None"
        committer_fullname = "None"
        if rev["author"]:
            author_name = gen_person_mail_link(rev["author"])
            author_fullname = rev["author"]["fullname"]
        if rev["committer"]:
            committer_fullname = rev["committer"]["fullname"]
        author_date = format_utc_iso_date(rev["date"])
        committer_date = format_utc_iso_date(rev["committer_date"])

        tooltip = "revision %s\n" % rev["id"]
        tooltip += "author: %s\n" % author_fullname
        tooltip += "author date: %s\n" % author_date
        tooltip += "committer: %s\n" % committer_fullname
        tooltip += "committer date: %s\n\n" % committer_date
        if rev["message"]:
            tooltip += textwrap.indent(rev["message"], " " * 4)

        revision_log_data.append(
            {
                "author": author_name,
                "id": rev["id"][:7],
                "message": rev["message"],
                "date": author_date,
                "commit_date": committer_date,
                "url": gen_revision_url(rev["id"], snapshot_context),
                "tooltip": tooltip,
            }
        )
    return revision_log_data

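# Illustrative sketch (not part of the original module): formatting the two
# most recent entries of a revision log iterator obtained elsewhere.
def _example_format_log(revision_log: Iterator[Optional[Dict[str, Any]]]) -> None:
    for entry in format_log_entries(revision_log, per_page=2):
        print(entry["id"], entry["date"], entry["author"])
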
# list of common readme names ordered by preference
# (lower indices have higher priority)
_common_readme_names = [
    "readme.markdown",
    "readme.md",
    "readme.rst",
    "readme.txt",
    "readme",
]

def get_readme_to_display(
    readmes: Dict[str, str]
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
    """
    Process a list of readme files found in a directory
    in order to find the adequate one to display.

    Args:
        readmes: a dict where keys are readme file names and values
            are readme sha1_gits

    Returns:
        A tuple (readme_name, readme_url, readme_html)
    """
    readme_name = None
    readme_url = None
    readme_sha1_git = None
    readme_html = None

    lc_readmes = {
        k.lower(): {"orig_name": k, "sha1_git": v} for k, v in readmes.items()
    }

    # look for readme names according to the preference order
    # defined by the _common_readme_names list
    for common_readme_name in _common_readme_names:
        if common_readme_name in lc_readmes:
            readme_name = lc_readmes[common_readme_name]["orig_name"]
            readme_sha1_git = lc_readmes[common_readme_name]["sha1_git"]
            readme_url = (
                reverse(
                    "browse-content-raw",
                    url_args={"query_string": f"sha1_git:{readme_sha1_git}"},
                    query_params={"re_encode": "true"},
                )
                if readme_sha1_git is not None
                else None
            )
            break

    # otherwise pick the first readme-like file if any
    if not readme_name and len(readmes.items()) > 0:
        readme_name = next(iter(readmes))
        readme_sha1_git = readmes[readme_name]
        readme_url = (
            reverse(
                "browse-content-raw",
                url_args={"query_string": f"sha1_git:{readme_sha1_git}"},
                query_params={"re_encode": "true"},
            )
            if readme_sha1_git is not None
            else None
        )

    # convert rst README to html server side as there is
    # no viable solution to perform that task client side
    if readme_name and readme_name.endswith(".rst"):

        @django_cache(
            catch_exception=True,
            exception_return_value="Readme bytes are not available",
        )
        def _rst_readme_to_html(readme_sha1_git):
            rst_doc = request_content(f"sha1_git:{readme_sha1_git}")
            return rst_to_html(rst_doc["raw_data"].decode("utf-8", errors="replace"))

        readme_html = _rst_readme_to_html(readme_sha1_git)
    elif readme_sha1_git:
        # check content exists in the archive
        if not archive.lookup_hash(f"sha1_git:{readme_sha1_git}")["found"]:
            readme_url = None

    return readme_name, readme_url, readme_html

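# Illustrative sketch (not part of the original module): readme selection
# follows the _common_readme_names ordering, so README.md wins over
# README.txt; the sha1_git values are hypothetical placeholders, and the
# URL check against the archive will clear readme_url for unknown hashes.
def _example_select_readme() -> None:
    readmes = {"README.txt": "1" * 40, "README.md": "2" * 40}
    name, url, html = get_readme_to_display(readmes)
    print(name)  # -> README.md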