API Reference

AI Service Module

blueprint.ai_service

AI Service module for interacting with different LLM providers.

AIService

Service for interacting with different AI providers.

Source code in src/blueprint/ai_service.py
class AIService:
    """Service for interacting with different AI providers."""

    def __init__(
        self, service_type: str, model: Optional[str] = None, debug: bool = False
    ):
        """Initialize AI service.

        Args:
            service_type: Type of AI service ('ollama' or 'jan')
            model: Model name to use
            debug: Whether to enable debug logging
        """
        self.service_type = service_type
        self.model = model
        self.debug = debug
        self.logger = logging.getLogger(__name__)

        # Set up base URLs for services
        self.base_urls = {
            "ollama": OLLAMA_BASE_URL,
            "jan": JAN_BASE_URL,
        }

        if service_type not in self.base_urls:
            self.logger.error(f"Unsupported service type: {service_type}")
            raise ValueError(f"Unsupported service type: {service_type}")

        self.logger.debug(
            f"Initialized AIService with {service_type} and model {model}"
        )

    def query(self, prompt: str) -> str:
        """Query the AI service with the given prompt.

        Args:
            prompt: The prompt to send to the AI service

        Returns:
            The response from the AI service

        Raises:
            Exception: If there's an error communicating with the AI service
        """
        if self.service_type == "ollama":
            return self._query_ollama(prompt)
        elif self.service_type == "jan":
            return self._query_jan(prompt)
        else:
            self.logger.error(f"Unsupported service type: {self.service_type}")
            raise ValueError(f"Unsupported service type: {self.service_type}")

    def _query_jan(self, prompt: str) -> str:
        """Send query to Jan AI API.

        Args:
            prompt: The prompt text

        Returns:
            Generated text response
        """
        url = self.base_urls["jan"]
        headers = {
            "Content-Type": "application/json",
        }

        data = {
            "model": self.model,
            "messages": [
                {
                    "role": "system",
                    "content": "You are a commit message generator. You only summarize code in git diffs.",
                },
                {"role": "user", "content": prompt},
            ],
            "temperature": 0.2,
        }

        self.logger.debug(f"Sending request to Jan AI API at {url}")
        if self.debug:
            self.logger.debug(f"Request data: {data}")

        try:
            self.logger.debug("Making POST request to Jan AI API")
            # Add timeout parameter to prevent hanging
            response = requests.post(url, headers=headers, json=data, timeout=60)
            self.logger.debug(
                f"Received response with status code: {response.status_code}"
            )

            if self.debug:
                self.logger.debug(f"Response headers: {response.headers}")

            response.raise_for_status()
            result = response.json()

            if not result.get("choices") or not result["choices"][0].get("message"):
                self.logger.error(f"Unexpected response format from Jan AI: {result}")
                if self.debug:
                    self.logger.debug(f"Full response: {result}")
                return ""

            content = result["choices"][0]["message"]["content"]
            return content
        except requests.exceptions.ConnectionError as e:
            self.logger.error(f"Error connecting to Jan AI API: {e}")
            raise Exception(
                "Error connecting to Jan AI: Is Jan AI running on localhost:1337?"
            )
        except requests.exceptions.Timeout as e:
            self.logger.error(f"Jan AI API request timed out: {e}")
            raise Exception("Jan AI request timed out. Service may be overloaded.")
        except Exception as e:
            self.logger.error(f"Error querying Jan AI API: {e}")
            raise Exception(f"Error with Jan AI: {str(e)[:100]}")

    def _query_ollama(self, prompt: str) -> str:
        """Send query to Ollama API.

        Args:
            prompt: The prompt text

        Returns:
            Generated text response
        """
        url = self.base_urls["ollama"]
        data = {"model": self.model, "prompt": prompt, "stream": False}

        self.logger.debug(f"Sending request to Ollama API at {url}")
        if self.debug:
            self.logger.debug(f"Request data: {data}")

        try:
            self.logger.debug("Making POST request to Ollama API")
            # Add timeout parameter to prevent hanging
            response = requests.post(url, json=data, timeout=60)
            self.logger.debug(
                f"Received response with status code: {response.status_code}"
            )

            if self.debug:
                self.logger.debug(f"Response headers: {response.headers}")

            response.raise_for_status()
            result = response.json()

            if not result.get("response"):
                self.logger.error(f"Unexpected response format from Ollama: {result}")
                if self.debug:
                    self.logger.debug(f"Full response: {result}")

            return result.get("response", "")
        except requests.exceptions.ConnectionError as e:
            self.logger.error(f"Error connecting to Ollama API: {e}")
            raise Exception(
                "Error connecting to Ollama: Is Ollama running on localhost:11434?"
            )
        except requests.exceptions.Timeout as e:
            self.logger.error(f"Ollama API request timed out: {e}")
            raise Exception(
                "Ollama request timed out. Service may be overloaded or model is too large."
            )
        except Exception as e:
            self.logger.error(f"Error querying Ollama API: {e}")
            raise Exception(f"Error with Ollama: {str(e)[:100]}")

__init__(service_type, model=None, debug=False)

Initialize AI service.

Parameters:

Name          Type           Description                              Default
service_type  str            Type of AI service ('ollama' or 'jan')  required
model         Optional[str]  Model name to use                        None
debug         bool           Whether to enable debug logging          False
Source code in src/blueprint/ai_service.py
def __init__(
    self, service_type: str, model: Optional[str] = None, debug: bool = False
):
    """Initialize AI service.

    Args:
        service_type: Type of AI service ('ollama' or 'jan')
        model: Model name to use
        debug: Whether to enable debug logging
    """
    self.service_type = service_type
    self.model = model
    self.debug = debug
    self.logger = logging.getLogger(__name__)

    # Set up base URLs for services
    self.base_urls = {
        "ollama": OLLAMA_BASE_URL,
        "jan": JAN_BASE_URL,
    }

    if service_type not in self.base_urls:
        self.logger.error(f"Unsupported service type: {service_type}")
        raise ValueError(f"Unsupported service type: {service_type}")

    self.logger.debug(
        f"Initialized AIService with {service_type} and model {model}"
    )

query(prompt)

Query the AI service with the given prompt.

Parameters:

Name    Type  Description                           Default
prompt  str   The prompt to send to the AI service  required

Returns:

Type  Description
str   The response from the AI service

Raises:

Type       Description
Exception  If there's an error communicating with the AI service

Source code in src/blueprint/ai_service.py
def query(self, prompt: str) -> str:
    """Query the AI service with the given prompt.

    Args:
        prompt: The prompt to send to the AI service

    Returns:
        The response from the AI service

    Raises:
        Exception: If there's an error communicating with the AI service
    """
    if self.service_type == "ollama":
        return self._query_ollama(prompt)
    elif self.service_type == "jan":
        return self._query_jan(prompt)
    else:
        self.logger.error(f"Unsupported service type: {self.service_type}")
        raise ValueError(f"Unsupported service type: {self.service_type}")
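
A minimal usage sketch (illustrative, not from the source): it assumes an Ollama server is already running at the module's default local address and that the named model has been pulled.

from blueprint.ai_service import AIService

# Illustrative only: assumes Ollama is reachable at OLLAMA_BASE_URL
# (localhost:11434 by default) and that the llama3.1 model has been pulled.
service = AIService("ollama", model="llama3.1", debug=True)
response = service.query("Summarize this change: renamed foo() to bar().")
print(response)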

Commit Generator Module

blueprint.commit_generator

Git commit message generator using AI models.

create_commit(message, debug=False)

Create a git commit with the selected message.

Parameters:

Name     Type  Description                      Default
message  str   Commit message to use            required
debug    bool  Whether to enable debug logging  False

Returns:

Type  Description
bool  True if commit was successful, False otherwise

Source code in src/blueprint/commit_generator.py
def create_commit(message: str, debug: bool = False) -> bool:
    """Create a git commit with the selected message.

    Args:
        message: Commit message to use
        debug: Whether to enable debug logging

    Returns:
        True if commit was successful, False otherwise
    """
    logger = logging.getLogger(__name__)
    logger.debug(f"Creating commit with message: '{message}'")

    try:
        subprocess.run(["git", "commit", "-m", message], check=True)
        logger.debug("Commit created successfully")
        print(f"Committed with message: {message}")
        return True
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to create commit: {e}")
        print("Error: Failed to create commit.")
        return False
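
An illustrative call, assuming it runs inside a git repository that has staged changes; the example message is hypothetical.

from blueprint.commit_generator import create_commit

# Illustrative: requires a git repository with staged changes.
if create_commit("fix: handle empty diff in filter_diff"):
    print("commit created")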

filter_diff(raw_diff, include_filenames=True, debug=False)

Filter git diff to remove metadata and keep only meaningful changes.

Parameters:

Name               Type  Description                              Default
raw_diff           str   Raw git diff output                      required
include_filenames  bool  Whether to keep filenames in the output  True
debug              bool  Whether to enable debug logging          False

Returns:

Type  Description
str   Filtered diff with only relevant content

Source code in src/blueprint/commit_generator.py
def filter_diff(
    raw_diff: str, include_filenames: bool = True, debug: bool = False
) -> str:
    """Filter git diff to remove metadata and keep only meaningful changes.

    Args:
        raw_diff: Raw git diff output
        include_filenames: Whether to keep filenames in the output
        debug: Whether to enable debug logging

    Returns:
        Filtered diff with only relevant content
    """
    logger = logging.getLogger(__name__)
    logger.debug("Filtering git diff to remove metadata")

    if not raw_diff:
        return ""

    filtered_lines = []
    current_file = None

    for line in raw_diff.split("\n"):
        # Skip common metadata lines
        if line.startswith("diff --git") or line.startswith("index "):
            continue

        # Handle filename markers but keep the filename
        if line.startswith("--- "):
            continue
        if line.startswith("+++ "):
            if line.startswith("+++ b/") and include_filenames:
                current_file = line[6:]  # Remove the "+++ b/" prefix
            continue

        # Add filename header if we just found a new file
        if current_file and include_filenames:
            filtered_lines.append(f"File: {current_file}")
            current_file = None

        # Keep everything else: hunk headers, context lines, and actual changes
        filtered_lines.append(line)

    filtered_diff = "\n".join(filtered_lines)

    if debug:
        logger.debug(
            f"Original diff: {len(raw_diff)} chars, Filtered: {len(filtered_diff)} chars"
        )
        logger.debug(f"Removed {len(raw_diff) - len(filtered_diff)} chars of metadata")
        logger.debug(
            "Filtered diff preview (first 500 chars):\n" + filtered_diff[:500]
            if filtered_diff
            else "(empty)"
        )

    return filtered_diff
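
A small worked example (illustrative): the toy diff below is an assumption rather than real output, and the commented result follows the rules in the source above, with metadata lines dropped and a File: header kept.

from blueprint.commit_generator import filter_diff

# A toy diff; real input would come from `git diff`.
raw = """diff --git a/foo.py b/foo.py
index 123abc..456def 100644
--- a/foo.py
+++ b/foo.py
@@ -1,2 +1,2 @@
-old = 1
+new = 2"""

print(filter_diff(raw))
# File: foo.py
# @@ -1,2 +1,2 @@
# -old = 1
# +new = 2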

generate_commit_messages(diff, max_chars=200, service_type='ollama', ollama_model='llama3.1', jan_model='llama3.2-3b-instruct', debug=False)

Generate commit messages based on git diff.

Parameters:

Name          Type  Description                                       Default
diff          str   Git diff to generate commit messages for          required
max_chars     int   Suggested maximum characters for commit messages  200
service_type  str   'ollama' or 'jan'                                 'ollama'
ollama_model  str   Model name for Ollama                             'llama3.1'
jan_model     str   Model name for Jan AI                             'llama3.2-3b-instruct'
debug         bool  Whether to enable debug logging                   False

Source code in src/blueprint/commit_generator.py
def generate_commit_messages(
    diff: str,
    max_chars: int = 200,
    service_type: str = "ollama",
    ollama_model: str = "llama3.1",
    jan_model: str = "llama3.2-3b-instruct",
    debug: bool = False,
) -> List[str]:
    """Generate commit messages based on git diff.

    Args:
        diff: Git diff to generate commit messages for
        max_chars: Suggested maximum characters for commit messages
        service_type: 'ollama' or 'jan'
        ollama_model: Model name for Ollama
        jan_model: Model name for Jan AI
        debug: Whether to enable debug logging

    Returns:
        List of generated commit messages
    """
    logger = logging.getLogger(__name__)
    logger.debug("Generating commit messages")

    # Filter the diff to remove noise
    filtered_diff = filter_diff(diff, include_filenames=True, debug=debug)

    # Explicit logging of the filtered diff for debugging
    if debug:
        logger.debug(f"FILTERED DIFF used for prompting LLM:\n{filtered_diff}")
        if not filtered_diff:
            logger.warning("FILTERED DIFF is empty")

    # Use the filtered diff so the prompt matches the debug logging above
    prompt = get_commit_message_prompt(filtered_diff, max_chars)

    logger.debug(f"Created prompt with length {len(prompt)} chars")
    if debug:
        logger.debug("FINAL PROMPT:\n" + prompt)

    response = query_ai_service(
        prompt, service_type, ollama_model, jan_model, debug=debug
    )

    if debug and response:
        logger.debug(f"Full response from LLM: {response}")
    elif not response:
        logger.error("Received empty response from AI service")

    messages = parse_commit_messages(response, debug=debug)
    logger.debug(f"Generated {len(messages)} commit messages")
    return messages
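
A sketch of the typical call chain (illustrative): it assumes a git repository with changes and a local Ollama server serving the named model.

from blueprint.commit_generator import get_git_diff, generate_commit_messages

# Illustrative: run inside a git repo, with Ollama serving llama3.1 locally.
diff = get_git_diff(max_chars=5000)
messages = generate_commit_messages(
    diff,
    max_chars=75,
    service_type="ollama",
    ollama_model="llama3.1",
)
for i, msg in enumerate(messages, 1):
    print(f"{i}. {msg}")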

get_git_diff(max_chars=5000, debug=False)

Get the git diff of staged changes, or unstaged if no staged changes. Filters out deleted files from the diff.

Parameters:

Name       Type  Description                             Default
max_chars  int   Maximum number of characters to return  5000
debug      bool  Whether to enable debug logging         False

Returns:

Type  Description
str   Git diff as string

Raises:

Type        Description
SystemExit  If not a git repository or git not installed

Source code in src/blueprint/commit_generator.py
def get_git_diff(max_chars: int = 5000, debug: bool = False) -> str:
    """Get the git diff of staged changes, or unstaged if no staged changes.
    Filters out deleted files from the diff.

    Args:
        max_chars: Maximum number of characters to return
        debug: Whether to enable debug logging

    Returns:
        Git diff as string

    Raises:
        SystemExit: If not a git repository or git not installed
    """
    logger = logging.getLogger(__name__)

    try:
        logger.debug("Checking for staged changes")
        diff = subprocess.check_output(
            ["git", "diff", "--cached", "--diff-filter=ACMTU"], text=True
        )
        if not diff:
            logger.debug("No staged changes found, checking for unstaged changes")
            diff = subprocess.check_output(
                ["git", "diff", "--diff-filter=ACMTU"], text=True
            )

        # Use trim_diff to intelligently truncate if needed
        if len(diff) > max_chars:
            diff = trim_diff(diff, max_chars, debug)

        return diff
    except subprocess.CalledProcessError as e:
        logger.error(f"Git diff failed: {e}")
        print("Error: Not a git repository or git is not installed.")
        sys.exit(1)
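
An illustrative call, assuming it runs inside a git repository:

from blueprint.commit_generator import get_git_diff

# Staged changes are preferred; unstaged changes are used as a fallback.
# Deleted files are excluded via --diff-filter=ACMTU.
diff = get_git_diff(max_chars=3000, debug=True)
print(diff[:200])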

parse_commit_messages(response, debug=False)

Parse the LLM response into a list of commit messages.

Parameters:

Name      Type  Description                      Default
response  str   Text response from AI service    required
debug     bool  Whether to enable debug logging  False

Returns:

Type       Description
List[str]  List of extracted commit messages

Source code in src/blueprint/commit_generator.py
def parse_commit_messages(response: str, debug: bool = False) -> List[str]:
    """Parse the LLM response into a list of commit messages.

    Args:
        response: Text response from AI service
        debug: Whether to enable debug logging

    Returns:
        List of extracted commit messages
    """
    logger = logging.getLogger(__name__)
    logger.debug("Parsing commit messages from AI response")

    messages = []
    for line in response.split("\n"):
        line = line.strip()
        if debug:
            logger.debug(f"Processing line: {line}")

        if line.startswith(("1.", "2.", "3.")):
            message = line.split(".", 1)[1].strip()
            # Strip surrounding single quotes if present
            if (message.startswith("'") and message.endswith("'")) or (
                message.startswith('"') and message.endswith('"')
            ):
                message = message[1:-1]
            messages.append(message)
            logger.debug(f"Extracted message: {message}")

    logger.debug(f"Parsed {len(messages)} commit messages")
    return messages
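
A small worked example of the parsing rules (the response text is invented for illustration): numbered prefixes and surrounding quotes are stripped.

from blueprint.commit_generator import parse_commit_messages

response = """1. "fix: guard against empty diff"
2. refactor: split hunk parsing
3. 'docs: expand CLI notes'"""
print(parse_commit_messages(response))
# ['fix: guard against empty diff', 'refactor: split hunk parsing', 'docs: expand CLI notes']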

query_ai_service(prompt, service_type, ollama_model, jan_model, debug=False)

Query AI service with the given prompt.

Parameters:

Name          Type  Description                              Default
prompt        str   Prompt text to send to AI service        required
service_type  str   Type of AI service ('ollama' or 'jan')   required
ollama_model  str   Model name for Ollama                    required
jan_model     str   Model name for Jan AI                    required
debug         bool  Whether to enable debug logging          False

Returns:

Type  Description
str   Response from AI service

Raises:

Type        Description
SystemExit  If there's an error querying the AI service

Source code in src/blueprint/commit_generator.py
def query_ai_service(
    prompt: str,
    service_type: str,
    ollama_model: str,
    jan_model: str,
    debug: bool = False,
) -> str:
    """Query AI service with the given prompt.

    Args:
        prompt: Prompt text to send to AI service
        service_type: Type of AI service ('ollama' or 'jan')
        ollama_model: Model name for Ollama
        jan_model: Model name for Jan AI
        debug: Whether to enable debug logging

    Returns:
        Response from AI service

    Raises:
        SystemExit: If there's an error querying the AI service
    """
    logger = logging.getLogger(__name__)

    try:
        print("Generating commit messages...", end="", flush=True)
        logger.debug(
            f"Querying {service_type} with model {ollama_model if service_type == 'ollama' else jan_model}"
        )

        ai_service = AIService(
            service_type,
            model=ollama_model if service_type == "ollama" else jan_model,
            debug=debug,
        )

        response = ai_service.query(prompt)
        print("Done!")

        logger.debug(f"Received response with length {len(response)} chars")

        return response
    except Exception as e:
        logger.error(f"Error querying {service_type}: {e}")
        print(f"\nError querying {service_type.capitalize()}: {e}")
        sys.exit(1)
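
An illustrative call (the prompt text is a placeholder); note the function prints progress and calls sys.exit(1) on failure rather than raising.

from blueprint.commit_generator import query_ai_service

# Illustrative: the model matching service_type is selected internally.
response = query_ai_service(
    "Summarize this diff: ...",
    service_type="ollama",
    ollama_model="llama3.1",
    jan_model="llama3.2-3b-instruct",
)
print(response)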

trim_diff(diff, max_chars, debug=False)

Intelligently trim a git diff to stay under max_chars by preserving complete files and hunks.

Parameters:

Name       Type  Description                      Default
diff       str   The git diff to trim             required
max_chars  int   Maximum character limit          required
debug      bool  Whether to enable debug logging  False

Returns:

Type  Description
str   Trimmed diff with complete files and hunks

Source code in src/blueprint/commit_generator.py
def trim_diff(diff: str, max_chars: int, debug: bool = False) -> str:
    """Intelligently trim a git diff to stay under max_chars by preserving complete files and hunks.

    Args:
        diff: The git diff to trim
        max_chars: Maximum character limit
        debug: Whether to enable debug logging

    Returns:
        Trimmed diff with complete files and hunks
    """
    logger = logging.getLogger(__name__)
    logger.debug(f"Trimming diff to stay under {max_chars} chars")

    if len(diff) <= max_chars:
        return diff

    lines = diff.split("\n")
    result_lines: list[str] = []
    current_length = 0
    in_hunk = False

    # First, count the number of actual change lines (+ or -) to prioritize
    change_lines_count = 0
    for line in lines:
        stripped = line.lstrip()
        if (stripped.startswith("+") or stripped.startswith("-")) and stripped not in (
            "+",
            "-",
        ):
            change_lines_count += 1

    # If there are few changes, we want to keep ALL of them
    keep_all_changes = change_lines_count < 50  # arbitrary threshold
    if keep_all_changes and debug:
        logger.debug(
            f"Only {change_lines_count} actual change lines - will prioritize keeping all changes"
        )

    # Initialize important indices set
    important_indices: set[int] = set()

    # First pass: collect critical changes and their context
    if keep_all_changes:
        for i, line in enumerate(lines):
            stripped = line.lstrip()
            # Mark change lines and surrounding context
            if (
                stripped.startswith("+") or stripped.startswith("-")
            ) and stripped not in ("+", "-"):
                # Mark this line and surrounding context (3 lines before and after)
                for j in range(max(0, i - 3), min(len(lines), i + 4)):
                    important_indices.add(j)
            # Always mark hunk headers
            elif stripped.startswith("@@"):
                important_indices.add(i)

    # Second pass: keep important lines and natural boundaries
    for i, line in enumerate(lines):
        line_length = len(line) + 1  # +1 for newline
        stripped = line.lstrip()

        # Start of a new file
        if line.startswith("diff --git"):
            # If adding this new file would exceed our limit, stop here
            if current_length + line_length > max_chars and result_lines:
                # Unless this file contains important changes we want to keep
                if keep_all_changes and any(
                    j in important_indices for j in range(i, min(len(lines), i + 20))
                ):
                    if debug:
                        logger.debug(
                            f"Keeping file at line {i} despite size limit due to important changes"
                        )
                else:
                    break
            in_hunk = False

        # Start of a new hunk
        elif stripped.startswith("@@"):
            in_hunk = True

        # If we're about to exceed the limit but this is an important line, keep it anyway
        if current_length + line_length > max_chars:
            if keep_all_changes and i in important_indices:
                if debug:
                    logger.debug(f"Keeping important line {i} despite size limit")
            # If we're not at a natural boundary and this isn't an important line, stop here
            elif not in_hunk and not line.startswith("diff --git"):
                # We're between hunks or files, safe to stop here
                break

        # Add the line
        result_lines.append(line)
        current_length += line_length

    result = "\n".join(result_lines)

    if debug:
        logger.debug(f"Trimmed diff from {len(diff)} chars to {len(result)} chars")
        logger.debug(f"Preserved {len(result_lines)} of {len(lines)} lines")
        # Check if we preserved all important changes
        if keep_all_changes:
            preserved_important = sum(
                1 for i in important_indices if i < len(result_lines)
            )
            logger.debug(
                f"Preserved {preserved_important} of {len(important_indices)} important lines"
            )

    return result
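
A toy example (illustrative, with a synthetic diff); note that when a diff has fewer than 50 change lines, those lines and their context may be kept even past max_chars, so the result is not guaranteed to stay strictly under the limit.

from blueprint.commit_generator import trim_diff

# A synthetic 200-file diff; real input would come from `git diff`.
raw = "\n".join(
    ["diff --git a/f.py b/f.py", "@@ -1 +1 @@", "-x = 1", "+x = 2"] * 200
)
trimmed = trim_diff(raw, max_chars=500)
print(len(raw), "->", len(trimmed))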

CLI Module

blueprint.cli

Command-line interface for LLM-powered commit message generator.

main()

Main entry point for the CLI application.

Source code in src/blueprint/cli.py
def main():
    """Main entry point for the CLI application."""
    OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", DEFAULT_OLLAMA_MODEL)
    JAN_MODEL = os.getenv("JAN_MODEL", DEFAULT_JAN_MODEL)

    parser = argparse.ArgumentParser(
        description="Generate git commit messages using LLMs."
    )
    parser.add_argument(
        "--ollama",
        action="store_true",
        help="Use Ollama API instead of Jan AI (default is Jan AI)",
    )
    parser.add_argument(
        "--analytics", action="store_true", help="Display performance analytics"
    )
    parser.add_argument(
        "--vim", action="store_true", help="Use vim-style navigation in fzf"
    )
    parser.add_argument(
        "--num", action="store_true", help="Use number selection for commit messages"
    )
    parser.add_argument(
        "--max_chars",
        type=int,
        default=75,
        help="Suggested maximum number of characters for each commit message (default: 75)",
    )
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
    args = parser.parse_args()

    # Set up logging
    setup_logging(args.debug)
    logger = logging.getLogger(__name__)
    logger.debug("Debug mode enabled")

    # Start timing
    start_time = time.time()

    # Get git diff
    logger.debug("Getting git diff")
    diff = get_git_diff(debug=args.debug)
    if not diff:
        logger.error("No changes to commit")
        print("No changes to commit.")
        sys.exit(0)

    # Generate commit messages
    service_type = "ollama" if args.ollama else "jan"
    logger.debug(f"Generating commit messages using {service_type}")
    commit_messages = generate_commit_messages(
        diff=diff,
        max_chars=args.max_chars,
        service_type=service_type,
        ollama_model=OLLAMA_MODEL,
        jan_model=JAN_MODEL,
        debug=args.debug,
    )

    # Stop timing for initial generation
    end_time = time.time()

    # Show analytics if requested
    if args.analytics:
        print("\nAnalytics:")
        print(
            f"Time taken to generate commit messages: {end_time - start_time:.2f} seconds"
        )
        print(f"Inference used: {'Ollama' if args.ollama else 'Jan AI'}")
        print(f"Model name: {OLLAMA_MODEL if args.ollama else JAN_MODEL}")
        print("")  # Add a blank line for better readability

    # Check if we have messages
    if not commit_messages:
        logger.error("Could not generate commit messages")
        print("Error: Could not generate commit messages.")
        sys.exit(1)

    # Select message or regenerate
    while True:
        selected_message = select_message_with_fzf(
            commit_messages, use_vim=args.vim, use_num=args.num
        )

        if selected_message == "regenerate":
            # Time regeneration
            start_time = time.time()
            logger.debug("Regenerating commit messages")

            commit_messages = generate_commit_messages(
                diff=diff,
                max_chars=args.max_chars,
                service_type=service_type,
                ollama_model=OLLAMA_MODEL,
                jan_model=JAN_MODEL,
                debug=args.debug,
            )

            end_time = time.time()

            if args.analytics:
                print("\nRegeneration Analytics:")
                print(
                    f"Time taken to regenerate commit messages: {end_time - start_time:.2f} seconds"
                )
                print("")  # Add a blank line for better readability

            if not commit_messages:
                logger.error("Could not regenerate commit messages")
                print("Error: Could not generate commit messages.")
                sys.exit(1)
        elif selected_message:
            logger.debug(f"Creating commit with message: {selected_message}")
            create_commit(selected_message, debug=args.debug)
            break
        else:
            logger.debug("Commit messages rejected")
            print("Commit messages rejected. Please create commit message manually.")
            break

select_message_with_fzf(messages, use_vim=False, use_num=False)

Use fzf to select a commit message, with option to regenerate.

Parameters:

Name      Type       Description                               Default
messages  List[str]  List of commit messages to select from    required
use_vim   bool       Whether to use vim-style navigation       False
use_num   bool       Whether to display numbers for selection  False

Returns:

Type           Description
Optional[str]  Selected message, "regenerate" to regenerate messages, or None if cancelled

Source code in src/blueprint/cli.py
def select_message_with_fzf(
    messages: List[str],
    use_vim: bool = False,
    use_num: bool = False,
) -> Optional[str]:
    """Use fzf to select a commit message, with option to regenerate.

    Args:
        messages: List of commit messages to select from
        use_vim: Whether to use vim-style navigation
        use_num: Whether to display numbers for selection

    Returns:
        Selected message, "regenerate" to regenerate messages, or None if cancelled
    """
    logger = logging.getLogger(__name__)
    logger.debug("Displaying fzf selector for commit messages")

    try:
        messages.append("Regenerate messages")
        fzf_args = [
            "fzf",
            "--height=10",
            "--layout=reverse",
            "--prompt=Select a commit message (ESC to cancel): ",
            "--no-info",
            "--margin=1,2",
            "--border",
            "--color=prompt:#D73BC9,pointer:#D73BC9",
        ]

        if use_vim:
            fzf_args.extend(["--bind", "j:down,k:up"])
            logger.debug("Using vim-style navigation in fzf")

        if use_num:
            for i, msg in enumerate(messages):
                messages[i] = f"{i + 1}. {msg}"
            fzf_args.extend(
                [
                    "--bind",
                    "1:accept-non-empty,2:accept-non-empty,3:accept-non-empty,4:accept-non-empty",
                ]
            )
            logger.debug("Using number selection in fzf")

        logger.debug(f"Displaying {len(messages)} options in fzf")
        result = subprocess.run(
            fzf_args,
            input="\n".join(messages),
            capture_output=True,
            text=True,
        )
        if result.returncode == 130:  # User pressed ESC
            logger.debug("User cancelled selection with ESC")
            return None
        selected = result.stdout.strip()
        logger.debug(f"User selected: '{selected}'")

        if selected == "Regenerate messages" or selected == "4. Regenerate messages":
            logger.debug("User chose to regenerate messages")
            return "regenerate"

        final_selection = (
            selected.split(". ", 1)[1] if use_num and selected else selected
        )
        logger.debug(f"Final selection: '{final_selection}'")
        return final_selection
    except subprocess.CalledProcessError as e:
        logger.error(f"fzf selection failed: {e}")
        print("Error: fzf selection failed.")
        return None
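
An illustrative call (the example messages are invented); it requires the fzf binary on PATH and, as the source shows, appends a "Regenerate messages" entry to the list it is given.

from blueprint.cli import select_message_with_fzf

# Illustrative: requires fzf installed; the input list is mutated
# (a "Regenerate messages" entry is appended).
choice = select_message_with_fzf(
    ["fix: guard empty diff", "refactor: split parser"], use_num=True
)
if choice == "regenerate":
    print("user asked for new suggestions")
elif choice:
    print("selected:", choice)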

setup_logging(debug_mode)

Set up logging based on debug mode.

Source code in src/blueprint/cli.py
def setup_logging(debug_mode):
    """Set up logging based on debug mode."""
    log_level = logging.DEBUG if debug_mode else logging.INFO
    logging.basicConfig(
        level=log_level,
        format="%(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )