-
-
Notifications
You must be signed in to change notification settings - Fork 0
feat: /image attachment, /browse browser-view, and vision-message support #2
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -53,6 +53,8 @@ def handle(self, text: str) -> bool: | |
| "/map": self._cmd_map, | ||
| "/run": self._cmd_run, | ||
| "/web": self._cmd_web, | ||
| "/browse": self._cmd_browse, | ||
| "/image": self._cmd_image, | ||
| "/git": self._cmd_git, | ||
| "/paste": self._cmd_paste, | ||
| "/ls": self._cmd_ls, | ||
|
|
@@ -211,6 +213,69 @@ def _cmd_web(self, args: str) -> None: | |
| except Exception as exc: | ||
| self.io.print_error(f"Failed to fetch URL: {exc}") | ||
|
|
||
| def _cmd_browse(self, args: str) -> None: | ||
| """Fetch and display a web page with rich browser-like formatting.""" | ||
| if not args: | ||
| self.io.print_warning("Usage: /browse <url>") | ||
| return | ||
| url = args.strip() | ||
| if not url.startswith(("http://", "https://")): | ||
| url = "https://" + url | ||
| try: | ||
| from qgo.utils.web_scraper import fetch_page_info | ||
| self.io.print_info(f"Loading: {url}") | ||
| page_info = fetch_page_info(url) | ||
| self.io.print_browse(page_info) | ||
| content = page_info.get("content", "") | ||
| if content: | ||
| self.coder.messages.append({ | ||
| "role": "user", | ||
| "content": f"Web page content from {url}:\n\n{content[:8000]}", | ||
| }) | ||
|
Comment on lines
+231
to
+234
|
||
| self.io.print_success( | ||
| f"Page content added to context ({len(content):,} chars)." | ||
| ) | ||
|
Rahulchaube1 marked this conversation as resolved.
|
||
| except Exception as exc: | ||
| self.io.print_error(f"Failed to browse {url}: {exc}") | ||
|
|
||
| def _cmd_image(self, args: str) -> None: | ||
| """Attach one or more images (local path or URL) to the next message.""" | ||
| if not args: | ||
| self.io.print_warning("Usage: /image <path_or_url> [path2 ...]") | ||
| return | ||
| for src in args.split(): | ||
| src = src.strip() | ||
| if not src: | ||
| continue | ||
| p = Path(src) | ||
| if p.exists() and p.is_file(): | ||
| # Encode local file as a base64 data URL | ||
| try: | ||
| import base64 | ||
| ext = p.suffix.lower().lstrip(".") | ||
| mime = { | ||
| "jpg": "image/jpeg", "jpeg": "image/jpeg", | ||
| "png": "image/png", "gif": "image/gif", | ||
| "webp": "image/webp", "bmp": "image/bmp", | ||
| }.get(ext, "image/png") | ||
| data = base64.b64encode(p.read_bytes()).decode("ascii") | ||
| data_url = f"data:{mime};base64,{data}" | ||
| self.coder.pending_images.append(data_url) | ||
| self.io.print_image_added(src, len(self.coder.pending_images)) | ||
| except Exception as exc: | ||
| self.io.print_error(f"Failed to load image {src}: {exc}") | ||
| elif src.startswith(("http://", "https://")): | ||
| # Remote image — pass URL directly (vision models support this) | ||
| self.coder.pending_images.append(src) | ||
| self.io.print_image_added(src, len(self.coder.pending_images)) | ||
|
Comment on lines
+267
to
+270
|
||
| else: | ||
| self.io.print_warning(f"Image not found: {src}") | ||
| count = len(self.coder.pending_images) | ||
| if count: | ||
| self.io.print_info( | ||
| f" {count} image(s) queued — they will be sent with your next message." | ||
| ) | ||
|
|
||
| def _cmd_git(self, args: str) -> None: | ||
| if not args: | ||
| self.io.print_warning("Usage: /git <git subcommand>") | ||
|
|
||
| Original file line number | Diff line number | Diff line change | ||||||||
|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -95,7 +95,71 @@ def fetch_url(url: str, timeout: int = 15) -> str: | |||||||||
| return f"[Error fetching {url}: {exc}]" | ||||||||||
|
|
||||||||||
|
|
||||||||||
| def _fetch_plain(url: str, timeout: int = 15) -> str: | ||||||||||
| def fetch_page_info(url: str, timeout: int = 15) -> dict: | ||||||||||
| """Fetch a web page and return structured info for browser-view display. | ||||||||||
|
|
||||||||||
| Returns a dict with: url, title, description, status_code, headings, links, content. | ||||||||||
| """ | ||||||||||
| result: dict = { | ||||||||||
| "url": url, | ||||||||||
| "title": "", | ||||||||||
| "description": "", | ||||||||||
| "status_code": 0, | ||||||||||
| "headings": [], | ||||||||||
| "links": [], | ||||||||||
| "content": "", | ||||||||||
| } | ||||||||||
| try: | ||||||||||
| import requests | ||||||||||
| from bs4 import BeautifulSoup | ||||||||||
|
|
||||||||||
| headers = { | ||||||||||
| "User-Agent": ( | ||||||||||
| "Mozilla/5.0 (compatible; QGo/0.1; +https://github.com/Rahulchaube1/QGo)" | ||||||||||
| ) | ||||||||||
| } | ||||||||||
| response = requests.get(url, headers=headers, timeout=timeout) | ||||||||||
| response.raise_for_status() | ||||||||||
| result["status_code"] = response.status_code | ||||||||||
|
Comment on lines
+122
to
+123
|
||||||||||
| response.raise_for_status() | |
| result["status_code"] = response.status_code | |
| result["status_code"] = response.status_code | |
| response.raise_for_status() |
Copilot
AI
Mar 29, 2026
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The meta description extraction has redundant checks (isinstance(meta, object) and hasattr(meta, "get")) and a # type: ignore. Since soup.find(...) returns a BeautifulSoup Tag (or None), you can simplify this to if meta: result["description"] = meta.get("content", "") to improve readability and remove the ignore.
| if meta and isinstance(meta, object) and hasattr(meta, "get"): | |
| result["description"] = meta.get("content", "") # type: ignore[union-attr] | |
| if meta: | |
| result["description"] = meta.get("content", "") |
Copilot
AI
Mar 29, 2026
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
fetch_page_info() fetches the page via requests.get(...) and then immediately calls fetch_url(url, ...), which performs a second HTTP request for the same URL. This doubles latency and load for /browse. Consider refactoring fetch_url to accept already-fetched HTML (or extracting the HTML→readable-text logic) so fetch_page_info() can reuse response.text/soup without another network call.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
`pending_images` is cleared before `_send()` is called. If the LLM request fails/raises, the images are lost and won’t be attached on retry. Clear `pending_images` only after a successful send (or restore them in an exception handler) so queued attachments aren’t dropped on transient errors.