Voor 18:00 besteld, vandaag verstuurd. 12 maanden garantie op bijna alle producten! Uitgebreid onderdelen assortiment.
Assortiment
incl. btw excl. btw
Alle categorieën
incl. btw excl. btw
inurl index php id 1 shop
Voor 18:00 besteld, vandaag verstuurd. 12 maanden garantie op bijna alle producten! Uitgebreid onderdelen assortiment.

Inurl Index Php: Id 1 Shop ^hot^

def _extract_images(self, soup, base_url):
    """Collect up to five absolute image URLs that look product-related.

    Args:
        soup: Parsed BeautifulSoup document for the page.
        base_url: Page URL used to resolve relative ``src`` attributes.

    Returns:
        List of at most five absolute image URLs whose path mentions
        'product' or 'item'.
    """
    images = []
    for img in soup.find_all('img', src=True):
        img_url = urljoin(base_url, img['src'])
        # Keep only images whose URL hints at a product/item asset.
        if 'product' in img_url.lower() or 'item' in img_url.lower():
            images.append(img_url)
    return images[:5]  # cap at five to keep extracted records small

def crawl_ids(self, base_url, start=1, end=100):
    """Crawl through an ``id=N`` URL range and collect product-like pages.

    Intended for auditing a site you are authorized to test.

    Args:
        base_url: URL containing an ``id`` query parameter
            (e.g. ``.../index.php?id=1``).
        start: First ID to probe (inclusive).
        end: Last ID to probe (inclusive).

    Returns:
        List of URLs that returned HTTP 200 and contained product-like
        markers. Returns ``[]`` (with a warning) when *base_url* has no
        ``id`` parameter.
    """
    parsed = urlparse(base_url)
    query_params = parse_qs(parsed.query)
    if 'id' not in query_params:
        print("[!] No 'id' parameter found in URL")
        return []
    valid_urls = []
    # NOTE(review): the original f-strings had their braces stripped
    # (f"id=query_params['id'][0]"), so a literal string was replaced and
    # the URL was never rewritten; braces restored here.
    original_id = query_params['id'][0]
    for i in range(start, end + 1):
        test_url = base_url.replace(f"id={original_id}", f"id={i}")
        try:
            response = self.session.get(test_url, timeout=5)
            if response.status_code == 200:
                # Heuristic soft-404 filter: only count pages that mention
                # product/price terms as valid product pages.
                if "product" in response.text.lower() or "price" in response.text.lower():
                    valid_urls.append(test_url)
                    print(f"[+] Valid product ID found: {i}")
                    # Extract data immediately and run the configured checks.
                    self.extract_product_info(test_url)
                    self.test_sql_injection(test_url)
                    self.test_idor(test_url)
            time.sleep(self.delay)  # Be respectful to the server
        except Exception as e:
            print(f"[-] ID {i} failed: {e}")
    return valid_urls

def extract_product_info(self, url):
    """Extract product details from a shop page.

    Fetches *url*, parses the HTML, and appends a product record
    (``url``/``title``/``price``/``description``/``images``) to
    ``self.products`` when a title could be found.

    Args:
        url: Product page URL to fetch.

    Returns:
        The product dict on success, or ``None`` when the fetch/parse
        failed or no title was found.
    """
    try:
        response = self.session.get(url, timeout=5)
        soup = BeautifulSoup(response.text, 'html.parser')
        # NOTE(review): the braces of this dict literal and of the f-strings
        # below were stripped in the original paste; restored here.
        product = {
            'url': url,
            'title': self._extract_title(soup),
            'price': self._extract_price(soup),
            'description': self._extract_description(soup),
            'images': self._extract_images(soup, url),
        }
        if product['title']:
            self.products.append(product)
            print(f"[✓] Extracted: {product['title']} - {product['price']}")
            return product
    except Exception as e:
        print(f"[!] Failed to extract {url}: {e}")
    # No title found (or an error occurred): nothing was recorded.
    return None

def generate_report(self):
    """Generate a comprehensive security & data report.

    Returns:
        A formatted multi-line string summarizing
        ``self.vulnerabilities`` and the first ten entries of
        ``self.products``.
    """
    # NOTE(review): every {…} interpolation in the original f-strings had
    # its braces stripped by the paste; restored here.
    sep = '=' * 60
    report = f"""
{sep}
SHOP AUDITOR REPORT
{sep}

VULNERABILITIES FOUND: {len(self.vulnerabilities)}
"""
    for vuln in self.vulnerabilities:
        report += f"\n  • {vuln['type']}\n    URL: {vuln['url']}\n"
    report += f"\n\nPRODUCTS EXTRACTED: {len(self.products)}\n"
    for product in self.products[:10]:  # Show first 10
        report += f"\n  • {product['title']}\n    Price: {product['price']}\n    URL: {product['url']}\n"
    report += f"\n{sep}\n"
    return report

# Crawl IDs from 1 to 50.
# NOTE(review): in the pasted original this comment was fused onto the same
# line as the statement, commenting the whole call out; split back apart.
valid_pages = auditor.crawl_ids(target_url, start=1, end=50)

def test_idor(self, url, param='id'): """Test for Insecure Direct Object References""" current_id = self._get_param_value(url, param) if not current_id or not current_id.isdigit(): return # Test adjacent IDs for offset in [1, -1, 2, -2]: test_id = str(int(current_id) + offset) test_url = url.replace(f"param=current_id", f"param=test_id") try: response = self.session.get(test_url, timeout=5) # If response is similar but different content, potential IDOR if response.status_code == 200 and "login" not in response.url.lower(): soup = BeautifulSoup(response.text, 'html.parser') title_tag = soup.find('title') if title_tag and '404' not in title_tag.text.lower(): self._report_vulnerability(f'Potential IDOR (ID: test_id)', test_url) except Exception as e: print(f" [!] IDOR test failed: e")

# Helper methods
# NOTE(review): in the pasted original the comment above was fused onto the
# same line as the def, commenting the whole method out; split back apart.
def _get_param_value(self, url, param):
    """Return the first value of query parameter *param* in *url*, or None.

    Args:
        url: URL whose query string is inspected.
        param: Query-parameter name to look up.

    Returns:
        The first value for *param*, or ``None`` when absent.
    """
    parsed = urlparse(url)
    params = parse_qs(parsed.query)
    return params.get(param, [None])[0]