diff --git a/websec-audit/.gitignore b/websec-audit/.gitignore new file mode 100644 index 00000000..85df613f --- /dev/null +++ b/websec-audit/.gitignore @@ -0,0 +1,39 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +env/ +venv/ +ENV/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Reports +report_*.html +report_*.json + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db diff --git a/websec-audit/README.md b/websec-audit/README.md new file mode 100644 index 00000000..a1ade03b --- /dev/null +++ b/websec-audit/README.md @@ -0,0 +1,309 @@ +# WebSecAudit - Comprehensive Website Security Scanner + +A state-of-the-art security scanning tool that performs comprehensive vulnerability assessments on web applications. + +## Features + +WebSecAudit is a powerful security scanner that checks for vulnerabilities across multiple security domains: + +### Security Categories Tested + +- **SSL/TLS Security** + - Certificate validation and expiration + - Protocol version checks (SSLv3, TLS 1.0, 1.1, 1.2, 1.3) + - Weak cipher suite detection + - Self-signed certificate detection + +- **HTTP Security Headers** + - HSTS (HTTP Strict Transport Security) + - Content Security Policy (CSP) + - X-Frame-Options (Clickjacking protection) + - X-Content-Type-Options + - X-XSS-Protection + - Referrer-Policy + - Permissions-Policy + - Cookie security attributes (Secure, HttpOnly, SameSite) + +- **Cross-Site Scripting (XSS)** + - Reflected XSS detection + - DOM-based XSS indicators + - Form input validation testing + +- **SQL Injection** + - Error-based SQL injection detection + - Form and URL parameter testing + - Database error message detection + +- **Authentication & Session Security** + - Insecure authentication forms + - Password field security + - Session token exposure + - HTTP vs HTTPS authentication + +- **CORS & CSRF** + - CORS misconfiguration detection + 
- Wildcard origin issues + - Null origin vulnerabilities + - CSRF token validation + +- **Information Disclosure** + - Sensitive file exposure (.env, .git, backups) + - HTML comment analysis + - Directory listing detection + - Admin panel discovery + - Detailed error messages + - Version information disclosure + +- **Technology Fingerprinting** + - Web framework detection + - Server identification + - JavaScript library detection + - CMS detection (WordPress, Drupal, Joomla, etc.) + +## Installation + +### Prerequisites + +- Python 3.7 or higher +- pip (Python package installer) + +### Setup + +1. Clone or download the tool: +```bash +cd websec-audit +``` + +2. Install dependencies: +```bash +pip install -r requirements.txt +``` + +3. Make the scanner executable (Linux/Mac): +```bash +chmod +x scanner.py +``` + +## Usage + +### Basic Scan + +```bash +python scanner.py https://example.com +``` + +### With Verbose Output + +```bash +python scanner.py https://2rbc-ai.com -v +``` + +### URL Without Protocol + +The tool automatically adds HTTPS if no protocol is specified: + +```bash +python scanner.py example.com +``` + +### Command-Line Options + +``` +usage: scanner.py [-h] [-v] [--version] url + +positional arguments: + url Target URL to scan (e.g., https://example.com) + +optional arguments: + -h, --help Show this help message and exit + -v, --verbose Enable verbose output for debugging + --version Show program's version number and exit +``` + +## Output + +WebSecAudit generates three types of output: + +### 1. Console Output + +Real-time scan progress and summary displayed in the terminal with color-coded severity levels: +- **CRITICAL** - Immediate action required +- **HIGH** - Important security issues +- **MEDIUM** - Moderate security concerns +- **LOW** - Minor issues or best practice violations +- **INFO** - Informational findings + +### 2. 
HTML Report + +A beautiful, detailed HTML report saved as `report__.html` + +Features: +- Executive summary with severity breakdown +- Detailed findings organized by severity +- Color-coded issues +- Recommendations and references +- Professional formatting + +### 3. JSON Report + +Machine-readable JSON format saved as `report__.json` + +Perfect for: +- Integration with other tools +- Automated processing +- Custom analysis +- CI/CD pipelines + +## Example Output + +``` +╔══════════════════════════════════════════════════════════════╗ +║ ║ +║ WebSecAudit - Security Scanner v1.0 ║ +║ Comprehensive Website Vulnerability Scanner ║ +║ ║ +╚══════════════════════════════════════════════════════════════╝ + +Target: https://example.com +Scan Started: 2025-01-15 10:30:45 + +[*] Initializing security scanners... + +[+] Running TechFingerprint... + ✓ TechFingerprint completed - Found 1 issues + +[+] Running SSLScanner... + ✓ SSLScanner completed - Found 2 issues + +[+] Running HeadersScanner... + ✓ HeadersScanner completed - Found 5 issues + +... (more scanners) ... + +====================================================================== + SCAN SUMMARY +====================================================================== + +Target URL: https://example.com +Scan Duration: 12.45 seconds +Total Issues: 15 + +Issues by Severity: + ● CRITICAL : 1 + ● HIGH : 3 + ● MEDIUM : 6 + ● LOW : 4 + ● INFO : 1 + +[✓] HTML Report saved: report_example.com_20250115_103057.html +[✓] JSON Report saved: report_example.com_20250115_103057.json +``` + +## Security Best Practices + +### Ethical Use + +**IMPORTANT**: Only scan websites you own or have explicit permission to test! + +Unauthorized security scanning may be illegal and could be considered: +- Hacking attempts +- Network intrusion +- Violation of computer fraud laws + +### Responsible Disclosure + +If you find vulnerabilities: +1. Report them to the website owner privately +2. Give them reasonable time to fix issues +3. 
Don't publicly disclose until patched +4. Follow responsible disclosure practices + +## Architecture + +WebSecAudit uses a modular architecture: + +``` +websec-audit/ +├── scanner.py # Main orchestrator +├── modules/ # Security scanner modules +│ ├── ssl_scanner.py # SSL/TLS checks +│ ├── headers_scanner.py # HTTP headers +│ ├── xss_scanner.py # XSS detection +│ ├── injection_scanner.py # SQL injection +│ ├── auth_scanner.py # Authentication +│ ├── cors_scanner.py # CORS/CSRF +│ ├── info_disclosure.py # Information leaks +│ └── tech_fingerprint.py # Technology detection +├── utils/ # Utility modules +│ ├── reporter.py # Report generation +│ └── colors.py # Terminal colors +├── requirements.txt # Python dependencies +└── README.md # This file +``` + +## Extending the Tool + +You can easily add new scanners: + +1. Create a new scanner in `modules/` +2. Inherit from `BaseScanner` +3. Implement the `scan()` method +4. Add it to the scanner list in `scanner.py` + +Example: + +```python +from modules.base_scanner import BaseScanner + +class MyScanner(BaseScanner): + def scan(self): + # Your scanning logic + self.add_finding( + title='Issue Title', + severity='HIGH', + category='My Category', + description='Issue description', + recommendation='How to fix it' + ) + return self.findings +``` + +## Limitations + +- This tool performs automated testing and may not catch all vulnerabilities +- False positives are possible - verify findings manually +- Some tests are basic and don't replace manual security audits +- Rate limiting may affect scan completeness +- Does not test authenticated areas without credentials +- Cannot detect all business logic vulnerabilities + +## Contributing + +Contributions are welcome! 
Areas for improvement: + +- Additional scanner modules +- Enhanced detection capabilities +- Performance optimizations +- Better reporting formats +- Multi-threading support +- Authentication support +- API testing capabilities + +## License + +This tool is provided for educational and authorized security testing purposes only. + +## References + +- [OWASP Top 10](https://owasp.org/www-project-top-ten/) +- [OWASP Testing Guide](https://owasp.org/www-project-web-security-testing-guide/) +- [OWASP Secure Headers Project](https://owasp.org/www-project-secure-headers/) +- [PortSwigger Web Security Academy](https://portswigger.net/web-security) + +## Support + +For issues, questions, or contributions, please refer to the project repository. + +--- + +**Remember**: With great power comes great responsibility. Use this tool ethically! diff --git a/websec-audit/modules/__init__.py b/websec-audit/modules/__init__.py new file mode 100644 index 00000000..b8fd66f7 --- /dev/null +++ b/websec-audit/modules/__init__.py @@ -0,0 +1 @@ +# Scanner modules for WebSecAudit diff --git a/websec-audit/modules/auth_scanner.py b/websec-audit/modules/auth_scanner.py new file mode 100644 index 00000000..a85e2840 --- /dev/null +++ b/websec-audit/modules/auth_scanner.py @@ -0,0 +1,173 @@ +"""Authentication and Session Security Scanner""" + +import requests +from bs4 import BeautifulSoup +from typing import List, Dict, Any +from modules.base_scanner import BaseScanner + + +class AuthScanner(BaseScanner): + """Scanner for authentication and session security issues""" + + def scan(self) -> List[Dict[str, Any]]: + """Scan for authentication and session security issues""" + try: + self.log(f"Scanning authentication and session security at {self.target_url}") + + # Get the page + response = requests.get( + self.target_url, + timeout=15, + headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'} + ) + + soup = BeautifulSoup(response.text, 'html.parser') + + # Check for login forms + 
self._check_login_forms(soup, response) + + # Check password reset functionality + self._check_password_fields(soup) + + # Check for session token exposure + self._check_session_exposure(soup, response) + + # Check authentication over HTTP + if self.target_url.startswith('http://'): + self._check_http_authentication(soup) + + except requests.exceptions.RequestException as e: + self.log(f"Request error during auth scan: {str(e)}") + + return self.findings + + def _check_login_forms(self, soup: BeautifulSoup, response): + """Check login forms for security issues""" + forms = soup.find_all('form') + + for form in forms: + # Check if it's a login form + password_inputs = form.find_all('input', {'type': 'password'}) + + if password_inputs: + self.log("Found login form") + + # Check if form action is HTTPS + action = form.get('action', '') + if action and action.startswith('http://'): + self.add_finding( + title='Login Form Submits Over HTTP', + severity='CRITICAL', + category='Authentication', + description='Login form submits credentials over unencrypted HTTP connection.', + evidence=f'Form action: {action}', + recommendation='Use HTTPS for all authentication forms to protect credentials in transit.', + references=['https://owasp.org/www-community/vulnerabilities/Unencrypted_sensitive_data'] + ) + + # Check for autocomplete on password fields + for pwd_input in password_inputs: + autocomplete = pwd_input.get('autocomplete', '').lower() + if autocomplete not in ['off', 'new-password', 'current-password']: + self.add_finding( + title='Password Field Allows Autocomplete', + severity='LOW', + category='Authentication', + description='Password field does not disable autocomplete.', + recommendation='Set autocomplete="off" or use appropriate autocomplete values for password fields.', + references=['https://owasp.org/www-community/vulnerabilities/Sensitive_Data_Exposure'] + ) + break + + def _check_password_fields(self, soup: BeautifulSoup): + """Check password field 
implementations""" + password_inputs = soup.find_all('input', {'type': 'password'}) + + for pwd_input in password_inputs: + # Check for password fields in non-HTTPS pages + if self.target_url.startswith('http://'): + self.add_finding( + title='Password Field on Non-HTTPS Page', + severity='CRITICAL', + category='Authentication', + description='Password input field found on a non-HTTPS page.', + recommendation='All pages with password fields must be served over HTTPS.', + references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/03-Testing_for_Sensitive_Information_Sent_via_Unencrypted_Channels'] + ) + break + + # Check for visible password options + parent_form = pwd_input.find_parent('form') + if parent_form: + # Look for show/hide password functionality + show_password = parent_form.find_all(text=lambda text: text and 'show' in text.lower()) + if show_password: + self.log("Password visibility toggle found") + + def _check_session_exposure(self, soup: BeautifulSoup, response): + """Check for session token exposure in URLs or JavaScript""" + # Check for session tokens in URLs + links = soup.find_all('a', href=True) + + session_keywords = ['sessionid', 'session_id', 'sid', 'token', 'auth', 'jsessionid'] + + for link in links: + href = link['href'].lower() + for keyword in session_keywords: + if keyword in href: + self.add_finding( + title='Potential Session Token in URL', + severity='HIGH', + category='Authentication', + description='Session token or authentication credential appears to be passed in URL.', + evidence=f'Link contains "{keyword}" parameter', + recommendation='Use HTTP-only cookies for session management. 
Never pass session tokens in URLs.', + references=[ + 'https://owasp.org/www-community/vulnerabilities/Session_ID_in_URL_Rewriting', + 'https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html' + ] + ) + break + + # Check for session tokens in JavaScript + scripts = soup.find_all('script') + for script in scripts: + if script.string: + script_lower = script.string.lower() + for keyword in session_keywords: + if keyword in script_lower: + self.add_finding( + title='Potential Session Token in JavaScript', + severity='MEDIUM', + category='Authentication', + description='Possible session token or credential found in JavaScript code.', + evidence=f'JavaScript contains "{keyword}"', + recommendation='Avoid exposing session tokens in client-side code. Use HTTP-only cookies.', + references=['https://owasp.org/www-community/vulnerabilities/Session_fixation'] + ) + break + + def _check_http_authentication(self, soup: BeautifulSoup): + """Check for authentication forms on HTTP pages""" + forms = soup.find_all('form') + + for form in forms: + inputs = form.find_all('input') + + # Check for authentication-related forms + auth_indicators = ['password', 'username', 'email', 'login'] + + for input_field in inputs: + input_type = input_field.get('type', '').lower() + input_name = input_field.get('name', '').lower() + + if input_type in auth_indicators or any(indicator in input_name for indicator in auth_indicators): + self.add_finding( + title='Authentication Form on HTTP Page', + severity='CRITICAL', + category='Authentication', + description='Authentication form found on unencrypted HTTP page.', + recommendation='Serve all authentication pages over HTTPS only.', + references=['https://owasp.org/www-community/vulnerabilities/Unencrypted_sensitive_data'] + ) + return diff --git a/websec-audit/modules/base_scanner.py b/websec-audit/modules/base_scanner.py new file mode 100644 index 00000000..20a857ec --- /dev/null +++ b/websec-audit/modules/base_scanner.py 
@@ -0,0 +1,44 @@ +"""Base scanner class for all security scanners""" + +from typing import List, Dict, Any +from abc import ABC, abstractmethod + + +class BaseScanner(ABC): + """Abstract base class for all security scanners""" + + def __init__(self, target_url: str, verbose: bool = False): + self.target_url = target_url + self.verbose = verbose + self.findings = [] + + @abstractmethod + def scan(self) -> List[Dict[str, Any]]: + """Execute the security scan and return findings""" + pass + + def add_finding(self, title: str, severity: str, category: str, + description: str = None, evidence: str = None, + recommendation: str = None, references: List[str] = None): + """Add a security finding to the results""" + finding = { + 'title': title, + 'severity': severity.upper(), + 'category': category, + } + + if description: + finding['description'] = description + if evidence: + finding['evidence'] = evidence + if recommendation: + finding['recommendation'] = recommendation + if references: + finding['references'] = references + + self.findings.append(finding) + + def log(self, message: str): + """Log verbose messages""" + if self.verbose: + print(f" [DEBUG] {message}") diff --git a/websec-audit/modules/cors_scanner.py b/websec-audit/modules/cors_scanner.py new file mode 100644 index 00000000..c5497f22 --- /dev/null +++ b/websec-audit/modules/cors_scanner.py @@ -0,0 +1,169 @@ +"""CORS and CSRF Security Scanner""" + +import requests +from bs4 import BeautifulSoup +from typing import List, Dict, Any +from modules.base_scanner import BaseScanner + + +class CORSScanner(BaseScanner): + """Scanner for CORS and CSRF vulnerabilities""" + + def scan(self) -> List[Dict[str, Any]]: + """Scan for CORS and CSRF issues""" + try: + self.log(f"Scanning for CORS/CSRF issues at {self.target_url}") + + # Check CORS configuration + self._check_cors_configuration() + + # Check for CSRF protection + response = requests.get( + self.target_url, + timeout=15, + headers={'User-Agent': 
'WebSecAudit/1.0 Security Scanner'} + ) + + soup = BeautifulSoup(response.text, 'html.parser') + self._check_csrf_protection(soup) + + except requests.exceptions.RequestException as e: + self.log(f"Request error during CORS/CSRF scan: {str(e)}") + + return self.findings + + def _check_cors_configuration(self): + """Check CORS configuration""" + try: + # Test with a malicious origin + test_origin = 'https://evil.com' + + response = requests.get( + self.target_url, + timeout=10, + headers={ + 'User-Agent': 'WebSecAudit/1.0 Security Scanner', + 'Origin': test_origin + } + ) + + # Check CORS headers + acao = response.headers.get('Access-Control-Allow-Origin') + acac = response.headers.get('Access-Control-Allow-Credentials') + + if acao: + self.log(f"CORS enabled: {acao}") + + # Check for wildcard with credentials + if acao == '*': + if acac and acac.lower() == 'true': + self.add_finding( + title='CORS Misconfiguration: Wildcard with Credentials', + severity='CRITICAL', + category='CORS', + description='CORS allows all origins (*) with credentials enabled.', + evidence=f'Access-Control-Allow-Origin: *, Access-Control-Allow-Credentials: true', + recommendation='Never use wildcard (*) origin with credentials. 
Specify exact origins.', + references=[ + 'https://owasp.org/www-community/attacks/CORS_OriginHeaderScrutiny', + 'https://portswigger.net/web-security/cors' + ] + ) + else: + self.add_finding( + title='CORS Allows All Origins', + severity='MEDIUM', + category='CORS', + description='CORS policy allows requests from any origin (*).', + evidence='Access-Control-Allow-Origin: *', + recommendation='Restrict CORS to specific trusted origins instead of using wildcard.', + references=['https://owasp.org/www-community/attacks/CORS_OriginHeaderScrutiny'] + ) + + # Check if it reflects our malicious origin + elif acao == test_origin: + self.add_finding( + title='CORS Reflects Arbitrary Origins', + severity='HIGH', + category='CORS', + description='CORS policy reflects the Origin header without validation.', + evidence=f'Origin header: {test_origin}, Reflected: {acao}', + recommendation='Validate Origin header against a whitelist of trusted domains.', + references=['https://owasp.org/www-community/attacks/CORS_OriginHeaderScrutiny'] + ) + + # Check for null origin + response_null = requests.get( + self.target_url, + timeout=10, + headers={ + 'User-Agent': 'WebSecAudit/1.0 Security Scanner', + 'Origin': 'null' + } + ) + + acao_null = response_null.headers.get('Access-Control-Allow-Origin') + if acao_null == 'null': + self.add_finding( + title='CORS Allows Null Origin', + severity='HIGH', + category='CORS', + description='CORS policy allows the null origin.', + evidence='Access-Control-Allow-Origin: null', + recommendation='Never allow null origin. 
Validate against specific trusted domains.', + references=['https://portswigger.net/web-security/cors'] + ) + + except requests.exceptions.RequestException as e: + self.log(f"Error checking CORS: {str(e)}") + + def _check_csrf_protection(self, soup: BeautifulSoup): + """Check for CSRF protection mechanisms""" + forms = soup.find_all('form') + + if not forms: + return + + self.log(f"Checking {len(forms)} forms for CSRF protection") + + for idx, form in enumerate(forms[:10]): # Check up to 10 forms + method = form.get('method', 'get').lower() + + # CSRF is primarily a concern for state-changing requests (POST, PUT, DELETE) + if method not in ['post', 'put', 'delete']: + continue + + # Look for CSRF token fields + csrf_token_found = False + inputs = form.find_all('input', {'type': 'hidden'}) + + csrf_keywords = [ + 'csrf', 'token', 'authenticity_token', 'xsrf', + '_token', 'csrftoken', 'csrf_token', '__requestverificationtoken' + ] + + for input_field in inputs: + input_name = input_field.get('name', '').lower() + if any(keyword in input_name for keyword in csrf_keywords): + csrf_token_found = True + self.log(f"CSRF token found in form #{idx + 1}") + break + + if not csrf_token_found: + # Get form identifier + form_id = form.get('id', '') + form_action = form.get('action', '') + form_identifier = f"ID: {form_id}" if form_id else f"Action: {form_action}" if form_action else f"Form #{idx + 1}" + + self.add_finding( + title=f'Missing CSRF Protection in Form', + severity='HIGH', + category='CSRF', + description=f'Form ({form_identifier}) does not appear to have CSRF protection.', + evidence=f'POST form without visible CSRF token field', + recommendation='Implement CSRF tokens for all state-changing operations. 
Use framework-provided CSRF protection.', + references=[ + 'https://owasp.org/www-community/attacks/csrf', + 'https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html' + ] + ) diff --git a/websec-audit/modules/headers_scanner.py b/websec-audit/modules/headers_scanner.py new file mode 100644 index 00000000..651a354c --- /dev/null +++ b/websec-audit/modules/headers_scanner.py @@ -0,0 +1,255 @@ +"""HTTP Security Headers Scanner""" + +import requests +from typing import List, Dict, Any +from modules.base_scanner import BaseScanner + + +class HeadersScanner(BaseScanner): + """Scanner for HTTP security headers""" + + # Security headers that should be present + SECURITY_HEADERS = { + 'Strict-Transport-Security': { + 'severity': 'HIGH', + 'description': 'HTTP Strict Transport Security (HSTS) is not set.', + 'recommendation': 'Add Strict-Transport-Security header: max-age=31536000; includeSubDomains; preload', + 'references': ['https://owasp.org/www-project-secure-headers/#http-strict-transport-security'] + }, + 'X-Frame-Options': { + 'severity': 'MEDIUM', + 'description': 'X-Frame-Options header is missing.', + 'recommendation': 'Add X-Frame-Options header: DENY or SAMEORIGIN to prevent clickjacking.', + 'references': ['https://owasp.org/www-project-secure-headers/#x-frame-options'] + }, + 'X-Content-Type-Options': { + 'severity': 'MEDIUM', + 'description': 'X-Content-Type-Options header is missing.', + 'recommendation': 'Add X-Content-Type-Options: nosniff to prevent MIME type sniffing.', + 'references': ['https://owasp.org/www-project-secure-headers/#x-content-type-options'] + }, + 'Content-Security-Policy': { + 'severity': 'HIGH', + 'description': 'Content-Security-Policy (CSP) header is missing.', + 'recommendation': 'Implement a Content-Security-Policy to prevent XSS and injection attacks.', + 'references': ['https://owasp.org/www-project-secure-headers/#content-security-policy'] + }, + 'X-XSS-Protection': { + 'severity': 
'LOW', + 'description': 'X-XSS-Protection header is missing.', + 'recommendation': 'Add X-XSS-Protection: 1; mode=block (Note: CSP is preferred).', + 'references': ['https://owasp.org/www-project-secure-headers/#x-xss-protection'] + }, + 'Referrer-Policy': { + 'severity': 'LOW', + 'description': 'Referrer-Policy header is missing.', + 'recommendation': 'Add Referrer-Policy header: no-referrer or strict-origin-when-cross-origin', + 'references': ['https://owasp.org/www-project-secure-headers/#referrer-policy'] + }, + 'Permissions-Policy': { + 'severity': 'LOW', + 'description': 'Permissions-Policy header is missing.', + 'recommendation': 'Add Permissions-Policy to control browser features and APIs.', + 'references': ['https://owasp.org/www-project-secure-headers/#permissions-policy'] + } + } + + # Headers that should NOT be present (information disclosure) + DANGEROUS_HEADERS = { + 'Server': 'Server version information disclosed', + 'X-Powered-By': 'Technology stack information disclosed', + 'X-AspNet-Version': 'ASP.NET version information disclosed', + 'X-AspNetMvc-Version': 'ASP.NET MVC version information disclosed' + } + + def scan(self) -> List[Dict[str, Any]]: + """Scan HTTP security headers""" + try: + self.log(f"Fetching headers from {self.target_url}") + + # Make request with timeout + response = requests.get( + self.target_url, + timeout=15, + allow_redirects=True, + verify=True, + headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'} + ) + + headers = response.headers + self.log(f"Received {len(headers)} headers") + + # Check for missing security headers + self._check_missing_security_headers(headers) + + # Check for insecure header values + self._check_insecure_header_values(headers) + + # Check for information disclosure headers + self._check_information_disclosure(headers) + + # Check cookie security + self._check_cookie_security(response) + + except requests.exceptions.SSLError as e: + self.add_finding( + title='SSL Certificate Error', + 
severity='HIGH', + category='HTTP Headers', + description='SSL certificate validation failed.', + evidence=str(e), + recommendation='Ensure a valid SSL certificate is properly configured.' + ) + except requests.exceptions.Timeout: + self.add_finding( + title='Connection Timeout', + severity='MEDIUM', + category='HTTP Headers', + description='Connection to the server timed out.', + recommendation='Verify that the server is accessible and responsive.' + ) + except requests.exceptions.RequestException as e: + self.log(f"Request error: {str(e)}") + self.add_finding( + title='Connection Error', + severity='MEDIUM', + category='HTTP Headers', + description='Unable to connect to the server.', + evidence=str(e), + recommendation='Verify that the URL is correct and the server is accessible.' + ) + + return self.findings + + def _check_missing_security_headers(self, headers: Dict): + """Check for missing security headers""" + for header_name, header_info in self.SECURITY_HEADERS.items(): + if header_name not in headers: + self.add_finding( + title=f'Missing Security Header: {header_name}', + severity=header_info['severity'], + category='HTTP Headers', + description=header_info['description'], + recommendation=header_info['recommendation'], + references=header_info['references'] + ) + + def _check_insecure_header_values(self, headers: Dict): + """Check for insecure values in existing headers""" + + # Check HSTS header value + if 'Strict-Transport-Security' in headers: + hsts_value = headers['Strict-Transport-Security'] + if 'max-age' in hsts_value: + try: + # Extract max-age value + for part in hsts_value.split(';'): + if 'max-age' in part: + max_age = int(part.split('=')[1].strip()) + if max_age < 31536000: # Less than 1 year + self.add_finding( + title='Weak HSTS max-age Value', + severity='MEDIUM', + category='HTTP Headers', + description=f'HSTS max-age is set to {max_age} seconds, which is less than recommended.', + evidence=f'Strict-Transport-Security: {hsts_value}', + 
recommendation='Set HSTS max-age to at least 31536000 (1 year).' + ) + except: + pass + + # Check X-Frame-Options + if 'X-Frame-Options' in headers: + xfo_value = headers['X-Frame-Options'].upper() + if xfo_value == 'ALLOW-FROM': + self.add_finding( + title='Deprecated X-Frame-Options Value', + severity='LOW', + category='HTTP Headers', + description='X-Frame-Options ALLOW-FROM is deprecated.', + evidence=f'X-Frame-Options: {headers["X-Frame-Options"]}', + recommendation='Use Content-Security-Policy frame-ancestors directive instead.' + ) + + # Check CSP + if 'Content-Security-Policy' in headers: + csp_value = headers['Content-Security-Policy'].lower() + + if 'unsafe-inline' in csp_value: + self.add_finding( + title='Weak CSP: unsafe-inline Detected', + severity='MEDIUM', + category='HTTP Headers', + description="CSP contains 'unsafe-inline' which weakens XSS protection.", + evidence='unsafe-inline directive found in CSP', + recommendation="Remove 'unsafe-inline' and use nonces or hashes for inline scripts." + ) + + if 'unsafe-eval' in csp_value: + self.add_finding( + title='Weak CSP: unsafe-eval Detected', + severity='MEDIUM', + category='HTTP Headers', + description="CSP contains 'unsafe-eval' which weakens XSS protection.", + evidence='unsafe-eval directive found in CSP', + recommendation="Remove 'unsafe-eval' to prevent code injection via eval()." 
+ ) + + def _check_information_disclosure(self, headers: Dict): + """Check for headers that disclose sensitive information""" + for header_name, description in self.DANGEROUS_HEADERS.items(): + if header_name in headers: + self.add_finding( + title=f'Information Disclosure: {header_name}', + severity='LOW', + category='HTTP Headers', + description=description, + evidence=f'{header_name}: {headers[header_name]}', + recommendation=f'Remove or obfuscate the {header_name} header to avoid disclosing version information.', + references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/01-Information_Gathering/02-Fingerprint_Web_Server'] + ) + + def _check_cookie_security(self, response): + """Check cookie security attributes""" + if 'Set-Cookie' in response.headers: + cookies = response.headers.get('Set-Cookie', '') + cookie_headers = response.raw.headers.getlist('Set-Cookie') if hasattr(response.raw.headers, 'getlist') else [cookies] + + for cookie in cookie_headers: + cookie_lower = cookie.lower() + + # Check for HttpOnly flag + if 'httponly' not in cookie_lower: + self.add_finding( + title='Cookie Missing HttpOnly Flag', + severity='MEDIUM', + category='HTTP Headers', + description='Cookie is set without the HttpOnly flag.', + evidence=cookie[:100] + ('...' if len(cookie) > 100 else ''), + recommendation='Set the HttpOnly flag on cookies to prevent XSS attacks from accessing them.', + references=['https://owasp.org/www-community/HttpOnly'] + ) + + # Check for Secure flag on HTTPS sites + if self.target_url.startswith('https://') and 'secure' not in cookie_lower: + self.add_finding( + title='Cookie Missing Secure Flag', + severity='MEDIUM', + category='HTTP Headers', + description='Cookie is set without the Secure flag on an HTTPS site.', + evidence=cookie[:100] + ('...' 
"""Information Disclosure Scanner"""

import requests
from bs4 import BeautifulSoup, Comment
from typing import List, Dict, Any
from modules.base_scanner import BaseScanner


class InfoDisclosureScanner(BaseScanner):
    """Scanner for information disclosure vulnerabilities.

    Checks the target for leaky HTML comments, exposed sensitive files,
    directory listings, verbose error messages and reachable admin panels.
    """

    # Common sensitive file paths to check
    SENSITIVE_FILES = [
        '/.git/HEAD',
        '/.git/config',
        '/.env',
        '/.env.local',
        '/.env.production',
        '/web.config',
        '/Web.config',
        '/.htaccess',
        '/phpinfo.php',
        '/info.php',
        '/test.php',
        '/README.md',
        '/readme.md',
        '/CHANGELOG.md',
        '/composer.json',
        '/package.json',
        '/.DS_Store',
        '/backup.zip',
        '/backup.sql',
        '/dump.sql',
        '/.svn/entries',
        '/crossdomain.xml',
        '/robots.txt',
        '/sitemap.xml',
    ]

    # Paths whose exposure is rated CRITICAL.  Stored lowercase and compared
    # case-insensitively so both '/web.config' and '/Web.config' are caught.
    CRITICAL_FILES = frozenset({'/.git/head', '/.env', '/web.config'})

    # Sensitive keywords to look for in comments
    SENSITIVE_KEYWORDS = [
        'password', 'passwd', 'pwd', 'secret', 'token', 'api_key', 'apikey',
        'private_key', 'access_key', 'secret_key', 'admin', 'root',
        'todo', 'fixme', 'hack', 'bug', 'debug'
    ]

    def scan(self) -> List[Dict[str, Any]]:
        """Run all information-disclosure checks and return the findings list."""
        try:
            self.log(f"Scanning for information disclosure at {self.target_url}")

            # Fetch the landing page once; the comment/listing/error checks
            # all reuse this response.
            response = requests.get(
                self.target_url,
                timeout=15,
                headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'}
            )
            soup = BeautifulSoup(response.text, 'html.parser')

            self._check_html_comments(soup)
            self._check_sensitive_files()
            self._check_directory_listing(response)
            self._check_error_messages(soup)
            self._check_admin_panels()

        except requests.exceptions.RequestException as e:
            # A network failure aborts this module but must not kill the scan.
            self.log(f"Request error during info disclosure scan: {str(e)}")

        return self.findings

    def _check_html_comments(self, soup: BeautifulSoup):
        """Check HTML comments for sensitive information."""
        comments = soup.find_all(string=lambda text: isinstance(text, Comment))

        for comment in comments:
            comment_lower = comment.lower()

            for keyword in self.SENSITIVE_KEYWORDS:
                if keyword in comment_lower:
                    # Truncate long comments for the evidence field.
                    preview = comment[:100].strip() + ('...' if len(comment) > 100 else '')

                    self.add_finding(
                        title='Sensitive Information in HTML Comments',
                        severity='LOW',
                        category='Information Disclosure',
                        description=f'HTML comment contains potentially sensitive keyword: "{keyword}"',
                        evidence=f'Comment preview: {preview}',
                        recommendation='Remove sensitive information and developer comments from production code.',
                        references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/01-Information_Gathering/05-Review_Webpage_Content_for_Information_Leakage']
                    )
                    break  # Only report once per comment

    def _check_sensitive_files(self):
        """Probe well-known sensitive paths and report any that respond."""
        base_url = self.target_url.rstrip('/')
        found_files = []

        for file_path in self.SENSITIVE_FILES:
            try:
                response = requests.get(
                    base_url + file_path,
                    timeout=5,
                    headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'},
                    allow_redirects=False
                )

                if response.status_code == 200:
                    # Skip soft-404s (a 200 page whose first chunk mentions
                    # "404").  Slice BEFORE lowering: the old code lowercased
                    # the entire body just to inspect 200 characters.
                    if len(response.content) > 0 and '404' not in response.text[:200].lower():
                        found_files.append(file_path)
                        self.log(f"Found exposed file: {file_path}")

            except requests.exceptions.RequestException:
                # File doesn't exist or connection error
                continue

        if found_files:
            # BUG FIX: the previous check compared only against '/web.config',
            # so an exposed '/Web.config' (also in SENSITIVE_FILES) was rated
            # HIGH rather than CRITICAL.  Compare case-insensitively.
            if any(f.lower() in self.CRITICAL_FILES for f in found_files):
                severity = 'CRITICAL'
            else:
                severity = 'HIGH'

            self.add_finding(
                title='Sensitive Files Exposed',
                severity=severity,
                category='Information Disclosure',
                description='Sensitive files are publicly accessible.',
                evidence=f'Exposed files: {", ".join(found_files)}',
                recommendation='Remove or restrict access to sensitive files. Use proper access controls.',
                references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/03-Test_File_Extensions_Handling_for_Sensitive_Information']
            )

    def _check_directory_listing(self, response):
        """Check for directory listing vulnerability."""
        # Common indicators of directory listing
        listing_indicators = [
            'Index of /',
            'Directory Listing',
            'Parent Directory',
            'Index of',
        ]

        response_text = response.text

        for indicator in listing_indicators:
            if indicator in response_text:
                self.add_finding(
                    title='Directory Listing Enabled',
                    severity='MEDIUM',
                    category='Information Disclosure',
                    description='Server has directory listing enabled.',
                    evidence=f'Indicator found: {indicator}',
                    recommendation='Disable directory listing in web server configuration.',
                    references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/04-Review_Old_Backup_and_Unreferenced_Files_for_Sensitive_Information']
                )
                break

    def _check_error_messages(self, soup: BeautifulSoup):
        """Check for detailed error messages leaked into the page text."""
        error_patterns = [
            'stack trace',
            'exception',
            'fatal error',
            'mysql error',
            'postgresql error',
            'oracle error',
            'warning:',
            'traceback',
            'at line',
        ]

        page_text = soup.get_text().lower()

        for pattern in error_patterns:
            if pattern in page_text:
                self.add_finding(
                    title='Detailed Error Messages Exposed',
                    severity='MEDIUM',
                    category='Information Disclosure',
                    description='Application exposes detailed error messages.',
                    evidence=f'Error pattern detected: {pattern}',
                    recommendation='Configure application to show generic error messages to users. Log detailed errors server-side.',
                    references=['https://owasp.org/www-community/Improper_Error_Handling']
                )
                break

    def _check_admin_panels(self):
        """Check for exposed admin panels."""
        base_url = self.target_url.rstrip('/')

        admin_paths = [
            '/admin',
            '/administrator',
            '/admin.php',
            '/admin/',
            '/wp-admin',
            '/phpmyadmin',
            '/cpanel',
            '/admin/login',
            '/administrator/login',
        ]

        found_admin = []

        for path in admin_paths:
            try:
                response = requests.get(
                    base_url + path,
                    timeout=5,
                    headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'},
                    allow_redirects=True
                )

                # A 200 containing login/admin vocabulary is treated as a hit.
                if response.status_code == 200:
                    response_lower = response.text.lower()
                    if any(keyword in response_lower for keyword in ['login', 'admin', 'dashboard', 'username', 'password']):
                        found_admin.append(path)
                        self.log(f"Found admin panel: {path}")

            except requests.exceptions.RequestException:
                continue

        if found_admin:
            self.add_finding(
                title='Admin Panel Publicly Accessible',
                severity='MEDIUM',
                category='Information Disclosure',
                description='Admin panel or login page is publicly accessible.',
                evidence=f'Admin paths found: {", ".join(found_admin)}',
                recommendation='Implement IP whitelisting, VPN access, or other access controls for admin panels. Use strong authentication and rate limiting.',
                references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/02-Configuration_and_Deployment_Management_Testing/']
            )
"""SQL Injection and Other Injection Vulnerability Scanner"""

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse, parse_qs
from typing import List, Dict, Any
from modules.base_scanner import BaseScanner


class InjectionScanner(BaseScanner):
    """Probes forms and URL parameters for SQL injection indicators."""

    # SQL injection test payloads
    SQL_PAYLOADS = [
        "'",
        "' OR '1'='1",
        "' OR '1'='1' --",
        "' OR '1'='1' /*",
        "admin' --",
        "1' OR '1' = '1",
        "' UNION SELECT NULL--",
    ]

    # Database error fragments that betray an injectable query.
    SQL_ERROR_PATTERNS = [
        'sql syntax',
        'mysql_fetch',
        'mysql_num_rows',
        'ora-01',
        'postgresql',
        'pg_query',
        'sqlite_',
        'unclosed quotation mark',
        'quoted string not properly terminated',
        'microsoft sql server',
        'odbc sql server driver',
        'microsoft ole db provider',
    ]

    # Command injection payloads (currently unused by scan()).
    COMMAND_PAYLOADS = [
        '; ls',
        '| ls',
        '& dir',
        '; cat /etc/passwd',
        '`whoami`',
        '$(whoami)',
    ]

    # User-Agent header sent with every probe request.
    _HEADERS = {'User-Agent': 'WebSecAudit/1.0 Security Scanner'}

    def scan(self) -> List[Dict[str, Any]]:
        """Fetch the target page, test its forms and its query string."""
        try:
            self.log(f"Scanning for injection vulnerabilities at {self.target_url}")

            page = requests.get(self.target_url, timeout=15, headers=self._HEADERS)
            forms = BeautifulSoup(page.text, 'html.parser').find_all('form')
            self.log(f"Found {len(forms)} forms to test for injection")

            # Cap the number of tested forms to keep request volume sane.
            for number, form in enumerate(forms[:5], start=1):
                self._test_sql_injection(form, number)

            self._check_url_sql_injection()

        except requests.exceptions.RequestException as e:
            self.log(f"Request error during injection scan: {str(e)}")

        return self.findings

    @staticmethod
    def _build_form_data(fields, payload: str) -> Dict[str, str]:
        """Map each named, non-button field to the payload or a filler value."""
        data = {}
        for field in fields:
            name = field.get('name')
            kind = field.get('type', 'text')
            if not name or kind in ('submit', 'button'):
                continue
            if kind in ('text', 'email', 'search', 'url') or field.name == 'textarea':
                data[name] = payload  # free-text field: inject here
            elif kind == 'hidden':
                data[name] = field.get('value', '')  # keep server-expected value
            else:
                data[name] = 'test'
        return data

    def _test_sql_injection(self, form, form_number: int):
        """Submit SQL payloads through one form and look for database errors."""
        try:
            target = urljoin(self.target_url, form.get('action', ''))
            method = form.get('method', 'get').lower()

            self.log(f"Testing form #{form_number} for SQL injection")

            fields = form.find_all(['input', 'textarea'])
            if not fields:
                return

            for payload in self.SQL_PAYLOADS[:3]:
                data = self._build_form_data(fields, payload)
                if not data:
                    continue

                try:
                    if method == 'post':
                        reply = requests.post(target, data=data, timeout=10,
                                              headers=self._HEADERS)
                    else:
                        reply = requests.get(target, params=data, timeout=10,
                                             headers=self._HEADERS)

                    body = reply.text.lower()

                    # First matching error fragment, if any, in list order.
                    matched = next((p for p in self.SQL_ERROR_PATTERNS if p in body), None)
                    if matched is not None:
                        self.add_finding(
                            title=f'Potential SQL Injection in Form #{form_number}',
                            severity='CRITICAL',
                            category='SQL Injection',
                            description='SQL error message detected in response, indicating possible SQL injection vulnerability.',
                            evidence=f'Form: {target}, Method: {method.upper()}, Error pattern: {matched}, Payload: {payload}',
                            recommendation='Use parameterized queries (prepared statements) to prevent SQL injection. Never concatenate user input into SQL queries.',
                            references=[
                                'https://owasp.org/www-community/attacks/SQL_Injection',
                                'https://cheatsheetseries.owasp.org/cheatsheets/SQL_Injection_Prevention_Cheat_Sheet.html'
                            ]
                        )
                        self.log(f"SQL error detected in form #{form_number}")
                        return  # One confirmed hit per form is enough.

                    # A 500 on a SQL payload is a weaker (HIGH) signal.
                    if reply.status_code == 500:
                        self.add_finding(
                            title=f'Server Error on SQL Payload in Form #{form_number}',
                            severity='HIGH',
                            category='SQL Injection',
                            description='Server returned 500 error when SQL payload was submitted.',
                            evidence=f'Form: {target}, Method: {method.upper()}, Payload: {payload}',
                            recommendation='Investigate server-side error handling and implement proper input validation.',
                            references=['https://owasp.org/www-community/attacks/SQL_Injection']
                        )
                        return

                except requests.exceptions.RequestException as e:
                    self.log(f"Error testing SQL injection in form #{form_number}: {str(e)}")
                    continue

        except Exception as e:
            self.log(f"Error processing form #{form_number} for SQL injection: {str(e)}")

    def _check_url_sql_injection(self):
        """Replay the target URL with SQL payloads in each query parameter."""
        parsed = urlparse(self.target_url)
        params = parse_qs(parsed.query)
        if not params:
            return

        self.log("Checking URL parameters for SQL injection")

        bare_url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"

        for name in params:
            for payload in self.SQL_PAYLOADS[:2]:
                probe = dict(params)
                probe[name] = [payload]

                try:
                    reply = requests.get(bare_url, params=probe, timeout=10,
                                         headers=self._HEADERS)
                    body = reply.text.lower()

                    matched = next((p for p in self.SQL_ERROR_PATTERNS if p in body), None)
                    if matched is not None:
                        self.add_finding(
                            title=f'Potential SQL Injection in URL Parameter: {name}',
                            severity='CRITICAL',
                            category='SQL Injection',
                            description='SQL error message detected when testing URL parameter.',
                            evidence=f'Parameter: {name}, Payload: {payload}, Error: {matched}',
                            recommendation='Use parameterized queries and proper input validation for all URL parameters.',
                            references=['https://owasp.org/www-community/attacks/SQL_Injection']
                        )
                        return

                except requests.exceptions.RequestException as e:
                    self.log(f"Error testing SQL injection in URL parameter {name}: {str(e)}")
                    continue
"""SSL/TLS Security Scanner"""

import ssl
import socket
from urllib.parse import urlparse
from datetime import datetime, timezone
from typing import List, Dict, Any

from modules.base_scanner import BaseScanner


class SSLScanner(BaseScanner):
    """Scanner for SSL/TLS configuration issues.

    Runs three checks against the target: certificate validity/expiry,
    negotiated protocol version, and negotiated cipher suite.
    """

    # Deprecated protocol names as reported by SSLSocket.version().
    # NOTE(review): ssl.create_default_context() on modern Python refuses to
    # negotiate below TLS 1.2, so these branches mainly matter on old runtimes.
    WEAK_PROTOCOLS = ['SSLv2', 'SSLv3', 'TLSv1', 'TLSv1.1']

    # Substrings identifying weak/legacy cipher suites.
    WEAK_CIPHERS = [
        'DES', 'RC4', 'MD5', 'NULL', 'anon', 'EXPORT',
        '3DES', 'ADH', 'AECDH'
    ]

    def scan(self) -> List[Dict[str, Any]]:
        """Run the SSL/TLS checks and return the findings list."""
        parsed_url = urlparse(self.target_url)

        # Skip if not HTTPS
        if parsed_url.scheme != 'https':
            self.add_finding(
                title='HTTPS Not Used',
                severity='HIGH',
                category='SSL/TLS',
                description='The website does not use HTTPS encryption.',
                evidence=f'URL scheme: {parsed_url.scheme}',
                recommendation='Enable HTTPS for all pages to protect data in transit.',
                references=[
                    'https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/01-Testing_for_Weak_Transport_Layer_Security'
                ]
            )
            return self.findings

        # BUG FIX: netloc.split(':')[0] mangles IPv6 literals such as
        # https://[::1]:8443/ and keeps any userinfo; urlparse's .hostname
        # strips the port (and brackets) correctly.
        hostname = parsed_url.hostname
        port = parsed_url.port or 443

        self.log(f"Checking SSL/TLS for {hostname}:{port}")

        try:
            self._check_certificate(hostname, port)
            self._check_protocol_versions(hostname, port)
            self._check_cipher_suites(hostname, port)

        except Exception as e:
            self.log(f"Error during SSL/TLS scan: {str(e)}")
            self.add_finding(
                title='SSL/TLS Connection Error',
                severity='MEDIUM',
                category='SSL/TLS',
                description='Unable to establish SSL/TLS connection for testing.',
                evidence=str(e),
                recommendation='Verify that the server is accessible and properly configured.'
            )

        return self.findings

    def _check_certificate(self, hostname: str, port: int):
        """Check certificate expiry and whether it is self-signed."""
        try:
            context = ssl.create_default_context()

            with socket.create_connection((hostname, port), timeout=10) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert = ssock.getpeercert()

            # BUG FIX: certificate timestamps are GMT, but the old code parsed
            # them into naive datetimes and compared against local
            # datetime.now(), skewing the expiry countdown by the host's UTC
            # offset.  ssl.cert_time_to_seconds understands the notAfter
            # format; compare everything in UTC.
            not_after = datetime.fromtimestamp(
                ssl.cert_time_to_seconds(cert['notAfter']), tz=timezone.utc)
            days_until_expiry = (not_after - datetime.now(timezone.utc)).days

            if days_until_expiry < 0:
                self.add_finding(
                    title='Expired SSL Certificate',
                    severity='CRITICAL',
                    category='SSL/TLS',
                    description='The SSL certificate has expired.',
                    evidence=f'Expiry date: {not_after}',
                    recommendation='Renew the SSL certificate immediately.',
                    references=['https://www.ssl.com/faqs/what-happens-when-your-ssl-certificate-expires/']
                )
            elif days_until_expiry < 30:
                self.add_finding(
                    title='SSL Certificate Expiring Soon',
                    severity='MEDIUM',
                    category='SSL/TLS',
                    description=f'The SSL certificate will expire in {days_until_expiry} days.',
                    evidence=f'Expiry date: {not_after}',
                    recommendation='Renew the SSL certificate soon to avoid service disruption.'
                )

            # A certificate whose issuer equals its subject is self-signed.
            issuer = dict(x[0] for x in cert['issuer'])
            subject = dict(x[0] for x in cert['subject'])

            if issuer == subject:
                self.add_finding(
                    title='Self-Signed SSL Certificate',
                    severity='HIGH',
                    category='SSL/TLS',
                    description='The server is using a self-signed certificate.',
                    evidence=f'Issuer: {issuer.get("commonName", "Unknown")}',
                    recommendation='Use a certificate from a trusted Certificate Authority (CA).',
                    references=['https://owasp.org/www-community/vulnerabilities/Using_Self-Signed_Certificate']
                )

        except ssl.SSLError as e:
            self.add_finding(
                title='SSL Certificate Validation Failed',
                severity='HIGH',
                category='SSL/TLS',
                description='SSL certificate validation failed.',
                evidence=str(e),
                recommendation='Ensure the certificate is valid and properly configured.'
            )
        except Exception as e:
            self.log(f"Certificate check error: {str(e)}")

    def _check_protocol_versions(self, hostname: str, port: int):
        """Report the negotiated protocol version if it is weak or pre-1.3."""
        try:
            context = ssl.create_default_context()
            with socket.create_connection((hostname, port), timeout=10) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    version = ssock.version()
                    self.log(f"Protocol version in use: {version}")

                    if version in self.WEAK_PROTOCOLS:
                        self.add_finding(
                            title=f'Weak TLS Protocol: {version}',
                            severity='HIGH',
                            category='SSL/TLS',
                            description=f'The server is using the deprecated {version} protocol.',
                            evidence=f'Protocol: {version}',
                            recommendation='Disable SSLv2, SSLv3, TLSv1.0, and TLSv1.1. Use TLS 1.2 or TLS 1.3.',
                            references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/09-Testing_for_Weak_Cryptography/01-Testing_for_Weak_Transport_Layer_Security']
                        )
                    elif version == 'TLSv1.2':
                        # NOTE(review): this only proves TLS 1.3 was not
                        # negotiated on this connection, not that the server
                        # lacks it entirely.
                        self.add_finding(
                            title='TLS 1.3 Not Supported',
                            severity='LOW',
                            category='SSL/TLS',
                            description='The server does not appear to support TLS 1.3.',
                            evidence=f'Current protocol: {version}',
                            recommendation='Consider enabling TLS 1.3 for improved security and performance.'
                        )

        except Exception as e:
            self.log(f"Protocol check error: {str(e)}")

    def _check_cipher_suites(self, hostname: str, port: int):
        """Report the negotiated cipher suite if it matches a weak pattern."""
        try:
            context = ssl.create_default_context()
            with socket.create_connection((hostname, port), timeout=10) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cipher = ssock.cipher()
                    if cipher:
                        cipher_name = cipher[0]
                        self.log(f"Cipher in use: {cipher_name}")

                        for weak_cipher in self.WEAK_CIPHERS:
                            if weak_cipher.upper() in cipher_name.upper():
                                self.add_finding(
                                    title='Weak Cipher Suite Detected',
                                    severity='HIGH',
                                    category='SSL/TLS',
                                    description=f'The server is using a weak cipher suite: {cipher_name}',
                                    evidence=f'Cipher: {cipher_name}',
                                    recommendation='Disable weak cipher suites and use strong ciphers (AES-GCM, ChaCha20).',
                                    references=['https://wiki.mozilla.org/Security/Server_Side_TLS']
                                )
                                break

        except Exception as e:
            self.log(f"Cipher check error: {str(e)}")
"""Technology Fingerprinting Scanner"""

import requests
import re
from bs4 import BeautifulSoup, Comment
from typing import List, Dict, Any
from modules.base_scanner import BaseScanner


class TechFingerprint(BaseScanner):
    """Scanner for technology stack fingerprinting."""

    # Matches a dotted version number such as "5.4" or "2.10.3".
    _VERSION_RE = re.compile(r'\d+\.\d+')

    # Technology signatures: body substrings, response headers and meta tags
    # that betray a framework/server.  An empty header value means "header
    # presence alone is enough".
    TECH_SIGNATURES = {
        'WordPress': {
            'patterns': ['/wp-content/', '/wp-includes/', 'wp-json'],
            'headers': {},
            'meta': ['generator']
        },
        'Drupal': {
            'patterns': ['/sites/default/', 'Drupal.settings', '/misc/drupal.js'],
            'headers': {'X-Generator': 'Drupal'},
            'meta': []
        },
        'Joomla': {
            'patterns': ['/components/com_', '/media/jui/', 'Joomla!'],
            'headers': {},
            'meta': ['generator']
        },
        'Django': {
            'patterns': ['csrfmiddlewaretoken'],
            'headers': {},
            'meta': []
        },
        'Laravel': {
            'patterns': ['laravel_session', '_token'],
            'headers': {},
            'meta': []
        },
        'React': {
            'patterns': ['react.js', 'react.min.js', 'react-dom'],
            'headers': {},
            'meta': []
        },
        'Angular': {
            'patterns': ['ng-version', 'angular.js', 'angular.min.js'],
            'headers': {},
            'meta': []
        },
        'Vue.js': {
            'patterns': ['vue.js', 'vue.min.js', 'data-v-'],
            'headers': {},
            'meta': []
        },
        'jQuery': {
            'patterns': ['jquery.js', 'jquery.min.js', 'jQuery'],
            'headers': {},
            'meta': []
        },
        'Bootstrap': {
            'patterns': ['bootstrap.css', 'bootstrap.min.css', 'bootstrap.js'],
            'headers': {},
            'meta': []
        },
        'ASP.NET': {
            'patterns': ['__VIEWSTATE', '__EVENTVALIDATION', 'aspnet'],
            'headers': {'X-AspNet-Version': '', 'X-AspNetMvc-Version': ''},
            'meta': []
        },
        'PHP': {
            'patterns': ['.php', 'PHPSESSID'],
            'headers': {'X-Powered-By': 'PHP'},
            'meta': []
        },
        'Apache': {
            'patterns': [],
            'headers': {'Server': 'Apache'},
            'meta': []
        },
        'Nginx': {
            'patterns': [],
            'headers': {'Server': 'nginx'},
            'meta': []
        },
        'IIS': {
            'patterns': [],
            'headers': {'Server': 'Microsoft-IIS'},
            'meta': []
        },
        'Cloudflare': {
            'patterns': [],
            'headers': {'Server': 'cloudflare', 'CF-Ray': ''},
            'meta': []
        },
    }

    def scan(self) -> List[Dict[str, Any]]:
        """Fingerprint the technology stack and check for version leaks."""
        try:
            self.log(f"Fingerprinting technology stack at {self.target_url}")

            response = requests.get(
                self.target_url,
                timeout=15,
                headers={'User-Agent': 'WebSecAudit/1.0 Security Scanner'}
            )

            soup = BeautifulSoup(response.text, 'html.parser')

            detected_techs = []

            for tech_name, signatures in self.TECH_SIGNATURES.items():
                detected = False

                # 1) Response headers (empty expected value = presence check).
                for header_name, header_value in signatures['headers'].items():
                    if header_name in response.headers:
                        if not header_value or header_value.lower() in response.headers[header_name].lower():
                            detected = True
                            self.log(f"Detected {tech_name} via header: {header_name}")
                            break

                # 2) Body substrings.
                if not detected:
                    for pattern in signatures['patterns']:
                        if pattern in response.text:
                            detected = True
                            self.log(f"Detected {tech_name} via pattern: {pattern}")
                            break

                # 3) Meta tags, e.g. <meta name="generator" content="WordPress 6.x">.
                if not detected and signatures['meta']:
                    meta_tags = soup.find_all('meta')
                    for meta in meta_tags:
                        if meta.get('name', '').lower() in signatures['meta']:
                            content = meta.get('content', '').lower()
                            if tech_name.lower() in content:
                                detected = True
                                self.log(f"Detected {tech_name} via meta tag")
                                break

                if detected:
                    detected_techs.append(tech_name)

            if detected_techs:
                self.add_finding(
                    title='Technology Stack Detected',
                    severity='INFO',
                    category='Fingerprinting',
                    description='The following technologies were detected on the website.',
                    evidence=f'Detected: {", ".join(detected_techs)}',
                    recommendation='This is informational. However, exposing technology details can help attackers identify known vulnerabilities.',
                    references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/01-Information_Gathering/08-Fingerprint_Web_Application_Framework']
                )

            self._check_version_disclosure(response, soup)

        except requests.exceptions.RequestException as e:
            self.log(f"Request error during fingerprinting: {str(e)}")

        return self.findings

    def _check_version_disclosure(self, response, soup: BeautifulSoup):
        """Check headers, meta tags and HTML comments for version numbers."""
        version_disclosures = []

        version_headers = ['Server', 'X-Powered-By', 'X-AspNet-Version', 'X-AspNetMvc-Version', 'X-Generator']

        for header in version_headers:
            if header in response.headers:
                value = response.headers[header]
                if self._VERSION_RE.search(value):
                    version_disclosures.append(f'{header}: {value}')

        meta_gen = soup.find('meta', attrs={'name': 'generator'})
        if meta_gen:
            content = meta_gen.get('content', '')
            if self._VERSION_RE.search(content):
                version_disclosures.append(f'Meta generator: {content}')

        # BUG FIX: the old code matched *any* text node containing "version",
        # so ordinary page copy was reported as a "Comment".  Restrict the
        # search to real HTML comments.
        comments = [c for c in soup.find_all(string=lambda t: isinstance(t, Comment))
                    if 'version' in c.lower()]
        for comment in comments[:5]:  # Limit to first 5
            if re.search(r'version\s*[:=]?\s*\d+\.\d+', comment.lower()):
                preview = comment[:80].strip()
                version_disclosures.append(f'Comment: {preview}...')

        if version_disclosures:
            self.add_finding(
                title='Software Version Information Disclosed',
                severity='LOW',
                category='Fingerprinting',
                description='Software version information is exposed.',
                evidence='\n'.join(version_disclosures[:5]),  # Limit evidence
                recommendation='Remove or obfuscate version information from headers, meta tags, and comments.',
                references=['https://owasp.org/www-project-web-security-testing-guide/latest/4-Web_Application_Security_Testing/01-Information_Gathering/02-Fingerprint_Web_Server']
            )
"""Cross-Site Scripting (XSS) Scanner"""

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse, parse_qs
from typing import List, Dict, Any
from modules.base_scanner import BaseScanner


class XSSScanner(BaseScanner):
    """Probes forms, URL parameters and inline scripts for XSS indicators."""

    # XSS test payloads (safe, non-harmful payloads for testing)
    XSS_PAYLOADS = [
        '<script>alert("XSS")</script>',
        '<img src=x onerror=alert("XSS")>',
        '"><script>alert("XSS")</script>',
        "'><script>alert('XSS')</script>",
        '<svg/onload=alert("XSS")>',
        'javascript:alert("XSS")',
        '<iframe src="javascript:alert(\'XSS\')">',
    ]

    # User-Agent header sent with every probe request.
    _HEADERS = {'User-Agent': 'WebSecAudit/1.0 Security Scanner'}

    # JavaScript sinks whose combination with user input suggests DOM XSS.
    _DOM_SINKS = [
        'document.write(',
        'innerHTML',
        'outerHTML',
        'document.location',
        'document.URL',
        'document.documentURI',
        'location.href',
        'eval(',
        'setTimeout(',
        'setInterval(',
    ]

    # Attacker-controllable DOM sources.
    _DOM_SOURCES = ['location', 'document.URL', 'document.referrer', 'window.name']

    def scan(self) -> List[Dict[str, Any]]:
        """Run reflected, URL-reflection and DOM-based XSS checks."""
        try:
            self.log(f"Scanning for XSS vulnerabilities at {self.target_url}")

            page = requests.get(self.target_url, timeout=15, headers=self._HEADERS)
            soup = BeautifulSoup(page.text, 'html.parser')

            forms = soup.find_all('form')
            self.log(f"Found {len(forms)} forms to test")

            # Cap at five forms to keep the request volume reasonable.
            for number, form in enumerate(forms[:5], start=1):
                self._test_form_xss(form, number)

            self._check_url_reflection(page)
            self._check_dom_xss_indicators(soup)

        except requests.exceptions.RequestException as e:
            self.log(f"Request error during XSS scan: {str(e)}")

        return self.findings

    def _test_form_xss(self, form, form_number: int):
        """Submit a script payload through one form and look for reflection."""
        try:
            action_url = urljoin(self.target_url, form.get('action', ''))
            method = form.get('method', 'get').lower()

            self.log(f"Testing form #{form_number}: {method.upper()} {action_url}")

            fields = form.find_all(['input', 'textarea'])
            if not fields:
                return

            # Fill every named, non-button, non-hidden field with the payload.
            probe = self.XSS_PAYLOADS[0]
            form_data = {
                field.get('name'): probe
                for field in fields
                if field.get('name')
                and field.get('type', 'text') not in ('submit', 'button', 'hidden')
            }
            if not form_data:
                return

            try:
                if method == 'post':
                    reply = requests.post(action_url, data=form_data, timeout=10,
                                          headers=self._HEADERS)
                else:
                    reply = requests.get(action_url, params=form_data, timeout=10,
                                         headers=self._HEADERS)

                # An unencoded echo of the payload means the input path is unsafe.
                if probe in reply.text:
                    self.add_finding(
                        title=f'Potential XSS Vulnerability in Form #{form_number}',
                        severity='HIGH',
                        category='Cross-Site Scripting',
                        description='User input is reflected in the response without proper sanitization.',
                        evidence=f'Form action: {action_url}, Method: {method.upper()}, Payload reflected in response',
                        recommendation='Implement proper input validation and output encoding. Use Content-Security-Policy headers.',
                        references=[
                            'https://owasp.org/www-community/attacks/xss/',
                            'https://cheatsheetseries.owasp.org/cheatsheets/Cross_Site_Scripting_Prevention_Cheat_Sheet.html'
                        ]
                    )
                    self.log(f"XSS payload reflected in form #{form_number}")

            except requests.exceptions.RequestException as e:
                self.log(f"Error testing form #{form_number}: {str(e)}")

        except Exception as e:
            self.log(f"Error processing form #{form_number}: {str(e)}")

    def _check_url_reflection(self, response):
        """Report query parameters echoed back verbatim with markup characters."""
        query = parse_qs(urlparse(self.target_url).query)
        if not query:
            return

        self.log("Checking for URL parameter reflection")

        for name, values in query.items():
            for value in values:
                if not value or value not in response.text:
                    continue
                # Only flag values carrying markup-significant characters.
                if any(ch in value for ch in ('<', '>', '"')):
                    self.add_finding(
                        title='URL Parameter Reflected Without Encoding',
                        severity='HIGH',
                        category='Cross-Site Scripting',
                        description=f'URL parameter "{name}" is reflected in the response without proper encoding.',
                        evidence=f'Parameter: {name}={value}',
                        recommendation='Implement proper output encoding for all reflected user input.',
                        references=['https://owasp.org/www-community/attacks/xss/']
                    )

    def _check_dom_xss_indicators(self, soup: BeautifulSoup):
        """Flag inline scripts that combine DOM sources with dangerous sinks."""
        for script in soup.find_all('script'):
            code = script.string
            if not code:
                continue

            for sink in self._DOM_SINKS:
                if sink not in code:
                    continue
                if any(source in code for source in self._DOM_SOURCES):
                    self.add_finding(
                        title='Potential DOM-Based XSS',
                        severity='MEDIUM',
                        category='Cross-Site Scripting',
                        description=f'JavaScript code uses potentially dangerous function "{sink}" with user-controllable input.',
                        evidence=f'Pattern detected: {sink}',
                        recommendation='Avoid using dangerous JavaScript functions with user input. Use safe APIs and sanitize input.',
                        references=[
                            'https://owasp.org/www-community/attacks/DOM_Based_XSS',
                            'https://cheatsheetseries.owasp.org/cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html'
                        ]
                    )
                break  # Only report once per script
in script_content: + # Check if it's using user-controlled input + if any(source in script_content for source in + ['location', 'document.URL', 'document.referrer', 'window.name']): + self.add_finding( + title='Potential DOM-Based XSS', + severity='MEDIUM', + category='Cross-Site Scripting', + description=f'JavaScript code uses potentially dangerous function "{pattern}" with user-controllable input.', + evidence=f'Pattern detected: {pattern}', + recommendation='Avoid using dangerous JavaScript functions with user input. Use safe APIs and sanitize input.', + references=[ + 'https://owasp.org/www-community/attacks/DOM_Based_XSS', + 'https://cheatsheetseries.owasp.org/cheatsheets/DOM_based_XSS_Prevention_Cheat_Sheet.html' + ] + ) + break # Only report once per script diff --git a/websec-audit/requirements.txt b/websec-audit/requirements.txt new file mode 100644 index 00000000..f5e5eeae --- /dev/null +++ b/websec-audit/requirements.txt @@ -0,0 +1,2 @@ +requests>=2.31.0 +beautifulsoup4>=4.12.0 diff --git a/websec-audit/scanner.py b/websec-audit/scanner.py new file mode 100644 index 00000000..b6584e38 --- /dev/null +++ b/websec-audit/scanner.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 +""" +WebSecAudit - Comprehensive Website Security Scanner +A state-of-the-art security scanning tool for web applications +""" + +import argparse +import json +import sys +import time +from datetime import datetime +from urllib.parse import urlparse +from typing import List, Dict, Any + +from modules.ssl_scanner import SSLScanner +from modules.headers_scanner import HeadersScanner +from modules.xss_scanner import XSSScanner +from modules.injection_scanner import InjectionScanner +from modules.info_disclosure import InfoDisclosureScanner +from modules.cors_scanner import CORSScanner +from modules.auth_scanner import AuthScanner +from modules.tech_fingerprint import TechFingerprint +from utils.reporter import Reporter +from utils.colors import Colors + + +class WebSecAudit: + """Main 
security scanner orchestrator""" + + def __init__(self, target_url: str, verbose: bool = False): + self.target_url = self._normalize_url(target_url) + self.verbose = verbose + self.findings = [] + self.scan_start_time = None + self.scan_end_time = None + + # Initialize all scanners + self.scanners = [ + TechFingerprint(self.target_url, verbose), + SSLScanner(self.target_url, verbose), + HeadersScanner(self.target_url, verbose), + CORSScanner(self.target_url, verbose), + XSSScanner(self.target_url, verbose), + InjectionScanner(self.target_url, verbose), + InfoDisclosureScanner(self.target_url, verbose), + AuthScanner(self.target_url, verbose), + ] + + def _normalize_url(self, url: str) -> str: + """Normalize URL to include protocol""" + if not url.startswith(('http://', 'https://')): + url = 'https://' + url + return url.rstrip('/') + + def print_banner(self): + """Print tool banner""" + banner = f""" +{Colors.CYAN}╔══════════════════════════════════════════════════════════════╗ +║ ║ +║ WebSecAudit - Security Scanner v1.0 ║ +║ Comprehensive Website Vulnerability Scanner ║ +║ ║ +╚══════════════════════════════════════════════════════════════╝{Colors.RESET} + +{Colors.YELLOW}Target:{Colors.RESET} {self.target_url} +{Colors.YELLOW}Scan Started:{Colors.RESET} {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} + +{Colors.GREEN}[*] Initializing security scanners...{Colors.RESET} +""" + print(banner) + + def run_scan(self): + """Execute all security scans""" + self.scan_start_time = time.time() + self.print_banner() + + # Run each scanner + for scanner in self.scanners: + scanner_name = scanner.__class__.__name__ + print(f"\n{Colors.BLUE}[+] Running {scanner_name}...{Colors.RESET}") + + try: + results = scanner.scan() + if results: + self.findings.extend(results) + print(f"{Colors.GREEN} ✓ {scanner_name} completed - Found {len(results)} issues{Colors.RESET}") + else: + print(f"{Colors.GREEN} ✓ {scanner_name} completed - No issues found{Colors.RESET}") + except Exception as e: 
+ error_msg = f"Error running {scanner_name}: {str(e)}" + print(f"{Colors.RED} ✗ {error_msg}{Colors.RESET}") + if self.verbose: + import traceback + traceback.print_exc() + + self.scan_end_time = time.time() + + # Generate report + self._generate_report() + + def _generate_report(self): + """Generate and display the security report""" + scan_duration = self.scan_end_time - self.scan_start_time + + reporter = Reporter( + target_url=self.target_url, + findings=self.findings, + scan_duration=scan_duration + ) + + # Display console report + reporter.print_summary() + reporter.print_detailed_findings() + + # Save reports + timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') + parsed_url = urlparse(self.target_url) + hostname = parsed_url.netloc.replace(':', '_') + + # HTML Report + html_file = f"report_{hostname}_{timestamp}.html" + reporter.generate_html_report(html_file) + print(f"\n{Colors.GREEN}[✓] HTML Report saved: {html_file}{Colors.RESET}") + + # JSON Report + json_file = f"report_{hostname}_{timestamp}.json" + reporter.generate_json_report(json_file) + print(f"{Colors.GREEN}[✓] JSON Report saved: {json_file}{Colors.RESET}") + + +def main(): + parser = argparse.ArgumentParser( + description='WebSecAudit - Comprehensive Website Security Scanner', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python scanner.py https://example.com + python scanner.py https://2rbc-ai.com -v + python scanner.py example.com (automatically adds https://) + +Security Categories Tested: + ✓ SSL/TLS Configuration + ✓ HTTP Security Headers + ✓ Cross-Site Scripting (XSS) + ✓ SQL Injection + ✓ Information Disclosure + ✓ CORS Misconfiguration + ✓ CSRF Protection + ✓ Authentication & Session Security + ✓ Technology Fingerprinting + ✓ Common Vulnerabilities (OWASP Top 10) + +Note: Only scan websites you own or have permission to test! 
+ """ + ) + + parser.add_argument('url', help='Target URL to scan (e.g., https://example.com)') + parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose output') + parser.add_argument('--version', action='version', version='WebSecAudit 1.0') + + args = parser.parse_args() + + try: + scanner = WebSecAudit(args.url, args.verbose) + scanner.run_scan() + except KeyboardInterrupt: + print(f"\n{Colors.YELLOW}[!] Scan interrupted by user{Colors.RESET}") + sys.exit(0) + except Exception as e: + print(f"\n{Colors.RED}[!] Fatal error: {str(e)}{Colors.RESET}") + if args.verbose: + import traceback + traceback.print_exc() + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/websec-audit/utils/__init__.py b/websec-audit/utils/__init__.py new file mode 100644 index 00000000..981db0e0 --- /dev/null +++ b/websec-audit/utils/__init__.py @@ -0,0 +1 @@ +# Utils module for WebSecAudit diff --git a/websec-audit/utils/colors.py b/websec-audit/utils/colors.py new file mode 100644 index 00000000..3a7c580c --- /dev/null +++ b/websec-audit/utils/colors.py @@ -0,0 +1,37 @@ +"""Color constants for terminal output""" + + +class Colors: + """ANSI color codes for terminal output""" + RESET = '\033[0m' + BOLD = '\033[1m' + + # Regular colors + BLACK = '\033[30m' + RED = '\033[31m' + GREEN = '\033[32m' + YELLOW = '\033[33m' + BLUE = '\033[34m' + MAGENTA = '\033[35m' + CYAN = '\033[36m' + WHITE = '\033[37m' + + # Bright colors + BRIGHT_RED = '\033[91m' + BRIGHT_GREEN = '\033[92m' + BRIGHT_YELLOW = '\033[93m' + BRIGHT_BLUE = '\033[94m' + BRIGHT_MAGENTA = '\033[95m' + BRIGHT_CYAN = '\033[96m' + + @staticmethod + def severity_color(severity: str) -> str: + """Get color based on severity level""" + severity_map = { + 'CRITICAL': Colors.BRIGHT_RED + Colors.BOLD, + 'HIGH': Colors.RED, + 'MEDIUM': Colors.YELLOW, + 'LOW': Colors.BLUE, + 'INFO': Colors.CYAN + } + return severity_map.get(severity.upper(), Colors.WHITE) diff --git a/websec-audit/utils/reporter.py 
b/websec-audit/utils/reporter.py new file mode 100644 index 00000000..945485f1 --- /dev/null +++ b/websec-audit/utils/reporter.py @@ -0,0 +1,413 @@ +"""Report generation module""" + +import json +from datetime import datetime +from typing import List, Dict, Any +from collections import Counter +from utils.colors import Colors + + +class Reporter: + """Generate security scan reports in multiple formats""" + + SEVERITY_ORDER = ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW', 'INFO'] + + def __init__(self, target_url: str, findings: List[Dict], scan_duration: float): + self.target_url = target_url + self.findings = sorted(findings, key=lambda x: self.SEVERITY_ORDER.index(x.get('severity', 'INFO'))) + self.scan_duration = scan_duration + self.severity_counts = Counter(f['severity'] for f in findings) + + def print_summary(self): + """Print scan summary to console""" + total_issues = len(self.findings) + + print(f"\n\n{Colors.CYAN}{'='*70}") + print(f" SCAN SUMMARY") + print(f"{'='*70}{Colors.RESET}\n") + + print(f"{Colors.YELLOW}Target URL:{Colors.RESET} {self.target_url}") + print(f"{Colors.YELLOW}Scan Duration:{Colors.RESET} {self.scan_duration:.2f} seconds") + print(f"{Colors.YELLOW}Total Issues:{Colors.RESET} {total_issues}\n") + + print(f"{Colors.YELLOW}Issues by Severity:{Colors.RESET}") + for severity in self.SEVERITY_ORDER: + count = self.severity_counts.get(severity, 0) + if count > 0: + color = Colors.severity_color(severity) + print(f" {color}● {severity:8s}{Colors.RESET} : {count}") + + print(f"\n{Colors.CYAN}{'='*70}{Colors.RESET}") + + def print_detailed_findings(self): + """Print detailed findings to console""" + if not self.findings: + print(f"\n{Colors.GREEN}[✓] No security issues found!{Colors.RESET}") + return + + print(f"\n\n{Colors.CYAN}{'='*70}") + print(f" DETAILED FINDINGS") + print(f"{'='*70}{Colors.RESET}\n") + + current_severity = None + issue_number = 1 + + for finding in self.findings: + severity = finding.get('severity', 'INFO') + + # Print severity 
header when it changes + if severity != current_severity: + current_severity = severity + color = Colors.severity_color(severity) + print(f"\n{color}{'─'*70}") + print(f" {severity} SEVERITY ISSUES") + print(f"{'─'*70}{Colors.RESET}\n") + + # Print issue details + color = Colors.severity_color(severity) + print(f"{color}[{issue_number}] {finding['title']}{Colors.RESET}") + print(f" Category: {finding.get('category', 'Unknown')}") + print(f" Severity: {color}{severity}{Colors.RESET}") + + if 'description' in finding: + print(f" Description: {finding['description']}") + + if 'evidence' in finding: + print(f" Evidence: {finding['evidence']}") + + if 'recommendation' in finding: + print(f" {Colors.GREEN}Recommendation:{Colors.RESET} {finding['recommendation']}") + + if 'references' in finding and finding['references']: + print(f" References:") + for ref in finding['references']: + print(f" - {ref}") + + print() + issue_number += 1 + + print(f"{Colors.CYAN}{'='*70}{Colors.RESET}\n") + + def generate_json_report(self, filename: str): + """Generate JSON report""" + report_data = { + 'scan_info': { + 'target_url': self.target_url, + 'scan_date': datetime.now().isoformat(), + 'scan_duration_seconds': round(self.scan_duration, 2), + 'total_issues': len(self.findings) + }, + 'summary': { + 'severity_counts': dict(self.severity_counts) + }, + 'findings': self.findings + } + + with open(filename, 'w') as f: + json.dump(report_data, f, indent=2) + + def generate_html_report(self, filename: str): + """Generate HTML report""" + html_content = f"""<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="UTF-8"> + <meta name="viewport" content="width=device-width, initial-scale=1.0"> + <title>Security Scan Report - {self.target_url} + + + +
+
+

🔒 WebSecAudit Security Report

+

Comprehensive Security Assessment

+
+ +
+

Scan Summary

+
+
+

Target URL

+
{self.target_url}
+
+
+

Scan Date

+
{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
+
+
+

Duration

+
{self.scan_duration:.2f}s
+
+
+

Total Issues

+
{len(self.findings)}
+
+
+ +
+ {self._generate_severity_badges_html()} +
+
+ +
+ {self._generate_findings_html()} +
+ + +
+ +""" + + with open(filename, 'w') as f: + f.write(html_content) + + def _generate_severity_badges_html(self) -> str: + """Generate HTML for severity badges""" + badges_html = [] + for severity in self.SEVERITY_ORDER: + count = self.severity_counts.get(severity, 0) + if count > 0: + severity_lower = severity.lower() + badges_html.append( + f'
' + f'{severity} {count}' + f'
' + ) + return '\n'.join(badges_html) + + def _generate_findings_html(self) -> str: + """Generate HTML for findings""" + if not self.findings: + return '

✓ No security issues found!

' + + findings_html = [] + current_severity = None + + for finding in self.findings: + severity = finding.get('severity', 'INFO') + + # Add section header when severity changes + if severity != current_severity: + if current_severity is not None: + findings_html.append('') # Close previous section + + current_severity = severity + severity_lower = severity.lower() + bg_colors = { + 'CRITICAL': '#dc3545', + 'HIGH': '#fd7e14', + 'MEDIUM': '#ffc107', + 'LOW': '#17a2b8', + 'INFO': '#6c757d' + } + findings_html.append( + f'
' + f'

' + f'{severity} SEVERITY ISSUES

' + ) + + # Add finding card + severity_lower = severity.lower() + card_html = f'
' + card_html += f'
{finding["title"]}
' + + card_html += '
' + card_html += f'Category: {finding.get("category", "Unknown")}' + card_html += f'Severity: {severity}' + card_html += '
' + + if 'description' in finding: + card_html += f'
{finding["description"]}
' + + if 'evidence' in finding: + card_html += f'
Evidence:
{finding["evidence"]}
' + + if 'recommendation' in finding: + card_html += f'
Recommendation:
{finding["recommendation"]}
' + + if 'references' in finding and finding['references']: + card_html += '
References:
' + for ref in finding['references']: + card_html += f'{ref}' + card_html += '
' + + card_html += '
' + findings_html.append(card_html) + + findings_html.append('
') # Close last section + return '\n'.join(findings_html)