forked from rindishkrishna/web-scrape
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: testing.py
More file actions
35 lines (28 loc) · 917 Bytes
/
testing.py
File metadata and controls
35 lines (28 loc) · 917 Bytes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
"""Scrape Gmail addresses from Google results for LinkedIn profiles.

Builds a Google "dork" query from a user-supplied keyword and occupation,
fetches up to 5 result pages (100 results each), and prints every email
address found in the page text.
"""
import requests
import re
from bs4 import BeautifulSoup as Soup
from fake_useragent import UserAgent

# Compiled once instead of per page: plain-text email addresses with
# lowercase local and domain parts.
EMAIL_RE = re.compile(r"[a-z0-9.\-+_]+@[a-z0-9.\-+_]+\.[a-z]+")

# NOTE(review): these are the placeholder example proxies from the requests
# docs — every request will fail until they are replaced with real proxies
# (or the proxies argument is dropped).
PROXIES = {
    "http": "10.10.1.10:3128",
    "https": "10.10.1.10:1080",
}


def _build_search_url(keyword, occupation, start):
    """Return the Google search URL for one 100-result page.

    Fix: the original concatenated '"occupation"' directly against
    'AND "%40gmail.com"' with no separator, corrupting the query.
    """
    return (
        'http://www.google.com/search?q=~+"' + keyword + '"+"' + occupation
        + '"+AND "%40gmail.com" -intitle:"profiles" -inurl:"dir/+"'
        + '+site:www.linkedin.com/in/+OR+site:www.linkedin.com/pub/'
        + '&num=100&start=' + str(start)
    )


def _scrape_page(url, headers):
    """Fetch *url* and return the set of email addresses in its visible text.

    Raises requests.RequestException on network/HTTP failure.
    """
    response = requests.get(url, headers=headers, proxies=PROXIES, timeout=10)
    response.raise_for_status()
    page = Soup(response.text, "html.parser")
    return set(EMAIL_RE.findall(page.text))


def main():
    occupation = input("enter occupation")
    keyword = input("enter keyword")
    headers = {"User-Agent": UserAgent().random}

    # Fix: accumulate emails across pages — the original rebound `emails`
    # on every iteration, keeping only the last page's matches.
    found = set()
    for start in range(0, 500, 100):
        url = _build_search_url(keyword, occupation, start)
        print(url)
        try:
            found |= _scrape_page(url, headers)
        except requests.RequestException as exc:
            # Best effort: a failed page (bad proxy, rate limit, timeout)
            # should not abort the whole scrape.
            print("request failed:", exc)
        print(found)


if __name__ == "__main__":
    main()