Commit e5d1c1b

Python Scrapy Added
0 parents  commit e5d1c1b

11 files changed: +405 -0 lines changed

.gitignore

Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
*.log
*.pot
*.pyc
__pycache__/
local_settings.py
db.sqlite3
db.sqlite3-journal
media
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.manifest
*.spec
pip-log.txt
pip-delete-this-directory.txt
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
*.mo
instance/
.webassets-cache
.scrapy
docs/_build/
.pybuilder/
target/
.ipynb_checkpoints
profile_default/
ipython_config.py
.pdm.toml
__pypackages__/
celerybeat-schedule
celerybeat.pid
*.sage.py
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
.spyderproject
.spyproject
.ropeproject
/site
.mypy_cache/
.dmypy.json
dmypy.json
.pyre/
.pytype/
cython_debug/

example/__init__.py

Whitespace-only changes.

example/items.py

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class ExampleItem(scrapy.Item):
    restaurant_name = scrapy.Field()
    address = scrapy.Field()
    country = scrapy.Field()
    state = scrapy.Field()
    city = scrapy.Field()
    phone_number = scrapy.Field()
    kitchen_type = scrapy.Field()
    rating = scrapy.Field()
    comment_title = scrapy.Field()
    comment_content = scrapy.Field()

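Because ExampleItem subclasses scrapy.Item, it behaves like a dict restricted to the declared fields. A quick hypothetical check follows (the sample values are invented for illustration, not scraped data):

from example.items import ExampleItem

item = ExampleItem(restaurant_name="Sample Bistro", city="Dubai")
item["rating"] = "4.5"
print(dict(item))  # {'restaurant_name': 'Sample Bistro', 'city': 'Dubai', 'rating': '4.5'}
# Keys that were not declared as Fields are rejected:
# item["website"] = "https://example.com"  -> raises KeyError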
example/middlewares.py

Lines changed: 100 additions & 0 deletions
@@ -0,0 +1,100 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class ExampleSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    async def process_start(self, start):
        # Called with an async iterator over the spider start() method or the
        # matching method of an earlier spider middleware.
        async for item_or_request in start:
            yield item_or_request

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class ExampleDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)

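Both classes above are the stock Scrapy template and do nothing until enabled in settings. As a hedged illustration of what the downloader hook is for, here is a minimal hypothetical middleware (not part of this commit; the class name and User-Agent value are invented) that injects a default User-Agent into every outgoing request. It would only take effect after being added to DOWNLOADER_MIDDLEWARES in settings.py.

# Hypothetical sketch, not included in the commit.
class HeaderInjectionMiddleware:
    def process_request(self, request, spider):
        # Only set the header if the request does not already carry one.
        request.headers.setdefault(
            "User-Agent",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        )
        # Returning None lets Scrapy continue processing the request normally.
        return None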
example/pipelines.py

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class ExamplePipeline:
    def process_item(self, item, spider):
        return item

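ExamplePipeline is the unmodified template and simply passes each item through. As a rough, hypothetical sketch of where it could go (not part of this commit; CleanRestaurantPipeline and its behaviour are invented for illustration), a cleaning step might trim whitespace and drop records with no restaurant name. It would still need to be registered under ITEM_PIPELINES in settings.py.

# Hypothetical sketch, not included in the commit.
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class CleanRestaurantPipeline:
    def process_item(self, item, spider):
        adapter = ItemAdapter(item)
        # Discard records that never yielded a restaurant name.
        if not adapter.get("restaurant_name"):
            raise DropItem("missing restaurant_name")
        # Strip stray whitespace from every string field.
        for field, value in adapter.items():
            if isinstance(value, str):
                adapter[field] = value.strip()
        return item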
example/settings.py

Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
# Scrapy settings for example project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "example"

SPIDER_MODULES = ["example.spiders"]
NEWSPIDER_MODULE = "example.spiders"

ADDONS = {}


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Concurrency and throttling settings
#CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 1
DOWNLOAD_DELAY = 1

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    "example.middlewares.ExampleSpiderMiddleware": 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    "example.middlewares.ExampleDownloaderMiddleware": 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    "example.pipelines.ExamplePipeline": 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
FEED_EXPORT_ENCODING = "utf-8"

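The throttling values above (CONCURRENT_REQUESTS_PER_DOMAIN = 1, DOWNLOAD_DELAY = 1) apply project-wide. As a hypothetical alternative not used in this commit (the spider name below is invented), the same limits could instead be scoped to a single spider through Scrapy's custom_settings attribute, which takes precedence over settings.py:

# Hypothetical sketch, not included in the commit.
import scrapy


class PoliteSpider(scrapy.Spider):
    name = "polite_example"  # invented name for illustration
    custom_settings = {
        "DOWNLOAD_DELAY": 1,
        "CONCURRENT_REQUESTS_PER_DOMAIN": 1,
        "AUTOTHROTTLE_ENABLED": True,
    }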
example/spiders/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

example/spiders/example_new.py

Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
import scrapy
from example.items import ExampleItem
from scrapy import Request
from urllib.parse import urljoin


class ExampleNewSpider(scrapy.Spider):
    name = "example_new"
    allowed_domains = ["www.tripadvisor.com"]
    start_urls = [
        "https://www.tripadvisor.com/Restaurants-g295424-Dubai_Emirate_of_Dubai.html"
    ]

    def start_requests(self):
        # Browser-like headers to reduce the chance of the request being blocked.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
        }
        for url in self.start_urls:
            yield scrapy.Request(url=url, headers=headers, callback=self.parse)

    def parse(self, response):
        # Follow each restaurant detail link on the listing page.
        for href in response.xpath('//div[@class="title"]/a[@class="property_title"]/@href'):
            url = urljoin(response.url, href.extract())
            yield Request(url, callback=self.parse_page)

        # Follow pagination to the next listing page, if any.
        next_page = response.xpath('//a[contains(@class, "next")]/@href')
        if next_page:
            next_page_url = urljoin(response.url, next_page.extract_first())
            yield Request(next_page_url, callback=self.parse)

    def parse_page(self, response):
        item = ExampleItem()

        item["restaurant_name"] = response.xpath('//h1/text()').get()
        item["address"] = response.xpath('//span[contains(@class,"detailLinkText")]/text()').get()
        item["city"] = response.xpath('//span[@class="extended-address"]/text()').get()
        item["state"] = response.xpath('//span[@class="locality"]/text()').get()
        item["country"] = response.xpath('//span[@class="country-name"]/text()').get()
        item["phone_number"] = response.xpath('//span[contains(@class,"mobile")]/text()').get()
        item["kitchen_type"] = response.xpath('//div[@class="header_links"]/a/text()').get()
        item["rating"] = response.xpath('//span[contains(@class,"ZDEqb")]/text()').get()

        item["comment_title"] = response.xpath('(//div[contains(@class,"review-container")]//span[@class="noQuotes"]/text())[1]').get()
        item["comment_content"] = response.xpath('(//div[contains(@class,"review-container")]//p[@class="partial_entry"]/text())[1]').get()

        yield item

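The spider itself writes nothing to disk; example/spiders/restaurant.json is committed empty. One hedged way to produce that file is Scrapy's feed export, either from the command line (scrapy crawl example_new -O restaurant.json) or with a small, hypothetical run script (not part of this commit) placed at the project root so that the project settings are picked up:

# Hypothetical run script, not included in the commit; assumes the usual
# project layout so get_project_settings() finds example/settings.py.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

settings = get_project_settings()
# Route scraped items into a JSON feed, overwriting any previous run.
settings.set("FEEDS", {"restaurant.json": {"format": "json", "overwrite": True}})

process = CrawlerProcess(settings)
process.crawl("example_new")  # spider is looked up by its name attribute
process.start()               # blocks until the crawl finishes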
example/spiders/restaurant.json

Whitespace-only changes.
