
Pholcus vs Ruia

Pholcus: Apache-2.0 license · 7,554 GitHub stars · latest release v1.3.4 (Feb 15 2020)
Ruia: Apache-2.0 license · 1,737 GitHub stars · 477 downloads/month · latest release 0.8.5

Pholcus is a minimalistic web crawler library written in the Go programming language. It is designed to be flexible and easy to use, and it supports concurrent, distributed, and modular crawling.

Note that Pholcus is documented and maintained in Chinese and has no English resources other than the source code itself.

Ruia is an async web scraping micro-framework written with asyncio and aiohttp that aims to make crawling URLs as convenient as possible.

Ruia is inspired by Scrapy; however, instead of Twisted, it is built entirely on asyncio and aiohttp.
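Because everything runs on the standard asyncio event loop, a Ruia spider can be awaited alongside other coroutines instead of taking over the process the way a Twisted reactor does. A minimal sketch, assuming Ruia's async_start classmethod for launching a crawl inside an existing loop (the spider class and URL are placeholders):

import asyncio

from ruia import Spider


class DemoSpider(Spider):
    # placeholder target; any fetchable URL works
    start_urls = ["https://www.example.com"]

    async def parse(self, response):
        # a real spider would extract fields here
        self.logger.info(response.url)


async def main():
    # other asyncio tasks can run in the same loop as the crawl
    await DemoSpider.async_start()


if __name__ == "__main__":
    asyncio.run(main())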

It also supports features like cookies, custom headers, and proxies, which makes it useful for complex web scraping tasks.
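Since every request is ultimately made through aiohttp, those options can be declared once on the spider. A minimal sketch, assuming aiohttp_kwargs is forwarded to aiohttp's request call as in the Ruia example further below (all header, cookie, and proxy values are placeholders):

from ruia import Spider


class ConfiguredSpider(Spider):
    start_urls = ["https://www.example.com"]  # placeholder target
    # forwarded to aiohttp for every request; the values below are placeholders
    aiohttp_kwargs = {
        "headers": {"User-Agent": "my-crawler/1.0"},
        "cookies": {"session": "example-session-id"},
        "proxy": "http://0.0.0.0:1087",
    }

    async def parse(self, response):
        self.logger.info(response.url)


if __name__ == "__main__":
    ConfiguredSpider.start()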

Example Use

Pholcus (Go):

package main

import (
    "fmt"

    "github.com/PuerkitoBio/goquery"
    "github.com/henrylee2cn/pholcus/exec"
    _ "github.com/henrylee2cn/pholcus/spider/standard" // load the standard spider
)

func main() {
    // create spider object
    spider := exec.NewSpider(exec.NewTask("demo", "https://www.example.com"))
    // register the callback for URLs matched by a regex pattern; ".*" matches any route:
    spider.AddRule(".*", "Parse")
    // Start spider
    spider.Start()
}

// define the callback here
func Parse(self *exec.Spider, doc *goquery.Document) {
    // callbacks receive a goquery reference to the parsed HTML document
    // and can query it with CSS selectors:
    title := doc.Find("title").Text()
    fmt.Println(title)
}

Ruia (Python):

#!/usr/bin/env python
"""
 Target: https://news.ycombinator.com/
 pip install aiofiles
"""
import aiofiles

from ruia import AttrField, Item, Spider, TextField


class HackerNewsItem(Item):
    target_item = TextField(css_select="tr.athing")
    title = TextField(css_select="a.storylink")
    url = AttrField(css_select="a.storylink", attr="href")

    async def clean_title(self, value):
        return value.strip()


class HackerNewsSpider(Spider):
    start_urls = [
        "https://news.ycombinator.com/news?p=1",
        "https://news.ycombinator.com/news?p=2",
    ]
    concurrency = 10
    # aiohttp_kwargs = {"proxy": "http://0.0.0.0:1087"}

    async def parse(self, response):
        async for item in HackerNewsItem.get_items(html=await response.text()):
            yield item

    async def process_item(self, item: HackerNewsItem):
        async with aiofiles.open("./hacker_news.txt", "a") as f:
            self.logger.info(item)
            await f.write(str(item.title) + "\n")


if __name__ == "__main__":
    HackerNewsSpider.start(middleware=None)
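Running this script appends each post title from the first two pages of Hacker News to hacker_news.txt, fetching with up to 10 concurrent requests; the commented-out aiohttp_kwargs line shows where a proxy would be configured.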

