Get the first paragraph from a Wikipedia article

Published 2019-07-26 07:55

Question:

I'm using following code to get the first paragraph from a Wikipedia article. Here is the result of my code. I need only this paragraph. Is it possible? Or is there any better alternative?

'''Papori''' ({{lang-as|'''পাপৰী'''}}) is an [[Assamese language]] feature film directed by [[Jahnu Barua]]. The film stars Gopi Desai, [[Biju Phukan]], Sushil Goswami, Chetana Das and Dulal Roy. The film was released in 1986.

Here is my code:

#!/usr/bin/python
# Fetch the wikitext of the lead section of a Wikipedia article via the
# MediaWiki API and print every <rev> element found in the XML response.
# NOTE(review): Python 2 code. urllib.FancyURLopener, the private
# urllib._urlopener hook, and BeautifulSoup 3 ("from BeautifulSoup
# import ...") are all long-deprecated.
from lxml import etree
import urllib
from BeautifulSoup import BeautifulSoup

# Custom opener whose only purpose is to send a distinctive
# User-Agent string ("WikiDownloader") with the request.
class AppURLopener(urllib.FancyURLopener):
    version = "WikiDownloader"

urllib._urlopener = AppURLopener()  # make urllib.urlopen() use our User-Agent
# MediaWiki API query: revision content (rvprop=content) of section 0
# (the lead section) of the article "papori", returned as XML.
query = 'http://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvprop=content&format=xml&titles=papori&rvsection=0'
#data = { 'catname':'', 'wpDownload':1, 'pages':"\n".join(pages)}
#data = urllib.urlencode(data)
f = urllib.urlopen(query)  # perform the HTTP GET
s = f.read()               # raw XML response body
#doc = etree.parse(f)
#print(s)
soup = BeautifulSoup(s)              # lenient parse of the XML
secondPTag = soup.findAll('rev')     # all <rev> elements (revision wikitext)
print secondPTag

Code updated: can anyone help me remove the text between {{ }}? It is not needed. Thanks.

Answer 1:

To remove everything from {{ to '''Papori''':

# Delete everything from the first "{{" up to the bold '''Papori'''
# title, keeping the title itself (captured as group 1).
import re

_strip_lead = re.compile(r"""{{.*?}}\s*('''Papori''')""", re.DOTALL)
cleaned = _strip_lead.sub(r"\1", rev_data)
print(cleaned)

To remove everything from the first "{{" to matching "}}":

# Remove the first "{{...}}" template from rev_data, correctly handling
# templates nested inside it, by repeatedly deleting innermost "{{...}}"
# pairs until the leading one disappears.
prefix, sep, rest = rev_data.partition("{{")
if sep: # found the first "{{"
    rest = sep + rest # put it back
    while rest.startswith("{{"):
        # remove nested "{{expr}}" one by one until there is none
        # (?:[^{]|(?<!{){) matches any char except "{", or a lone "{"
        # not preceded by another "{" — so the innermost "{{...}}"
        # never swallows the opening braces of an enclosing template
        rest, n = re.subn(r"{{(?:[^{]|(?<!{){)*?}}", "", rest, 1)
        if n == 0: 
            break # the first "{{" is unmatched; can't remove it
    else: # deletion is successful (while ended without break)
        rev_data = prefix + rest
print(rev_data)

To remove everything from the first "{{" to matching "}}" without regex:

# Remove the first "{{...}}" template from rev_data without regex:
# scan character by character, tracking template nesting depth, until
# the "}}" matching the first "{{" is found, then splice it out.
prefix, sep, rest = rev_data.partition("{{")
if sep: # found the first "{{"
    depth = 1    # already inside the first "{{"
    prevc = None # previous char; reset to None right after a counted pair
    for i, c in enumerate(rest):
        if c == "{" and  prevc == c:  # found "{{"
            depth += 1
            prevc = None # match "{{{ " only once
        elif c == "}" and prevc == c: # found "}}"
            depth -= 1
            if depth == 0: # found matching "}}"
                rev_data = prefix + rest[i+1:] # after matching "}}"
                break
            prevc = None # match "}}} " only once
        else:
            prevc = c
print(rev_data)

Full example

#!/usr/bin/env python
# Full example: download the lead-section wikitext of the "papori"
# article via the MediaWiki API, strip the first "{{...}}" template
# (handling nested templates), and print the result.
# NOTE(review): Python 2 code — urllib2 and the print statement do
# not exist in Python 3.
import urllib, urllib2
import xml.etree.cElementTree as etree

# download & parse xml, find rev data
params = dict(action="query", prop="revisions", rvprop="content",
              format="xml", titles="papori", rvsection=0)
request = urllib2.Request(
    "http://en.wikipedia.org/w/api.php?" + urllib.urlencode(params), 
    headers={"User-Agent": "WikiDownloader/1.0",
             "Referer": "http://stackoverflow.com/q/7937855"})
tree = etree.parse(urllib2.urlopen(request))
rev_data = tree.findtext('.//rev')  # wikitext of the first <rev> element

# remove everything from the first "{{" to matching "}}"
prefix, sep, rest = rev_data.partition("{{")
if sep: # found the first "{{"
    depth = 1    # already inside the first "{{"
    prevc = None # previous char; reset to None right after a counted pair
    for i, c in enumerate(rest):
        if c == "{" and  prevc == c:  # found "{{"
            depth += 1
            prevc = None # match "{{{ " only once
        elif c == "}" and prevc == c: # found "}}"
            depth -= 1
            if depth == 0: # found matching "}}"
                rev_data = prefix + rest[i+1:] # after matching "}}"
                break
            prevc = None # match "}}} " only once
        else:
            prevc = c
print rev_data

Output

'''Papori''' ({{lang-as|'''পাপৰী'''}}) is an [[Assamese
language]] feature film directed by [[Jahnu Barua]]. The film
stars Gopi Desai, [[Biju Phukan]], Sushil Goswami, Chetana Das
and Dulal Roy. The film was released in 1986.<ref name="ab">{{cite
web|url=http://www.chaosmag.in/barua.html|title=Papori – 1986 –
Assamese film|publisher=Chaosmag|accessdate=4 February
2010}}</ref>


Answer 2:

Yes, it is possible. You can use an HTML parser like HTMLParser, but I recommend Beautiful Soup.

Use regular expressions to remove sub-strings, like this:

>>> import re
>>> email = "tony@tiremove_thisger.net"
>>> m = re.search("remove_this", email)
>>> email[:m.start()] + email[m.end():]
'tony@tiger.net'