I want to extract all the article content from a website using any web crawling/scraping method.
The problem is that I can get the content of a single page, but not of the pages it links to. Can anyone suggest a proper solution? Here is my current code:
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import javax.swing.text.EditorKit;
import javax.swing.text.html.HTMLDocument;
import javax.swing.text.html.HTMLEditorKit;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

public class Main3 {
    public static void main(String[] argv) throws Exception {
        // Text extracted by the Swing parser is saved to ram.txt as UTF-8.
        final OutputStreamWriter writer =
                new OutputStreamWriter(new FileOutputStream("ram.txt"), "UTF-8");

        // First attempt: Swing's HTML parser; handleText fires for every text node.
        HTMLDocument doc = new HTMLDocument() {
            public HTMLEditorKit.ParserCallback getReader(int pos) {
                return new HTMLEditorKit.ParserCallback() {
                    public void handleText(char[] data, int pos) {
                        System.out.println(data);
                        try {
                            writer.write(data);
                            writer.write('\n');
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }
                };
            }
        };

        URL url = new URI("http://tamilblog.ishafoundation.org/").toURL();
        URLConnection conn = url.openConnection();
        Reader rd = new InputStreamReader(conn.getInputStream());
        EditorKit kit = new HTMLEditorKit();
        kit.read(rd, doc, 0);
        writer.close();

        // Second attempt: jsoup gives me the text and the links of the same single page.
        try {
            Document docs = Jsoup.connect("http://tamilblog.ishafoundation.org/").get();
            Elements links = docs.select("a[href]");
            Elements elements = docs.select("*");
            System.out.println("Total Links : " + links.size());
            for (Element element : elements) {
                System.out.println(element.ownText());
            }
            for (Element link : links) {
                // "abs:href" resolves the href against the page URL, so relative
                // links come out as absolute ones.
                System.out.println(" * a: link : " + link.attr("abs:href"));
                System.out.println(" * a: text : " + link.text());
                System.out.println(" * a: alt  : " + link.attr("alt"));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
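To make it clearer what I mean by following links, below is a rough sketch of the kind of crawl I have in mind, not code I have verified: SiteCrawler, MAX_PAGES and the visited set are names I made up, and the same-site check is just a prefix test on the blog's URL.

import java.util.HashSet;
import java.util.Set;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class SiteCrawler {

    private static final int MAX_PAGES = 50;              // stop after this many pages
    private final Set<String> visited = new HashSet<>();  // URLs already fetched, so we never loop

    public void crawl(String url) {
        if (visited.size() >= MAX_PAGES || visited.contains(url)) {
            return;
        }
        visited.add(url);
        try {
            Document doc = Jsoup.connect(url).get();

            // Print the page text; the real goal is to keep only the article content.
            System.out.println("=== " + url + " ===");
            System.out.println(doc.body().text());

            // Follow every link that stays on the same site.
            for (Element link : doc.select("a[href]")) {
                String next = link.attr("abs:href"); // absolute URL
                if (next.startsWith("http://tamilblog.ishafoundation.org")) {
                    crawl(next);
                }
            }
        } catch (Exception e) {
            System.err.println("Failed to fetch " + url + ": " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        new SiteCrawler().crawl("http://tamilblog.ishafoundation.org/");
    }
}

Even if something like this is the right direction, I would still like to know the proper way to do it, e.g. how to limit crawl depth, avoid revisiting pages, and pick out only the article content rather than all the page text.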