Reference: https://www.zhihu.com/question/30626103
Since this example is fairly old, you can no longer fetch the complete page from Baidu this way.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;

public class Main {
    public static void main(String[] args) {
        String url = "https://www.baidu.com/";
        String result = "";
        // Buffered character input stream for reading the response
        BufferedReader in = null;
        try {
            // Turn the String into a URL object
            URL realUrl = new URL(url);
            // Open a connection to that URL
            URLConnection connection = realUrl.openConnection();
            // Establish the actual connection
            connection.connect();
            // Wrap the connection's input stream in a BufferedReader to read the response;
            // the charset is given explicitly instead of relying on the platform default
            in = new BufferedReader(new InputStreamReader(
                    connection.getInputStream(), StandardCharsets.UTF_8));
            // Temporarily holds each fetched line
            String line;
            while ((line = in.readLine()) != null) {
                // Append every fetched line to result
                result += line + "\n";
            }
        } catch (Exception e) {
            System.out.println("Exception while sending the GET request! " + e);
            e.printStackTrace();
        } finally {
            try {
                if (in != null) {
                    in.close();
                }
            } catch (Exception e2) {
                // ignore errors while closing the stream
            }
        }
        System.out.println(result);
    }
}
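As a minimal alternative sketch, assuming Java 11 or later is available: the same GET request can be issued with the standard java.net.http.HttpClient, which reads the body and manages the underlying stream for you. The class name HttpClientGet and the timeout value are illustrative choices, not part of the original example.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;

public class HttpClientGet {
    public static void main(String[] args) throws Exception {
        // Build a client with a connect timeout and redirect handling
        HttpClient client = HttpClient.newBuilder()
                .connectTimeout(Duration.ofSeconds(10))
                .followRedirects(HttpClient.Redirect.NORMAL)
                .build();

        // Build a plain GET request for the same URL
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://www.baidu.com/"))
                .GET()
                .build();

        // Send the request synchronously and read the body as a String
        HttpResponse<String> response =
                client.send(request, HttpResponse.BodyHandlers.ofString());

        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}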