Unresolved: the crawl gets paused by the site's anti-scraping mechanism at around page 150 (a possible mitigation is sketched after Crawl_data.py below).

Crawl_data.py

import requests
import pandas as pd
from pprint import pprint
from lxml import etree
import time
import warnings
warnings.filterwarnings("ignore")

for i in range(1, 2001):
    print("Crawling page " + str(i))
    url_pre = "https://search.51job.com/list/000000,000000,0000,00,9,99,%25E6%2595%25B0%25E6%258D%25AE,2,"
    url_end = ".html?"
    url = url_pre + str(i) + url_end
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    }
    web = requests.get(url, headers=headers)
    web.encoding = "gbk"
    dom = etree.HTML(web.text)
    # 1. Job title
    job_name = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//p/span/a[@target="_blank"]/@title')
    # 2. Company name
    company_name = dom.xpath('//div[@class="dw_table"]/div[@class="el"]/span[@class="t2"]/a[@target="_blank"]/@title')
    # 3. Work location
    address = dom.xpath('//div[@class="dw_table"]/div[@class="el"]/span[@class="t3"]/text()')
    # 4. Salary
    salary_mid = dom.xpath('//div[@class="dw_table"]/div[@class="el"]/span[@class="t4"]')
    salary = [s.text for s in salary_mid]
    # 5. Posting date
    release_time = dom.xpath('//div[@class="dw_table"]/div[@class="el"]/span[@class="t5"]/text()')
    # 6. URLs of the second-level (detail) pages
    deep_url = dom.xpath('//div[@class="dw_table"]/div[@class="el"]//p/span/a[@target="_blank"]/@href')
    RandomAll = []
    JobDescribe = []
    CompanyType = []
    CompanySize = []
    Industry = []
    for j in range(len(deep_url)):
        web_test = requests.get(deep_url[j], headers=headers)
        web_test.encoding = "gbk"
        dom_test = etree.HTML(web_test.text)
        # 7. Experience and education, kept together in one field (random_all) and cleaned later
        random_all = dom_test.xpath('//div[@class="tHeader tHjob"]//div[@class="cn"]/p[@class="msg ltype"]/text()')
        # 8. Job description
        job_describe = dom_test.xpath('//div[@class="tBorderTop_box"]//div[@class="bmsg job_msg inbox"]/p/text()')
        # 9. Company type
        company_type = dom_test.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[1]/@title')
        # 10. Company size (headcount)
        company_size = dom_test.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[2]/@title')
        # 11. Industry of the company
        industry = dom_test.xpath('//div[@class="tCompany_sidebar"]//div[@class="com_tag"]/p[3]/@title')
        # print(industry, company_size, company_type, job_describe, random_all)
        # Append the fields above to their respective lists
        RandomAll.append(random_all)
        JobDescribe.append(job_describe)
        CompanyType.append(company_type)
        CompanySize.append(company_size)
        Industry.append(industry)
        # Sleep between detail-page requests to reduce the chance of being blocked
        time.sleep(1)
    # Since many pages are crawled, save after every page instead of writing everything out at the end
    df = pd.DataFrame()
    df["岗位名称"] = job_name
    df["公司名称"] = company_name
    df["工作地点"] = address
    df["工资"] = salary
    df["发布日期"] = release_time
    df["经验、学历"] = RandomAll
    df["公司类型"] = CompanyType
    df["公司规模"] = CompanySize
    df["所属行业"] = Industry
    df["岗位描述"] = JobDescribe
    # Writing out may occasionally fail, so wrap it in exception handling
    try:
        df.to_csv("job_info.csv", mode="a+", header=None, index=None, encoding="gbk")
    except Exception:
        print("Failed to write this page's data")
    time.sleep(1)
print("Crawling finished!")
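
The pause around page 150 mentioned at the top remains unresolved. One common mitigation, sketched below under the assumption that the block is triggered by the very regular request pattern, is to randomize the delay between pages and retry a failed request a few times with an increasing wait. The function name, retry count, and sleep ranges are illustrative choices, not part of the original script.

import random
import time
import requests

def fetch_with_retry(url, headers, retries=3, base_wait=5):
    # Fetch a page; back off and retry when the site starts refusing requests.
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=headers, timeout=10)
            resp.encoding = "gbk"
            if resp.status_code == 200:
                return resp
        except requests.RequestException:
            pass
        # Wait longer after each failed attempt (simple linear backoff).
        time.sleep(base_wait * (attempt + 1))
    return None

# Inside the page loop, a randomized pause makes the request pattern less regular:
# time.sleep(random.uniform(1, 3))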

Analysis_data.py

import re
import numpy as np
import pandas as pd
import jieba
import openpyxl
import warnings
warnings.filterwarnings("ignore")

df = pd.read_csv(r"job_info.csv", engine="python", header=None, encoding='gbk')
# Assign a row index to the DataFrame
df.index = range(len(df))
# Assign column names to the DataFrame
df.columns = ["岗位名","公司名","工作地点","工资","发布日期","经验与学历","公司类型","公司规模","行业","工作描述"]
# Number of records before deduplication
print("Records before deduplication", df.shape)
# Drop duplicate records
df.drop_duplicates(subset=["公司名","岗位名"], inplace=True)
# Number of records after deduplication
print("Records after deduplication", df.shape)
df["岗位名"].value_counts()
df["岗位名"] = df["岗位名"].apply(lambda x: x.lower())
# job_info.shape
# Keep only the postings whose title relates to one of the seven target keywords below
target_job = ['算法', '开发', '分析', '工程师', '数据', '运营', '运维']
index = [df["岗位名"].str.count(i) for i in target_job]
index = np.array(index).sum(axis=0) > 0
job_info = df[index]
job_info.shape
job_list = ['数据分析', "数据统计", "数据专员", '数据挖掘', '算法',
            '大数据', '开发工程师', '运营', '软件工程', '前端开发',
            '深度学习', 'ai', '数据库', '数据库', '数据产品',
            '客服', 'java', '.net', 'andrio', '人工智能', 'c++',
            '数据管理', "测试", "运维"]
job_list = np.array(job_list)

def rename(x=None, job_list=job_list):
    # Map a raw job title to the first keyword in job_list that it contains
    index = [i in x for i in job_list]
    if sum(index) > 0:
        return job_list[index][0]
    else:
        return x

job_info["岗位名"] = job_info["岗位名"].apply(rename)
# Inspect the distinct job titles and how often each occurs
job_info["岗位名"].value_counts()
# Merge 数据统计 and 数据专员 into 数据分析
job_info["岗位名"] = job_info["岗位名"].apply(lambda x: re.sub("数据专员", "数据分析", x))
job_info["岗位名"] = job_info["岗位名"].apply(lambda x: re.sub("数据统计", "数据分析", x))
# Keep only salaries expressed per 月/年 in units of 千 or 万
job_info["工资"].str[-1].value_counts()
job_info["工资"].str[-3].value_counts()
index1 = job_info["工资"].str[-1].isin(["年", "月"])
index2 = job_info["工资"].str[-3].isin(["万", "千"])
job_info = job_info[index1 & index2]

def get_money_max_min(x):
    # Convert a salary string into [min, max] in yuan per month
    try:
        if x[-3] == "万":
            z = [float(i) * 10000 for i in re.findall(r"[0-9]+\.?[0-9]*", x)]
        elif x[-3] == "千":
            z = [float(i) * 1000 for i in re.findall(r"[0-9]+\.?[0-9]*", x)]
        if x[-1] == "年":
            z = [i / 12 for i in z]
        return z
    except Exception:
        return x
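# For example (sample strings assumed to follow 51job's usual salary format):
#   get_money_max_min("1-1.5万/月") -> [10000.0, 15000.0]
#   get_money_max_min("10-15万/年") -> [8333.33, 12500.0] (approximately; annual figures are divided by 12)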

salary = job_info["工资"].apply(get_money_max_min)
job_info["最低工资"] = salary.str[0]
job_info["最高工资"] = salary.str[1]
job_info["工资水平"] = job_info[["最低工资", "最高工资"]].mean(axis=1)
# job_info["工作地点"].value_counts()
address_list = ['北京', '上海', '广州', '深圳', '杭州', '苏州', '长沙',
                '武汉', '天津', '成都', '西安', '东莞', '合肥', '佛山',
                '宁波', '南京', '重庆', '长春', '郑州', '常州', '福州',
                '沈阳', '济南', '宁波', '厦门', '贵州', '珠海', '青岛',
                '中山', '大连', '昆山', "惠州", "哈尔滨", "昆明", "南昌", "无锡"]
address_list = np.array(address_list)

def rename(x=None, address_list=address_list):
    # Map a raw location string to the first city in address_list that it contains
    index = [i in x for i in address_list]
    if sum(index) > 0:
        return address_list[index][0]
    else:
        return x

job_info["工作地点"] = job_info["工作地点"].apply(rename)
# Company type: drop values too short to be meaningful, then strip the "['...']" wrapper
job_info.loc[job_info["公司类型"].apply(lambda x: len(x) < 6), "公司类型"] = np.nan
job_info["公司类型"] = job_info["公司类型"].str[2:-2]
# job_info["行业"].value_counts()
job_info["行业"] = job_info["行业"].apply(lambda x: re.sub(",", "/", x))
job_info.loc[job_info["行业"].apply(lambda x: len(x) < 6), "行业"] = np.nan
job_info["行业"] = job_info["行业"].str[2:-2].str.split("/").str[0]
# Education field
# re.findall returns every match of "本科|大专|应届生|在校生|硕士" in "经验与学历" as a list,
# so each cell of the new "学历" column is a list of strings.
# The pattern should ideally be a raw string (r"..."); otherwise sequences such as \n would be
# interpreted as escape characters rather than a backslash followed by "n".
job_info["学历"] = job_info["经验与学历"].apply(lambda x: re.findall("本科|大专|应届生|在校生|硕士", x))

def func(x):
    # Pick a single education level out of the list returned by re.findall
    if len(x) == 0:
        return np.nan
    elif len(x) == 1 or len(x) == 2:
        return x[0]
    else:
        return x[2]

job_info["学历"] = job_info["学历"].apply(func)
# Work-experience field
job_info["经验"] = job_info["经验与学历"].apply(lambda x: re.findall("1年|2年|3-4年|5-7年|8-9年|10年以上", x))

def func2(x):
    if len(x) == 0:
        return np.nan
    else:
        return x[0]

job_info["经验"] = job_info["经验"].apply(func2)
# Job description: strip the "['...']" wrapper, lower-case, segment with jieba and remove stopwords
with open(r"stopword.txt", "r", encoding='UTF-8') as f:
    stopword = f.read()
stopword = stopword.split()
stopword = stopword + ["任职", "职位", " "]
job_info["工作描述"] = job_info["工作描述"].str[2:-2].apply(lambda x: x.lower()).apply(lambda x: "".join(x)) \
    .apply(jieba.lcut).apply(lambda x: [i for i in x if i not in stopword])
job_info.loc[job_info["工作描述"].apply(lambda x: len(x) < 6), "工作描述"] = np.nan
# job_info["公司规模"].value_counts()

def func(x):
    # Normalize the company-size labels
    if x == "['少于50人']":
        return "<50"
    elif x == "['50-150人']":
        return "50-150"
    elif x == "['150-500人']":
        return '150-500'
    elif x == "['500-1000人']":
        return '500-1000'
    elif x == "['1000-5000人']":
        return '1000-5000'
    elif x == "['5000-10000人']":
        return '5000-10000'
    elif x == "['10000人以上']":
        return ">10000"
    else:
        return np.nan

job_info["公司规模"] = job_info["公司规模"].apply(func)
feature = ["公司名", "岗位名", "工作地点", "工资水平", "发布日期", "学历", "经验", "公司类型", "公司规模", "行业"]
final_df = job_info[feature]
final_df.to_excel(r"词云图.xlsx", encoding="gbk", index=None)
job_info.to_excel(r"new_job_info1.xlsx", encoding="gbk", index=None)
df = pd.read_excel(r"new_job_info1.xlsx", encoding="gbk")
df

def get_word_cloud(data=None, job_name=None):
    # Collect the word frequencies of the job descriptions for one job title
    words = []
    describe = data['工作描述'][data['岗位名'] == job_name].str[1:-1]
    describe.dropna(inplace=True)
    [words.extend(i.split(',')) for i in describe]
    words = pd.Series(words)
    word_fre = words.value_counts()
    return word_fre

zz = ['数据分析', '算法', '大数据', '开发工程师', '运营', '软件工程', '运维', '数据库', 'java', "测试"]
for i in zz:
    # Drop the most frequent entry, keep the next 100 words, and append them to bb.csv
    word_fre = get_word_cloud(data=df, job_name='{}'.format(i))
    word_fre = word_fre[1:].reset_index()[:100]
    word_fre["岗位名"] = pd.Series("{}".format(i), index=range(len(word_fre)))
    word_fre.to_csv(r"bb.csv", mode='a', index=False, header=None, encoding="gbk")
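
bb.csv only stores the per-job word frequencies; rendering the actual word-cloud images is a separate step that the scripts above do not include. Below is a minimal sketch of one way to do it with the third-party wordcloud package, assuming it is installed (pip install wordcloud) and that a CJK-capable font file is available; the font path and output filename are placeholders.

from wordcloud import WordCloud  # assumed extra dependency: pip install wordcloud

# Reuse the frequencies computed by get_word_cloud() above for one job title.
word_fre = get_word_cloud(data=df, job_name='数据分析')

wc = WordCloud(font_path="simhei.ttf",    # placeholder: point this at a Chinese-capable font on your machine
               background_color="white",
               width=800, height=600)
wc.generate_from_frequencies(word_fre[1:101].to_dict())
wc.to_file("wordcloud_数据分析.png")       # placeholder output filename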