python刷csdn访问量

import requests
import re
import time
payload = ""
# Request headers sent with every CSDN request.
# NOTE(review): the cookie below is a personal, long-stale session copied from
# the original article — replace it with your own (or drop it) before running.
headers = {
  "accept": "*/*",
  "accept-encoding": "gzip, deflate, br",
  "accept-language": "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
  "cookie": "l=aurqcpuigwqdnqv7wvafcor1olrrqw7h; isg=bhp6mnb79chqyxpveirtexyyykncg8yewjglqorvci3ddxqxbltofubgwworz3ad; thw=cn; cna=vsjqeraypn0catrxfeiahcz8; t=0eed37629fe7ef5ec0b8ecb6cd3a3577; tracknick=tb830309_22; _cc_=utassssmfa%3d%3d; tg=0; ubn=p; ucn=unzbyun; x=e%3d1%26p%3d*%26s%3d0%26c%3d0%26f%3d0%26g%3d0%26t%3d0%26__ll%3d-1%26_ato%3d0; miid=981798063989731689; hng=cn%7czh-cn%7ccny%7c156; um=0712f33290ab8a6d01951c8161a2df2cdc7c5278664ee3e02f8f6195b27229b88a7470fd7b89f7facd43ad3e795c914cc2a8beb1fa88729a3a74257d8ee4fbbc; enc=1ueyoen0l7fkx0ypu7l6buipkt%2bdsxe0equm26jcsmdi1ltyazbjqcmj5dku3p0qfgwjn8qqyxc6ojugh%2fhfra%3d%3d; ali_ab=58.215.20.66.1516409089271.6; mt=ci%3d-1_1; cookie2=104f8fc9c13eb24c296768a50cabdd6e; _tb_token_=ee7e1e1e7dbe7; v=0",
  # Fixed: the original user-agent contained a stray backtick ("x64;` rv:47.0"),
  # an artifact introduced when the article was republished.
  "user-agent": "mozilla/5.0 (windows nt 10.0; win64; x64; rv:47.0) gecko/20100101 firefox/47.0"
}
# 获得文章列表urls
def geturls(url):
  """Fetch *url* and return the unique CSDN article ("details") links on it.

  Relies on the module-level ``payload`` and ``headers``. Returns a list of
  URLs in first-seen order with duplicates removed.
  """
  # Fetch the page (module-level payload/headers supply the cookie + UA).
  resp = requests.request("get", url, data=payload, headers=headers)
  # Decode with the encoding detected from the body, not the HTTP header,
  # so Chinese pages render correctly.
  resp.encoding = resp.apparent_encoding
  html_source = resp.text
  # Pull every https:// link ending in a digit out of the raw HTML.
  candidates = re.findall(r"https://[^>\";\']*\d", html_source)
  # Keep only article pages ("details" in the URL). dict.fromkeys gives an
  # O(n), order-preserving de-duplication — the original used an O(n^2)
  # list-membership check and shadowed the `url` parameter in its loop.
  return list(dict.fromkeys(c for c in candidates if 'details' in c))

# Collect the article list once, then request every article in an endless loop.
urls = geturls("主页地址")  # placeholder — replace with your blog's home-page URL
# Fixed: `while true:` raised NameError; Python's boolean literal is `True`.
while True:
  for url in urls:
    # Each GET registers one page view on the article.
    requests.request("get", url, data=payload, headers=headers)
    print(url, "ok")
    time.sleep(3)  # throttle between articles
  time.sleep(3)  # pause before the next full pass

python刷博客园访问量 

import requests
import time
import re
from lxml import etree
# Request headers for the cnblogs requests: a referer plus a desktop Chrome UA.
headers = {
  'referer': 'https://i.cnblogs.com/posts',
  "user-agent": "mozilla/5.0 (windows nt 10.0; wow64) applewebkit/537.36 (khtml, like gecko) chrome/88.0.4324.96 safari/537.36",
}

# Optional HTTP proxy mapping; 'xxxx:xxx' is a placeholder the user must
# replace with a real proxy address before use.
proxy = {'http': 'xxxx:xxx'}

def get_urls(url):
  """Fetch one cnblogs listing page and return the post-id fragments on it.

  Uses the module-level ``headers``. Adjust the hard-coded blog path to your
  own blog address.
  """
  ret = requests.get(url=url, headers=headers)
  ret.encoding = 'utf-8'
  # NOTE(review): the original pattern also required ' rel="external nofollow" '
  # after the href — that attribute appears to be injected by the site that
  # republished this article, not by cnblogs itself, so findall matched
  # nothing. Match the href alone (with the '.' properly escaped).
  return re.findall(r'href="https://www.cnblogs.com/shiguanggege/p/(.*?)\.html"', ret.text)

# Walk the first nine listing pages and collect every post URL exactly once,
# preserving first-seen order. Adjust the blog paths to your own address.
urls = []
listing_pages = [f'https://www.cnblogs.com/shiguanggege/default.html?page={n}' for n in range(1, 10)]
for page in listing_pages:
  for post_id in get_urls(page):
    full_url = f'https://www.cnblogs.com/shiguanggege/p/{post_id}.html'
    if full_url not in urls:
      urls.append(full_url)
# Fixed: `while true:` raised NameError; Python's boolean literal is `True`.
while True:
  for link in urls:
    # Send the same headers as get_urls so the hits carry a real user-agent.
    # NOTE(review): `proxy` still holds the 'xxxx:xxx' placeholder — requests
    # will fail until it is replaced or `proxies=` is dropped.
    requests.get(url=link, headers=headers, proxies=proxy)
    print(link, 'ok')
    time.sleep(1)  # throttle between posts
  time.sleep(3)  # pause before the next full pass

到此这篇关于使用python刷访问量的示例代码的文章就介绍到这了,更多相关python 刷访问量内容请搜索www.887551.com以前的文章或继续浏览下面的相关文章希望大家以后多多支持www.887551.com!