web上から、アナリストのレーティング変更情報を取得するプログラム

カフェモカです☕

気まぐれに1つプログラムをアップロードしました☕

今回は、https://www.benzinga.com/からアナリストのレーティング変更情報(アップグレード、ダウングレード、目標株価変更など)を取得するプログラムです☕
ここでは過去1週間で変更があった銘柄についてピックして取得するようにしています☕

ここに載っている情報を取得してきます

少し古いコードなので現在もうまく動くかは分かりませんが、コードを読めば処理内容はおおよそ把握できると思います☕

プログラムは👇

取得した結果は👇

コードは👇です☕

# -*- coding: utf-8 -*-

#eps,revenueを取得
 
import os
import time
import schedule
import datetime
#import openpyxl
import requests
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import numpy as np
import xlwings as xw
import glob
from concurrent.futures import ThreadPoolExecutor
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import subprocess
from selenium.webdriver.chrome import service as fs
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pytz


# Module-level scratch arrays shared by bot() and ratings().
# The script indexes from 1 everywhere, so slot 0 of each array is unused.
symbol = np.full(5000, 0, dtype=object)           # ticker symbols collected from finviz
ddd = np.full(10, 0, dtype=object)                # recent dates as 'MM/DD/YYYY' strings
nd = np.full(5000, 0, dtype=int)                  # number of rating rows found per symbol
data = np.full((5000, 20, 10), 0, dtype=object)   # rating rows: [symbol][row][field]

def ratings(nsym):
  """Scrape analyst-rating changes from benzinga.com for symbols 1..nsym.

  Reads the module-level ``symbol`` array and fills the module-level
  ``ddd`` (last 7 dates), ``nd`` (row count per symbol) and ``data``
  (rating rows per symbol) arrays as side effects.

  Each recorded row holds 5 consecutive lines of the page text starting
  at a matching date line (assumed layout: date, firm, action, ratings,
  price target -- TODO confirm against the live page).
  """
  # Pre-compute the last 7 calendar days (Tokyo time) as MM/DD/YYYY strings;
  # only rating changes dated inside this window are recorded.
  for j in range(1, 8):
    tmp = datetime.datetime.now(pytz.timezone('Asia/Tokyo')) - datetime.timedelta(days=int(j))
    ddd[j] = tmp.strftime('%m/%d/%Y')
    print(ddd[j])

  driver = None
  n = 1  # next multiple-of-100 checkpoint for clearing browser data
  for i in range(1, nsym + 1):
    if i == 1:
      # Launch Chrome once and reuse the session for every symbol.
      options = webdriver.ChromeOptions()
      options.add_argument('--ignore-certificate-errors')
      options.add_argument('--ignore-ssl-errors')
      chrome_service = fs.Service(executable_path=r"C:\Users\tmp\python\pythonv3\driver\chromedriver")
      # 'options=' is the supported Selenium keyword; 'chrome_options=' is deprecated.
      driver = webdriver.Chrome(service=chrome_service, options=options)
    print(str(symbol[i]))
    driver.implicitly_wait(30)
    if i == 100 * n:
      # Every 100 symbols, clear browser data through the settings dialog
      # (keyboard navigation) so the long-running session does not bloat.
      driver.get('chrome://settings/clearBrowserData')
      time.sleep(2)
      actions = ActionChains(driver)
      actions.send_keys(Keys.TAB * 3 + Keys.DOWN * 3)
      actions.perform()
      time.sleep(2)
      actions = ActionChains(driver)
      actions.send_keys(Keys.TAB * 4 + Keys.ENTER)
      actions.perform()
      n += 1
    brk = 0
    for l in range(1, 6):  # up to 5 attempts per symbol
      if brk == 1:
        break
      time.sleep(5)
      driver.get('https://www.benzinga.com/quote/' + str(symbol[i]) + '/analyst-ratings')
      try:
        article = driver.find_elements(By.TAG_NAME, "div")
        rec = 0
        crec = 0  # number of "Previous / Current Rating" headers seen so far
        for jj in range(len(article)):
          text = article[jj].text  # cache: each .text is a driver round-trip
          if text == "Previous / Current Rating":
            crec += 1
          elif rec == 1 and text == "":
            break
          elif crec == 3 and text != "":
            # The block after the third header is the ratings table body.
            rec = 1
            brk = 1
            for j in range(1, 8):
              if text.find(str(ddd[j])) != -1:
                toks = text.splitlines()
                # BUG FIX: the original looped to len(toks) inclusive and read
                # toks[k+5] unguarded; the IndexError was swallowed by a bare
                # except, which re-ran the retry loop and duplicated rows.
                # Stay inside bounds instead.
                for k in range(len(toks)):
                  for kk in range(1, 8):
                    if toks[k] == str(ddd[kk]) and k + 5 < len(toks):
                      nd[i] += 1
                      data[i][nd[i]][1] = toks[k]
                      data[i][nd[i]][2] = toks[k + 2]
                      data[i][nd[i]][3] = toks[k + 3]
                      data[i][nd[i]][4] = toks[k + 4]
                      data[i][nd[i]][5] = toks[k + 5]
                      for col in range(1, 6):
                        print(data[i][nd[i]][col])
                break
      except Exception:
        # Page not ready / stale elements: fall through and retry.
        continue
  # Release the browser; bot() additionally force-kills leftover chrome.exe.
  if driver is not None:
    driver.quit()
    

      

def bot():
  """Entry point: collect S&P 500 tickers from finviz, then scrape ratings.

  Steps:
    1. Read the screener's page count from finviz (up to 10 attempts).
    2. Walk every screener page, collecting tickers into ``symbol``.
    3. Scrape analyst ratings (``ratings``), then force-kill Chrome.
    4. Dump the collected rows to rating.csv.
  """
  print("start")

  # --- Step 1: find the number of screener pages (the "Page 1/NN" cell). ---
  # BUG FIX: npq was previously left unbound when every attempt failed,
  # raising NameError below; npq == 0 now degrades to an empty run.
  npq = 0
  cnt = 0
  while cnt < 10:
    cnt += 1
    load_url = 'https://finviz.com/screener.ashx?v=111&f=idx_sp500'
    session = HTMLSession()
    r = session.get(load_url)
    if 200 <= r.status_code < 300:
      print(r.status_code)
      for cell in r.html.find("td"):
        if cell.text[:4] == "Page":
          # Cell text looks like "Page 1/NN": take the part after '/'.
          npq = int(cell.text.split('/')[1].splitlines()[0])
          print(npq)
          break
      break

  # --- Step 2: collect tickers, 20 per screener page. ---
  j = 0
  for i in range(1, npq + 1):
    cnt = 0
    ccnt = 0
    jjj = 0
    n1 = 0
    while cnt < 10:
      cnt += 1
      load_url = ('https://finviz.com/screener.ashx?v=152&f=idx_sp500&o=-marketcap&r='
                  + str(1 + (i - 1) * 20) + '&c=0,1,2,3,4,6,44,46,58,57,65')
      session = HTMLSession()
      r = session.get(load_url)
      if 200 <= r.status_code < 300:
        # Walk the table cells: the ticker sits 2 cells after the second
        # "Price" header, then every 11 cells (assumed column layout for
        # the &c=... selection above -- TODO confirm).
        for item_html in r.html.find("td"):
          jjj = jjj + 1
          if item_html.text == "Price" and j == (i - 1) * 20:
            ccnt += 1
            n1 = jjj + 2
          if jjj == n1 and ccnt == 2:
            j += 1
            symbol[j] = item_html.text
            n1 = jjj + 11
            if j == 20 + (i - 1) * 20:
              break
        break

  nsym = j

  # --- Step 3: scrape ratings, then force-kill any leftover Chrome. ---
  print('ratings')
  ratings(nsym)
  cmd = 'taskkill /im chrome.exe /f'
  returncode = subprocess.call(cmd)

  # --- Step 4: dump rows as CSV (shift-jis kept for downstream Excel use). ---
  # 'with' guarantees the handle is closed even if a write fails.
  with open("C:\\Users\\tmp\\python\\pythonv3\\rating.csv", 'w', encoding='shift-jis') as f:
    for i in range(1, nsym + 1):
      for jj in range(1, nd[i] + 1):
        row = [str(symbol[i])] + [str(data[i][jj][col]) for col in range(1, 6)]
        f.write(','.join(row) + '\n')
  
# Run the full scrape only when executed directly as a script.
if __name__ == '__main__':
  bot()



この記事が気に入ったらサポートをしてみませんか?