Hi,
I received a question: "I see that generating the
intraday summary report with a 5-second interval for 1 RIC for 1 month takes
a long time (more than 30 minutes, excluding queuing time). Is this normal? Is there any
way to speed up the process?"
I could not attach the .py file, so the code from it is copied below; I have removed the username and password from the Python script.
Best regards,
Gareth
-----------------------------------------------------------------------------------------------------------------------------------
# coding: utf-8
# In[4]:
# Step 1: request an authentication token from the DSS REST API.
# The token is valid for 24 hours and must be sent with every later request.
import requests
import json
import time

requestUrl = "https://hosted.datascopeapi.reuters.com/RestApi/v1/Authentication/RequestToken"

requestHeaders = {
    "Prefer": "respond-async",
    "Content-Type": "application/json"
}

# Replace myUserName / myPassword with valid DSS credentials before running.
# (The original paste had the Username value removed, which left invalid syntax.)
requestBody = {
    "Credentials": {
        "Username": "myUserName",
        "Password": "myPassword"
    }
}

proxies = {
    'http': 'http://webproxy.ssmb.com:8080',
    'https': 'http://webproxy.ssmb.com:8080'
}

r1 = requests.post(requestUrl, json=requestBody, headers=requestHeaders, proxies=proxies)

if r1.status_code == 200:
    # Response.json() decodes the body safely; re-encoding to ASCII with
    # errors='ignore' (as before) could silently drop characters.
    jsonResponse = r1.json()
    token = jsonResponse["value"]
    print('Authentication token (valid 24 hours):')
    print(token)
else:
    print('Please replace myUserName and myPassword with valid credentials, then repeat the request')
# In[5]:
# Step 2: send an on-demand extraction request using the received token.
# Note on performance: requesting fewer content fields, fewer RICs, or a
# shorter date range directly reduces extraction time on the server side.
requestUrl = 'https://hosted.datascopeapi.reuters.com/RestApi/v1/Extractions/ExtractRaw'

requestHeaders = {
    "Prefer": "respond-async",
    "Content-Type": "application/json",
    "Authorization": "token " + token
}

requestBody = {
    "ExtractionRequest": {
        "@odata.type": "#ThomsonReuters.Dss.Api.Extractions.ExtractionRequests.TickHistoryIntradaySummariesExtractionRequest",
        # Only the uncommented fields are extracted; uncomment more as needed.
        "ContentFieldNames": [
            # "Close Ask",
            # "Close Bid",
            # "High",
            # "High Ask",
            # "High Bid",
            "Last",
            # "Low",
            # "Low Ask",
            # "Low Bid",
            # "No. Asks",
            # "No. Bids",
            "No. Trades",
            "Open",
            # "Open Ask",
            # "Open Bid",
            "Volume"
        ],
        # Alternative request type (time & sales instead of intraday summaries):
        # "@odata.type": "#ThomsonReuters.Dss.Api.Extractions.ExtractionRequests.TickHistoryTimeAndSalesExtractionRequest",
        # "ContentFieldNames": [
        #     "Trade - Price",
        #     "Trade - Volume"
        # ],
        "IdentifierList": {
            "@odata.type": "#ThomsonReuters.Dss.Api.Extractions.ExtractionRequests.InstrumentIdentifierList",
            "InstrumentIdentifiers": [{
                "Identifier": "ESU7",
                "IdentifierType": "Ric"
            },
            ],
            "UseUserPreferencesForValidationOptions": "false"
        },
        "Condition": {
            "MessageTimeStampIn": "GmtUtc",
            "ReportDateRangeType": "Range",
            "QueryStartDate": "2017-06-28T00:00:00.000Z",
            "QueryEndDate": "2017-06-29T00:00:00.000Z",
            "SummaryInterval": "FiveSeconds",
            "TimebarPersistence": "false",
            "DisplaySourceRIC": "true"
        }
    }
}

r2 = requests.post(requestUrl, json=requestBody, headers=requestHeaders, proxies=proxies)

# Display the response status and the location URL used to poll the
# extraction status. The initial status (after ~30 seconds) will be 202.
# Error responses (e.g. 400 for a malformed request) carry no 'location'
# header, so guard against a KeyError here.
print(r2.status_code)
if "location" in r2.headers:
    print(r2.headers["location"])
else:
    print('No location header in the response - check the status code and request body above')
# In[6]:
# Step 3: poll the status of the request using the received location URL,
# then read the jobId and extraction notes.
# Semantics of the poll endpoint: 200 = extraction complete, 202 = still
# queued/running. Any other status is a real error, so do not loop forever.
requestUrl = r2.headers["location"]

requestHeaders = {
    "Prefer": "respond-async",
    "Content-Type": "application/json",
    "Authorization": "token " + token
}

while True:
    r3 = requests.get(requestUrl, headers=requestHeaders, proxies=proxies)
    if r3.status_code == 200:
        break
    if r3.status_code != 202:
        # e.g. 401 (token expired) or 404 - retrying cannot succeed.
        print('Unexpected status ' + str(r3.status_code) + ' - aborting poll')
        break
    # 202 means the extraction is still in progress, not a failure.
    print('Extraction not ready yet... polling again in 30 secs...')
    time.sleep(30)

# When the status is 200 the extraction is complete; display the jobId and notes.
print('response status = ' + str(r3.status_code))
if r3.status_code == 200:
    r3Json = r3.json()
    jobId = r3Json["JobId"]
    print('jobId: ' + jobId + '\n')
    notes = r3Json["Notes"]
    print('Extraction notes:\n' + notes[0])
else:
    print('execute the cell again, until it returns a response status of 200')
# In[7]:
# Step 4: retrieve the raw extraction results for the completed job.
requestUrl = "https://hosted.datascopeapi.reuters.com/RestApi/v1/Extractions/RawExtractionResults('{0}')/$value".format(jobId)

requestHeaders = {
    "Prefer": "respond-async",
    "Content-Type": "text/plain",
    # Ask for a compressed transfer; requests decompresses it transparently.
    "Accept-Encoding": "gzip",
    "Authorization": "token " + token
}

r4 = requests.get(requestUrl, headers=requestHeaders, proxies=proxies)
# print(r4.text)
# In[8]:
# Step 5 (cosmetic): load the CSV payload into a pandas DataFrame for display.
from io import StringIO
import pandas as pd

csv_buffer = StringIO(r4.text)
timeSeries = pd.read_csv(csv_buffer)
timeSeries
# In[ ]: