@@ -76,78 +76,78 @@ def main():
     print("event:")
     print(event)
 
-    x = requests.post(SPLUNK_HEC_URL, data=event, headers=headers)
-
-
-    url = "{url}/repos/{repo}/actions/runs/{run_id}/logs".format(url=GITHUB_API_URL,repo=GITHUB_REPOSITORY,run_id=GITHUB_WORKFLOWID)
-    print(url)
-
-    try:
-        x = requests.get(url, stream=True, auth=('token',GITHUB_TOKEN))
-
-    except requests.exceptions.HTTPError as errh:
-        output = "GITHUB API Http Error:" + str(errh)
-        print(f"Error: {output}")
-        print(f"::set-output name=result::{output}")
-        return
-    except requests.exceptions.ConnectionError as errc:
-        output = "GITHUB API Error Connecting:" + str(errc)
-        print(f"Error: {output}")
-        print(f"::set-output name=result::{output}")
-        return
-    except requests.exceptions.Timeout as errt:
-        output = "Timeout Error:" + str(errt)
-        print(f"Error: {output}")
-        print(f"::set-output name=result::{output}")
-        return
-    except requests.exceptions.RequestException as err:
-        output = "GITHUB API Non catched error conecting:" + str(err)
-        print(f"Error: {output}")
-        print(f"::set-output name=result::{output}")
-        return
-
-    z = zipfile.ZipFile(io.BytesIO(x.content))
-    z.extractall('/app')
-
-    timestamp = batch = count = 0
-
-    for name in glob.glob('/app/*.txt'):
-        logfile = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), name.replace('./','')),'r')
-        Lines = logfile.readlines()
-        for line in Lines:
-
-            if line:
-                count+=1
-                if timestamp:
-                    t2=timestamp
-                timestamp = re.search("\d{4}-\d{2}-\d{2}T\d+:\d+:\d+.\d+Z",line.strip())
-
-                if timestamp:
-                    timestamp = re.sub("\dZ","",timestamp.group())
-                    timestamp = datetime.strptime(timestamp,"%Y-%m-%dT%H:%M:%S.%f")
-                    timestamp = (timestamp - datetime(1970,1,1)).total_seconds()
-                else:
-                    timestamp=t2
-
-                x = re.sub("\d{4}-\d{2}-\d{2}T\d+:\d+:\d+.\d+Z","",line.strip())
-                x=x.strip()
-                job_name=re.search("\/\d+\_(?P<job>.*)\.txt",name)
-                job_name=job_name.group('job')
-
-                fields = {'lineNumber':count,'workflowID':GITHUB_WORKFLOWID,'job':job_name}
-                if x:
-                    batch+=1
-                    event={'event':x,'sourcetype':SPLUNK_SOURCETYPE,'source':SPLUNK_SOURCE,'host':host,'time':timestamp,'fields':fields}
-                    eventBatch=eventBatch+json.dumps(event)
-                else:
-                    print("skipped line "+str(count))
-
-                if batch>=1000:
-                    batch=0
-                    x=requests.post(SPLUNK_HEC_URL, data=eventBatch, headers=headers)
-                    eventBatch=""
-
-    response=requests.post(SPLUNK_HEC_URL, data=eventBatch, headers=headers)
+    response = requests.post(SPLUNK_HEC_URL, data=event, headers=headers)
+
+
+    # url = "{url}/repos/{repo}/actions/runs/{run_id}/logs".format(url=GITHUB_API_URL,repo=GITHUB_REPOSITORY,run_id=GITHUB_WORKFLOWID)
+    # print(url)
+
+    # try:
+    # x = requests.get(url, stream=True, auth=('token',GITHUB_TOKEN))
+
+    # except requests.exceptions.HTTPError as errh:
+    # output = "GITHUB API Http Error:" + str(errh)
+    # print(f"Error: {output}")
+    # print(f"::set-output name=result::{output}")
+    # return
+    # except requests.exceptions.ConnectionError as errc:
+    # output = "GITHUB API Error Connecting:" + str(errc)
+    # print(f"Error: {output}")
+    # print(f"::set-output name=result::{output}")
+    # return
+    # except requests.exceptions.Timeout as errt:
+    # output = "Timeout Error:" + str(errt)
+    # print(f"Error: {output}")
+    # print(f"::set-output name=result::{output}")
+    # return
+    # except requests.exceptions.RequestException as err:
+    # output = "GITHUB API Non catched error conecting:" + str(err)
+    # print(f"Error: {output}")
+    # print(f"::set-output name=result::{output}")
+    # return
+
+    # z = zipfile.ZipFile(io.BytesIO(x.content))
+    # z.extractall('/app')
+
+    # timestamp = batch = count = 0
+
+    # for name in glob.glob('/app/*.txt'):
+    # logfile = open(os.path.join(os.path.dirname(os.path.abspath(__file__)), name.replace('./','')),'r')
+    # Lines = logfile.readlines()
+    # for line in Lines:
+
+    # if line:
+    # count+=1
+    # if timestamp:
+    # t2=timestamp
+    # timestamp = re.search("\d{4}-\d{2}-\d{2}T\d+:\d+:\d+.\d+Z",line.strip())
+
+    # if timestamp:
+    # timestamp = re.sub("\dZ","",timestamp.group())
+    # timestamp = datetime.strptime(timestamp,"%Y-%m-%dT%H:%M:%S.%f")
+    # timestamp = (timestamp - datetime(1970,1,1)).total_seconds()
+    # else:
+    # timestamp=t2
+
+    # x = re.sub("\d{4}-\d{2}-\d{2}T\d+:\d+:\d+.\d+Z","",line.strip())
+    # x=x.strip()
+    # job_name=re.search("\/\d+\_(?P<job>.*)\.txt",name)
+    # job_name=job_name.group('job')
+
+    # fields = {'lineNumber':count,'workflowID':GITHUB_WORKFLOWID,'job':job_name}
+    # if x:
+    # batch+=1
+    # event={'event':x,'sourcetype':SPLUNK_SOURCETYPE,'source':SPLUNK_SOURCE,'host':host,'time':timestamp,'fields':fields}
+    # eventBatch=eventBatch+json.dumps(event)
+    # else:
+    # print("skipped line "+str(count))
+
+    # if batch>=1000:
+    # batch=0
+    # x=requests.post(SPLUNK_HEC_URL, data=eventBatch, headers=headers)
+    # eventBatch=""
+
+    # response=requests.post(SPLUNK_HEC_URL, data=eventBatch, headers=headers)
 
     if response.ok:
         print("Logs forwarding successfully!")