Commit bd27a51

1287 - 할 수 있다
1 parent bd4ee4e commit bd27a51

File tree

2 files changed: +472 -0 lines changed

Lines changed: 222 additions & 0 deletions
@@ -0,0 +1,222 @@
"""
Problem name: 할 수 있다 (Can Do It)
Problem number: 1287
Problem link: https://www.acmicpc.net/problem/1287
Difficulty: Platinum V
Tags: arithmetic, case work, implementation, math, parsing, string

The first line contains the expression to evaluate. (No whitespace; its length does not exceed 1,000 characters.)

The expression consists of the following characters:

+, -, /, *, (, ), 1, 2, 3, 4, 5, 6, 7, 8, 9, 0
+, -, /, * may only be used as binary operators; that is, an expression such as (+6+3) is not valid.
Also, no fractional value appears at any point during the calculation.

5+(1+2)*3 -> 14

"""

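# Overview of the solution below: tokenize the input string, run a linear
# validity check over the token stream, then evaluate it with a
# recursive-descent parser (expression -> term -> factor) using integer
# arithmetic. Any detected error makes the program print "ROCK".
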
import sys


def input(): return sys.stdin.readline().rstrip()


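# A token is a (token_type, literal) pair; only NUM tokens carry a literal.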
class Token:
    def __init__(self, t: str, l: str = ""):
        self.token_type = t
        self.literal = l

    def __str__(self):
        return f"{self.token_type}{f'({self.literal})' if len(self.literal)>0 else ''}"


class TokenType:
    ADD = 'ADD'
    SUB = 'SUB'
    DIV = 'DIV'
    MUL = 'MUL'
    LPAREN = 'LPAREN'
    RPAREN = 'RPAREN'
    NUM = 'NUM'


token_dict = {
    '+': TokenType.ADD,
    '-': TokenType.SUB,
    '/': TokenType.DIV,
    '*': TokenType.MUL,
    '(': TokenType.LPAREN,
    ')': TokenType.RPAREN,
}


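# Split the raw expression into tokens: operator and parenthesis characters
# are looked up in token_dict, consecutive digits are grouped into one NUM
# token, and any other character is skipped.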
def tokenizer(body: str) -> list:
    result = []
    seek = 0

    while seek < len(body):

        if body[seek] in token_dict:
            result.append(Token(token_dict[body[seek]]))
            seek += 1

        elif body[seek].isdigit():
            literal = read_number(body, seek)
            result.append(Token(TokenType.NUM, literal[0]))
            seek += literal[1]
        else:
            seek += 1
    return result


def read_number(body: str, start_pos: int) -> tuple:
    cur_pos = start_pos

    while cur_pos < len(body) and body[cur_pos].isdigit():
        cur_pos += 1

    return (body[start_pos:cur_pos], cur_pos-start_pos)


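# Calc validates the token stream and then evaluates it with a
# recursive-descent parser; __err is set whenever the expression is malformed.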
class Calc:

    def __init__(self) -> None:
        self.__token = []
        self.__seq = 0
        self.__token_count = 0
        self.__err = False
        self.__ans = 0

    def cur_token(self) -> Token:
        return (self.__token[self.__seq] if self.__token_count > self.__seq else None)

    def next_token(self) -> int:
        self.__seq += 1
        return self.__seq

    def valid(self):
        return not self.__err

    def answer(self):
        return self.__ans

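    # Recursive-descent evaluation with the usual operator precedence:
    #   expression := term (('+' | '-') term)*
    #   term       := factor (('*' | '/') factor)*
    #   factor     := NUM | '(' expression ')'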
    def expression(self) -> int:
        result = self.term()
        t = self.cur_token()
        while t is not None and (t.token_type == TokenType.ADD or t.token_type == TokenType.SUB):

            self.next_token()
            rhs = self.term()

            if t.token_type == TokenType.ADD:
                result += rhs
            else:
                result -= rhs
            t = self.cur_token()

        return result

    def term(self) -> int:
        result = self.factor()

        t = self.cur_token()

        while t is not None and (t.token_type == TokenType.MUL or t.token_type == TokenType.DIV):
            self.next_token()
            rhs = self.factor()
            # print(result, rhs)
            if t.token_type == TokenType.MUL:
                result *= rhs
            else:
                # integer division is exact here: the problem guarantees that
                # no fractional value appears during evaluation
                result //= rhs

            t = self.cur_token()

        return result

    def factor(self) -> int:
        result = 0
        t = self.cur_token()

        if t is not None:

            if t.token_type == TokenType.LPAREN:
                self.next_token()
                t = self.cur_token()

                # empty parentheses: '(' immediately followed by ')'
                if t.token_type == TokenType.RPAREN:
                    self.__err = True

                result = self.expression()

                # after the recursive call, the current token must be RPAREN;
                # otherwise the expression is malformed
                if self.cur_token() is None or self.cur_token().token_type != TokenType.RPAREN:
                    # print(self.__token[self.__seq-1])
                    self.__err = True

                self.next_token()
                # consume the closing RPAREN

                return result

            if t.token_type == TokenType.NUM:
                result = int(t.literal)
                self.next_token()

        return result

    def eval(self, s: str) -> None:
        self.__err = False
        self.__seq = 0
        self.__ans = 0
        self.__token = tokenizer(s)

        self.__token_count = len(self.__token)

        oper = [TokenType.ADD, TokenType.SUB, TokenType.MUL, TokenType.DIV]
        level = 0

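        # Linear pre-validation of the token stream before parsing: reject a
        # ')' that closes more than has been opened, an operator that does not
        # sit between two proper operands, and a bare number wrapped directly
        # in parentheses.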
        for i in range(self.__token_count):

            # parenthesis nesting levels do not match
            if self.__token[i].token_type == TokenType.LPAREN:
                level += 1
            elif self.__token[i].token_type == TokenType.RPAREN:
                level -= 1

            if level < 0:
                self.__err = True
                break

            # another operator (or a parenthesis) sits directly before or after
            # an operator; i+1 >= self.__token_count also rejects an operator at
            # the very end and guards the lookahead below against an index error
            if (self.__token[i].token_type in oper
                    and (i-1 < 0 or i+1 >= self.__token_count
                         or self.__token[i-1].token_type in [*oper, TokenType.LPAREN]
                         or self.__token[i+1].token_type in [*oper, TokenType.RPAREN])):

                self.__err = True
                break

            # a single number wrapped directly in parentheses, e.g. (5)
            if (self.__token[i].token_type == TokenType.NUM
                    and self.__token[i-1].token_type == TokenType.LPAREN
                    and self.__token[i+1].token_type == TokenType.RPAREN):

                self.__err = True
                break

        if not self.__err:
            self.__ans = self.expression()


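# If the counts of '(' and ')' differ, the expression is rejected as "ROCK"
# without tokenizing or parsing at all.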
calc = Calc()

s = input()

if s.count('(')-s.count(')') == 0:
    calc.eval(s)
    print(calc.answer() if calc.valid() else "ROCK")
else:
    print("ROCK")
