-
Notifications
You must be signed in to change notification settings - Fork 11
/
calculate_solver_bill_new.py
86 lines (70 loc) · 2.76 KB
/
calculate_solver_bill_new.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import gym
import itertools
import matplotlib
import numpy as np
import pandas as pd
import sys
#import tensorflow as tf
import collections
import csv
import os
import pathlib
from environment_bill import EnergyEnvironment
import matplotlib.pyplot as plt
import sklearn.pipeline
import sklearn.preprocessing

# Make the parent directory importable so the shared `lib` package resolves.
if "../" not in sys.path:
    sys.path.append("../")
# from lib.envs.cliff_walking import CliffWalkingEnv
from lib import plotting
from sklearn.kernel_approximation import RBFSampler

matplotlib.style.use('ggplot')

#GLOBAL_VARIABLES
# Command-line contract (see the setup block below):
#   argv[1] = sell-back price, argv[2] = battery capacity,
#   argv[3] = max charge/discharge rate, argv[4] = data file name.
MAX_CHARGE_RATE = float(sys.argv[3])
# Symmetric [max discharge, max charge] limits; compute_bill tightens these
# per step based on the current battery state.
ACTION_BOUND = [-MAX_CHARGE_RATE, MAX_CHARGE_RATE]
#print("0187230981723897",ACTION_BOUND)
current_bill = 0
# Battery assumed to start at 50% state of charge.
current_soc = float(sys.argv[2]) * 0.5
# our environment — the simulated household energy environment
env = EnergyEnvironment()
def compute_bill(env, length, actions=None, bill_writer=None, max_charge_rate=None):
    """Replay a precomputed action sequence through ``env`` and total the bill.

    Parameters
    ----------
    env : EnergyEnvironment
        Environment, already initialised (ground truth loaded, month_starter
        set, ``reset()`` called by the caller).
    length : int
        Number of timesteps to replay.
    actions : pandas.DataFrame, optional
        Frame with an ``'action'`` column, one row per step. Defaults to the
        module-level ``actionlist`` loaded from the OLC solver csv
        (backward-compatible with the original global-based behavior).
    bill_writer : csv.writer, optional
        Destination for per-step log rows ``[step, action, bill, -reward]``.
        Defaults to the module-level ``writer``.
    max_charge_rate : float, optional
        Absolute charge/discharge power limit. Defaults to the module-level
        ``MAX_CHARGE_RATE`` (``sys.argv[3]``).

    Returns
    -------
    float
        Cumulative bill, i.e. the sum of ``-reward`` over all steps.
    """
    if actions is None:
        actions = actionlist
    if bill_writer is None:
        bill_writer = writer
    if max_charge_rate is None:
        max_charge_rate = MAX_CHARGE_RATE
    total_bill = 0
    for t in range(length):
        # Tighten the action bounds to what is feasible right now: discharge
        # is limited by the current battery level and rated power; charge is
        # limited by the remaining battery headroom and rated power.
        # NOTE(review): state column meanings (index 8 = battery level,
        # index 5 = another feasibility cap) inferred from usage — confirm
        # against EnergyEnvironment.
        row = env.state[env.current_index]
        bound = [-min(row[8], row[5], max_charge_rate),
                 min(env.maximum_battery - row[8], max_charge_rate)]
        action = np.clip(actions['action'].iloc[t], *bound)
        tng, next_state, reward, done = env.step(action)
        # reward is the negated cost for the step, so the bill grows by -reward.
        total_bill += (-reward)
        bill_writer.writerow([t, action, total_bill, -reward])
    return total_bill
# --- Script body: configure the environment from argv, replay the solver's
# actions, and log the running bill to csv.
# Usage: python calculate_solver_bill_new.py <sell_back> <battery_size>
#        <max_charge_rate> <datafile>
env.sell_back = float(sys.argv[1])
env.maximum_battery = float(sys.argv[2])
# Battery begins the simulation half full.
env.battery_starter = env.maximum_battery * 0.5
env.charge_mode = "TOU"  # time-of-use pricing
env.datafile = sys.argv[4]
# Home id is the fourth underscore-separated token of the data file stem,
# e.g. "a_b_c_<homeid>.csv" — TODO confirm the expected filename pattern.
homeid= sys.argv[4].split(".")[0].split("_")[3]
#actionlist=pd.read_csv("rbc_2/{}_2_rbc/sb".format(homeid)+str(int(float(sys.argv[1])*100))+"b"+str(int(float(sys.argv[2])*10))+".csv")['Base_Action']
print("Sell back price is",env.sell_back)
print("battery size is",env.maximum_battery)
env.init_ground_truth()
#env.init_price()
#print out the initial state
#print("inistial state",env.state)
# Output path: olc_4_bill/<homeid>_4_olc_bill/sb<price*100>b<capacity*10>.csv
pathlib.Path("olc_4_bill/{}_4_olc_bill".format(homeid)).mkdir(parents=True, exist_ok=True)
# NOTE(review): csvfile is never closed; consider a `with` block so buffered
# rows are not lost if the run aborts.
csvfile = open("olc_4_bill/{}_4_olc_bill/sb".format(homeid)+str(int(float(sys.argv[1])*100))+"b"+str(int(float(sys.argv[2])*10))+".csv", 'w', newline='')
# Module-level writer — read by compute_bill for its per-step log rows.
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(["Step", "Action","Bill","reward"])
# Evaluate hours 168..8736 (skip the first week of the year).
start_point = 168
end_point = 8736
# Precomputed per-hour actions from the OLC solver run for this home/config.
# Module-level actionlist — read by compute_bill.
actionlist=pd.read_csv("olc_4/{}_4_action/sb".format(homeid)+str(int(float(sys.argv[1])*100))+"b"+str(int(float(sys.argv[2])*10))+".csv",usecols=[0], names=['action'], header=None)[168:8736]
env.month_starter = start_point
state = env.reset()
bill = compute_bill(env, end_point - start_point)
print("this is best bill",bill)
# sell_back_round=int(float(sys.argv[1])*100)
# battery_round=int(float(sys.argv[2])*10)