diff --git a/IRIS_metadata.txt b/EarthScope_metadata.txt
similarity index 100%
rename from IRIS_metadata.txt
rename to EarthScope_metadata.txt
diff --git a/QuARG.py b/QuARG.py
index 2d60b1d..632fa81 100755
--- a/QuARG.py
+++ b/QuARG.py
@@ -21,7 +21,7 @@
"""
-version = "1.1.1"
+version = "1.2.0"
print("QuARG version %s" % version)
# TODO: Need to include MS Gothic.ttf when packaging the scripts
@@ -68,7 +68,6 @@
import os
import datetime
-import time
import shutil # used to remove directories
import webbrowser
import pandas as pd
@@ -82,11 +81,15 @@
import urllib.request
import urllib.error
import requests # used for getting empty transfer_function returns
-
import reportUtils
Config.set("input", "mouse", "mouse,disable_multitouch")
+# Explicit adapters and converters for datetime
+sqlite3.register_adapter(datetime.datetime, lambda dt: dt.isoformat(" "))
+sqlite3.register_converter(
+ "timestamp", lambda s: datetime.datetime.fromisoformat(s.decode())
+)
# PREFERENCE FILE TODOS #
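Context for the `sqlite3.register_*` calls above: Python 3.12 deprecates sqlite3's built-in datetime adapters, so QuARG now registers its own. A minimal standalone sketch of the round trip these two calls enable (the table and column names here are hypothetical, not QuARG's schema):

```python
import datetime
import sqlite3

sqlite3.register_adapter(datetime.datetime, lambda dt: dt.isoformat(" "))
sqlite3.register_converter(
    "timestamp", lambda s: datetime.datetime.fromisoformat(s.decode())
)

# detect_types tells sqlite3 to run the converter on columns declared
# TIMESTAMP, matching the create_connection() change later in this diff.
conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute("CREATE TABLE demo (updated TIMESTAMP)")
conn.execute(
    "INSERT INTO demo VALUES (?)", (datetime.datetime(2024, 1, 2, 3, 4, 5),)
)
value = conn.execute("SELECT updated FROM demo").fetchone()[0]
print(type(value), value)  # <class 'datetime.datetime'> 2024-01-02 03:04:05
```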
@@ -258,7 +261,7 @@ def get_default_dates(self):
if not MainScreen.start:
self.start = str(lastMonthStart)
- if not MainLScreen.end:
+ if not MainScreen.end:
self.end = str(first)
def set_default_start(self):
@@ -441,8 +444,6 @@ def load_file(self, path, filename):
self.find_file.text = os.path.basename(filename[0])
self.examine_file.text = os.path.basename(filename[0])
- # self.find_file.text = filename[0]
- # self.examine_file.text = filename[0]
ExamineIssuesScreen.issueFile = self.examine_file.text
except Exception as e:
self.warning_popup("WARNING: %s" % e)
@@ -464,7 +465,6 @@ def load_csv(self, path, filename):
self.generate_directory.text = file_directory
self.ids.csv_id.text = os.path.basename(filename[0])
- # self.ids.csv_id.text = filename[0]
except Exception as e:
self.warning_popup("WARNING: %s" % e)
self.dismiss_popup()
@@ -521,14 +521,14 @@ def do_find(self):
if not os.path.isfile(masterDict["metrics_file"]):
self.warning_popup(
- "WARNING: Could not find file of IRIS metrics: %s\nIf connected to the internet, this file can be generated by entering the Thresholds Editor"
+ "WARNING: Could not find file of EarthScope metrics: %s\nIf connected to the internet, this file can be generated by entering the Thresholds Editor"
% masterDict["metrics_file"]
)
return
if not os.path.isfile(masterDict["metadata_file"]):
self.warning_popup(
- "WARNING: Could not find file of IRIS metadata fields: %s\nIf connected to the internet, this file can be generated by entering the Thresholds Editor"
+ "WARNING: Could not find file of EarthScope metadata fields: %s\nIf connected to the internet, this file can be generated by entering the Thresholds Editor"
% masterDict["metadata_file"]
)
return
@@ -607,14 +607,6 @@ def remove_dir(self):
print("Previous copy removed, generating new Report")
self.do_generate()
- #### REMOVE IF NO ISSUES ARISE OUT OF ITS ABSENCE ###
- # def date_checked(self, option, value):
- # if value is True:
- # self.query_options.append(option)
- # else:
- # self.query_options = [v for v in self.query_options if v != option]
- #####################################################
-
def get_ticket_inputs(self, *kwargs):
main_screen = screen_manager.get_screen("mainScreen")
@@ -738,7 +730,6 @@ def grab_tickets(self, *kwargs):
try:
# convert any cases of BH[EHZ] (for example) to lists
for ind, row in allTickets.iterrows():
-
# network(s)
networks = reportUtils.expandCodes(row["network"])
allTickets.at[ind, "networks"] = networks
@@ -758,80 +749,90 @@ def grab_tickets(self, *kwargs):
# Now start subsetting
subsettedTickets = pd.DataFrame(columns=allTickets.columns)
- tmpTickets = pd.DataFrame()
+ # Subset for networks
+ frames_to_concat = [] # list to hold all DataFrames to concatenate
+
for net in masterDict["query_nets"].split(","):
- if net == "" or net == "*" or net == "%" or net == "???":
- tmpTickets = tmpTickets.append(allTickets)
+ if net in ["", "*", "%", "???"]:
+ frames_to_concat.append(allTickets)
else:
- tmpTickets = tmpTickets.append(
- allTickets[
- allTickets["networks"].str.contains(
- ",%s," % net.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["networks"].str.match(",\*,")]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_all = allTickets[
+ allTickets["networks"].str.contains(
+ ",%s," % net.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_all)
+
+ filtered_subset = subsettedTickets[
+ subsettedTickets["networks"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(filtered_subset)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ # Subset for stations
+ frames_to_concat = []
- tmpTickets = pd.DataFrame()
for sta in masterDict["query_stas"].split(","):
- if sta == "" or sta == "*" or sta == "%" or sta == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if sta in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["stations"].str.contains(
- ",%s," % sta.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["stations"].str.match(",\*,")]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_stas = subsettedTickets[
+ subsettedTickets["stations"].str.contains(
+ ",%s," % sta.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_stas)
+
+ star_stas = subsettedTickets[
+ subsettedTickets["stations"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_stas)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ # Subset for locations
+ frames_to_concat = []
- tmpTickets = pd.DataFrame()
for loc in masterDict["query_locs"].split(","):
- if loc == "" or loc == "*" or loc == "%" or loc == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if loc in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["locations"].str.contains(
- ",%s," % loc.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["locations"].str.match(",\*,")
- ]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_locs = subsettedTickets[
+ subsettedTickets["locations"].str.contains(
+ ",%s," % loc.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_locs)
+
+ star_locs = subsettedTickets[
+ subsettedTickets["locations"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_locs)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ # Subset for channels
+ frames_to_concat = []
- tmpTickets = pd.DataFrame()
for chan in masterDict["query_chans"].split(","):
- if chan == "" or chan == "*" or chan == "%" or chan == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if chan in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["channels"].str.contains(
- ",%s," % chan.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["channels"].str.match(",\*,")]
- )
+ filtered_chans = subsettedTickets[
+ subsettedTickets["channels"].str.contains(
+ ",%s," % chan.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_chans)
+
+ star_chans = subsettedTickets[
+ subsettedTickets["channels"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_chans)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
- subsettedTickets = tmpTickets.copy()
subsettedTickets.drop_duplicates(inplace=True)
try:
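Context for the refactor above: `DataFrame.append` was deprecated in pandas 1.4 and removed in pandas 2.0, so each append-in-a-loop becomes "collect frames in a list, concatenate once". A standalone sketch of the pattern with toy data (not QuARG's ticket schema), including the wildcard-to-regex translation the loops rely on:

```python
import pandas as pd

allTickets = pd.DataFrame({"networks": [",IU,", ",N4,", ",XX,"]})

frames_to_concat = []
for net in "IU,N?".split(","):
    if net in ["", "*", "%", "???"]:
        frames_to_concat.append(allTickets)
    else:
        # '?' -> '.?' and '*' -> '.*' turn user wildcards into regex
        pattern = ",%s," % net.replace("?", ".?").replace("*", ".*")
        frames_to_concat.append(
            allTickets[allTickets["networks"].str.contains(pattern)]
        )

# One concat replaces the repeated tmpTickets.append(...) calls
subset = pd.concat(frames_to_concat, ignore_index=True).drop_duplicates()
print(subset)  # keeps the ,IU, and ,N4, rows, drops ,XX,
```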
@@ -846,7 +847,7 @@ def grab_tickets(self, *kwargs):
except:
masterDict["tickets"] = ""
- except:
+ except Exception as e:
masterDict["tickets"] = ""
def go_To_NewTickets(self, *kwargs):
@@ -866,7 +867,6 @@ def generate_csv(self):
with open(self.preference) as f:
local_dict = locals()
exec(compile(f.read(), self.preference, "exec"), globals(), local_dict)
-
try:
if not self.generate_start == "":
datetime.datetime.strptime(self.generate_start, "%Y-%m-%d")
@@ -980,80 +980,81 @@ def generate_csv(self):
# Now start subsetting
subsettedTickets = pd.DataFrame(columns=allTickets.columns)
- tmpTickets = pd.DataFrame()
+ frames_to_concat = []
for net in self.generate_network.split(","):
- if net == "" or net == "*" or net == "%" or net == "???":
- tmpTickets = tmpTickets.append(allTickets)
+ if net in ["", "*", "%", "???"]:
+ frames_to_concat.append(allTickets)
else:
- tmpTickets = tmpTickets.append(
- allTickets[
- allTickets["networks"].str.contains(
- ",%s," % net.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["networks"].str.match(",\*,")]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_all = allTickets[
+ allTickets["networks"].str.contains(
+ ",%s," % net.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_all)
+
+ filtered_subset = subsettedTickets[
+ subsettedTickets["networks"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(filtered_subset)
- tmpTickets = pd.DataFrame()
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ frames_to_concat = []
for sta in self.generate_station.split(","):
- if sta == "" or sta == "*" or sta == "%" or sta == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if sta in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["stations"].str.contains(
- ",%s," % sta.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["stations"].str.match(",\*,")]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_stas = subsettedTickets[
+ subsettedTickets["stations"].str.contains(
+ ",%s," % sta.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_stas)
- tmpTickets = pd.DataFrame()
+ star_stas = subsettedTickets[
+ subsettedTickets["stations"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_stas)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ frames_to_concat = []
for loc in self.generate_location.split(","):
- if loc == "" or loc == "*" or loc == "%" or loc == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if loc in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["locations"].str.contains(
- ",%s," % loc.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["locations"].str.match(",\*,")
- ]
- )
- subsettedTickets = tmpTickets.copy()
+ filtered_locs = subsettedTickets[
+ subsettedTickets["locations"].str.contains(
+ ",%s," % loc.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_locs)
- tmpTickets = pd.DataFrame()
+ star_locs = subsettedTickets[
+ subsettedTickets["locations"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_locs)
+
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
+
+ frames_to_concat = []
for chan in self.generate_channel.split(","):
- if chan == "" or chan == "*" or chan == "%" or chan == "???":
- tmpTickets = tmpTickets.append(subsettedTickets)
+ if chan in ["", "*", "%", "???"]:
+ frames_to_concat.append(subsettedTickets)
else:
- tmpTickets = tmpTickets.append(
- subsettedTickets[
- subsettedTickets["channels"].str.contains(
- ",%s," % chan.replace("?", ".?").replace("*", ".*")
- )
- == True
- ]
- )
- tmpTickets = tmpTickets.append(
- subsettedTickets[subsettedTickets["channels"].str.match(",\*,")]
- )
+ filtered_chans = subsettedTickets[
+ subsettedTickets["channels"].str.contains(
+ ",%s," % chan.replace("?", ".?").replace("*", ".*")
+ )
+ ]
+ frames_to_concat.append(filtered_chans)
+
+ star_chans = subsettedTickets[
+ subsettedTickets["channels"].str.match(r",\*,")
+ ]
+ frames_to_concat.append(star_chans)
- subsettedTickets = tmpTickets.copy()
+ subsettedTickets = pd.concat(frames_to_concat, ignore_index=True)
subsettedTickets.drop_duplicates(inplace=True)
try:
@@ -1197,24 +1198,21 @@ def generate_report(self):
local_dict,
)
YYYYmmdd = "".join(local_dict["startday"].split("-"))
- # self.startDate.text = local_dict["startday"]
except:
self.warning_popup(
"WARNING: Tried to get Start Date from Preference file(since it was left empty),\nbut failed to read Preference File"
)
return
+
if not self.generate_network == "":
network = self.generate_network
else:
network = local_dict["network"]
# The network report should be put into the same directory as the csv file even if that differs from the preference)files
- # dirToUse = os.path.dirname(self.csv)
dirToUse = self.directory
print(dirToUse)
- # self.report_filename = dirToUse + '/' + local_dict['network'] +'_Netops_Report_' + month
self.report_filename = network + "_Netops_Report_" + YYYYmmdd
- # self.zipDir = local_dict["directory"] + self.report_filename
self.zipDir = dirToUse + "/" + self.report_filename
self.report_fullPath = self.zipDir + "/" + self.report_filename + ".html"
@@ -1254,11 +1252,9 @@ def generate_report(self):
return
# The network report should be put into the same directory as the csv file even if that differs from the preference)files
- # dirToUse = os.path.dirname(self.csv)
dirToUse = self.directory
self.report_filename = network + "_Netops_Report_" + YYYYmmdd
- # self.zipDir = local_dict["directory"] + self.report_filename
self.zipDir = dirToUse + "/" + self.report_filename
self.report_fullPath = self.zipDir + "/" + self.report_filename + ".html"
@@ -1608,11 +1604,6 @@ def help_text(self, whichOne):
fields. [See detailed documentation for the format.]
"""
- # if whichOne == 12:
- # helpText = '''
- #
- # '''
-
return helpText
def open_detailed_documentation(self):
@@ -1744,11 +1735,11 @@ def load_preference_file(self, preferenceFile):
masterDict["preference_chanTypes"]["V"]
)
- if masterDict["preference_metricSource"] == "IRIS":
+ if masterDict["preference_metricSource"] == "EarthScope":
preferences_screen.metric_source_text.text = ""
preferences_screen.metric_source_text.disabled = True
preferences_screen.metric_browse_btn.disabled = True
- preferences_screen.metric_source_btn.text = "IRIS"
+ preferences_screen.metric_source_btn.text = "EarthScope"
else:
preferences_screen.metric_source_text.text = masterDict[
"preference_metricSource"
@@ -1759,11 +1750,11 @@ def load_preference_file(self, preferenceFile):
preferences_screen.metric_source_text.disabled = False
preferences_screen.metric_browse_btn.disabled = False
- if masterDict["preference_metadataSource"] == "IRIS":
+ if masterDict["preference_metadataSource"] == "EarthScope":
preferences_screen.metadata_source_text.text = ""
preferences_screen.metadata_source_text.disabled = True
preferences_screen.metadata_browse_btn.disabled = True
- preferences_screen.metadata_source_btn.text = "IRIS"
+ preferences_screen.metadata_source_btn.text = "EarthScope"
else:
preferences_screen.metadata_source_text.text = masterDict[
"preference_metadataSource"
@@ -1923,7 +1914,7 @@ def load_metric_file(self, path, filename):
def deactivate_metric_source_text(self, *kwargs):
preferences_screen = screen_manager.get_screen("preferencesScreen")
- if preferences_screen.metric_source_btn.text == "IRIS":
+ if preferences_screen.metric_source_btn.text == "EarthScope":
preferences_screen.metric_source_text.disabled = True
preferences_screen.metric_browse_btn.disabled = True
else:
@@ -1932,7 +1923,7 @@ def deactivate_metric_source_text(self, *kwargs):
def deactivate_metadata_source_text(self, *kwargs):
preferences_screen = screen_manager.get_screen("preferencesScreen")
- if preferences_screen.metadata_source_btn.text == "IRIS":
+ if preferences_screen.metadata_source_btn.text == "EarthScope":
preferences_screen.metadata_source_text.disabled = True
preferences_screen.metadata_browse_btn.disabled = True
else:
@@ -1940,12 +1931,6 @@ def deactivate_metadata_source_text(self, *kwargs):
preferences_screen.metadata_browse_btn.disabled = False
def go_to_thresholdGroups(self):
- # if not masterDict['preference_file'] == "":
- # try:
- # masterDict['preference_groupsDict']
- # except:
- # self.warning_popup("WARNING: Preference File has been selected but not loaded\n Either load the file")
-
ThresholdGroupsScreen.go_to_thresholdGroups(ThresholdGroupsScreen)
def exit_confirmation(self, *kwargs):
@@ -2049,7 +2034,6 @@ def save_preference_file(self):
self.selected_instrumentGroups.append(masterDict["groupsDict"][x])
except:
pass
- # self.selected_instrumentGroups = list(set([masterDict['groupsDict'][x] for x in self.instrument_selectionIndices]))
self.selected_thresholdGroups = list(
set(
@@ -2064,13 +2048,13 @@ def save_preference_file(self):
"V": tuple(preferences_screen.pref_V.text.split(",")),
}
- if preferences_screen.metadata_source_btn.text == "IRIS":
- self.metadataSource = "IRIS"
+ if preferences_screen.metadata_source_btn.text == "EarthScope":
+ self.metadataSource = "EarthScope"
else:
self.metadataSource = preferences_screen.metadata_source_text.text
- if preferences_screen.metric_source_btn.text == "IRIS":
- self.metricSource = "IRIS"
+ if preferences_screen.metric_source_btn.text == "EarthScope":
+ self.metricSource = "EarthScope"
else:
self.metricSource = preferences_screen.metric_source_text.text
@@ -2238,7 +2222,7 @@ def do_writing(self, *kwargs):
)
f.write(
- "\n\n# Metric source: either 'IRIS' or the path to the local sqlite database file that ISPAQ generated\n"
+ "\n\n# Metric source: either 'EarthScope' or the path to the local sqlite database file that ISPAQ generated\n"
)
f.write(
"metricSource = '%s'\nmetadataSource = '%s'"
@@ -2624,8 +2608,6 @@ def go_to_thresholdsLayout(self):
my_thresholds = [{"text": x} for x in masterDict["threshold_names"]]
thresholds_screen.threshold_list_rv.data = my_thresholds
thresholds_screen.threshold_list_rv._layout_manager.select_node(0)
- # selectable_nodes = thresholds_screen.threshold_list_rv.get_selectable_nodes()
- # thresholds_screen.threshold_list_rv.select_node(selectable_nodes[0])
## Threshold groups
instrument_groups = list()
@@ -2645,9 +2627,9 @@ def go_to_thresholdsLayout(self):
thresholds_screen.threshold_group_rv._layout_manager.select_node(0)
## Metric names
- # Try to get a list of metrics from service.iris.edu, but if fails
+ # Try to get a list of metrics from service.earthscope.org, but if fails
# then just use the old list.
- URL = "http://service.iris.edu/mustang/metrics/1/query?output=xml&nodata=404"
+ URL = "http://service.earthscope.org/mustang/metrics/1/query?output=xml&nodata=404"
try:
metrics = list()
@@ -2662,7 +2644,7 @@ def go_to_thresholdsLayout(self):
today = datetime.datetime.now()
yesterday = today - datetime.timedelta(days=1)
subURL = (
- "http://service.iris.edu/mustang/measurements/1/query?metric=transfer_function&format=text&timewindow=%s,%s&nodata=404"
+ "http://service.earthscope.org/mustang/measurements/1/query?metric=transfer_function&format=text&timewindow=%s,%s&nodata=404"
% (
yesterday.strftime("%Y-%m-%d"),
today.strftime("%Y-%m-%d"),
@@ -2701,7 +2683,7 @@ def go_to_thresholdsLayout(self):
print("ERROR: %s" % e)
## Do the same for the metadata fields
- URL = "http://service.iris.edu/fdsnws/station/1/query?net=IU&sta=ANMO&loc=00&cha=BHZ&level=channel&format=text&includecomments=true&nodata=404"
+ URL = "http://service.earthscope.org/fdsnws/station/1/query?net=IU&sta=ANMO&loc=00&cha=BHZ&level=channel&format=text&includecomments=true&nodata=404"
try:
metadata = pd.read_csv(URL, nrows=1, sep="|").columns
@@ -2798,7 +2780,6 @@ def new_threshold_popup(self):
additionContent.bind(minimum_height=additionContent.setter("height"))
nameLabel = Label(text="Threshold Name: ", size_hint_x=0.66)
- # self.thresholdTextInput = TextInput(id='thresholdNameID')
self.thresholdTextInput = TextInput()
self.selectExistingThreshold = DropDown()
@@ -2877,16 +2858,13 @@ def all_thresholds_popup(self, *kwargs):
thresholdsDict = sorted(masterDict["thresholdsDict"].keys())
displayList = []
for thresholdName in thresholdsDict:
- # print(thresholdName)
displayList.append(thresholdName)
- # f.write("%s \t" % thresholdName);
for instrumentGroup in masterDict["thresholdsDict"][thresholdName].keys():
defStr = " && ".join(
masterDict["thresholdsDict"][thresholdName][instrumentGroup]
)
- # print(" %s - %s" % (instrumentGroup,defStr));
displayList.append(" %s - %s" % (instrumentGroup, defStr))
displayList.append("")
@@ -2953,7 +2931,6 @@ def new_group_popup(self):
col1.add_widget(Label(text="Channels: "))
col1.add_widget(Label())
- # self.groupTextInput = TextInput(id='groupNameID')
self.groupTextInput = TextInput()
self.netTextInput = TextInput(write_tab=False)
self.staTextInput = TextInput(write_tab=False)
@@ -3066,7 +3043,6 @@ def new_threshold_group_popup(self):
additionContent.bind(minimum_height=additionContent.setter("height"))
nameLabel = Label(text="Group Name: ", size_hint_x=0.66)
- # self.thresholdGroupTextInput = TextInput(id='thresholdGroupID')
self.thresholdGroupTextInput = TextInput()
self.selectExistingThresholdGroup = DropDown()
@@ -3399,7 +3375,9 @@ def what_type_of_field(field):
field_passes = metric in masterDict["metrics"]
if not field_passes:
if is_metadata:
- self.warning_popup("WARNING: Field must be an IRIS metadata field")
+ self.warning_popup(
+ "WARNING: Field must be an EarthScope metadata field"
+ )
else:
self.warning_popup("WARNING: Field must be a MUSTANG metric")
return
@@ -3462,7 +3440,9 @@ def what_type_of_field(field):
field_passes = metric in masterDict["metrics"]
if not field_passes:
if is_metadata:
- self.warning_popup("WARNING: Field must be an IRIS metadata field")
+ self.warning_popup(
+ "WARNING: Field must be an EarthScope metadata field"
+ )
else:
self.warning_popup("WARNING: Field must be a MUSTANG metric")
print("WARNING: Field must be a MUSTANG metric")
@@ -3530,7 +3510,7 @@ def what_type_of_field(field):
if not metric == "":
if is_metadata:
self.warning_popup(
- "WARNING: Field must be an IRIS metadata field"
+ "WARNING: Field must be an EarthScope metadata field"
)
else:
self.warning_popup("WARNING: Field must be a MUSTANG metric")
@@ -3554,7 +3534,6 @@ def what_type_of_field(field):
except Exception as e:
pass
- # print("WARNING: %s" % e)
ensure_threshold()
prevDef = get_existing_defintion()
@@ -3582,7 +3561,7 @@ def what_type_of_field(field):
met2_type = what_type_of_field(met2.split("[")[0])
if met1_type != met2_type:
self.warning_popup(
- "WARNING: Cannot compare MUSTANG metric with IRIS Metadata field"
+ "WARNING: Cannot compare MUSTANG metric with EarthScope Metadata field"
)
return
newPart = "%s / %s " % (met1, met2)
@@ -3719,7 +3698,7 @@ def what_type_of_field(field):
if not metric == "":
if is_metadata:
self.warning_popup(
- "WARNING: Field must be an IRIS metadata field"
+ "WARNING: Field must be an EarthScope metadata field"
)
else:
self.warning_popup("WARNING: Field must be a MUSTANG metric")
@@ -3743,7 +3722,6 @@ def what_type_of_field(field):
except Exception as e:
pass
- # print("WARNING: %s" % e)
ensure_threshold()
prevDef = get_existing_defintion()
@@ -3771,7 +3749,7 @@ def what_type_of_field(field):
met2_type = what_type_of_field(met2.split("[")[0])
if met1_type != met2_type:
self.warning_popup(
- "WARNING: Cannot compare MUSTANG metric with IRIS Metadata field"
+ "WARNING: Cannot compare MUSTANG metric with EarthScope Metadata field"
)
return
# newPart = "%s / %s " %(met1, met2)
@@ -3805,9 +3783,6 @@ def what_type_of_field(field):
)
return
- # if chanToDo != "":
- # metric = "%s[%s]" %(metric, chanToDo)
-
if chanToDo != "":
if len(indices) == 0:
metric = "%s[%s]" % (metric, chanToDo)
@@ -3902,7 +3877,6 @@ def what_type_of_field(field):
newPart = "abs(" + metric + ") :: compare"
else:
newPart = metric + " :: compare"
- # newPart = metric + ' :: compare'
# Everything else (ie, 'normal')
else:
@@ -3916,7 +3890,9 @@ def what_type_of_field(field):
if not field_passes:
if is_metadata:
- self.warning_popup("WARNING: Field must be an IRIS metadata field")
+ self.warning_popup(
+ "WARNING: Field must be an EarthScope metadata field"
+ )
else:
self.warning_popup("WARNING: Field must be a MUSTANG metric")
return
@@ -3960,7 +3936,6 @@ def what_type_of_field(field):
newPart = "abs(" + metric + ") "
else:
newPart = metric + " "
- # newPart = metric + " "
if neq == "down":
newPart = (
newPart + "!"
@@ -4087,7 +4062,7 @@ def write_definition_to_file(self):
.strip()
.split("[")[0]
)
- if ~field3.isnumeric():
+ if not field3.isnumeric():
if field3 not in metricsInThresh:
metricsInThresh.append(field3)
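A note on the `isnumeric()` fix above: `~` is bitwise inversion, not boolean negation, so applied to a Python bool it always yields a truthy int and the guarded branch ran unconditionally. A quick standalone illustration:

```python
# bool is a subclass of int, so '~' flips bits instead of negating:
print(~True)   # -2, which is truthy -- the old condition was always true
print(~False)  # -1, also truthy
print(not "12".isnumeric())  # False -- the intended negation
print(not "ab".isnumeric())  # True
```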
@@ -4098,8 +4073,8 @@ def write_definition_to_file(self):
print(metricThreshDict, file=f)
self.confirmation_popup()
- except:
- self.warning_popup("Error while saving Thresholds")
+ except Exception as e:
+            self.warning_popup("Error while saving Thresholds: %s" % e)
def confirmation_popup(self):
popupContent = BoxLayout(orientation="vertical", spacing=10)
@@ -4268,9 +4243,7 @@ def update_data(self):
examine_screen.end_day.text = main_screen.endDate.text
def get_examine_inputs(self):
- # if self.ids.examine_start_id.text:
self.startday = self.ids.examine_start_id.text
- # if self.ids.examine_end_id.text:
self.endday = self.ids.examine_end_id.text
self.metrics = self.ids.metrics_id.text
self.threshold = self.ids.threshold_id.text
@@ -4305,9 +4278,6 @@ def exit_confirmation(self):
)
masterDict["_popup"].open()
- # def create_ticket(self):
- # pass
-
def see_databrowser(self):
webbrowser.open("http://www.iris.edu/mustang/databrowser/", new=2)
@@ -4335,7 +4305,7 @@ def see_waveforms(self):
os.mkdir(image_dir)
# Grab all of the pngs and save in the directory
- imageURL = "http://service.iris.edu/irisws/timeseries/1/query?"
+ imageURL = "http://service.earthscope.org/irisws/timeseries/1/query?"
if len(self.startday.split("T")) == 1:
starttime = self.startday + "T00:00:00"
@@ -4363,7 +4333,6 @@ def see_waveforms(self):
cha = cha.strip()
imageURL_cha = imageURL_loc + "&cha=" + cha
- # imageURL_complete = imageURL_cha + "&starttime=" + self.startday + "&endtime=" + self.endday + "&helicordermode=false&format=png"
imageURL_complete = (
imageURL_cha
+ "&starttime="
@@ -4420,7 +4389,7 @@ def see_metrics(self):
return
metricURL = (
- "http://service.iris.edu/mustang/measurements/1/query?metric="
+ "http://service.earthscope.org/mustang/measurements/1/query?metric="
+ self.metrics
)
@@ -4488,7 +4457,7 @@ def see_metric_timeseries(self):
+ ".png"
)
metricURL = (
- "http://service.iris.edu/mustang/measurements/1/query?metric="
+ "http://service.earthscope.org/mustang/measurements/1/query?metric="
+ metric
)
@@ -4628,7 +4597,7 @@ def see_metric_timeseries(self):
def see_pdfs(self):
self.get_examine_inputs()
- pdfURL = "http://service.iris.edu/mustang/noise-pdf-browser/1/gallery?"
+ pdfURL = "http://service.earthscope.org/mustang/noise-pdf-browser/1/gallery?"
if self.network == "":
self.warning_popup("WARNING: Network field required")
@@ -4659,7 +4628,9 @@ def see_spectrograms(self):
self.warning_popup("WARNING: Network field required")
return
- spectURL = "http://service.iris.edu/mustang/noise-pdf-browser/1/spectrogram?"
+ spectURL = (
+ "http://service.earthscope.org/mustang/noise-pdf-browser/1/spectrogram?"
+ )
if self.network:
spectURL = spectURL + "&net=" + self.network
@@ -4705,7 +4676,7 @@ def see_nmt(self):
return
nmtURL = (
- "http://service.iris.edu/mustang/noise-mode-timeseries/1/query?net="
+ "http://service.earthscope.org/mustang/noise-mode-timeseries/1/query?net="
+ self.network
+ "&sta="
+ self.station
@@ -4744,7 +4715,6 @@ def see_goat(self):
self.warning_popup(
"WARNING: Channel code required for GOAT (can be wildcarded)"
)
- # print("Channel code required for GOAT (can be wildcarded)")
return
if not self.startday or not self.endday:
self.warning_popup("WARNING: Start and End times required")
@@ -4818,7 +4788,7 @@ def see_stations(self):
self.warning_popup("WARNING: Network field required")
return
- stationURL = "http://service.iris.edu/fdsnws/station/1/query?"
+ stationURL = "http://service.earthscope.org/fdsnws/station/1/query?"
if self.network:
stationURL = stationURL + "net=" + self.network
@@ -5168,8 +5138,8 @@ def add_notes(self):
print("No issues loaded yet")
return
- self.df["NOTES"].ix[indToChange] = self.notes
- ExamineIssuesScreen.currentDF["NOTES"].ix[indToChange] = self.notes
+ self.df.loc[indToChange, "NOTES"] = self.notes
+ ExamineIssuesScreen.currentDF.loc[indToChange, "NOTES"] = self.notes
self.update_data()
def see_notes(self):
@@ -5177,7 +5147,7 @@ def see_notes(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- currentNotes = self.currentDF.ix[indToChange]
+ currentNotes = self.currentDF.loc[indToChange]
except:
print("No issues loaded yet")
return
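These changes (and the matching STATE updates below) migrate off the `.ix` indexer, which was deprecated in pandas 0.20 and removed in pandas 1.0. A minimal sketch of the replacement idiom with made-up data:

```python
import pandas as pd

df = pd.DataFrame({"NOTES": ["", "", ""], "STATE": ["New"] * 3})
indToChange = [0, 2]

# .loc[rows, column] replaces the removed df[column].ix[rows] form and,
# as a single assignment, avoids chained-assignment pitfalls.
df.loc[indToChange, "NOTES"] = "station visit scheduled"
df.loc[indToChange, "STATE"] = "TODO"
print(df)
```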
@@ -5262,16 +5232,13 @@ def thresholds_popup_orig(self, *kwargs):
displayList = []
for thresholdName in thresholdsDict:
- # print(thresholdName)
displayList.append(thresholdName)
- # f.write("%s \t" % thresholdName);
for instrumentGroup in masterDict["thresholdsDict"][thresholdName].keys():
defStr = " && ".join(
masterDict["thresholdsDict"][thresholdName][instrumentGroup]
)
- # print(" %s - %s" % (instrumentGroup,defStr));
displayList.append(" %s - %s" % (instrumentGroup, defStr))
displayList.append("")
@@ -5325,16 +5292,13 @@ def thresholds_popup(self, *kwargs):
displayList = []
for thresholdName in thresholdsDict:
- # print(thresholdName)
displayList.append(thresholdName)
- # f.write("%s \t" % thresholdName);
for instrumentGroup in masterDict["thresholdsDict"][thresholdName].keys():
defStr = " && ".join(
masterDict["thresholdsDict"][thresholdName][instrumentGroup]
)
- # print(" %s - %s" % (instrumentGroup,defStr));
displayList.append(" %s - %s" % (instrumentGroup, defStr))
displayList.append("")
@@ -5433,8 +5397,8 @@ def mark_as_todo(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "TODO"
- self.currentDF["STATE"].ix[indToChange] = "TODO"
+ self.df.loc[indToChange, "STATE"] = "TODO"
+ self.currentDF.loc[indToChange, "STATE"] = "TODO"
self.update_data()
except:
print("No issues loaded yet")
@@ -5445,8 +5409,8 @@ def mark_as_new(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "New"
- self.currentDF["STATE"].ix[indToChange] = "New"
+ self.df.loc[indToChange, "STATE"] = "New"
+ self.currentDF.loc[indToChange, "STATE"] = "New"
self.update_data()
except:
print("No issues loaded yet")
@@ -5457,8 +5421,8 @@ def mark_as_closed(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "Closed"
- self.currentDF["STATE"].ix[indToChange] = "Closed"
+ self.df.loc[indToChange, "STATE"] = "Closed"
+ self.currentDF.loc[indToChange, "STATE"] = "Closed"
self.update_data()
except:
print("No issues loaded yet")
@@ -5469,8 +5433,8 @@ def mark_as_existing(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "Existing"
- self.currentDF["STATE"].ix[indToChange] = "Existing"
+ self.df.loc[indToChange, "STATE"] = "Existing"
+ self.currentDF.loc[indToChange, "STATE"] = "Existing"
self.update_data()
except:
print("No issues loaded yet")
@@ -5481,8 +5445,8 @@ def mark_as_support(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "Support"
- self.currentDF["STATE"].ix[indToChange] = "Support"
+ self.df.loc[indToChange, "STATE"] = "Support"
+ self.currentDF.loc[indToChange, "STATE"] = "Support"
self.update_data()
except:
print("No issues loaded yet")
@@ -5493,8 +5457,8 @@ def mark_as_no_ticket(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "No Ticket"
- self.currentDF["STATE"].ix[indToChange] = "No Ticket"
+ self.df.loc[indToChange, "STATE"] = "No Ticket"
+ self.currentDF.loc[indToChange, "STATE"] = "No Ticket"
self.update_data()
except:
print("No issues loaded yet")
@@ -5505,8 +5469,8 @@ def mark_as_false_positive(self):
indToChange = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- self.df["STATE"].ix[indToChange] = "False Pos"
- self.currentDF["STATE"].ix[indToChange] = "False Pos"
+ self.df.loc[indToChange, "STATE"] = "False Pos"
+ self.currentDF.loc[indToChange, "STATE"] = "False Pos"
self.update_data()
except:
print("No issues loaded yet")
@@ -5520,10 +5484,10 @@ def get_selected_values(self):
selectedInd = list(
set(self.currentDF.iloc[self.selectionIndices].index.values.tolist())
)
- NewTicketScreen.targets = self.df["SNCL"].ix[selectedInd].values.tolist()
- NewTicketScreen.descriptions = (
- self.df["NOTES"].ix[selectedInd].values.tolist()
- )
+ NewTicketScreen.targets = self.df.loc[selectedInd, "SNCL"].values.tolist()
+ NewTicketScreen.descriptions = self.df.loc[
+ selectedInd, "NOTES"
+ ].values.tolist()
except:
print("No issues loaded yet")
NewTicketScreen.targets = []
@@ -5748,7 +5712,6 @@ def check_image(self, image, state):
if image.text not in self.selectedImages:
self.selectedImages.append(image.text)
self.captionLabel.text = masterDict["imageList"][self.selectedImages[0]]
- # self.captionInput.text = masterDict['imageList'][self.selectedImages[0]]
else:
self.selectedImages = [v for v in self.selectedImages if v != image.text]
@@ -5761,15 +5724,12 @@ def open_image(self, *kwargs):
except Exception as e:
self.warning_popup("WARNING: Unable to open %s: %s" % (file, e))
- # print("WARNING: Unable to open %s: %s" %(file, e))
-
def remove_images(self, *kwargs):
for file in self.selectedImages:
try:
del masterDict["imageList"][file]
except KeyError as e:
self.warning_popup("WARNING: File not found in list - %s" % e)
- # print("WARNING: File not found in list - %s" % e)
self.selectedImages = [v for v in self.selectedImages if v != file]
@@ -5830,8 +5790,6 @@ def link_popup(self, *kwargs):
if len(masterDict["linkList"]) > 0:
link_id = 0
for row in masterDict["linkList"]:
- # b = ToggleButton(text = row, size_hint_y = None, halign = 'left', id=str(link_id),
- # background_color = (.5,.5,.5,1), group='imageButtons')
b = ToggleButton(
text=row,
size_hint_y=None,
@@ -5868,7 +5826,6 @@ def link_popup(self, *kwargs):
upperLayout.add_widget(actionButtons)
captionBox = BoxLayout(orientation="horizontal", size_hint_y=0.25)
- # self.linkInput = TextInput(text="", id='linkID')
self.linkInput = TextInput(text="")
self.linkInput.bind()
captionBox.add_widget(self.linkInput)
@@ -5896,7 +5853,7 @@ def check_link(self, link, state):
if link.text not in self.selectedLinks:
self.selectedLinks.append(link.text)
else:
- self.selectedLinks = [v for v in self.selectedLinks if v != linkn.text]
+ self.selectedLinks = [v for v in self.selectedLinks if v != link.text]
def remove_link(self, *kwargs):
for file in self.selectedLinks:
@@ -6015,7 +5972,9 @@ def create_connection(self, db_file):
:return: Connection object or None
"""
try:
- conn = sqlite3.connect(db_file)
+ conn = sqlite3.connect(
+ db_file, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
+ )
return conn
except Error as e:
print("WARNING: %s" % e)
@@ -6039,14 +5998,14 @@ def create_table(self, conn):
location text NOT NULL,
channel text NOT NULL,
description text,
- start_date datetime,
- end_date datetime,
+ start_date TIMESTAMP,
+ end_date TIMESTAMP,
status text NOT NULL,
thresholds text NOT NULL,
images text,
caption text,
links text,
- updated datetime
+ updated TIMESTAMP
); """
try:
@@ -6417,10 +6376,6 @@ def load_ticket_information(self):
self.selectedThresholds.append(threshold)
def return_to_ticketList(self):
-
- # IF you want to return to the popup, then uncomment these (right now the popup does not update properly, so have it disabled)
- # masterDict["ticket_instance"].disabled = False # reenables the button that had been clicked and disabled
- # masterDict["ticketList_popup"].open()
self.clear_ticket_fields()
def exit_confirmation(self):
@@ -6650,8 +6605,6 @@ def link_popup(self, *kwargs):
if len(masterDict["linkList"]) > 0:
link_id = 0
for row in masterDict["linkList"]:
- # b = ToggleButton(text = row, size_hint_y = None, halign = 'left', id=str(link_id),
- # background_color = (.5,.5,.5,1), group='imageButtons')
b = ToggleButton(
text=row,
size_hint_y=None,
@@ -6663,7 +6616,7 @@ def link_popup(self, *kwargs):
image_layout.add_widget(b)
link_id += 1
- #
+
# The notes (in a box layout) go into a ScrollView
scrl = ScrollView(size_hint_y=4)
scrl.add_widget(image_layout)
@@ -6688,7 +6641,6 @@ def link_popup(self, *kwargs):
upperLayout.add_widget(actionButtons)
captionBox = BoxLayout(orientation="horizontal", size_hint_y=0.25)
- # self.linkInput = TextInput(text="", id='linkID')
self.linkInput = TextInput(text="")
self.linkInput.bind()
captionBox.add_widget(self.linkInput)
@@ -6716,7 +6668,7 @@ def check_link(self, link, state):
if link.text not in self.selectedLinks:
self.selectedLinks.append(link.text)
else:
- self.selectedLinks = [v for v in self.selectedLinks if v != linkn.text]
+ self.selectedLinks = [v for v in self.selectedLinks if v != link.text]
def remove_link(self, *kwargs):
for file in self.selectedLinks:
@@ -6987,41 +6939,44 @@ def go_to_selectedTickets(self):
)
self.theseTickets = masterDict["tickets"]
- self.theseTickets["target"] = (
- self.theseTickets["network"]
- + "."
- + self.theseTickets["station"]
- + "."
- + self.theseTickets["location"]
- + "."
- + self.theseTickets["channel"]
- )
- self.theseTickets = self.theseTickets.sort_values(
- by=[masterDict["ticket_order"]]
- ).reset_index(drop=True)
-
- ticketList = list()
-
- for id, row in self.theseTickets.iterrows():
- row_sub = [
- str(row["id"]),
- row["target"],
- row["start_date"],
- row["end_date"],
- row["subject"],
- row["status"],
- row["tracker"],
- row["updated"],
- ]
- row_sub = [
- row_sub[y].ljust(spacing_dict[y])[0 : spacing_dict[y]]
- for y in range(len(row_sub))
- ]
- label = " ".join(row_sub)
- ticketList.append({"text": label})
+            if isinstance(self.theseTickets, str):
+ tickets_screen.ticket_list_rv.data = ""
+ else:
+ self.theseTickets["target"] = (
+ self.theseTickets["network"]
+ + "."
+ + self.theseTickets["station"]
+ + "."
+ + self.theseTickets["location"]
+ + "."
+ + self.theseTickets["channel"]
+ )
+
+ self.theseTickets = self.theseTickets.sort_values(
+ by=[masterDict["ticket_order"]]
+ ).reset_index(drop=True)
+
+ ticketList = list()
+ for id, row in self.theseTickets.iterrows():
+ row_sub = [
+ str(row["id"]),
+ row["target"],
+ row["start_date"],
+ row["end_date"],
+ row["subject"],
+ row["status"],
+ row["tracker"],
+ row["updated"],
+ ]
+ row_sub = [
+ row_sub[y].ljust(spacing_dict[y])[0 : spacing_dict[y]]
+ for y in range(len(row_sub))
+ ]
+ label = " ".join(row_sub)
+ ticketList.append({"text": label})
- tickets_screen.ticket_list_rv.data = ticketList
+ tickets_screen.ticket_list_rv.data = ticketList
except Exception as e:
print("Warning: could not retrieve tickets - %s" % e)
tickets_screen.ticket_list_rv.data = ""
@@ -7772,7 +7727,7 @@ def apply_selection(self, rv, index, is_selected):
masterDict["linkList"] = list()
masterDict["thresholds_file"] = "./thresholds.txt"
masterDict["metrics_file"] = "./MUSTANG_metrics.txt"
-masterDict["metadata_file"] = "./IRIS_metadata.txt"
+masterDict["metadata_file"] = "./EarthScope_metadata.txt"
databaseDir = "./db/"
databaseName = "quargTickets.db"
@@ -7819,7 +7774,7 @@ def build(self):
Window.clearcolor = (1, 1, 1, 1)
Window.size = (1377, 700)
- self.title = "IRIS Quality Assurance Report Generator"
+ self.title = "EarthScope Quality Assurance Report Generator"
screen_manager.add_widget(MainScreen(name="mainScreen"))
screen_manager.add_widget(PreferencesScreen(name="preferencesScreen"))
screen_manager.add_widget(ThresholdGroupsScreen(name="thresholdGroupsScreen"))
diff --git a/README.md b/README.md
index 33a9402..e472798 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
For detailed documentation, check out [EarthScope.github.io/quarg/](https://EarthScope.github.io/quarg/DOCUMENTATION.html)
-**QuARG**, the Quality Assurance Report Generator, is a Python client that allows network operators to generate quality assurance (QA) reports from start to finish. These reports utilize EarthScope’s database of [MUSTANG](http://service.iris.edu/mustang/) data quality metrics to find and highlight potential issues in the data, reducing the amount of time that analysts need to spend scanning the data for problems.
+**QuARG**, the Quality Assurance Report Generator, is a Python client that allows network operators to generate quality assurance (QA) reports from start to finish. These reports utilize EarthScope’s database of [MUSTANG](http://service.earthscope.org/mustang/) data quality metrics to find and highlight potential issues in the data, reducing the amount of time that analysts need to spend scanning the data for problems.
Over the years that IRIS produced Quality Assurance Reports, we refined the process of generating a report into four primary steps:
@@ -97,16 +97,18 @@ Instructions for Linux or macOS (Intel chip)
```
cd quarg
conda update conda
-conda create --name quarg -c conda-forge --file quarg-conda-install.txt
+conda create --name quarg -c conda-forge python=3.12
conda activate quarg
+conda install -c conda-forge --file quarg-conda-install.txt
```
Instructions for macOS (Apple M1 or M2 chip):
```
cd quarg
conda update conda
-CONDA_SUBDIR=osx-64 conda create --name quarg -c conda-forge --file quarg-conda-install.txt
+CONDA_SUBDIR=osx-64 conda create --name quarg -c conda-forge python=3.12
conda activate quarg
+CONDA_SUBDIR=osx-64 conda install -c conda-forge --file quarg-conda-install.txt
```
See what is installed in our (quarg) environment with:
diff --git a/docs/DOCUMENTATION.html b/docs/DOCUMENTATION.html
index d164386..bf00b6c 100644
--- a/docs/DOCUMENTATION.html
+++ b/docs/DOCUMENTATION.html
@@ -363,20 +363,20 @@
-QuARG is a Python client that allows network operators to generate quality assurance (QA) reports from start to finish. These reports utilize IRIS’s database of MUSTANG data quality metrics to find and highlight potential issues in the data, reducing the amount of time that analysts need to spend scanning the data for problems.
+QuARG is a Python client that allows network operators to generate quality assurance (QA) reports from start to finish. These reports utilize EarthScope's database of MUSTANG data quality metrics to find and highlight potential issues in the data, reducing the amount of time that analysts need to spend scanning the data for problems.
Users have the ability to customize QuARG to adapt to their particular network. Some features that can be personalized:
-IRIS (Incorporated Research Institutions for Seismology) DMC (Data Management Center) has been performing quality assurance checks on data since the Transportable Array began in 2004. Since that time, we have expanded and improved our quality assurance efforts, including developing a comprehensive quality assurance system called MUSTANG with over 40 metrics available through our webservices.
+EarthScope (formerly IRIS) Data Services has been performing quality assurance checks on data since the Transportable Array began in 2004. Since that time, we have expanded and improved our quality assurance efforts, including developing a comprehensive quality assurance system called MUSTANG with over 40 metrics available through our webservices.
In addition to the weekly QA performed on the TA network, we developed monthly quality assurance reports for the _GSN virtual network. Since then, we added a few more networks to our monthly and quarterly network reports as we refined our methods and improved the QuARG utility. We wrapped up our final network report in Summer 2019 with the goal of providing the QuARG utility to individual networks for the purpose of performing their own QA. While this tool was born at the DMC, intended for generating reports on very specific networks and leveraging tools that we have available in Seattle, the utility has since been expanded to be useful to network operators working on their own networks.
Over the years, we have refined the process of generating a report into four primary steps:
-Current list of all metrics button to pull down the full list. Clicking on the Detailed Documentation button for any given metric will take you to a page that describes it more fully.
+Current list of all metrics button to pull down the full list. Clicking on the Detailed Documentation button for any given metric will take you to a page that describes it more fully.
Threshold
-Thresholds take a metric from a simple description of the data and moves it into a way to determine if the data is “good”. We use the term Threshold to mean a Metric plut a Cutoff Value… or, in many cases, a combination of metrics and cutoff values. For QuARG, we actually focus on the data that is “bad”, since Network Operators need to know where things are going wrong so that they can fix it.
-One thing to keep in mind is that different instrument types - broadband, short period, strong motion, etc - may have different cutoff values. For example, strong motion instruments have a very different noise profile than broadband instruments, and even a healthy strong motion instrument will have a significant portion of the noise profile that is above the New High Noise Model (Peterson, J, 1993, Observations and Modeling of Seismic Background Noise, U.S.G.S. OFR-93-322) . So thresholds using pct_above_nhnm probably ought to have different cutoff values if applied to strong motion and broadband.
+Thresholds take a metric from a simple description of the data and turn it into a way to determine whether the data is “good”. We use the term Threshold to mean a Metric plus a Cutoff Value… or, in many cases, a combination of metrics and cutoff values. For QuARG, we actually focus on the data that is “bad”, since Network Operators need to know where things are going wrong so that they can fix it.
+One thing to keep in mind is that different instrument types - broadband, short period, strong motion, etc - may have different cutoff values. For example, strong motion instruments have a very different noise profile than broadband instruments, and even a healthy strong motion instrument will have a significant portion of the noise profile that is above the New High Noise Model (Peterson, J, 1993, Observations and Modeling of Seismic Background Noise, U.S.G.S. OFR-93-322). So thresholds using pct_above_nhnm probably ought to have different cutoff values if applied to strong motion and broadband.
It should be noted that in QuARG, some Thresholds are based on metadata as well. This can help you find cases where the metadata may be incorrect or incomplete.
Here are some examples of thresholds:
pct_above_nhnm > 90 && dead_channel_lin > 2
@@ -467,11 +467,11 @@
Threshold Definitions Form allows you to do all of these types of comparisons, plus a couple more. This may make the thresholds a little more complicated, but we think that it is worth it to have greater flexibility for you, the user.
MUSTANG
-MUSTANG is the Quality Assurance system that we have built at IRIS. It is essentially an entire workflow that ingests data from our archives and outputs a range of about 45 metrics. When data comes into the IRIS DMC, whether in realtime or latent, it triggers a series of steps that lead to metric calculation on that data. The UTC day after data is archived, MUSTANG will begin calculating metrics on the data. Note that archiving can be up to about a day after realtime data streams in, due to the way that the data is pooled prior to archiving.
-
-We store the metrics we have calculated in a series of databases that are accessible to users through our web services. Most of the metrics are accessed through the measurements service, though there are also a handful of other services that are primarily related to PSDs and PDFs.
+MUSTANG is the Quality Assurance system that we have built at EarthScope. It is essentially an entire workflow that ingests data from our archives and outputs a range of about 45 metrics. When data comes into EarthScope, whether in realtime or latent, it triggers a series of steps that lead to metric calculation on that data. The UTC day after data is archived, MUSTANG will begin calculating metrics on the data. Note that archiving can be up to about a day after realtime data streams in, due to the way that the data is pooled prior to archiving.
+
We store the metrics we have calculated in a series of databases that are accessible to users through our web services. Most of the metrics are accessed through the measurements service, though there are also a handful of other services that are primarily related to PSDs and PDFs.
If you are unfamiliar with our web services, in simple terms it is a way to input a specific URL into your webbrowser and have the requested metric values returned to you. Or, you can use your favorite language (python in the case of QuARG) to do the work for you.
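As a concrete sketch of the "use your favorite language" route, the snippet below mirrors the `pd.read_csv(..., sep="|")` call QuARG itself makes against the station service elsewhere in this diff (it assumes the service is reachable from your machine):

```python
import pandas as pd

URL = (
    "http://service.earthscope.org/fdsnws/station/1/query"
    "?net=IU&sta=ANMO&loc=00&cha=BHZ&level=channel"
    "&format=text&includecomments=true&nodata=404"
)
channels = pd.read_csv(URL, sep="|")  # pipe-delimited text response
print(channels.head())
```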
ISPAQ
-Because MUSTANG is inherently built into the IRIS DMC, and we know that not all data streams into our archive, we have created a portable version of MUSTANG that users can install on their own computer to run metrics on their local data. This utility, ISPAQ, is a command line python tool that can write metrics to a file system or to a sqlite database (in ISPAQ 3.0, to be released soon). Those ISPAQ metrics that are written to a sqlite database can be accessed by QuARG by specifying the Metric Source from within the Preference File Form. This allows greater flexibility - networks can still use QuARG to find issues in their network even if the data does not get archived at the IRIS DMC and we do not have MUSTANG metrics for that data.
+Because MUSTANG is inherently built into EarthScope, and we know that not all data streams into our archive, we have created a portable version of MUSTANG that users can install on their own computer to run metrics on their local data. This utility, ISPAQ, is a command line python tool that can write metrics to a file system or to a sqlite database (in ISPAQ 3.0, to be released soon). Those ISPAQ metrics that are written to a sqlite database can be accessed by QuARG by specifying the Metric Source from within the Preference File Form. This allows greater flexibility - networks can still use QuARG to find issues in their network even if the data does not get archived at EarthScope and we do not have MUSTANG metrics for that data.
@@ -480,7 +480,7 @@
-QuARG is distributed through GitHub, via IRIS’s public repository (iris-edu). You will use a git client command to get a copy of the latest stable release. In addition, you will use the miniconda python package manager to create a customized Python environment designed to run QuARG properly.
+QuARG is distributed through GitHub, via EarthScope’s public repository (EarthScope). You will use a git client command to get a copy of the latest stable release. In addition, you will use the miniconda python package manager to create a customized Python environment designed to run QuARG properly.
If running macOS, Xcode command line tools should be installed. Check for existence and install if missing:
xcode-select --install
Follow the steps below to begin running QuARG.
@@ -489,7 +489,7 @@
You must first have git installed on your system. This is a commonly used source code management system and serves well as a mode of software distribution as it is easy to capture updates. See the Git Home Page to begin installation of git before proceeding further.
After you have git installed, you will download the QuARG distribution into a directory of your choosing from GitHub by opening a text terminal and typing:
-git clone https://github.com/iris-edu/quarg.git
+git clone https://github.com/EarthScope/quarg.git
This will produce a copy of this code distribution in the directory you have chosen. When new quarg versions become available, you can update QuARG by typing:
cd quarg
git pull origin main
@@ -768,8 +768,8 @@
# Threshold|Target|Start|End|Ndays|Status|Value|Notes. If that’s the case, then the file generated using the external ticketing system must match this filename.
-Metric Source: QuARG can retrieve metric values from either the IRIS MUSTANG web services or from a local sqlite database. ISPAQ is a portable version of MUSTANG that can be downloaded from GitHub and used to calculated metrics on your local machine from local data. A new feature in ISPAQ is that it can now write to a sqlite database, which can then easily be read by QuARG. If a local database will be used, use the Metric Source dropdown menu to select “Local ISPAQ SQLite Database” and then either browse to or type in the name of the database file to be used. Otherwise, “IRIS” should be selected from the dropdown menu.
-Metadata Source dropdown menu to select “Local Metadata File” and either use the Browse button or type in the name of the file containing the metadata. Otherwise, “IRIS” should be selected in the dropdown menu.
+Metric Source: QuARG can retrieve metric values from either the EarthScope MUSTANG web services or from a local sqlite database. ISPAQ is a portable version of MUSTANG that can be downloaded from GitHub and used to calculate metrics on your local machine from local data. A new feature in ISPAQ is that it can now write to a sqlite database, which can then easily be read by QuARG. If a local database will be used, use the Metric Source dropdown menu to select “Local ISPAQ SQLite Database” and then either browse to or type in the name of the database file to be used. Otherwise, “EarthScope” should be selected from the dropdown menu.
+Metadata Source dropdown menu to select “Local Metadata File” and either use the Browse button or type in the name of the file containing the metadata. Otherwise, “EarthScope” should be selected in the dropdown menu.
-This section defines which targets (network, station, channel, location) will be used when retrieving quality assurance (likely from MUSTANG, but could also be ISPAQ) metrics and metadata, and therefore which channels will be included in the issue list. It also defines the category of instrumentation used in the report.
+This section defines which targets (network, station, channel, location) will be used when retrieving quality assurance (likely from MUSTANG, but could also be ISPAQ) metrics and metadata, and therefore which channels will be included in the issue list. It also defines the category of instrumentation used in the report.
Start Date: 2020-08-11 and End Date: 2020-08-12
-In many ways, Thresholds are the entire basis of of QuARG and these Quality Assurance (QA) Reports. They are a way to take pre-computed MUSTANG or ISPAQ metric values and use those metrics as a way to find potential issues in the data. The Thresholds File is what QuARG uses to keep track of Instrument Groups (see Preference File Form) and Threshold Groups, as well as actually defining the thresholds. To edit this file, you use the Threshold Definitions Form. This file is thresholds.txt and is necessary for QuARG to Find Issues, which creates the file that is used to Examine Issues.
+
+In many ways, Thresholds are the entire basis of QuARG and these Quality Assurance (QA) Reports. They are a way to take pre-computed MUSTANG or ISPAQ metric values and use those metrics as a way to find potential issues in the data. The Thresholds File is what QuARG uses to keep track of Instrument Groups (see Preference File Form) and Threshold Groups, as well as actually defining the thresholds. To edit this file, you use the Threshold Definitions Form. This file is thresholds.txt and is necessary for QuARG to Find Issues, which creates the file that is used to Examine Issues.
In case you need a refresher, some definitions are listed here
In More Detail:
Metrics
-At the top is a selectable list of all of the MUSTANG metrics. This list comes from the IRIS MUSTANG webservices and is refreshed whenever QuARG is connected to the internet so it should stay up to date as we add new metrics. When a metric is selected, it will fill in the text box labeled Field below. While you can simply type the metric you are interested in Field box directly, the list makes it easy to know what metrics are availble to use.
+At the top is a selectable list of all of the MUSTANG metrics. This list comes from the EarthScope MUSTANG webservices and is refreshed whenever QuARG is connected to the internet so it should stay up to date as we add new metrics. When a metric is selected, it will fill in the text box labeled Field below. While you can simply type the metric you are interested in the Field box directly, the list makes it easy to know what metrics are available to use.
Channel Options
The channel options allow you to specify whether a threshold, or part of a threshold, should apply to only the horizontal or vertical channels. In most cases, these will not be used since you will want to find issues associated with any and all of the channels. But there are some cases where you would want to limit things. For example, when looking for issues in the metadata you may want to find all cases where the horizontal channels have a Dip != 0. If you applied this threshold to all channels, then every vertical channel should get triggered since they ought to have a non-0 Dip. Another example would be rmsRatio, which compares the sample_rms of the vertical channel to an average of the horizontals.
There are 4 buttons for Channel Options:
In More Detail:
-Metadata List This is a scrollable, selectable list of all metadata fields that can be used in QuARG. These are based on the IRIS station service headers at the channel level in the text format. When a field is selected, it will turn blue and will automatically fill in the Field in column 3. The metadata list is disabled by default, and only becomes available when the Metadata toggle button is selected (see below).
+Metadata List This is a scrollable, selectable list of all metadata fields that can be used in QuARG. These are based on the EarthScope station service headers at the channel level in the text format. When a field is selected, it will turn blue and will automatically fill in the Field in column 3. The metadata list is disabled by default, and only becomes available when the Metadata toggle button is selected (see below).
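You can see those channel-level headers by querying the station service's text output yourself. The host and fdsnws path below are assumptions to check against the service documentation:

import requests

# Channel-level text output: the first line is a header row whose fields
# (Latitude, Longitude, Azimuth, Dip, ...) are what the Metadata List shows.
url = "http://service.earthscope.org/fdsnws/station/1/query"
params = {"net": "UU", "sta": "BEI", "cha": "EHZ", "level": "channel", "format": "text"}
resp = requests.get(url, params=params, timeout=30)
resp.raise_for_status()
header = resp.text.splitlines()[0].lstrip("#")
print([h.strip() for h in header.split("|")])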
Threshold Options
There are five options available:
| Databrowser | Databrowser is a tool that allows users to plot MUSTANG metrics. These include Metric Timeseries (plotting metric values over time), Gap Duration plots, Network and Station boxplots, as well as some other options. It can be useful in looking at a network’s overall health, or to quickly view patterns in metric values over long periods of time. The Databrowser button does not require any of the Input fields to be filled. |
-| Waveforms | This button will retrieve and display waveform data from the IRIS timeseriesplot service. This requires all target fields to be specified, though it can accommodate a comma-separated list. Users must be careful with the requested Start and End times, as the service limits the length of time that can be plotted. Note: this returns a static image and is not recommended to be the primary way of viewing waveforms - we expect the analyst to use another more dynamic tool to view waveforms, this is simply for use as a quick view of the data. |
+| Waveforms | This button will retrieve and display waveform data from the EarthScope timeseriesplot service. This requires all target fields to be specified, though it can accommodate a comma-separated list. Users must be careful with the requested Start and End times, as the service limits the length of time that can be plotted. Note: this returns a static image and is not recommended to be the primary way of viewing waveforms - we expect the analyst to use another more dynamic tool to view waveforms, this is simply for use as a quick view of the data. |
| Metrics | The Metrics button opens a web browser page that displays metric values from the MUSTANG Measurements web service. It uses input from all of the input fields except for Threshold. Start and End are used to limit the time range for the metrics retrieved; Metrics can be a comma-separated list of any desired metrics; Network, Station, Location, and Channel can all be wildcarded, lists, or left blank. Be careful of leaving fields blank, particularly Network, as that can create a very large query. |
| Metric Plot | The Metric Plot button uses the same inputs as the Metrics button, but rather than opening a web page with tabular data, it generates a simple timeseries plot of the requested values. |
| PDFs | Opens a webpage with monthly PDFs for the requested targets, beginning with the month of Start. |
| Spectrograms | Opens a webpage with the spectrograms for the requested targets, for the time span of Start to End. If no dates are provided, it will cover the entire span of the targets (from the beginning of the earliest target until the end of the latest target). |
| Noise Modes | Opens a webpage to the Noise Mode Timeseries plot. All Network, Station, Location, and Channel fields must be filled, with only one target allowed (i.e., no wildcarding or lists). It will use the Start and End dates. |
| Events | Opens a webpage of the USGS event service based on the Start and End dates specified. It will list all earthquakes M5.5 and larger, as MUSTANG event-based metrics do not calculate on smaller events. |
-| Station | Opens a channel-level web page of the IRIS Station service, using provided target information. Any blank field will be wildcarded, and lists and wildcards are allowed; start and end times are ignored for this diagnosis tool. |
+| Station | Opens a channel-level web page of the EarthScope Station service, using provided target information. Any blank field will be wildcarded, and lists and wildcards are allowed; start and end times are ignored for this diagnosis tool. |
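As an illustration of the Events row above, the same M5.5+ list can be requested from the USGS FDSN event service directly (this is the standard public endpoint, though the exact URL QuARG opens may differ):

import requests

# Query the USGS event service for M5.5+ events in the report window,
# mirroring what the Events button shows in the browser.
url = "https://earthquake.usgs.gov/fdsnws/event/1/query"
params = {
    "format": "text",
    "starttime": "2020-08-11",
    "endtime": "2020-08-12",
    "minmagnitude": "5.5",
}
resp = requests.get(url, params=params, timeout=30)
resp.raise_for_status()
print(resp.text.splitlines()[:5])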
id,tracker,target,start_date,category,subject,thresholds,images,caption,links,status,end_date,description
The lines that come after follow that pattern, with quotation marks (") around any fields that may have a comma in them. For example:
id,tracker,target,start_date,category,subject,thresholds,images,caption,links,status,end_date,description
-4,Support,UU BEI 01 EHZ,2019-12-01,Other,Example Ticket,"gapsRatioGt12, glitch",/Users/laura/QA_reports/testImage.jpg,"This is a figure caption, with a comma so it has quotation marks",http://service.iris.edu/mustang/measurements/1/query?metric=percent_availability&net=YO&cha=?XH&format=text&nodata=404&orderby=start_asc,In Progress,2019-12-03,"This one has a start and end date, and a link!"
+4,Support,UU BEI 01 EHZ,2019-12-01,Other,Example Ticket,"gapsRatioGt12, glitch",/Users/laura/QA_reports/testImage.jpg,"This is a figure caption, with a comma so it has quotation marks",http://service.earthscope.org/mustang/measurements/1/query?metric=percent_availability&net=YO&cha=?XH&format=text&nodata=404&orderby=start_asc,In Progress,2019-12-03,"This one has a start and end date, and a link!"
The most important thing is that the ticketing system used either has these fields, or has an equivalent, and that the tickets can be exported into a csv file of this format. Any missing fields can be left blank if necessary. For example, using a Redmine ticketing system, we are able to use the ‘Export to CSV’ function and choose what columns are exported. It may take an intermediate step to convert the CSV into the correct format, in which case it is probably worth setting up a workflow to do the conversion for you. Depending on the complexity, it might be worth delving into the code to change the required format - just be wary of doing that: it may create unintended consequences.
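For instance, here is a minimal conversion sketch with pandas; the source column names ("Issue", "Queue", "Opened", "Subject") are hypothetical stand-ins for whatever your ticketing system actually exports:

import pandas as pd

QUARG_COLUMNS = [
    "id", "tracker", "target", "start_date", "category", "subject",
    "thresholds", "images", "caption", "links", "status", "end_date",
    "description",
]

exported = pd.read_csv("tickets_export.csv")  # hypothetical export file
converted = pd.DataFrame(
    {
        "id": exported["Issue"],
        "tracker": exported["Queue"],
        "start_date": exported["Opened"],
        "subject": exported["Subject"],
    }
).reindex(columns=QUARG_COLUMNS)  # missing fields become blank columns
converted.fillna("").to_csv("tickets_quarg.csv", index=False)  # commas get quoted automatically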
broadband is included in the instruments list in the preference file; will print the short period thresholds if short period is included in the instruments list.
-    f.write("\t Clicking on each issue Summary link takes you to a more detailed description of \n");
-    f.write("\t     that issue, including the metrics used to identify the problem.\n");
-    f.write("\t     Sorted by category, then station.\n");
-    f.write("\t\n");
-    f.write("\t "+ str(project) +"\n\n");
-    f.write("\t<th>Category</th>\n");
-    f.write("\t<th>Channel(s)</th>\n");
-    f.write("\t<th>Status</th>\n");
-    f.write("\t<th>Start Date</th>\n");
-    f.write("\t<th>Summary</th>\n");
+    f.write("\t<th>Category</th>\n")
+    f.write("\t<th>Channel(s)</th>\n")
+    f.write("\t<th>Status</th>\n")
+    f.write("\t<th>Start Date</th>\n")
+    f.write("\t<th>Summary</th>\n")
-    f.write("\t "+ str(project) +"\n\n");
-    f.write("\t<th>Category</th>\n");
-    f.write("\t<th>Channel(s)</th>\n");
-    f.write("\t<th>Status</th>\n");
-    f.write("\t<th>Start Date</th>\n");
-    f.write("\t<th>Summary</th>\n");
+    f.write("\t " + str(project) + "\n\n")
+    f.write("\t<th>Category</th>\n")
+    f.write("\t<th>Channel(s)</th>\n")
+    f.write("\t<th>Status</th>\n")
+    f.write("\t<th>Start Date</th>\n")
+    f.write("\t<th>Summary</th>\n")
-    f.write("\t<td>" + str(category) + "</td>\n");
-    f.write("\t<td>" + str(sncl).replace(" ",".").replace('--',"") + "</td>\n");
-    f.write("\t<td>" + str(status) + "</td>\n");
-    f.write("\t<td>" + str(start) + "</td>\n");
-    f.write("\t<td>" + str(summary) + "</td>\n");
+    f.write("\t<td>" + str(category) + "</td>\n")
+    f.write(
+        "\t<td>"
+        + str(sncl).replace(" ", ".").replace("--", "")
+        + "</td>\n"
+    )
+    f.write("\t<td>" + str(status) + "</td>\n")
+    f.write("\t<td>" + str(start) + "</td>\n")
+    f.write(
+        '\t<td>'
+        + str(summary)
+        + "</td>\n"
+    )
"+ str(snclq).replace(" ",".").replace('--',"") +" "+ str(subject) + " -- " + str(start) +"
\n");
+ start = "(Start not identified)"
+ if status == "New":
+ status = "Open"
+ f.write(
+ '\t
'
+ + str(snclq).replace(" ", ".").replace("--", "")
+ + " "
+ + str(subject)
+ + " -- "
+ + str(start)
+ + "
\n"
+ )
     else:
-        f.write("\t"+ str(snclq) +" "+ str(subject) + " -- " +str(start) +" to " + str(end) +"\n");
-        f.write("\t STATUS: "+ str(status) +"\n");
-        #f.write("\t Diagnostics: \n");
-        #f.write("\t "+ str(diagnostics) +"\n");
-        #f.write("\t (what is this?)\n");
-        f.write("\t Thresholds: \n");
-        f.write("\t "+ str(thresholds) +"\n");
-        f.write("\t (what is this?)\n");
-        f.write("\t "+ str(str(description).replace('\n','<br>')) +"\n");
-        f.write("\t\n");
+        f.write(
+            '\t'
+            + str(snclq)
+            + " "
+            + str(subject)
+            + " -- "
+            + str(start)
+            + " to "
+            + str(end)
+            + "\n"
+        )
+        f.write('\t STATUS: ' + str(status) + "\n")
+        f.write('\t Thresholds: \n')
+        f.write('\t ' + str(thresholds) + "\n")
+        f.write('\t (what is this?)\n')
+        f.write("\t " + str(str(description).replace("\n", "<br>")) + "\n")
+        f.write("\t\n")
-    f.write("\t The links below take you to the metrics and other data quality tools used to identify the data issues in this report.\n");
-    f.write("\t<ul>\n\n");
-    f.write("\t MUSTANG measurement service metrics:\n");
-    f.write("\t<td>%s</td>\n" % metric);
-    if (ii % nCol == 0):
-        f.write("\t</tr><tr>\n");
-    f.write("\t MUSTANG noise-mode-timeseries service\n");
-    f.write("\t<ul>\n");
-    for net in network.split(','):
+    f.write(
+        '\t MUSTANG noise-mode-timeseries service\n'
+    )
+    f.write(
+        '\t<ul>\n'
+    )
+    for net in network.split(","):
         net = net.strip()
-        f.write("\t<li><a href='...%s...'>%s</a></li>\n" % (net, net));
-    f.write("\t</ul>\n");
-    f.write("\t</ul>\n");
+        f.write(
+            '\t<li><a href="...%s...">%s</a></li>\n'
+            % (net, net)
+        )
+    f.write(
+        '\t</ul>\n'
+    )
+    f.write(
+        '\t</ul>\n'
+    )
     # Loop over the thresholds dictionary to print the definitions for instrument groups that are being used.
-    f.write("\tThresholds used to identify potential data issues for this report were:\n");
-    f.write("\t<ul>\n\n");
+    f.write(
+        "\tThresholds used to identify potential data issues for this report were:\n"
+    )
+    f.write("\t<ul>\n\n")
     for thresholdName in sorted(thresholdsDict.keys()):
         f.write("