# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing `def PCA(component, ...)` header,
# docstring, and imports (pd, StandardScaler, sklearnPCA, setup_utils) are not
# visible in this chunk, and indentation was lost in extraction.
# Validate the requested component index. The check requires component >= 1,
# but the message says ">= 0" — inconsistent; confirm intended bound.
if component <= 0 or component > len(dataDict):
print("ERROR: Component specified must be >= 0 and " "<= number of arguments")
raise ValueError
# Build a float DataFrame from the argument dict and standardize each column
# (zero mean, unit variance) before fitting PCA.
df = pd.DataFrame(data=dataDict, dtype=float)
scale = StandardScaler()
scaledData = scale.fit_transform(df)
pca = sklearnPCA()
pcaComponents = pca.fit_transform(scaledData)
# `component` is 1-based from the caller; convert to a 0-based column index
# and return that principal component as a plain list.
return pcaComponents[:, component - 1].tolist()
if __name__ == "__main__":
setup_utils.deploy_model("PCA", PCA, "Returns the specified principal component")
ANOVA is a statistical hypothesis test that is used to compare
two or more group means for equality. For more information on
the function and how to use it please refer to tabpy-tools.md
"""
# NOTE(review): fragment — the `def anova(_arg1, _arg2, *_argN)` header and
# imports (stats, setup_utils) are not visible here; indentation was lost.
# Basic input validation. NOTE(review): only the FIRST element of each group
# is type-checked, so a mixed-type group slips through — confirm if intended.
cols = [_arg1, _arg2] + list(_argN)
for col in cols:
if not isinstance(col[0], (int, float)):
print("values must be numeric")
raise ValueError
# One-way ANOVA across all supplied groups; only the p-value is returned.
_, p_value = stats.f_oneway(_arg1, _arg2, *_argN)
return p_value
if __name__ == "__main__":
setup_utils.deploy_model("anova", anova, "Returns the p-value form an ANOVA test")
# NOTE(review): fragment — the `def SentimentAnalysis(_arg1, library=...)`
# header and imports (SentimentIntensityAnalyzer, TextBlob, setup_utils) are
# not visible in this chunk; indentation was lost in extraction.
# Score each input string with the selected backend; both backends produce a
# polarity-style score appended to `scores` in input order.
scores = []
if library == "nltk":
# NLTK VADER: use the "compound" score, normalized to [-1, 1].
sid = SentimentIntensityAnalyzer()
for text in _arg1:
sentimentResults = sid.polarity_scores(text)
score = sentimentResults["compound"]
scores.append(score)
elif library == "textblob":
# TextBlob polarity is also in [-1, 1].
# NOTE(review): any other `library` value falls through and returns an
# empty list silently — confirm whether that should raise instead.
for text in _arg1:
currScore = TextBlob(text)
scores.append(currScore.sentiment.polarity)
return scores
if __name__ == "__main__":
setup_utils.deploy_model(
"Sentiment Analysis",
SentimentAnalysis,
"Returns a sentiment score between -1 and 1 for " "a given string",
)
def main():
    """Deploy every model script in ./scripts to a running TabPy server.

    Installs the required packages, picks the right Python executable for
    the platform, resolves the TabPy config file (CLI arg or default),
    gathers credentials if auth is enabled, and runs each script in the
    sibling ``scripts`` directory as a subprocess so it can deploy itself.
    """
    install_dependencies(["sklearn", "pandas", "numpy", "textblob", "nltk", "scipy"])
    print("==================================================================")
    # Determine if we run python or python3
    if platform.system() == "Windows":
        py = "python"
    else:
        py = "python3"
    # Config file path: first CLI argument wins, otherwise the project default.
    if len(sys.argv) > 1:
        config_file_path = sys.argv[1]
    else:
        config_file_path = setup_utils.get_default_config_file_path()
    print(f"Using config file at {config_file_path}")
    port, auth_on, prefix = setup_utils.parse_config(config_file_path)
    # Only prompt for credentials when the server has auth enabled.
    if auth_on:
        auth_args = setup_utils.get_creds()
    else:
        auth_args = []
    directory = str(Path(__file__).resolve().parent / "scripts")
    # Deploy each model in the scripts directory.
    for filename in os.listdir(directory):
        # BUG FIX: the original ran the literal path ".../(unknown)" and never
        # used `filename`; run the actual script file instead.
        subprocess.run([py, f"{directory}/{filename}", config_file_path] + auth_args)
# Flat deploy sequence (duplicate of main()'s body): install dependencies,
# pick the interpreter, resolve the config file, then deploy every script
# in the sibling ``scripts`` directory as a subprocess.
install_dependencies(["sklearn", "pandas", "numpy", "textblob", "nltk", "scipy"])
print("==================================================================")
# Determine if we run python or python3
if platform.system() == "Windows":
    py = "python"
else:
    py = "python3"
# Config file path: first CLI argument wins, otherwise the project default.
if len(sys.argv) > 1:
    config_file_path = sys.argv[1]
else:
    config_file_path = setup_utils.get_default_config_file_path()
print(f"Using config file at {config_file_path}")
port, auth_on, prefix = setup_utils.parse_config(config_file_path)
# Only prompt for credentials when the server has auth enabled.
if auth_on:
    auth_args = setup_utils.get_creds()
else:
    auth_args = []
directory = str(Path(__file__).resolve().parent / "scripts")
# Deploy each model in the scripts directory.
for filename in os.listdir(directory):
    # BUG FIX: the original ran the literal path ".../(unknown)" and never
    # used `filename`; run the actual script file instead.
    subprocess.run([py, f"{directory}/{filename}", config_file_path] + auth_args)